Columns: repo (string, lengths 2-99), file (string, lengths 13-225), code (string, lengths 0-18.3M), file_length (int64, 0-18.3M), avg_line_length (float64, 0-1.36M), max_line_length (int64, 0-4.26M), extension_type (string, 1 class)
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast/rec_update_fwm_tanh/__init__.py
# Adaptation of the original code from
# https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
# Modifications Copyright (c) 2021 Kazuki Irie

import os
import torch
from torch.utils.cpp_extension import load

# Just in time import
# https://pytorch.org/tutorials/advanced/cpp_extens
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'rec_update_fwm_tanh.cu')

fwd_cuda = load(
    extra_cuda_cflags=['--ftemplate-depth=1024'],
    name="rec_update_fwm_tanh_forward",
    sources=[filename], verbose=True)
bwd_cuda = load(
    extra_cuda_cflags=['--ftemplate-depth=1024'],
    name="rec_update_fwm_tanh_backward",
    sources=[filename], verbose=True)

causal_dot_product_cuda = fwd_cuda.rec_update_fwm_tanh_forward
causal_dot_backward_cuda = bwd_cuda.rec_update_fwm_tanh_backward


class FastWeightRecUpdateTanh(torch.autograd.Function):
    """Compute the weighted sum of values but attending only to previous
    values."""

    dot = {
        # "cpu": causal_dot_product_cpu,
        "cuda": causal_dot_product_cuda
    }
    dot_backward = {
        # "cpu": causal_dot_backward_cpu,
        "cuda": causal_dot_backward_cuda
    }

    @staticmethod
    def forward(ctx, qx, kx, vx, bx, R_q, R_k, R_v, r_b, W, h0):
        # Shape of x: (B, len, D)
        # Shape of W_q: (n_head, D, E) where n_head * E = D (typically)
        device = qx.device
        N, H, L, E = qx.shape
        _, _, _, M = vx.shape

        assert E == M, "Current kernel only supports this case."
        assert R_q.shape == (1, H, M, M), "Reshape/unsqueeze if needed."
        assert R_k.shape == (1, H, M, M), "Reshape/unsqueeze if needed."
        assert R_v.shape == (1, H, M, M), "Reshape/unsqueeze if needed."
        assert r_b.shape == (1, H, 1, M), "Reshape/unsqueeze if needed."

        out = torch.zeros((N, H, L, M), device=device, dtype=qx.dtype)
        v_old = torch.zeros((N, H, L, M), device=device, dtype=qx.dtype)

        q_out = torch.zeros((N, H, L, E), device=device, dtype=qx.dtype)
        k_out = torch.zeros((N, H, L, E), device=device, dtype=qx.dtype)
        v_out = torch.zeros((N, H, L, M), device=device, dtype=qx.dtype)
        beta_out = torch.zeros((N, H, L, 1), device=device, dtype=qx.dtype)

        h_init = h0.detach().clone()  # for backward pass.

        FastWeightRecUpdateTanh.dot[device.type](
            qx.data, kx.data, vx.data, bx.data,
            R_q.data, R_k.data, R_v.data, r_b.data,
            q_out, k_out, v_out, beta_out,
            v_old, h0.data, W, out
        )
        ctx.save_for_backward(out, q_out, k_out, v_out, beta_out, v_old,
                              R_q, R_k, R_v, r_b, W, h_init)
        return out

    @staticmethod
    def backward(ctx, grad_out):
        # Extract the saved tensors
        (out, Q, K, V, beta, V_old,
         R_q, R_k, R_v, r_b, W, h0) = ctx.saved_tensors
        # qx, kx, vx, bx, R_q, R_k, R_v, r_b, W, h0

        # Allocate memory for the gradients
        grad_Q = torch.zeros_like(Q)
        grad_K = torch.zeros_like(K)
        grad_V = torch.zeros_like(V)
        grad_beta = torch.zeros_like(beta)
        # R_q, R_k, R_v, r_b
        grad_R_q = torch.zeros_like(R_q)
        grad_R_k = torch.zeros_like(R_k)
        grad_R_v = torch.zeros_like(R_v)
        grad_r_b = torch.zeros_like(r_b)

        out_delayed = torch.tanh(torch.cat([h0, out[:, :, :-1]], dim=2))

        # Compute the gradients
        FastWeightRecUpdateTanh.dot_backward[out.device.type](
            Q.data, K.data, V.data, beta.data, V_old.data,
            out_delayed, grad_out,
            R_q.data, R_k.data, R_v.data, r_b.data, W.data,
            grad_Q, grad_K, grad_V, grad_beta,
            grad_R_q, grad_R_k, grad_R_v, grad_r_b
        )

        return (grad_Q, grad_K, grad_V, grad_beta,
                grad_R_q, grad_R_k, grad_R_v, grad_r_b, None, None)


# Alias the autograd functions to python style snake case naming
rec_update_fwm_tanh = FastWeightRecUpdateTanh.apply


if __name__ == '__main__':
    import torch
    import torch.nn.functional as F

    torch.manual_seed(111)
    # Tests pass if the relative difference compared with
    # the corresponding torch autograd computation
    # is smaller than a threshold.
    # Ideally should be tested with double...
    rel_threshold = 1e-2
    # rel_threshold = 10

    # from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py
    def max_relative_error(a, b, eps=1e-6):
        return float(torch.abs((b - a) / (torch.abs(b) + eps)).max().item())

    print('##########################')
    print('# Test forward pass')
    print('##########################')

    bsz, n_head, slen, d_head = 3, 5, 11, 32
    v_dim = d_head
    tot_dim = d_head

    R_q0 = torch.cuda.FloatTensor(
        1, n_head, tot_dim, tot_dim, device='cuda').uniform_(-1., 1.)
    R_k0 = torch.cuda.FloatTensor(
        1, n_head, tot_dim, tot_dim, device='cuda').uniform_(-1., 1.)
    R_v0 = torch.cuda.FloatTensor(
        1, n_head, tot_dim, tot_dim, device='cuda').uniform_(-1., 1.)
    r_b0 = torch.cuda.FloatTensor(
        1, n_head, 1, tot_dim, device='cuda').uniform_(-1., 1.)

    q0 = torch.rand(bsz, n_head, slen, d_head, device='cuda')
    k0 = torch.rand(bsz, n_head, slen, d_head, device='cuda')
    v0 = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
    beta0 = torch.rand(bsz, n_head, slen, 1, device='cuda')
    h0 = torch.zeros(bsz, n_head, 1, v_dim, device='cuda')

    R_q1 = torch.zeros(1, n_head, tot_dim, tot_dim,
                       requires_grad=True, device='cuda')
    R_k1 = torch.zeros(1, n_head, tot_dim, tot_dim,
                       requires_grad=True, device='cuda')
    R_v1 = torch.zeros(1, n_head, tot_dim, tot_dim,
                       requires_grad=True, device='cuda')
    r_b1 = torch.zeros(1, n_head, 1, tot_dim,
                       requires_grad=True, device='cuda')
    R_q1.data = R_q0.data
    R_k1.data = R_k0.data
    R_v1.data = R_v0.data
    r_b1.data = r_b0.data

    q1 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    k1 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    v1 = torch.zeros(
        bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
    beta1 = torch.zeros(
        bsz, n_head, slen, 1, requires_grad=True, device='cuda')
    q1.data = q0.data
    k1.data = k0.data
    v1.data = v0.data
    beta1.data = beta0.data

    W1 = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')

    print("Forwarding custom kernel...")
    out1 = rec_update_fwm_tanh(
        q1, k1, v1, beta1, R_q1, R_k1, R_v1, r_b1, W1, h0)
    print("done.")

    # Compute it using torch
    Rq2 = torch.zeros(
        1, n_head, tot_dim, tot_dim, requires_grad=True, device='cuda')
    Rk2 = torch.zeros(
        1, n_head, tot_dim, tot_dim, requires_grad=True, device='cuda')
    Rv2 = torch.zeros(
        1, n_head, tot_dim, tot_dim, requires_grad=True, device='cuda')
    rb2 = torch.zeros(
        1, n_head, 1, tot_dim, requires_grad=True, device='cuda')
    Rq2.data = R_q0.data
    Rk2.data = R_k0.data
    Rv2.data = R_v0.data
    rb2.data = r_b0.data
    R_q2 = Rq2.squeeze()
    R_k2 = Rk2.squeeze()
    R_v2 = Rv2.squeeze()
    r_b2 = rb2.squeeze().unsqueeze(2)

    q2 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    k2 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    v2 = torch.zeros(
        bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
    beta2 = torch.zeros(
        bsz, n_head, slen, 1, requires_grad=True, device='cuda')
    q2.data = q0.data
    k2.data = k0.data
    v2.data = v0.data
    beta2.data = beta0.data

    q_2 = q2.permute(2, 0, 1, 3)
    slen, bsz, n_head, d_head = q_2.shape
    q_2 = q_2.reshape(slen, bsz * n_head, d_head)
    k_2 = k2.permute(2, 0, 1, 3)
    k_2 = k_2.reshape(slen, bsz * n_head, d_head)
    v_2 = v2.permute(2, 0, 1, 3)
    v_2 = v_2.reshape(slen, bsz * n_head, v_dim)
    beta_2 = beta2.permute(2, 0, 1, 3)
    beta_2 = beta_2.reshape(slen, bsz * n_head, 1)

    W = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')
    out = torch.zeros(n_head, bsz, v_dim, device='cuda')
    out_list = []

    print("Forwarding PyTorch code...")
    for pos in range(slen):
        out = torch.tanh(out)
        # query
        # R_q2 = R_q2.transpose(-1, -2)
        qr = torch.bmm(out, R_q2).squeeze().transpose(0, 1)
        qr = qr.reshape(bsz * n_head, d_head)
        q_t = q_2[pos] + qr
        q_t = F.softmax(q_t, dim=-1)
        # key
        # R_k2 = R_k2.transpose(-1, -2)
        kr = torch.bmm(out, R_k2).squeeze().transpose(0, 1)
        kr = kr.reshape(bsz * n_head, d_head)
        k_t = k_2[pos] + kr
        k_t = F.softmax(k_t, dim=-1)
        # value
        # R_v2 = R_v2.transpose(-1, -2)
        vr = torch.bmm(out, R_v2).squeeze().transpose(0, 1)
        vr = vr.reshape(bsz * n_head, d_head)
        v_t = v_2[pos] + vr
        # beta
        br = torch.bmm(out, r_b2).transpose(0, 1)
        br = br.reshape(bsz * n_head, 1)
        b_t = torch.sigmoid(beta_2[pos] + br)
        # retrieve currently stored value
        v_old = torch.bmm(W, k_t.unsqueeze(2)).squeeze()
        v_insert = b_t * (v_t - v_old)
        W = W + torch.bmm(v_insert.unsqueeze(2),
                          k_t.unsqueeze(1))
        out_t = torch.bmm(W, q_t.unsqueeze(2)).squeeze()
        out_t = out_t.reshape(bsz, n_head, v_dim)
        out = out_t.transpose(0, 1)
        out_list.append(out_t.clone())
    print("done.")

    out2 = torch.stack(out_list)
    out2 = out2.view(slen, bsz, n_head, v_dim)
    out1 = out1.permute(2, 0, 1, 3)

    for s in range(slen):
        for b in range(bsz):
            for h in range(n_head):
                print(f"s={s}, b={b}, h={h}")
                print(f"out: {out1[s][b][h]}")
                print(f"ref: {out2[s][b][h]}")
                assert max_relative_error(
                    out1[s][b][h], out2[s][b][h]) < rel_threshold
                print("pass!")
    print("==> Forward pass test done.")

    print('##########################')
    print('# Test Backward pass')
    print('##########################')

    # grad
    loss1 = out1.sum()
    R_q1.retain_grad()
    R_k1.retain_grad()
    R_v1.retain_grad()
    r_b1.retain_grad()
    q1.retain_grad()
    k1.retain_grad()
    v1.retain_grad()
    beta1.retain_grad()
    loss1.backward()

    loss2 = out2.sum()
    R_q2.retain_grad()
    R_k2.retain_grad()
    R_v2.retain_grad()
    r_b2.retain_grad()
    q2.retain_grad()
    k2.retain_grad()
    v2.retain_grad()
    beta2.retain_grad()
    loss2.backward()

    print('##########################')
    print('# Gradients input')
    print('##########################')
    for s in reversed(range(slen)):
        for b in range(bsz):
            for h in range(n_head):
                print(f"s={s}, b={b}, h={h}")
                print(f"grad q out: {q1.grad[b][h][s]}")
                print(f"grad q ref: {q2.grad[b][h][s]}")
                assert max_relative_error(
                    q1.grad[b][h][s], q2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad key out: {k1.grad[b][h][s]}")
                print(f"grad key ref: {k2.grad[b][h][s]}")
                assert max_relative_error(
                    k1.grad[b][h][s], k2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad values out: {v1.grad[b][h][s]}")
                print(f"grad values ref: {v2.grad[b][h][s]}")
                assert max_relative_error(
                    v1.grad[b][h][s], v2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad beta out: {beta1.grad[b][h][s]}")
                print(f"grad beta ref: {beta2.grad[b][h][s]}")
                assert max_relative_error(
                    beta1.grad[b][h][s], beta2.grad[b][h][s]) < rel_threshold
                print("pass!")

    print('##########################')
    print('# Gradients weights')
    print('##########################')
    for h in range(n_head):
        print(f"h={h} ------------------------")
        print(f"grad rb out: {r_b1.grad[0][h]}")
        print(f"grad rb ref: {rb2.grad[0][h]}")
        assert max_relative_error(
            r_b1.grad[0][h], rb2.grad[0][h]) < rel_threshold
        print("pass!")
        print(f"grad Rq out: {R_q1.grad[0][h]}")
        print(f"grad Rq ref: {Rq2.grad[0][h]}")
        assert max_relative_error(
            R_q1.grad[0][h], Rq2.grad[0][h]) < rel_threshold
        print("pass!")
        print(f"grad Rk out: {R_k1.grad[0][h]}")
        print(f"grad Rk ref: {Rk2.grad[0][h]}")
        assert max_relative_error(
            R_k1.grad[0][h], Rk2.grad[0][h]) < rel_threshold
        print("pass!")
        print(f"grad Rv out: {R_v1.grad[0][h]}")
        print(f"grad Rv ref: {Rv2.grad[0][h]}")
        assert max_relative_error(
            R_v1.grad[0][h], Rv2.grad[0][h]) < rel_threshold
        print("pass!")
    print("==> All tests pass!")
file_length: 13,684 | avg_line_length: 32.055556 | max_line_length: 113 | extension_type: py
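A minimal call sketch for the kernel above, assuming a CUDA device and the torchbeast package root on the import path; tensor sizes follow the shapes asserted in forward (E == M, recurrent matrices broadcast over the batch), and W is the fast weight memory that the kernel writes in place:

    import torch
    from torchbeast.rec_update_fwm_tanh import rec_update_fwm_tanh

    B, H, L, E = 2, 4, 16, 32  # batch, heads, length, head dim (E == M)
    dev = 'cuda'
    qx = torch.rand(B, H, L, E, device=dev)
    kx = torch.rand(B, H, L, E, device=dev)
    vx = torch.rand(B, H, L, E, device=dev)
    bx = torch.rand(B, H, L, 1, device=dev)   # beta inputs
    R_q = torch.rand(1, H, E, E, device=dev)  # recurrent query matrix
    R_k = torch.rand(1, H, E, E, device=dev)  # recurrent key matrix
    R_v = torch.rand(1, H, E, E, device=dev)  # recurrent value matrix
    r_b = torch.rand(1, H, 1, E, device=dev)  # recurrent beta vector
    W = torch.zeros(B, H, E, E, device=dev)   # fast weights, updated in place
    h0 = torch.zeros(B, H, 1, E, device=dev)  # initial recurrent state
    out = rec_update_fwm_tanh(qx, kx, vx, bx, R_q, R_k, R_v, r_b, W, h0)  # (B, H, L, E)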
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast/fast_weight/__init__.py
# Adaptation of the original code from
# https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
# Modifications Copyright (c) 2021 Kazuki Irie

import os
import torch
from torch.utils.cpp_extension import load

# Just in time import
# https://pytorch.org/tutorials/advanced/cpp_extens
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'fast_weight_cuda.cu')

mod_causal_dot_product_cuda = load(
    name="fast_weight_forward", sources=[filename], verbose=True)
mod_causal_dot_backward_cuda = load(
    name="fast_weight_backward", sources=[filename], verbose=True)

causal_dot_product_cuda = mod_causal_dot_product_cuda.fast_weight_forward
causal_dot_backward_cuda = mod_causal_dot_backward_cuda.fast_weight_backward


class DeltaFastWeight(torch.autograd.Function):
    """Fast weight using the delta update rule."""

    dot = {
        # "cpu": causal_dot_product_cpu,
        "cuda": causal_dot_product_cuda
    }
    dot_backward = {
        # "cpu": causal_dot_backward_cpu,
        "cuda": causal_dot_backward_cuda
    }

    @staticmethod
    def forward(ctx, Q, K, V, beta, W):
        assert Q.device.type != "cpu"
        assert K.device.type != "cpu"
        assert V.device.type != "cpu"
        assert beta.device.type != "cpu"
        assert W.device.type != "cpu"

        # Create the output tensor
        device = Q.device
        N, H, L, E = Q.shape
        _, _, _, M = V.shape

        out = torch.zeros((N, H, L, M), device=device, dtype=Q.dtype)
        # W = torch.zeros((N, H, E, M), device=device, dtype=Q.dtype)
        V_old = torch.zeros((N, H, L, M), device=device, dtype=Q.dtype)
        V_insert = torch.zeros((N, H, L, M), device=device, dtype=Q.dtype)

        DeltaFastWeight.dot[device.type](
            Q.data, K.data, V.data, beta.data,
            V_old, V_insert, W, out
        )
        ctx.save_for_backward(Q, K, V, beta, V_old, V_insert, W)
        return out

    @staticmethod
    def backward(ctx, grad_out):
        # Extract the saved tensors
        Q, K, V, beta, V_old, V_insert, W = ctx.saved_tensors

        # Allocate memory for the gradients
        grad_Q = torch.zeros_like(Q)
        grad_K = torch.zeros_like(K)
        grad_V = torch.zeros_like(V)
        grad_beta = torch.zeros_like(beta)

        assert Q.device.type != "cpu"

        # Compute the gradients
        DeltaFastWeight.dot_backward[Q.device.type](
            Q.data, K.data, V.data, beta.data,
            V_old.data, V_insert.data,
            grad_out, W.data,
            grad_Q, grad_K, grad_V, grad_beta
        )

        return grad_Q, grad_K, grad_V, grad_beta, None


# Alias the autograd functions to python style snake case naming
fast_weight_delta = DeltaFastWeight.apply


if __name__ == '__main__':
    import torch

    torch.manual_seed(111)
    # Tests pass if the relative difference compared with
    # the corresponding torch autograd computation
    # is smaller than a threshold.
    # Ideally should be tested with double...
    rel_threshold = 1e-3

    # from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py
    def max_relative_error(a, b, eps=1e-6):
        return torch.abs((b - a) / (torch.abs(b) + eps)).max().item()

    print('##########################')
    print('# Test forward pass')
    print('##########################')

    bsz, n_head, slen, d_head = 3, 5, 11, 64
    v_dim = 64

    # (B, H, len, dim)
    q0 = torch.rand(bsz, n_head, slen, d_head, device='cuda')
    k0 = torch.rand(bsz, n_head, slen, d_head, device='cuda')
    v0 = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
    beta0 = torch.sigmoid(torch.rand(bsz, n_head, slen, 1, device='cuda'))
    q0 = q0 / q0.sum(dim=-1, keepdim=True)
    k0 = k0 / k0.sum(dim=-1, keepdim=True)

    q1 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    k1 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    v1 = torch.zeros(
        bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
    beta1 = torch.zeros(
        bsz, n_head, slen, 1, requires_grad=True, device='cuda')
    q1.data = q0.data
    k1.data = k0.data
    v1.data = v0.data
    beta1.data = beta0.data

    W1 = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')

    print("Forwarding custom kernel...")
    out1 = fast_weight_delta(q1, k1, v1, beta1, W1)
    print("done.")

    # compute using torch
    q2 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    k2 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    v2 = torch.zeros(
        bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
    beta2 = torch.zeros(
        bsz, n_head, slen, 1, requires_grad=True, device='cuda')
    q2.data = q0.data
    k2.data = k0.data
    v2.data = v0.data
    beta2.data = beta0.data

    # (len, B, H, dim)
    q_2 = q2.permute(2, 0, 1, 3)
    slen, bsz, n_head, d_head = q_2.shape
    q_2 = q_2.reshape(slen, bsz * n_head, d_head)
    k_2 = k2.permute(2, 0, 1, 3)
    k_2 = k_2.reshape(slen, bsz * n_head, d_head)
    v_2 = v2.permute(2, 0, 1, 3)
    v_2 = v_2.reshape(slen, bsz * n_head, v_dim)
    beta_2 = beta2.permute(2, 0, 1, 3)
    beta_2 = beta_2.reshape(slen, bsz * n_head, 1)

    W = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')
    out_list = []

    print("Forwarding PyTorch code...")
    for pos in range(slen):
        v_old = torch.bmm(W, k_2[pos].unsqueeze(2)).squeeze()
        v_insert = beta_2[pos] * (v_2[pos] - v_old)
        W = W + torch.bmm(v_insert.unsqueeze(2), k_2[pos].unsqueeze(1))
        out_t = torch.bmm(W, q_2[pos].unsqueeze(2)).squeeze()
        out_list.append(out_t.clone())
    print("done.")

    out2 = torch.stack(out_list)
    out2 = out2.view(slen, bsz, n_head, v_dim)
    out1 = out1.permute(2, 0, 1, 3)

    for s in range(slen):
        for b in range(bsz):
            for h in range(n_head):
                print(f"out1: {out1[s][b][h]}")
                print(f"out2: {out2[s][b][h]}")
                assert max_relative_error(
                    out1[s][b][h], out2[s][b][h]) < rel_threshold
                print("pass!")

    print('##########################')
    print('# Test Backward pass')
    print('##########################')

    # grad
    loss1 = out1.sum()
    q1.retain_grad()
    k1.retain_grad()
    v1.retain_grad()
    beta1.retain_grad()
    loss1.backward()

    loss2 = out2.sum()
    q2.retain_grad()
    k2.retain_grad()
    v2.retain_grad()
    beta2.retain_grad()
    loss2.backward()

    for s in range(slen):
        for b in reversed(range(bsz)):
            for h in range(n_head):
                print(f"s={s}, b={b}, h={h}")
                print(f"grad query1: {q1.grad[b][h][s]}")
                print(f"grad query2: {q2.grad[b][h][s]}")
                assert max_relative_error(
                    q1.grad[b][h][s], q2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad key1: {k1.grad[b][h][s]}")
                print(f"grad key2: {k2.grad[b][h][s]}")
                assert max_relative_error(
                    k1.grad[b][h][s], k2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad values1: {v1.grad[b][h][s]}")
                print(f"grad values2: {v2.grad[b][h][s]}")
                assert max_relative_error(
                    v1.grad[b][h][s], v2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad beta1: {beta1.grad[b][h][s]}")
                print(f"grad beta2: {beta2.grad[b][h][s]}")
                assert max_relative_error(
                    beta1.grad[b][h][s], beta2.grad[b][h][s]) < rel_threshold
                print("pass!")
    print("All tests pass.")
file_length: 8,278 | avg_line_length: 30.720307 | max_line_length: 113 | extension_type: py
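The delta update rule this kernel implements fits in a few lines of plain PyTorch; the sketch below condenses the file's own reference loop from the test block (shapes follow its (len, B*H, dim) layout) and is a readable restatement, not the CUDA kernel itself:

    import torch

    def delta_rule_reference(q, k, v, beta, W):
        # q, k: (len, BH, d_k); v: (len, BH, d_v); beta: (len, BH, 1)
        # W: (BH, d_v, d_k) fast weight memory
        outs = []
        for t in range(q.shape[0]):
            v_old = torch.bmm(W, k[t].unsqueeze(2)).squeeze(2)  # stored value for k_t
            v_insert = beta[t] * (v[t] - v_old)                 # gated correction
            W = W + torch.bmm(v_insert.unsqueeze(2), k[t].unsqueeze(1))
            outs.append(torch.bmm(W, q[t].unsqueeze(2)).squeeze(2))
        return torch.stack(outs)                                # (len, BH, d_v)

The CUDA kernel exists to avoid exactly this Python-level loop over sequence positions.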
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast/fast_transformers/__init__.py
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
# https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py
# Modifications Copyright (c) 2021 Kazuki Irie

import os
import torch
from torch.utils.cpp_extension import load

# Just in time import
# https://pytorch.org/tutorials/advanced/cpp_extens
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'causal_product_cuda.cu')

mod_causal_dot_product_cuda = load(
    name="causal_dot_product", sources=[filename], verbose=True)
mod_causal_dot_backward_cuda = load(
    name="causal_dot_backward", sources=[filename], verbose=True)

causal_dot_product_cuda = mod_causal_dot_product_cuda.causal_dot_product
causal_dot_backward_cuda = mod_causal_dot_backward_cuda.causal_dot_backward


class FastWeightSum(torch.autograd.Function):
    """Fast weights with the sum update rule."""

    dot = {
        # "cpu": causal_dot_product_cpu,
        "cuda": causal_dot_product_cuda
    }
    dot_backward = {
        # "cpu": causal_dot_backward_cpu,
        "cuda": causal_dot_backward_cuda
    }

    @staticmethod
    def forward(ctx, Q, K, V, W):
        # Create the output tensor
        device = Q.device
        N, H, L, E = Q.shape
        _, _, _, M = V.shape

        product = torch.zeros((N, H, L, M), device=device, dtype=Q.dtype)

        # Actually perform the dot product
        FastWeightSum.dot[device.type](
            Q.data, K.data, V.data, W, product
        )
        ctx.save_for_backward(Q, K, V)
        return product

    @staticmethod
    def backward(ctx, grad_out):
        # Extract the saved tensors
        Q, K, V = ctx.saved_tensors

        # Allocate memory for the gradients
        grad_Q = torch.zeros_like(Q)
        grad_K = torch.zeros_like(K)
        grad_V = torch.zeros_like(V)

        # Compute the gradients
        FastWeightSum.dot_backward[Q.device.type](
            Q.data, K.data, V.data, grad_out,
            grad_Q, grad_K, grad_V,
        )

        return grad_Q, grad_K, grad_V, None


# Alias the autograd functions to python style snake case naming
fast_weight_sum = FastWeightSum.apply


if __name__ == '__main__':
    import torch

    # Tests pass if the relative difference compared with
    # the corresponding torch autograd computation
    # is smaller than a threshold.
    # Ideally should be tested with double...
    rel_threshold = 1e-3

    # from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py
    def max_relative_error(a, b, eps=1e-6):
        return torch.abs((a - b) / (torch.abs(a) + eps)).max().item()

    print('##########################')
    print('# Test forward pass')
    print('##########################')

    bsz, n_head, slen, d_head = 3, 5, 7, 11
    v_dim = 4

    # (B, H, len, dim)
    q0 = torch.rand(3, 5, 7, 11).to(0)
    k0 = torch.rand(3, 5, 7, 11).to(0)
    v0 = torch.rand(3, 5, 7, 4).to(0)
    q0 = q0 / q0.sum(dim=-1, keepdim=True)
    k0 = k0 / k0.sum(dim=-1, keepdim=True)

    q1 = torch.zeros(3, 5, 7, 11, requires_grad=True).to(0)
    k1 = torch.zeros(3, 5, 7, 11, requires_grad=True).to(0)
    v1 = torch.zeros(3, 5, 7, v_dim, requires_grad=True).to(0)
    q1.data = q0.data
    k1.data = k0.data
    v1.data = v0.data

    W1 = torch.zeros(3, 5, 11, v_dim).to(0)
    out1 = fast_weight_sum(q1, k1, v1, W1)

    # compute using torch
    q2 = torch.zeros(3, 5, 7, 11, requires_grad=True).to(0)
    k2 = torch.zeros(3, 5, 7, 11, requires_grad=True).to(0)
    v2 = torch.zeros(3, 5, 7, v_dim, requires_grad=True).to(0)
    q2.data = q0.data
    k2.data = k0.data
    v2.data = v0.data

    # (len, B, H, dim)
    q_2 = q2.permute(2, 0, 1, 3)
    slen, bsz, n_head, d_head = q_2.shape
    q_2 = q_2.reshape(slen, bsz * n_head, d_head)
    k_2 = k2.permute(2, 0, 1, 3)
    k_2 = k_2.reshape(slen, bsz * n_head, d_head)
    v_2 = v2.permute(2, 0, 1, 3)
    v_2 = v_2.reshape(slen, bsz * n_head, v_dim)

    W = torch.zeros(3 * 5, v_dim, 11).to(0)
    out_list = []

    for pos in range(slen):
        W = W + torch.bmm(v_2[pos].unsqueeze(2), k_2[pos].unsqueeze(1))
        out_t = torch.bmm(W, q_2[pos].unsqueeze(2)).squeeze()
        out_list.append(out_t.clone())

    out2 = torch.stack(out_list)
    out2 = out2.view(slen, bsz, n_head, v_dim)
    out1 = out1.permute(2, 0, 1, 3)

    for s in range(slen):
        for b in range(bsz):
            for h in range(n_head):
                print(f"out1: {out1[s][b][h]}")
                print(f"out2: {out2[s][b][h]}")
                assert max_relative_error(
                    out1[s][b][h], out2[s][b][h]) < rel_threshold
                print("pass!")

    print('##########################')
    print('# Test Backward pass')
    print('##########################')

    # grad
    loss1 = out1.sum()
    q1.retain_grad()
    k1.retain_grad()
    v1.retain_grad()
    loss1.backward()

    loss2 = out2.sum()
    q2.retain_grad()
    k2.retain_grad()
    v2.retain_grad()
    loss2.backward()

    for s in range(slen):
        for b in range(bsz):
            for h in range(n_head):
                print(f"s={s}, b={b}, h={h}")
                print(f"grad query1: {q1.grad[b][h][s]}")
                print(f"grad query2: {q2.grad[b][h][s]}")
                assert max_relative_error(
                    q1.grad[b][h][s], q2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad key1: {k1.grad[b][h][s]}")
                print(f"grad key2: {k2.grad[b][h][s]}")
                assert max_relative_error(
                    k1.grad[b][h][s], k2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad values1: {v1.grad[b][h][s]}")
                print(f"grad values2: {v2.grad[b][h][s]}")
                assert max_relative_error(
                    v1.grad[b][h][s], v2.grad[b][h][s]) < rel_threshold
                print("pass!")
    print("All tests pass.")
file_length: 6,218 | avg_line_length: 28.473934 | max_line_length: 113 | extension_type: py
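The sum update rule computes W_t = sum_{i<=t} v_i k_i^T and reads out W_t q_t, so for small problems the reference can also be written without any loop. A sketch of that equivalence (it materializes one outer product per step, which is exactly the memory cost the CUDA kernel avoids):

    import torch

    def sum_rule_reference(q, k, v):
        # q, k: (len, BH, d_k); v: (len, BH, d_v)
        kv = torch.einsum('tbd,tbm->tbmd', k, v)    # v_t (x) k_t per step
        W = kv.cumsum(dim=0)                        # causal prefix sum: W_t
        return torch.einsum('tbmd,tbd->tbm', W, q)  # out_t = W_t q_t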
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast/fast_weight_rnn_v2/__init__.py
# Adaptation of the original code from
# https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
#
# Modifications Copyright (c) 2021 Kazuki Irie

import os
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Just in time import
# https://pytorch.org/tutorials/advanced/cpp_extens
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'fast_rnn_cuda.cu')

mod_causal_dot_product_cuda = load(
    name="fast_rnn_v2_forward", sources=[filename], verbose=True)
mod_causal_dot_backward_cuda = load(
    name="fast_rnn_v2_backward", sources=[filename], verbose=True)

causal_dot_product_cuda = mod_causal_dot_product_cuda.fast_rnn_v2_forward
causal_dot_backward_cuda = mod_causal_dot_backward_cuda.fast_rnn_v2_backward


class FastRNNv2(torch.autograd.Function):
    """Fast RNN with the FWM update rule."""

    dot = {
        # "cpu": causal_dot_product_cpu,
        "cuda": causal_dot_product_cuda
    }
    dot_backward = {
        # "cpu": causal_dot_backward_cpu,
        "cuda": causal_dot_backward_cuda
    }

    @staticmethod
    def forward(ctx, Z, K, V, W, beta, h0):
        # Computations:
        # fast weights with the delta update rule:
        #   R_t = R_t-1 + beta_t * (v_t - R_t-1 k_t) (x) k_t
        # output: h_t = R_t * softmax(h_t-1) + z_t
        # (matches the PyTorch reference loop in the test block below)
        # z_t is the output of a feed-forward fast weight layer.
        # h0 is the initial RNN state.
        # E = M.
        # Create the output tensor
        device = Z.device
        N, H, L, _ = Z.shape
        _, _, _, M = V.shape
        assert K.shape == (N, H, L, M)
        assert V.shape == (N, H, L, M)
        assert h0.shape == (N, H, 1, M)
        assert W.shape == (N, H, M, M)

        rnn_out = torch.zeros((N, H, L, M), device=device, dtype=Z.dtype)
        rnn_out_nmz = torch.zeros((N, H, L, M), device=device, dtype=Z.dtype)
        h_init = h0.detach().clone()
        # W = torch.zeros((N, H, E, M), device=device, dtype=Z.dtype)
        # h0 = torch.zeros((N, H, M), device=device, dtype=Z.dtype)
        V_old = torch.zeros((N, H, L, M), device=device, dtype=Z.dtype)

        # Actually perform the dot product
        FastRNNv2.dot[device.type](
            Z.data, K.data, V.data, beta.data,
            V_old, h0.data, W,
            rnn_out, rnn_out_nmz
        )
        ctx.save_for_backward(rnn_out, rnn_out_nmz, Z, K, V, beta,
                              V_old, W, h_init)
        return rnn_out

    @staticmethod
    def backward(ctx, grad_out):
        # Extract the saved tensors
        rnn_out, rnn_out_nmz, Z, K, V, beta, V_old, W, h0 = ctx.saved_tensors

        # Allocate memory for the gradients
        grad_Z = torch.zeros_like(Z)
        grad_K = torch.zeros_like(K)
        grad_V = torch.zeros_like(V)
        grad_beta = torch.zeros_like(beta)

        # Prepare delayed RNN outputs
        # shape of rnn_out: N, H, L, M; dim2 is the time dim.
        # shape of h0: N, H, 1, M
        # rnn_out_delayed = torch.cat([h0, rnn_out[:, :, :-1]], dim=2)

        # Compute the gradients
        FastRNNv2.dot_backward[Z.device.type](
            K.data, V.data, beta.data, V_old.data,
            grad_out,
            rnn_out,
            rnn_out_nmz,  # normalized and delayed
            W.data,
            grad_Z, grad_K, grad_V, grad_beta,
        )

        return grad_Z, grad_K, grad_V, None, grad_beta, None


# Alias the autograd functions to python style snake case naming
fast_rnn_v2 = FastRNNv2.apply


if __name__ == '__main__':
    import torch

    torch.manual_seed(111)
    # Tests pass if the relative difference compared with
    # the corresponding torch autograd computation
    # is smaller than a threshold.
    # Ideally should be tested with double...
    rel_threshold = 1e-3

    # from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py
    def max_relative_error(a, b, eps=1e-6):
        return float(torch.abs((b - a) / (torch.abs(b) + eps)).max().item())

    print('##########################')
    print('# Test forward pass')
    print('##########################')

    bsz, n_head, slen, d_head = 3, 5, 11, 64
    v_dim = d_head

    # (B, H, len, dim)
    k0 = torch.rand(bsz, n_head, slen, d_head, device='cuda')
    v0 = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
    beta0 = torch.sigmoid(torch.rand(bsz, n_head, slen, 1, device='cuda'))
    h0 = torch.zeros(bsz, n_head, 1, v_dim, device='cuda')
    z0 = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
    # k0 = k0 / k0.sum(dim=-1, keepdim=True)
    k0 = F.softmax(k0, dim=-1)

    k1 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    v1 = torch.zeros(
        bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
    z1 = torch.zeros(
        bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
    beta1 = torch.zeros(
        bsz, n_head, slen, 1, requires_grad=True, device='cuda')
    # q1.data = q0.data
    k1.data = k0.data
    v1.data = v0.data
    beta1.data = beta0.data
    z1.data = z0.data

    W1 = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')
    # h0 = torch.zeros(n_head, d_head, v_dim, device='cuda')

    print("Forwarding custom kernel...")
    out1 = fast_rnn_v2(z1, k1, v1, W1, beta1, h0)
    print("done.")

    # compute using torch
    z2 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    k2 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    v2 = torch.zeros(
        bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
    beta2 = torch.zeros(
        bsz, n_head, slen, 1, requires_grad=True, device='cuda')
    z2.data = z0.data
    k2.data = k0.data
    v2.data = v0.data
    beta2.data = beta0.data

    # (len, B, H, dim)
    z_2 = z2.permute(2, 0, 1, 3)
    slen, bsz, n_head, d_head = z_2.shape
    z_2 = z_2.reshape(slen, bsz * n_head, d_head)
    k_2 = k2.permute(2, 0, 1, 3)
    k_2 = k_2.reshape(slen, bsz * n_head, d_head)
    v_2 = v2.permute(2, 0, 1, 3)
    v_2 = v_2.reshape(slen, bsz * n_head, v_dim)
    beta_2 = beta2.permute(2, 0, 1, 3)
    beta_2 = beta_2.reshape(slen, bsz * n_head, 1)

    W = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')
    h = torch.zeros(bsz * n_head, d_head, device='cuda')
    out_list = []

    print("Forwarding PyTorch code...")
    for pos in range(slen):
        v_old = torch.bmm(W, k_2[pos].unsqueeze(2)).squeeze()
        v_insert = beta_2[pos] * (v_2[pos] - v_old)
        W = W + torch.bmm(v_insert.unsqueeze(2), k_2[pos].unsqueeze(1))
        rec_part = torch.bmm(W, F.softmax(h, dim=-1).unsqueeze(2)).squeeze()
        # h = torch.sigmoid(rec_part + z_2[pos])
        h = rec_part + z_2[pos]
        out_list.append(h.clone())
    print("done.")

    out2 = torch.stack(out_list)
    out2 = out2.view(slen, bsz, n_head, v_dim)
    out1 = out1.permute(2, 0, 1, 3)

    for s in range(slen):
        for b in range(bsz):
            for h in range(n_head):
                print(f"forward: s={s} b={b} h={h}")
                print(f"out: {out1[s][b][h]}")
                print(f"ref: {out2[s][b][h]}")
                assert max_relative_error(
                    out1[s][b][h], out2[s][b][h]) < rel_threshold
                print("pass!")

    print('##########################')
    print('# Test Backward pass')
    print('##########################')

    # grad
    loss1 = out1.sum()
    z1.retain_grad()
    k1.retain_grad()
    v1.retain_grad()
    beta1.retain_grad()
    loss1.backward()

    loss2 = out2.sum()
    z2.retain_grad()
    k2.retain_grad()
    v2.retain_grad()
    beta2.retain_grad()
    loss2.backward()

    for s in reversed(range(slen)):
        for b in reversed(range(bsz)):
            for h in range(n_head):
                print(f"backward: s={s}, b={b}, h={h}")
                print(f"grad input out: {z1.grad[b][h][s]}")
                print(f"grad input ref: {z2.grad[b][h][s]}")
                assert max_relative_error(
                    z1.grad[b][h][s], z2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad values out: {v1.grad[b][h][s]}")
                print(f"grad values ref: {v2.grad[b][h][s]}")
                assert max_relative_error(
                    v1.grad[b][h][s], v2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad beta out: {beta1.grad[b][h][s]}")
                print(f"grad beta ref: {beta2.grad[b][h][s]}")
                assert max_relative_error(
                    beta1.grad[b][h][s], beta2.grad[b][h][s]) < rel_threshold
                print("pass!")
                print(f"grad key out: {k1.grad[b][h][s]}")
                print(f"grad key ref: {k2.grad[b][h][s]}")
                assert max_relative_error(
                    k1.grad[b][h][s], k2.grad[b][h][s]) < rel_threshold
                print("pass!")
    print("All tests pass.")
file_length: 9,336 | avg_line_length: 31.533101 | max_line_length: 113 | extension_type: py
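A minimal call sketch for fast_rnn_v2, assuming a CUDA device and the torchbeast package root on the import path; shapes follow the asserts in forward (E == M) and the argument order is (Z, K, V, W, beta, h0):

    import torch
    import torch.nn.functional as F
    from torchbeast.fast_weight_rnn_v2 import fast_rnn_v2

    B, H, L, M = 2, 4, 16, 64
    dev = 'cuda'
    z = torch.rand(B, H, L, M, device=dev)     # feed-forward fast weight layer output
    k = F.softmax(torch.rand(B, H, L, M, device=dev), dim=-1)
    v = torch.rand(B, H, L, M, device=dev)
    beta = torch.sigmoid(torch.rand(B, H, L, 1, device=dev))
    W = torch.zeros(B, H, M, M, device=dev)    # fast weight matrix R_t
    h0 = torch.zeros(B, H, 1, M, device=dev)   # initial RNN state
    out = fast_rnn_v2(z, k, v, W, beta, h0)    # (B, H, L, M)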
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast/self_ref_v1/__init__.py
# Adaptation of the original code from
# https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
# Modifications Copyright (c) 2021 Kazuki Irie

import sys
import os
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Just in time import
# https://pytorch.org/tutorials/advanced/cpp_extens
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'self_ref_v1.cu')

fwd_cuda = load(
    extra_cuda_cflags=['--ftemplate-depth=1024'],
    name="self_ref_forward_v1",
    sources=[filename], verbose=True)
bwd_cuda = load(
    extra_cuda_cflags=['--ftemplate-depth=1024'],
    name="self_ref_backward_v1",
    sources=[filename], verbose=True)

self_ref_fwd_cuda = fwd_cuda.self_ref_forward_v1
self_ref_bwd_cuda = bwd_cuda.self_ref_backward_v1


class SelfRefv1(torch.autograd.Function):

    dot = {
        # "cpu": causal_dot_product_cpu,
        "cuda": self_ref_fwd_cuda
    }
    dot_backward = {
        # "cpu": causal_dot_backward_cpu,
        "cuda": self_ref_bwd_cuda
    }

    @staticmethod
    def forward(ctx, x, W_y, W_q, W_k, w_b):
        # Shape of x: (B, len, D)
        # Shape of W_q: (n_head, D, E) where n_head * E = D (typically)
        device = x.device
        N, H, L, E = x.shape
        assert W_y.shape == (N, H, E, 3 * E + 1), "Reshape if needed."
        assert W_q.shape == (N, H, E, E), "Reshape/unsqueeze if needed."
        assert W_k.shape == (N, H, E, E), "Reshape/unsqueeze if needed."
        assert w_b.shape == (N, H, E, 4), "Reshape/unsqueeze if needed."

        out = torch.zeros((N, H, L, 3 * E + 1), device=device, dtype=x.dtype)  # y
        q_main = torch.zeros((N, H, L, E), device=device, dtype=x.dtype)
        k_main = torch.zeros((N, H, L, E), device=device, dtype=x.dtype)
        beta_main = torch.zeros((N, H, L, 4), device=device, dtype=x.dtype)

        y_diff = torch.zeros(
            (N, H, L, 3 * E + 1), device=device, dtype=x.dtype)
        q_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype)
        k_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype)
        beta_diff = torch.zeros((N, H, L, 4), device=device, dtype=x.dtype)

        # x = F.softmax(x, dim=-1)  # apply already softmax to input
        SelfRefv1.dot[device.type](
            x, W_y, W_q, W_k, w_b,
            q_main, k_main, beta_main,
            y_diff, q_diff, k_diff, beta_diff,
            out
        )
        ctx.save_for_backward(
            x, q_main, k_main, beta_main,
            y_diff, q_diff, k_diff, beta_diff,
            W_y, W_q, W_k, w_b)
        return out

    @staticmethod
    def backward(ctx, grad_out):
        # Extract the saved tensors
        (x, q, k, beta,
         y_diff, q_diff, k_diff, beta_diff,
         W_y, W_q, W_k, w_b) = ctx.saved_tensors

        # Allocate memory for the gradients
        grad_x = torch.zeros_like(x)
        grad_W_y = torch.zeros_like(W_y)
        grad_W_q = torch.zeros_like(W_q)
        grad_W_k = torch.zeros_like(W_k)
        grad_w_b = torch.zeros_like(w_b)

        # out_delayed = torch.tanh(torch.cat([h0, out[:, :, :-1]], dim=2))

        # Compute the gradients
        SelfRefv1.dot_backward[x.device.type](
            x, q, k, beta,
            y_diff, q_diff, k_diff, beta_diff,
            grad_out,
            W_y, W_q, W_k, w_b,
            grad_x, grad_W_y, grad_W_q, grad_W_k, grad_w_b
        )

        return grad_x, grad_W_y, grad_W_q, grad_W_k, grad_w_b


# Alias the autograd functions to python style snake case naming
self_ref_v1 = SelfRefv1.apply


if __name__ == '__main__':
    import torch
    import torch.nn.functional as F

    torch.manual_seed(111)
    # Tests pass if the relative difference compared with
    # the corresponding torch autograd computation
    # is smaller than a threshold.
    # Ideally should be tested with double...
    rel_threshold = 1e-2

    # from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py
    def max_relative_error(a, b, eps=1e-6):
        return float(torch.abs((b - a) / (torch.abs(b) + eps)).max().item())

    print('##########################')
    print('# Test forward pass')
    print('##########################')

    # bsz, n_head, slen, d_head = 3, 5, 11, 8
    bsz, n_head, slen, d_head = 3, 5, 11, 8
    v_dim = d_head * 3 + 4
    print(f"value dim: {v_dim}")

    # W0 = torch.cuda.FloatTensor(
    #     1, n_head, d_head, v_dim, device='cuda').uniform_(-1., 1.)
    # W0 = W0.repeat(bsz, 1, 1, 1)
    Wy0 = torch.cuda.FloatTensor(
        bsz, n_head, d_head, 3 * d_head + 1,
        device='cuda').uniform_(-1., 1.)
    Wq0 = torch.cuda.FloatTensor(
        bsz, n_head, d_head, d_head, device='cuda').uniform_(-1., 1.)
    Wk0 = torch.cuda.FloatTensor(
        bsz, n_head, d_head, d_head, device='cuda').uniform_(-1., 1.)
    wb0 = torch.cuda.FloatTensor(
        bsz, n_head, d_head, 4, device='cuda').uniform_(-1., 1.)

    x0 = torch.rand(bsz, n_head, slen, d_head, device='cuda')

    W_y1 = torch.zeros(
        bsz, n_head, d_head, 3 * d_head + 1,
        requires_grad=True, device='cuda')
    W_q1 = torch.zeros(
        bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
    W_k1 = torch.zeros(
        bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
    w_b1 = torch.zeros(
        bsz, n_head, d_head, 4, requires_grad=True, device='cuda')
    W_y1 = Wy0.detach().clone().requires_grad_(True)
    W_q1 = Wq0.detach().clone().requires_grad_(True)
    W_k1 = Wk0.detach().clone().requires_grad_(True)
    w_b1 = wb0.detach().clone().requires_grad_(True)

    W_y2_slow = torch.zeros(
        bsz, n_head, d_head, 3 * d_head + 1,
        requires_grad=True, device='cuda')
    W_q2_slow = torch.zeros(
        bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
    W_k2_slow = torch.zeros(
        bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
    w_b2_slow = torch.zeros(
        bsz, n_head, d_head, 4, requires_grad=True, device='cuda')
    W_y2_slow = Wy0.detach().clone().requires_grad_(True)
    W_q2_slow = Wq0.detach().clone().requires_grad_(True)
    W_k2_slow = Wk0.detach().clone().requires_grad_(True)
    w_b2_slow = wb0.detach().clone().requires_grad_(True)

    x1 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    x1 = x0.detach().clone().requires_grad_(True)

    print("Forwarding custom kernel...")
    out1 = self_ref_v1(x1, W_y1, W_q1, W_k1, w_b1)
    print("done.")

    x2 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    # apply softmax here
    x2 = F.softmax(x0.detach(), dim=-1).clone().requires_grad_(True)
    # x2 = x0.detach().clone().requires_grad_(True)
    x2 = x2.permute(2, 0, 1, 3)  # (len, B, H, dim)
    x2 = x2.reshape(slen, bsz * n_head, d_head)  # (len, B*H, dim)

    # W2 = W2.reshape(bsz * n_head, d_head, v_dim)
    W_y2 = W_y2_slow.view(bsz * n_head, d_head, 3 * d_head + 1)
    W_q2 = W_q2_slow.view(bsz * n_head, d_head, d_head)
    W_k2 = W_k2_slow.view(bsz * n_head, d_head, d_head)
    w_b2 = w_b2_slow.view(bsz * n_head, d_head, 4)

    out_list = []
    # out = x2[0]  # (B * H, D)

    print("Forwarding PyTorch code...")
    for pos in range(slen):
        out = x2[pos].unsqueeze(1)
        # out = F.softmax(x2[pos], dim=-1).unsqueeze(1)
        # out: (B * H, 1, D)
        # W2: (B * H, D, v_dim)
        # bmm (b,n,M) x (b,M,p) -> (b,n,p)
        # ykqb before squeeze: (B * H, 1, v_dim)
        y = torch.bmm(out, W_y2).squeeze(1)
        out_t = y.reshape(bsz, n_head, 3 * d_head + 1)
        out_list.append(out_t.clone())

        if pos < slen - 1:
            # no need to update weights at the last time step
            q = torch.bmm(out, W_q2).squeeze(1)
            k = torch.bmm(out, W_k2).squeeze(1)
            beta = torch.bmm(out, w_b2).squeeze(1)
            beta = torch.sigmoid(beta)
            beta_y, beta_q, beta_k, beta_beta = torch.split(
                beta, [1, 1, 1, 1], dim=-1)
            k = F.softmax(k, dim=-1)
            q = F.softmax(q, dim=-1)
            # retrieve currently stored value
            y_old = torch.bmm(k.unsqueeze(1), W_y2).squeeze(1)
            q_old = torch.bmm(k.unsqueeze(1), W_q2).squeeze(1)
            k_old = torch.bmm(k.unsqueeze(1), W_k2).squeeze(1)
            beta_old = torch.bmm(k.unsqueeze(1), w_b2).squeeze(1)
            y_new = torch.bmm(q.unsqueeze(1), W_y2).squeeze(1)
            q_new = torch.bmm(q.unsqueeze(1), W_q2).squeeze(1)
            k_new = torch.bmm(q.unsqueeze(1), W_k2).squeeze(1)
            beta_new = torch.bmm(q.unsqueeze(1), w_b2).squeeze(1)
            # update all weights
            y_insert = beta_y * (y_new - y_old)
            q_insert = beta_q * (q_new - q_old)
            k_insert = beta_k * (k_new - k_old)
            beta_insert = beta_beta * (beta_new - beta_old)
            W_y2 = W_y2.clone() + torch.bmm(
                k.unsqueeze(2), y_insert.unsqueeze(1))
            W_q2 = W_q2.clone() + torch.bmm(
                k.unsqueeze(2), q_insert.unsqueeze(1))
            W_k2 = W_k2.clone() + torch.bmm(
                k.unsqueeze(2), k_insert.unsqueeze(1))
            w_b2 = w_b2.clone() + torch.bmm(
                k.unsqueeze(2), beta_insert.unsqueeze(1))
    print("done.")

    out2 = torch.stack(out_list)
    out2 = out2.view(slen, bsz, n_head, 3 * d_head + 1)
    out1 = out1.permute(2, 0, 1, 3)

    for s in range(slen):
        for b in range(bsz):
            for h in range(n_head):
                print(f"s={s}, b={b}, h={h}")
                print(f"out: {out1[s][b][h]}")
                print(f"ref: {out2[s][b][h]}")
                assert max_relative_error(
                    out1[s][b][h], out2[s][b][h]) < rel_threshold
                print("pass!")
    print("==> Forward pass test done.")
    # sys.exit(0)

    print('##########################')
    print('# Test Backward pass')
    print('##########################')

    # grad
    loss1 = out1.sum()
    W_y1.retain_grad()
    W_q1.retain_grad()
    W_k1.retain_grad()
    w_b1.retain_grad()
    x1.retain_grad()
    loss1.backward()

    loss2 = out2.sum()
    W_y2_slow.retain_grad()
    W_q2_slow.retain_grad()
    W_k2_slow.retain_grad()
    w_b2_slow.retain_grad()
    x2.retain_grad()
    loss2.backward()

    print('##########################')
    print('# Gradients input')
    print('##########################')
    # x2_grad = x2.grad.reshape(slen, bsz, n_head, d_head)
    # x2_grad = x2_grad.permute(1, 2, 0, 3)
    # for s in reversed(range(slen)):
    #     for b in range(bsz):
    #         for h in range(n_head):
    #             print(f"s={s}, b={b}, h={h}")
    #             print(f"grad x out: {x1.grad[b][h][s]}")
    #             print(f"grad x ref: {x2_grad[b][h][s]}")
    #             assert max_relative_error(
    #                 x1.grad[b][h][s], x2_grad[b][h][s]) < rel_threshold
    #             print("pass!")

    print('##########################')
    print('# Gradients weights')
    print('##########################')
    W_y2_grad = W_y2_slow.grad.reshape(bsz, n_head, d_head, 3 * d_head + 1)
    W_q2_grad = W_q2_slow.grad.reshape(bsz, n_head, d_head, d_head)
    W_k2_grad = W_k2_slow.grad.reshape(bsz, n_head, d_head, d_head)
    w_b2_grad = w_b2_slow.grad.reshape(bsz, n_head, d_head, 4)

    print('##########################')
    print('# Gradient Wy')
    print('##########################')
    for b in range(bsz):
        for h in range(n_head):
            for d in range(d_head):
                print(f"b={b} h={h} d={d} ------------------------")
                print(f"grad Wy out: {W_y1.grad[b][h][d]}")
                print(f"grad Wy ref: {W_y2_grad[b][h][d]}")
                assert max_relative_error(
                    W_y1.grad[b][h][d], W_y2_grad[b][h][d]) < rel_threshold
                print("pass!")

    print('##########################')
    print('# Gradient Wq')
    print('##########################')
    for b in range(bsz):
        for h in range(n_head):
            for d in range(d_head):
                print(f"b={b} h={h} d={d} ------------------------")
                print(f"grad Wq out: {W_q1.grad[b][h][d]}")
                print(f"grad Wq ref: {W_q2_grad[b][h][d]}")
                assert max_relative_error(
                    W_q1.grad[b][h][d], W_q2_grad[b][h][d]) < rel_threshold
                print("pass!")

    print('##########################')
    print('# Gradient Wk')
    print('##########################')
    for b in range(bsz):
        for h in range(n_head):
            for d in range(d_head):
                print(f"b={b} h={h} d={d} ------------------------")
                print(f"grad Wk out: {W_k1.grad[b][h][d]}")
                print(f"grad Wk ref: {W_k2_grad[b][h][d]}")
                assert max_relative_error(
                    W_k1.grad[b][h][d], W_k2_grad[b][h][d]) < rel_threshold
                print("pass!")

    print('##########################')
    print('# Gradient wb')
    print('##########################')
    for b in range(bsz):
        for h in range(n_head):
            for d in range(d_head):
                print(f"b={b} h={h} d={d} ------------------------")
                print(f"grad wb out: {w_b1.grad[b][h][d]}")
                print(f"grad wb ref: {w_b2_grad[b][h][d]}")
                assert max_relative_error(
                    w_b1.grad[b][h][d], w_b2_grad[b][h][d]) < rel_threshold
                print("pass!")
    print("==> All tests pass!")
file_length: 14,018 | avg_line_length: 34.223618 | max_line_length: 113 | extension_type: py
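A minimal call sketch for self_ref_v1, again assuming CUDA and the torchbeast package on the import path. The per-step output is (3 * E + 1)-dimensional, matching the shape asserted for W_y, and w_b carries four learning rates, one per self-modified weight matrix:

    import torch
    from torchbeast.self_ref_v1 import self_ref_v1

    B, H, L, E = 2, 4, 16, 8
    dev = 'cuda'
    x = torch.rand(B, H, L, E, device=dev)
    W_y = torch.rand(B, H, E, 3 * E + 1, device=dev)  # output projection
    W_q = torch.rand(B, H, E, E, device=dev)          # self-modified query matrix
    W_k = torch.rand(B, H, E, E, device=dev)          # self-modified key matrix
    w_b = torch.rand(B, H, E, 4, device=dev)          # learning rates (y, q, k, beta)
    out = self_ref_v1(x, W_y, W_q, W_k, w_b)          # (B, H, L, 3 * E + 1)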
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast_procgen/procgen_wrappers.py
# The MIT License
#
# Copyright (c) 2017 OpenAI (http://openai.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

# Taken from
# https://raw.githubusercontent.com/openai/baselines/7c520852d9cf4eaaad326a3d548efc915dc60c10/baselines/common/atari_wrappers.py
# and slightly modified.

import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2

cv2.ocl.setUseOpenCL(False)


class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def reset(self, **kwargs):
        """Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(
                1, self.noop_max + 1)  # pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until
        firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)


class OOV2NoOpEnv(gym.Wrapper):
    def __init__(self, env):
        """Map out-of-vocabulary actions to the NoOp action."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def step(self, ac):
        # Map OOV to NoOp
        if ac < self.env.action_space.n:
            return self.env.step(ac)
        else:
            return self.env.step(0)

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)


class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game
        over. Done by DeepMind for the DQN and co. since it helps value
        estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        self.was_real_done = True

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few
            # frames, so it's important to keep lives > 0, so that we only
            # reset once the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info

    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are
        episodic, and the learner need not know about any of this
        behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs


class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros(
            (2,) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip

    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if i == self._skip - 2:
                self._obs_buffer[0] = obs
            if i == self._skip - 1:
                self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame doesn't matter
        max_frame = self._obs_buffer.max(axis=0)

        return max_frame, total_reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)


class ClipRewardEnv(gym.RewardWrapper):
    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)


class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True,
                 dict_space_key=None):
        """
        Warp frames to 84x84 as done in the Nature paper and later work.
        If the environment uses dictionary observations, `dict_space_key`
        can be specified which indicates which observation should be warped.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        if self._grayscale:
            num_colors = 1
        else:
            num_colors = 3

        new_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._height, self._width, num_colors),
            dtype=np.uint8,
        )
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        assert original_space.dtype == np.uint8 and \
            len(original_space.shape) == 3

    def observation(self, obs):
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]

        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height),
            interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            frame = np.expand_dims(frame, -1)

        if self._key is None:
            obs = frame
        else:
            obs = obs.copy()
            obs[self._key] = frame
        return obs


class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.

        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        self.observation_space = spaces.Box(
            low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)),
            dtype=env.observation_space.dtype)

    def reset(self):
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))


class ScaledFloatFrame(gym.ObservationWrapper):
    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(
            low=0, high=1, shape=env.observation_space.shape,
            dtype=np.float32)

    def observation(self, observation):
        # careful! This undoes the memory optimization, use
        # with smaller replay buffers only.
        return np.array(observation).astype(np.float32) / 255.0


class LazyFrames(object):
    def __init__(self, frames):
        """This object ensures that common frames between the observations
        are only stored once. It exists purely to optimize memory usage
        which can be huge for DQN's 1M frames replay buffers.

        This object should only be converted to numpy array before being
        passed to the model. You'd not believe how complex the previous
        solution was."""
        self._frames = frames
        self._out = None

    def _force(self):
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=-1)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        out = self._force()
        if dtype is not None:
            out = out.astype(dtype)
        return out

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]

    def count(self):
        frames = self._force()
        return frames.shape[frames.ndim - 1]

    def frame(self, i):
        return self._force()[..., i]


def make_atari(env_id, max_episode_steps=None):
    env = gym.make(env_id)
    # assert 'NoFrameskip' in env.spec.id
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    assert max_episode_steps is None
    return env


def wrap_deepmind(env, clip_rewards=True):
    """Configure environment for DeepMind-style Atari."""
    # if episode_life:
    #     env = EpisodicLifeEnv(env)
    # if 'FIRE' in env.unwrapped.get_action_meanings():
    #     env = FireResetEnv(env)
    # if allow_oov_action:
    #     env = OOV2NoOpEnv(env)
    # env = WarpFrame(env)
    # if scale:
    #     env = ScaledFloatFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)  # not true clipping; bins the reward by sign.
    # if frame_stack:
    #     env = FrameStack(env, 4)
    return env


# Maybe just copy these below to polybeast_env and delete the file.
class ImageToPyTorch(gym.ObservationWrapper):
    """
    Image shape to channels x width x height
    """

    def __init__(self, env):
        super(ImageToPyTorch, self).__init__(env)
        old_shape = self.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(old_shape[-1], old_shape[0], old_shape[1]),
            dtype=np.uint8,
        )

    def observation(self, observation):
        return np.transpose(observation, axes=(2, 0, 1))


def wrap_pytorch(env):
    return ImageToPyTorch(env)
file_length: 11,994 | avg_line_length: 31.953297 | max_line_length: 130 | extension_type: py
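A small composition sketch for these wrappers, mirroring create_env in polybeast_env.py below; the env id and kwargs are Procgen's (installing procgen registers them with gym):

    import gym
    from torchbeast_procgen import procgen_wrappers

    # HWC uint8 Procgen frames -> optional reward binning -> CHW for PyTorch.
    env = procgen_wrappers.wrap_pytorch(
        procgen_wrappers.wrap_deepmind(
            gym.make("procgen:procgen-coinrun-v0", distribution_mode="hard"),
            clip_rewards=False,
        )
    )
    obs = env.reset()  # numpy array, shape (3, 64, 64) for Procgen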
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast_procgen/polybeast.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import multiprocessing as mp

import numpy as np

from torchbeast_procgen import polybeast_learner
from torchbeast_procgen import polybeast_env


def run_env(flags, actor_id):
    np.random.seed()  # Get new random seed in forked process.
    polybeast_env.main(flags)


def run_learner(flags):
    polybeast_learner.main(flags)


def main():
    flags = argparse.Namespace()
    flags, argv = polybeast_learner.parser.parse_known_args(namespace=flags)
    flags, argv = polybeast_env.parser.parse_known_args(
        args=argv, namespace=flags)
    if argv:
        # Produce an error message.
        polybeast_learner.parser.print_usage()
        print("")
        polybeast_env.parser.print_usage()
        print("Unknown args:", " ".join(argv))
        return -1

    flags.num_servers = flags.num_actors

    env_processes = []
    for actor_id in range(1):
        p = mp.Process(target=run_env, args=(flags, actor_id))
        p.start()
        env_processes.append(p)

    run_learner(flags)

    for p in env_processes:
        # p.terminate()
        p.join()


if __name__ == "__main__":
    main()
file_length: 1,714 | avg_line_length: 27.114754 | max_line_length: 83 | extension_type: py
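The launcher above merges two argparse parsers into a single namespace by chaining parse_known_args; a standalone sketch of that pattern (the parser contents here are illustrative, not the real flags):

    import argparse

    learner = argparse.ArgumentParser()
    learner.add_argument("--batch_size", type=int, default=8)
    env = argparse.ArgumentParser()
    env.add_argument("--num_servers", type=int, default=4)

    flags = argparse.Namespace()
    flags, argv = learner.parse_known_args(namespace=flags)         # learner flags
    flags, argv = env.parse_known_args(args=argv, namespace=flags)  # leftovers
    # anything still left in argv is unknown, which main() above treats as an error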
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast_procgen/polybeast_env.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import multiprocessing as mp
import threading
import time

import numpy as np

import libtorchbeast
from torchbeast_procgen import procgen_wrappers

import gym

# lexicographically sorted list
list_procgen_env_lex = [
    'bigfish', 'bossfight', 'caveflyer', 'chaser', 'climber', 'coinrun',
    'dodgeball', 'fruitbot', 'heist', 'jumper', 'leaper', 'maze',
    'miner', 'ninja', 'plunder', 'starpilot',
]

# interesting ones first, we use this list
list_procgen_env = [
    'bigfish', 'fruitbot', 'maze', 'leaper', 'plunder', 'starpilot',
    'miner', 'bossfight', 'caveflyer', 'chaser', 'climber', 'coinrun',
    'dodgeball', 'heist', 'jumper', 'ninja',
]

# env with memory mode extension
list_procgen_env_mem = [
    'dodgeball', 'heist', 'maze', 'miner', 'caveflyer', 'jumper',
]

# yapf: disable
parser = argparse.ArgumentParser(description='Remote Environment Server')

parser.add_argument("--pipes_basename", default="unix:/tmp/polybeast",
                    help="Basename for the pipes for inter-process communication. "
                    "Has to be of the type unix:/some/path.")
parser.add_argument('--num_servers', default=4, type=int, metavar='N',
                    help='Number of environment servers.')
parser.add_argument('--env', type=str, default='PongNoFrameskip-v4',
                    help='Gym environment.')
parser.add_argument('--multi_env', default=1, type=int, metavar='N',
                    help='number of env to jointly train on.')
parser.add_argument('--allow_oov', action="store_true",
                    help='Allow action space larger than the env specific one.'
                    ' All out-of-vocab action will be mapped to NoOp.')
parser.add_argument('--num_levels', default=0, type=int, metavar='N',
                    help='Procgen num_levels.')
parser.add_argument('--start_level', default=0, type=int, metavar='N',
                    help='Procgen start_level.')
parser.add_argument('--distribution_mode', type=str, default='hard',
                    choices=[
                        'easy', 'hard', 'extreme', 'memory', 'exploration'],
                    help='distribution mode.')
# yapf: enable


class Env:
    def reset(self):
        print("reset called")
        return np.ones((4, 84, 84), dtype=np.uint8)

    def step(self, action):
        frame = np.zeros((4, 84, 84), dtype=np.uint8)
        return frame, 0.0, False, {}  # First three mandatory.


def create_env(env_name, num_levels=0, start_level=0,
               distribution_mode="hard", rand_seed=None,
               lock=threading.Lock()):
    with lock:  # Atari isn't threadsafe at construction time.
        return procgen_wrappers.wrap_pytorch(
            procgen_wrappers.wrap_deepmind(
                gym.make(env_name,
                         num_levels=num_levels,
                         start_level=start_level,
                         distribution_mode=distribution_mode,
                         rand_seed=rand_seed),
                clip_rewards=False,
            )
        )


def serve(env_name, num_levels, start_level, distribution_mode,
          server_address):
    init = Env if env_name == "Mock" else lambda: create_env(
        env_name, num_levels, start_level, distribution_mode)
    server = libtorchbeast.Server(init, server_address=server_address)
    server.run()


def main(flags):
    if not flags.pipes_basename.startswith("unix:"):
        raise Exception(
            "--pipes_basename has to be of the form unix:/some/path.")

    if flags.distribution_mode == 'memory':
        # for multi_env training
        list_env = list_procgen_env_mem
    else:
        list_env = list_procgen_env

    processes = []
    for i in range(flags.num_servers):
        if flags.multi_env > 1:
            env_name = list_env[i % flags.multi_env]
            env_name = f"procgen:procgen-{env_name}-v0"
        else:
            env_name = flags.env
        print(f"Server {i} on {env_name}")
        # rand_seed is left to its default.
        p = mp.Process(
            target=serve,
            args=(env_name, flags.num_levels, flags.start_level,
                  flags.distribution_mode, f"{flags.pipes_basename}.{i}"),
            daemon=True)
        p.start()
        processes.append(p)

    try:
        # We are only here to listen to the interrupt.
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        pass


if __name__ == "__main__":
    flags = parser.parse_args()
    print(f"Env: {flags.env}")
    main(flags)
5260
28.723164
83
py
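When --multi_env is greater than 1, polybeast_env.main assigns environment names to servers round-robin over the first multi_env entries of the curated list, so a few environments are cycled across all server pipes. A minimal sketch of that assignment, with illustrative sizes:

# Round-robin mapping of servers to procgen envs, as in polybeast_env.main.
list_env = ['bigfish', 'fruitbot', 'maze', 'leaper']
num_servers, multi_env = 6, 3  # illustrative values

for i in range(num_servers):
    env_name = list_env[i % multi_env]
    print(f"Server {i} -> procgen:procgen-{env_name}-v0 "
          f"on pipe unix:/tmp/polybeast.{i}")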
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast_procgen/model.py
import nest import torch from torch import nn from torch.nn import functional as F from torchbeast.layer import DeltaNetLayer from torchbeast.layer import LinearTransformerLayer from torchbeast.layer import FastFFRecUpdateTanhLayer from torchbeast.layer import FastRNNModelLayer from torchbeast.layer import DeltaDeltaNetLayer from torchbeast.layer import SRNetLayer, SMFWPNetLayer, NoCarryOverSRNetLayer, DeeperNetLayer, PseudoSRNetLayer # Baseline model from torchbeast class Net(nn.Module): def __init__(self, num_actions, conv_scale=1, use_lstm=False): super(Net, self).__init__() self.num_actions = num_actions self.use_lstm = use_lstm self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 base_num_ch = [16, 32, 32] scaled_num_ch = [c * conv_scale for c in base_num_ch] for num_ch in scaled_num_ch: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048 * conv_scale, 256) # FC output size + last reward. core_output_size = self.fc.out_features + 1 if use_lstm: self.core = nn.LSTM(core_output_size, 256, num_layers=1) core_output_size = 256 self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, batch_size=1): if not self.use_lstm: return tuple() return tuple( torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) for _ in range(2) ) def forward(self, inputs, core_state): x = inputs["frame"] T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) if self.use_lstm: core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # Reset core state to zero whenever an episode ended. # Make `done` broadcastable with (num_layers, B, hidden_size) # states: nd = nd.view(1, -1, 1) core_state = nest.map(nd.mul, core_state) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) else: core_output = core_input policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial( F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. 
action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state # Baseline model from torchbeast + Transformer FF layers class DeeperNet(nn.Module): def __init__(self, num_actions, hidden_size, num_layers, dim_ff, dropout, use_lstm=False): super(DeeperNet, self).__init__() self.num_actions = num_actions self.use_lstm = use_lstm self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. core_output_size = self.fc.out_features + 1 self.trafo_ff_block = DeeperNetLayer(core_output_size, hidden_size, num_layers, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, batch_size=1): if not self.use_lstm: return tuple() return tuple( torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) for _ in range(2) ) def forward(self, inputs, core_state): x = inputs["frame"] T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) core_output = self.trafo_ff_block(core_input) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial( F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state class DeltaNetModel(nn.Module): def __init__(self, num_actions, hidden_size=128, num_layers=2, num_head=4, dim_head=32, dim_ff=512, dropout=0.0): super(DeltaNetModel, self).__init__() self.num_actions = num_actions # output vocab size. 
self.hidden_size = hidden_size self.num_layers = num_layers self.num_head = num_head self.dim_head = dim_head self.dim_ff = dim_ff self.dropout = dropout self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append( nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. # core_output_size = self.fc.out_features + 1 self.core = DeltaNetLayer(self.fc.out_features + 1, hidden_size, num_layers, num_head, dim_head, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, batch_size=1): state_tuple = tuple(torch.zeros( 1, batch_size, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers)) return state_tuple def forward(self, inputs, core_state): x = inputs["frame"] assert x.device is not 'cpu' T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) # recurrent component core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # Reset core state to zero whenever an episode ended. # Make `done` broadcastable with (num_layers, B, D, D) # states: # nd = nd.view(1, -1, 1) # needs extra dim0 for compat w/ inference code nd = nd.view(1, -1, 1, 1, 1) core_state = nest.map(nd.mul, core_state) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state class LinearTransformerModel(nn.Module): def __init__(self, num_actions, hidden_size=128, num_layers=2, num_head=4, dim_head=32, dim_ff=512, dropout=0.0): super(LinearTransformerModel, self).__init__() self.num_actions = num_actions # output vocab size. 
self.hidden_size = hidden_size self.num_layers = num_layers self.num_head = num_head self.dim_head = dim_head self.dim_ff = dim_ff self.dropout = dropout self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append( nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. # core_output_size = self.fc.out_features + 1 self.core = LinearTransformerLayer(self.fc.out_features + 1, hidden_size, num_layers, num_head, dim_head, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, batch_size=1): state_tuple = tuple(torch.zeros( 1, batch_size, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers)) return state_tuple def forward(self, inputs, core_state): x = inputs["frame"] assert x.device is not 'cpu' T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) # recurrent component core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # Reset core state to zero whenever an episode ended. # Make `done` broadcastable with (num_layers, B, D, D) # states: # nd = nd.view(1, -1, 1) # needs extra dim0 for compat w/ inference code nd = nd.view(1, -1, 1, 1, 1) core_state = nest.map(nd.mul, core_state) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state class RecDeltaModel(nn.Module): def __init__(self, num_actions, hidden_size=128, num_layers=2, num_head=4, dim_head=32, dim_ff=512, dropout=0.0): super(RecDeltaModel, self).__init__() self.num_actions = num_actions # output vocab size. 
self.hidden_size = hidden_size self.num_layers = num_layers self.num_head = num_head self.dim_head = dim_head self.dim_ff = dim_ff self.dropout = dropout self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append( nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. # core_output_size = self.fc.out_features + 1 self.core = FastFFRecUpdateTanhLayer(self.fc.out_features + 1, hidden_size, num_layers, num_head, dim_head, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, batch_size=1): # why not create on device? # add dummy dim0 for inference code compat. fw_state_tuple = tuple( torch.zeros( 1, batch_size, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) rnn_state_tuple = tuple( torch.zeros(1, batch_size, self.num_head, 1, self.dim_head) for _ in range(self.num_layers) ) return (fw_state_tuple, rnn_state_tuple) def forward(self, inputs, core_state): x = inputs["frame"] assert x.device is not 'cpu' T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) # recurrent component core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # Reset core state to zero whenever an episode ended. # Make `done` broadcastable with (num_layers, B, D, D) # states: # nd = nd.view(1, -1, 1) # needs extra dim0 for compat w/ inference code fw_state, rnn_state = core_state nd = nd.view(1, -1, 1, 1, 1) fw_state = nest.map(nd.mul, fw_state) rnn_state = nest.map(nd.mul, rnn_state) core_state = (fw_state, rnn_state) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. 
action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state class FastRNNModel(nn.Module): def __init__(self, num_actions, hidden_size=128, num_layers=2, num_head=4, dim_head=32, dim_ff=512, dropout=0.0): super(FastRNNModel, self).__init__() self.num_actions = num_actions # output vocab size. self.hidden_size = hidden_size self.num_layers = num_layers self.num_head = num_head self.dim_head = dim_head self.dim_ff = dim_ff self.dropout = dropout self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append( nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. # core_output_size = self.fc.out_features + 1 self.core = FastRNNModelLayer(self.fc.out_features + 1, hidden_size, num_layers, num_head, dim_head, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, batch_size=1): fw_state_tuple = tuple( torch.zeros( 1, batch_size, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) rec_fw_state_tuple = tuple( torch.zeros( 1, batch_size, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) rnn_state_tuple = tuple( torch.zeros(1, batch_size, self.num_head, 1, self.dim_head) for _ in range(self.num_layers) ) return (fw_state_tuple, rec_fw_state_tuple, rnn_state_tuple) def forward(self, inputs, core_state): x = inputs["frame"] assert x.device is not 'cpu' T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) # recurrent component core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # Reset core state to zero whenever an episode ended. 
# Make `done` broadcastable with (num_layers, B, D, D) # states: # nd = nd.view(1, -1, 1) # needs extra dim0 for compat w/ inference code fw_state, rec_fw_state, rnn_state = core_state nd = nd.view(1, -1, 1, 1, 1) fw_state = nest.map(nd.mul, fw_state) rec_fw_state = nest.map(nd.mul, rec_fw_state) rnn_state = nest.map(nd.mul, rnn_state) core_state = (fw_state, rec_fw_state, rnn_state) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state class DeltaDeltaNetModel(nn.Module): def __init__(self, num_actions, hidden_size=128, num_layers=2, num_head=4, dim_head=32, dim_ff=512, dropout=0.0, use_xem=False): # use_xem: use cross episode memory super(DeltaDeltaNetModel, self).__init__() self.num_actions = num_actions # output vocab size. self.use_xem = use_xem self.hidden_size = hidden_size self.num_layers = num_layers self.num_head = num_head self.dim_head = dim_head self.dim_ff = dim_ff self.dropout = dropout self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append( nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. # core_output_size = self.fc.out_features + 1 self.core = DeltaDeltaNetLayer( self.fc.out_features + 1, hidden_size, num_layers, num_head, dim_head, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, batch_size=1): fw_state_tuple = tuple( torch.zeros( 1, batch_size, self.num_head, self.dim_head, 3 * self.dim_head + 1) for _ in range(self.num_layers) ) very_fw_state_tuple = tuple( torch.zeros( 1, batch_size, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) return (fw_state_tuple, very_fw_state_tuple) def forward(self, inputs, core_state): x = inputs["frame"] assert x.device is not 'cpu' T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. 
x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) # recurrent component core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # if not use_xem # Reset core state to zero whenever an episode ended. # Make `done` broadcastable with (num_layers, B, D, D) # states: # nd = nd.view(1, -1, 1) # needs extra dim0 for compat w/ inference code fw_state, very_fw_state = core_state nd = nd.view(1, -1, 1, 1, 1) if not self.use_xem: fw_state = nest.map(nd.mul, fw_state) else: # save cross episodic fast weights layer_id = 0 for fw_layer in self.core.fwm_layers: fw_layer.cached_fast_weights = fw_state[layer_id][0] layer_id += 1 very_fw_state = nest.map(nd.mul, very_fw_state) core_state = (fw_state, very_fw_state) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state # Outer product based self-referential matrix class SRModel(nn.Module): def __init__(self, num_actions, hidden_size=128, num_layers=2, num_head=4, dim_head=16, dim_ff=512, dropout=0.0, use_xem=False): super(SRModel, self).__init__() self.num_actions = num_actions # output vocab size. self.use_xem = use_xem self.hidden_size = hidden_size self.num_layers = num_layers self.num_head = num_head self.dim_head = dim_head self.dim_ff = dim_ff self.dropout = dropout self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append( nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. 
# core_output_size = self.fc.out_features + 1 self.core = SRNetLayer(self.fc.out_features + 1, hidden_size, num_layers, num_head, dim_head, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, bsz=1): Wy_state_tuple = tuple( torch.zeros(1, bsz, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) Wq_state_tuple = tuple( torch.zeros(1, bsz, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) Wk_state_tuple = tuple( torch.zeros(1, bsz, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) wb_state_tuple = tuple( torch.zeros(1, bsz, self.num_head, self.dim_head, 4) for _ in range(self.num_layers) ) return (Wy_state_tuple, Wq_state_tuple, Wk_state_tuple, wb_state_tuple) def forward(self, inputs, core_state): x = inputs["frame"] assert x.device is not 'cpu' T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) # recurrent component core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # Reset core state to zero whenever an episode ended. # Make `done` broadcastable with (num_layers, B, D, D) # states: # nd = nd.view(1, -1, 1) # needs extra dim0 for compat w/ inference code nd = nd.view(1, -1, 1, 1, 1) # better reset? (Wy_s, Wq_s, Wk_s, wb_s) = core_state if not self.use_xem: Wy_s = nest.map(nd.mul, Wy_s) Wq_s = nest.map(nd.mul, Wq_s) Wk_s = nest.map(nd.mul, Wk_s) wb_s = nest.map(nd.mul, wb_s) core_state = (Wy_s, Wq_s, Wk_s, wb_s) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state # 'Fake SR' in the paper class PseudoSRModel(nn.Module): def __init__(self, num_actions, hidden_size=128, num_layers=2, num_head=4, dim_head=16, dim_ff=512, dropout=0.0, use_xem=False): super(PseudoSRModel, self).__init__() self.num_actions = num_actions # output vocab size. 
self.use_xem = use_xem self.hidden_size = hidden_size self.num_layers = num_layers self.num_head = num_head self.dim_head = dim_head self.dim_ff = dim_ff self.dropout = dropout self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append( nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. # core_output_size = self.fc.out_features + 1 self.core = PseudoSRNetLayer(self.fc.out_features + 1, hidden_size, num_layers, num_head, dim_head, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, batch_size=1): return tuple() def forward(self, inputs, core_state): x = inputs["frame"] assert x.device is not 'cpu' T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) # recurrent component core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() core_output, _ = self.core(core_input, core_state=None) core_output = torch.flatten(core_output, 0, 1) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state # Outer product based self-referential matrix, w/o carry over contexts class NoCarryOverSRModel(nn.Module): def __init__(self, num_actions, hidden_size=128, num_layers=2, num_head=4, dim_head=16, dim_ff=512, dropout=0.0): super(NoCarryOverSRModel, self).__init__() self.num_actions = num_actions # output vocab size. 
self.hidden_size = hidden_size self.num_layers = num_layers self.num_head = num_head self.dim_head = dim_head self.dim_ff = dim_ff self.dropout = dropout self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append( nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. # core_output_size = self.fc.out_features + 1 self.core = NoCarryOverSRNetLayer(self.fc.out_features + 1, hidden_size, num_layers, num_head, dim_head, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, bsz=1): return tuple() def forward(self, inputs, core_state): x = inputs["frame"] assert x.device is not 'cpu' T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) # recurrent component core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # Reset core state to zero whenever an episode ended. # Make `done` broadcastable with (num_layers, B, D, D) # states: # nd = nd.view(1, -1, 1) # needs extra dim0 for compat w/ inference code # nd = nd.view(1, -1, 1, 1, 1) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state # Self-modifying FWP model, 'SR-Delta' class SMFWPModel(nn.Module): def __init__(self, num_actions, hidden_size=128, num_layers=2, num_head=4, dim_head=16, dim_ff=512, dropout=0.0): super(SMFWPModel, self).__init__() self.num_actions = num_actions # output vocab size. 
self.hidden_size = hidden_size self.num_layers = num_layers self.num_head = num_head self.dim_head = dim_head self.dim_ff = dim_ff self.dropout = dropout self.y_d_head = 3 * dim_head + 1 self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 3 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append( nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(2048, 256) # FC output size + last reward. # core_output_size = self.fc.out_features + 1 self.core = SMFWPNetLayer(self.fc.out_features + 1, hidden_size, num_layers, num_head, dim_head, dim_ff, dropout) core_output_size = hidden_size self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, bsz=1): Wy_state_tuple = tuple( torch.zeros(1, bsz, self.num_head, self.dim_head, self.y_d_head) for _ in range(self.num_layers) ) Wq_state_tuple = tuple( torch.zeros(1, bsz, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) Wk_state_tuple = tuple( torch.zeros(1, bsz, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) wb_state_tuple = tuple( torch.zeros(1, bsz, self.num_head, self.dim_head, 4) for _ in range(self.num_layers) ) fw_state_tuple = tuple( torch.zeros(1, bsz, self.num_head, self.dim_head, self.dim_head) for _ in range(self.num_layers) ) return (Wy_state_tuple, Wq_state_tuple, Wk_state_tuple, wb_state_tuple, fw_state_tuple) def forward(self, inputs, core_state): x = inputs["frame"] assert x.device is not 'cpu' T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) # recurrent component core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (~inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # Reset core state to zero whenever an episode ended. # Make `done` broadcastable with (num_layers, B, D, D) # states: # nd = nd.view(1, -1, 1) # needs extra dim0 for compat w/ inference code nd = nd.view(1, -1, 1, 1, 1) # better reset? 
(Wy_s, Wq_s, Wk_s, wb_s, fw_s) = core_state Wy_s = nest.map(nd.mul, Wy_s) Wq_s = nest.map(nd.mul, Wq_s) Wk_s = nest.map(nd.mul, Wk_s) wb_s = nest.map(nd.mul, wb_s) fw_s = nest.map(nd.mul, fw_s) core_state = (Wy_s, Wq_s, Wk_s, wb_s, fw_s) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state
57563
33.510791
111
py
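Two notes on model.py above. First, the guard assert x.device is not 'cpu' compares a torch.device object against a str by identity, so it always passes; assert x.device.type != 'cpu' would enforce the intended check. Second, every recurrent core resets its fast-weight state at episode boundaries by multiplying with a broadcast not-done mask. A minimal pure-torch sketch of that reset (the repo applies it through nest.map over nested state tuples; a plain tuple stands in here):

import torch

# State shapes mirror the (1, B, num_head, dim_head, dim_head) fast-weight
# tensors built by initial_state(); the sizes here are illustrative.
num_layers, B, H, D = 2, 4, 4, 32
core_state = tuple(torch.randn(1, B, H, D, D) for _ in range(num_layers))

done = torch.tensor([False, True, False, True])  # one flag per batch entry
nd = (~done).float().view(1, -1, 1, 1, 1)        # broadcastable with the states
core_state = tuple(nd * s for s in core_state)   # zero states of ended episodes

assert core_state[0][0, 1].abs().sum() == 0      # batch entry 1 was reset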
modern-srwm
modern-srwm-main/reinforcement_learning/torchbeast_procgen/polybeast_learner.py
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import collections import logging import os import threading import time import timeit import traceback import gym import nest import torch import libtorchbeast from torch import nn from torch.nn import functional as F from torchbeast.core import file_writer from torchbeast.core import vtrace from torchbeast_procgen.model import Net, DeeperNet from torchbeast_procgen.model import DeltaNetModel as DeltaNet from torchbeast_procgen.model import DeltaDeltaNetModel as DDNet from torchbeast_procgen.model import SRModel as SRM from torchbeast_procgen.model import PseudoSRModel as PseudoSRM from torchbeast_procgen.model import NoCarryOverSRModel as NoCarrySRM from torchbeast_procgen.model import SMFWPModel as SMFWP from torchbeast_procgen.model import LinearTransformerModel as LT from torchbeast_procgen.model import RecDeltaModel as RecDelta from torchbeast_procgen.model import FastRNNModel as FastRNN # Necessary for multithreading. os.environ["OMP_NUM_THREADS"] = "1" # Make sure these are consistent with the lists in polybeast_env # lexicographically sorted list list_procgen_env_lex = [ 'bigfish', 'bossfight', 'caveflyer', 'chaser', 'climber', 'coinrun', 'dodgeball', 'fruitbot', 'heist', 'jumper', 'leaper', 'maze', 'miner', 'ninja', 'plunder', 'starpilot', ] # interesting ones first, we use this list list_procgen_env = [ 'bigfish', 'fruitbot', 'maze', 'leaper', 'plunder', 'starpilot', 'miner', 'bossfight', 'caveflyer', 'chaser', 'climber', 'coinrun', 'dodgeball', 'heist', 'jumper', 'ninja', ] # env with memory mode extension list_procgen_env_mem = [ 'dodgeball', 'heist', 'maze', 'miner', 'caveflyer', 'jumper', ] # yapf: disable parser = argparse.ArgumentParser(description="PyTorch Scalable Agent") parser.add_argument("--pipes_basename", default="unix:/tmp/polybeast", help="Basename for the pipes for inter-process communication. " "Has to be of the type unix:/some/path.") parser.add_argument("--mode", default="train", choices=["train", "test", "test_render"], help="Training or test mode.") parser.add_argument("--xpid", default=None, help="Experiment id (default: None).") # Training settings. 
parser.add_argument("--single_gpu", action="store_true", help="use single gpu.") parser.add_argument("--disable_checkpoint", action="store_true", help="Disable saving checkpoint.") parser.add_argument("--disable_validation", action="store_true", help="Disable validation.") parser.add_argument("--validate_every", default=10, type=int, help="run validation every this *minutes*.") parser.add_argument("--validate_step_every", default=-1, type=int, help="run validation every this *steps*.") parser.add_argument("--save_extra_checkpoint", default=50000000, type=int, help="Save an extra checkpoint at .") parser.add_argument("--eval_extra", action="store_true", help="Eval extra checkpoint.") parser.add_argument("--savedir", default="~/palaas/torchbeast", help="Root dir where experiment data will be saved.") parser.add_argument("--num_actors", default=4, type=int, metavar="N", help="Number of actors.") parser.add_argument("--total_steps", default=100000, type=int, metavar="T", help="Total environment steps to train for.") parser.add_argument("--batch_size", default=8, type=int, metavar="B", help="Learner batch size.") parser.add_argument("--unroll_length", default=80, type=int, metavar="T", help="The unroll length (time dimension).") parser.add_argument("--num_learner_threads", default=2, type=int, metavar="N", help="Number learner threads.") parser.add_argument("--num_inference_threads", default=2, type=int, metavar="N", help="Number learner threads.") parser.add_argument("--disable_cuda", action="store_true", help="Disable CUDA.") parser.add_argument("--num_actions", default=6, type=int, metavar="A", help="Number of actions.") parser.add_argument("--conv_scale", default=1, type=int, help="[ff] scale for num channels in conv layers.") parser.add_argument("--use_lstm", action="store_true", help="Use LSTM in agent model.") parser.add_argument("--use_deep_ff", action="store_true", help="Use deep FF net in agent model.") parser.add_argument("--use_delta_rnn", action="store_true", help="Use Delta RNN in agent model.") parser.add_argument("--use_delta", action="store_true", help="Use Delta Net in agent model.") parser.add_argument("--use_lt", action="store_true", help="Use Linear Trafo in agent model.") parser.add_argument("--use_rec_delta", action="store_true", help="Use Recurrent Delta Net in agent model.") parser.add_argument("--use_dd", action="store_true", help="Use Delta Delta in agent model.") parser.add_argument("--use_sr", action="store_true", help="Use SR matrix in agent model.") parser.add_argument("--use_psr", action="store_true", help="Use pseudoSR matrix in agent model.") parser.add_argument("--use_smfwp", action="store_true", help="Use SR matrix in agent model.") parser.add_argument("--use_no_carry_sr", action="store_true", help="Use SR matrix w/o carry over in agent model.") parser.add_argument("--test_no_carry_sr", action="store_true", help="Use SR matrix w/o carry over in test.") parser.add_argument("--keep_dd", action="store_true", help="Keep delta delta XEM as part of the model.") parser.add_argument("--max_learner_queue_size", default=None, type=int, metavar="N", help="Optional maximum learner queue size. Defaults to batch_size.") # Model settings. 
parser.add_argument("--hidden_size", default=128, type=int, help="transformer hidden size.") parser.add_argument("--dim_ff", default=512, type=int, help="transformer hidden size.") parser.add_argument("--dim_head", default=32, type=int, help="transformer head size.") parser.add_argument("--num_layers", default=2, type=int, help="tranformer num layers.") parser.add_argument("--num_head", default=4, type=int, help="tranformer num heads.") parser.add_argument("--dropout", default=0.0, type=float, help="tranformer dropout.") # Loss settings. parser.add_argument("--entropy_cost", default=0.0006, type=float, help="Entropy cost/multiplier.") parser.add_argument("--baseline_cost", default=0.5, type=float, help="Baseline cost/multiplier.") parser.add_argument("--discounting", default=0.99, type=float, help="Discounting factor.") parser.add_argument("--reward_clipping", default="abs_one", choices=["abs_one", "none"], help="Reward clipping.") # Optimizer settings. parser.add_argument("--learning_rate", default=0.00048, type=float, metavar="LR", help="Learning rate.") parser.add_argument("--alpha", default=0.99, type=float, help="RMSProp smoothing constant.") parser.add_argument("--momentum", default=0, type=float, help="RMSProp momentum.") parser.add_argument("--epsilon", default=0.01, type=float, help="RMSProp epsilon.") parser.add_argument("--grad_norm_clipping", default=40.0, type=float, help="Global gradient norm clip.") # Misc settings. parser.add_argument("--write_profiler_trace", action="store_true", help="Collect and write a profiler trace " "for chrome://tracing/.") # yapf: enable parser.add_argument('--num_servers', default=4, type=int, metavar='N', help='Number of environment servers.') parser.add_argument('--env', type=str, default='PongNoFrameskip-v4', help='Gym environment.') parser.add_argument('--multi_env', default=1, type=int, metavar='N', help='number of env to jointly train on.') parser.add_argument('--allow_oov', action="store_true", help='Allow action space larger than the env specific one.' 
' All out-of-vocab action will be mapped to NoOp.') parser.add_argument('--num_levels', default=0, type=int, metavar='N', help='Procgen num_levels.') parser.add_argument('--start_level', default=0, type=int, metavar='N', help='Procgen start_level.') parser.add_argument('--distribution_mode', type=str, default='hard', choices=[ 'easy', 'hard', 'extreme', 'memory', 'exploration'], help='distribution mode.') parser.add_argument('--valid_num_levels', default=0, type=int, metavar='N', help='Procgen num_levels for validation set.') parser.add_argument('--valid_start_level', default=0, type=int, metavar='N', help='Procgen start_level for validation set.') parser.add_argument('--valid_num_episodes', default=5, type=int, metavar='N', help='number of validation episodes.') parser.add_argument('--valid_num_runs', default=1, type=int, metavar='N', help='number of validation runs ' '(each run is valid_num_episodes episode long).') parser.add_argument('--valid_distribution_mode', type=str, default='hard', choices=[ 'easy', 'hard', 'extreme', 'memory', 'exploration'], help='validation distribution mode.') # For eval parser.add_argument('--test_model_name', type=str, help='model checkpoint suffix for evaluation.') parser.add_argument('--test_num_levels', default=0, type=int, metavar='N', help='Procgen num_levels for validation set.') parser.add_argument('--test_start_level', default=0, type=int, metavar='N', help='Procgen start_level for validation set.') parser.add_argument('--test_distribution_mode', type=str, default='hard', choices=[ 'easy', 'hard', 'extreme', 'memory', 'exploration'], help='test distribution mode.') # Wandb settings parser.add_argument('--project_name', type=str, default=None, help='project name for wandb.') parser.add_argument('--job_name', type=str, default=None, help='job name for wandb.') parser.add_argument('--use_wandb', action='store_true', help='use wandb.') args = parser.parse_args() if args.use_wandb: # configure wandb. 
import wandb use_wandb = True if args.project_name is None: project_name = (os.uname()[1] + datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) else: project_name = args.project_name wandb.init(project=project_name) if args.job_name is None: # wandb.run.name = (os.uname()[1] # + datetime.now().strftime("%Y-%m-%d-%H-%M-%S") # + args.work_dir) wandb.run.name = f"{os.uname()[1]}" \ f"-{args.mode}" \ f"-{args.xpid}" \ f"-{args.disable_checkpoint}" \ f"-{args.savedir}" \ f"-{args.num_actors}" \ f"-{args.total_steps}" \ f"-{args.batch_size}" \ f"-{args.unroll_length}" \ f"-{args.entropy_cost}" \ f"-{args.baseline_cost}" \ f"-{args.discounting}" \ f"-{args.reward_clipping}" \ f"-{args.learning_rate}" \ f"-{args.alpha}" \ f"-{args.momentum}" \ f"-{args.epsilon}" \ f"-{args.grad_norm_clipping}" else: wandb.run.name = f"{os.uname()[1]}//{args.job_name}" config = wandb.config config.host = os.uname()[1] # host node name config.mode=args.mode config.xpid=args.xpid config.disable_checkpoint=args.disable_checkpoint config.savedir=args.savedir config.num_actors=args.num_actors config.total_steps=args.total_steps config.batch_size=args.batch_size config.unroll_length=args.unroll_length config.disable_cuda=args.disable_cuda config.use_lstm=args.use_lstm config.entropy_cost=args.entropy_cost config.baseline_cost=args.baseline_cost config.discounting=args.discounting config.reward_clipping=args.reward_clipping config.learning_rate=args.learning_rate config.alpha=args.alpha config.momentum=args.momentum config.epsilon=args.epsilon config.grad_norm_clipping=args.grad_norm_clipping else: use_wandb = False logging.basicConfig( format=( "[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s" ), level=0, ) def compute_baseline_loss(advantages): return 0.5 * torch.sum(advantages ** 2) def compute_entropy_loss(logits): """Return the entropy loss, i.e., the negative entropy of the policy.""" policy = F.softmax(logits, dim=-1) log_policy = F.log_softmax(logits, dim=-1) return torch.sum(policy * log_policy) def compute_policy_gradient_loss(logits, actions, advantages): cross_entropy = F.nll_loss( F.log_softmax(torch.flatten(logits, 0, 1), dim=-1), target=torch.flatten(actions, 0, 1), reduction="none", ) cross_entropy = cross_entropy.view_as(advantages) return torch.sum(cross_entropy * advantages.detach()) def inference(flags, inference_batcher, model, lock=threading.Lock()): # noqa: B008 with torch.no_grad(): for batch in inference_batcher: batched_env_outputs, agent_state = batch.get_inputs() frame, reward, done, *_ = batched_env_outputs frame = frame.to(flags.actor_device, non_blocking=True) reward = reward.to(flags.actor_device, non_blocking=True) done = done.to(flags.actor_device, non_blocking=True) agent_state = nest.map( lambda t: t.to(flags.actor_device, non_blocking=True), agent_state ) with lock: outputs = model( dict(frame=frame, reward=reward, done=done), agent_state ) outputs = nest.map(lambda t: t.cpu(), outputs) batch.set_outputs(outputs) EnvOutput = collections.namedtuple( "EnvOutput", "frame rewards done episode_step episode_return" ) AgentOutput = collections.namedtuple("AgentOutput", "action policy_logits baseline") Batch = collections.namedtuple("Batch", "env agent") def learn( flags, learner_queue, model, actor_model, optimizer, scheduler, stats, plogger, lock=threading.Lock(), ): for tensors in learner_queue: tensors = nest.map(lambda t: t.to(flags.learner_device), tensors) batch, initial_agent_state = tensors env_outputs, actor_outputs = batch frame, reward, done, *_ = env_outputs 
lock.acquire() # Only one thread learning at a time. learner_outputs, unused_state = model( dict(frame=frame, reward=reward, done=done), initial_agent_state ) # Take final value function slice for bootstrapping. learner_outputs = AgentOutput._make(learner_outputs) bootstrap_value = learner_outputs.baseline[-1] # Move from obs[t] -> action[t] to action[t] -> obs[t]. batch = nest.map(lambda t: t[1:], batch) learner_outputs = nest.map(lambda t: t[:-1], learner_outputs) # Turn into namedtuples again. env_outputs, actor_outputs = batch env_outputs = EnvOutput._make(env_outputs) actor_outputs = AgentOutput._make(actor_outputs) learner_outputs = AgentOutput._make(learner_outputs) if flags.reward_clipping == "abs_one": clipped_rewards = torch.clamp(env_outputs.rewards, -1, 1) elif flags.reward_clipping == "none": clipped_rewards = env_outputs.rewards discounts = (~env_outputs.done).float() * flags.discounting vtrace_returns = vtrace.from_logits( behavior_policy_logits=actor_outputs.policy_logits, target_policy_logits=learner_outputs.policy_logits, actions=actor_outputs.action, discounts=discounts, rewards=clipped_rewards, values=learner_outputs.baseline, bootstrap_value=bootstrap_value, ) pg_loss = compute_policy_gradient_loss( learner_outputs.policy_logits, actor_outputs.action, vtrace_returns.pg_advantages, ) baseline_loss = flags.baseline_cost * compute_baseline_loss( vtrace_returns.vs - learner_outputs.baseline ) entropy_loss = flags.entropy_cost * compute_entropy_loss( learner_outputs.policy_logits ) total_loss = pg_loss + baseline_loss + entropy_loss optimizer.zero_grad() total_loss.backward() nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping) optimizer.step() scheduler.step() actor_model.load_state_dict(model.state_dict()) episode_returns = env_outputs.episode_return[env_outputs.done] stats["step"] = stats.get("step", 0) + flags.unroll_length * flags.batch_size stats["episode_returns"] = tuple(episode_returns.cpu().numpy()) stats["mean_episode_return"] = torch.mean(episode_returns).item() stats["mean_episode_step"] = torch.mean(env_outputs.episode_step.float()).item() stats["total_loss"] = total_loss.item() stats["pg_loss"] = pg_loss.item() stats["baseline_loss"] = baseline_loss.item() stats["entropy_loss"] = entropy_loss.item() stats["learner_queue_size"] = learner_queue.size() if use_wandb: wandb.log({"episode_returns": stats["episode_returns"]}) wandb.log({"mean_episode_step": stats["mean_episode_step"]}) wandb.log({"mean_episode_return": stats["mean_episode_return"]}) wandb.log({"total_loss": stats["total_loss"]}) wandb.log({"pg_loss": stats["pg_loss"]}) wandb.log({"baseline_loss": stats["baseline_loss"]}) wandb.log({"entropy_loss": stats["entropy_loss"]}) plogger.log(stats) if not len(episode_returns): # Hide the mean-of-empty-tuple NaN as it scares people. 
stats["mean_episode_return"] = None lock.release() def train(flags): if flags.xpid is None: flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S") plogger = file_writer.FileWriter( xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir ) checkpointpath = os.path.expandvars( os.path.expanduser( "%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar")) ) best_valid_checkpointpath = os.path.expandvars( os.path.expanduser( "%s/%s/%s" % (flags.savedir, flags.xpid, "model_best_val.tar")) ) if flags.save_extra_checkpoint > 0: extra_checkpointpath = os.path.expandvars( os.path.expanduser( "%s/%s/%s" % (flags.savedir, flags.xpid, "model_extra.tar"))) if flags.single_gpu: logging.info("Using single GPU.") flags.learner_device = torch.device("cuda:0") flags.actor_device = torch.device("cuda:0") elif not flags.disable_cuda and torch.cuda.is_available(): logging.info("Using CUDA.") flags.learner_device = torch.device("cuda:0") flags.actor_device = torch.device("cuda:1") else: logging.info("Not using CUDA.") flags.learner_device = torch.device("cpu") flags.actor_device = torch.device("cpu") if flags.max_learner_queue_size is None: flags.max_learner_queue_size = flags.batch_size # The queue the learner threads will get their data from. # Setting `minimum_batch_size == maximum_batch_size` # makes the batch size static. learner_queue = libtorchbeast.BatchingQueue( batch_dim=1, minimum_batch_size=flags.batch_size, maximum_batch_size=flags.batch_size, check_inputs=True, maximum_queue_size=flags.max_learner_queue_size, ) # The "batcher", a queue for the inference call. Will yield # "batch" objects with `get_inputs` and `set_outputs` methods. # The batch size of the tensors will be dynamic. inference_batcher = libtorchbeast.DynamicBatcher( batch_dim=1, minimum_batch_size=1, maximum_batch_size=512, timeout_ms=100, check_outputs=True, ) addresses = [] connections_per_server = 1 pipe_id = 0 while len(addresses) < flags.num_actors: for _ in range(connections_per_server): addresses.append(f"{flags.pipes_basename}.{pipe_id}") if len(addresses) == flags.num_actors: break pipe_id += 1 if flags.use_delta: model = DeltaNet( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = DeltaNet( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) actor_model.to(device=flags.actor_device) elif flags.use_lt: model = LT(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = LT( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) actor_model.to(device=flags.actor_device) elif flags.use_delta_rnn: model = FastRNN(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = FastRNN( num_actions=flags.num_actions, dim_head=flags.dim_head, 
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) actor_model.to(device=flags.actor_device) elif flags.use_rec_delta: model = RecDelta(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = RecDelta( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) actor_model.to(device=flags.actor_device) elif flags.use_dd: model = DDNet(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = DDNet( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) actor_model.to(device=flags.actor_device) elif flags.use_sr: model = SRM(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = SRM( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) actor_model.to(device=flags.actor_device) elif flags.use_psr: model = PseudoSRM(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = PseudoSRM( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) actor_model.to(device=flags.actor_device) elif flags.use_smfwp: model = SMFWP(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = SMFWP( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) actor_model.to(device=flags.actor_device) elif flags.use_no_carry_sr: model = NoCarrySRM( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = NoCarrySRM( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) actor_model.to(device=flags.actor_device) elif flags.use_deep_ff: model = DeeperNet(num_actions=flags.num_actions, use_lstm=flags.use_lstm, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, 
num_layers=flags.num_layers, dropout=flags.dropout) logging.info(model) model = model.to(device=flags.learner_device) actor_model = DeeperNet(num_actions=flags.num_actions, use_lstm=flags.use_lstm, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, dropout=flags.dropout) actor_model.to(device=flags.actor_device) else: model = Net(num_actions=flags.num_actions, conv_scale=flags.conv_scale, use_lstm=flags.use_lstm) logging.info(model) model = model.to(device=flags.learner_device) actor_model = Net( num_actions=flags.num_actions, conv_scale=flags.conv_scale, use_lstm=flags.use_lstm) actor_model.to(device=flags.actor_device) # The ActorPool that will run `flags.num_actors` many loops. actors = libtorchbeast.ActorPool( unroll_length=flags.unroll_length, learner_queue=learner_queue, inference_batcher=inference_batcher, env_server_addresses=addresses, initial_agent_state=actor_model.initial_state(), ) def run(): try: actors.run() except Exception as e: logging.error("Exception in actorpool thread!") traceback.print_exc() print() raise e actorpool_thread = threading.Thread(target=run, name="actorpool-thread") optimizer = torch.optim.RMSprop( model.parameters(), lr=flags.learning_rate, momentum=flags.momentum, eps=flags.epsilon, alpha=flags.alpha, ) def lr_lambda(epoch): return ( 1 - min(epoch * flags.unroll_length * flags.batch_size, flags.total_steps) / flags.total_steps ) scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda) stats = {} best_valid_return = 0 # Load state from a checkpoint, if possible. if os.path.exists(checkpointpath): checkpoint_states = torch.load( checkpointpath, map_location=flags.learner_device ) model.load_state_dict(checkpoint_states["model_state_dict"]) optimizer.load_state_dict(checkpoint_states["optimizer_state_dict"]) scheduler.load_state_dict(checkpoint_states["scheduler_state_dict"]) stats = checkpoint_states["stats"] best_valid_return = checkpoint_states["best_valid_return"] logging.info(f"Resuming preempted job, current stats:\n{stats}") # Initialize actor model like learner model. actor_model.load_state_dict(model.state_dict()) learner_threads = [ threading.Thread( target=learn, name="learner-thread-%i" % i, args=( flags, learner_queue, model, actor_model, optimizer, scheduler, stats, plogger, ), ) for i in range(flags.num_learner_threads) ] inference_threads = [ threading.Thread( target=inference, name="inference-thread-%i" % i, args=(flags, inference_batcher, actor_model), ) for i in range(flags.num_inference_threads) ] actorpool_thread.start() for t in learner_threads + inference_threads: t.start() def checkpoint(path): if flags.disable_checkpoint: return logging.info("Saving checkpoint to %s", path) torch.save( { "model_state_dict": model.state_dict(), "optimizer_state_dict": optimizer.state_dict(), "scheduler_state_dict": scheduler.state_dict(), "stats": stats, "flags": vars(flags), "best_valid_return": best_valid_return, }, path, ) def old_validate(num_episodes=5, num_runs=1): from torchbeast_procgen import procgen_wrappers from torchbeast.core import environment def create_valid_env(): # better always validate on the same levels via rand_seed? 
return procgen_wrappers.wrap_pytorch( procgen_wrappers.wrap_deepmind( gym.make(flags.env, num_levels=flags.valid_num_levels, start_level=flags.valid_start_level, distribution_mode=flags.valid_distribution_mode, rand_seed=None), clip_rewards=False, ) ) gym_env = create_valid_env() env = environment.Environment(gym_env) # model.eval() device = flags.learner_device # move to actor device?? which should be less busy? and copy back observation = env.initial() all_returns = [] core_state = model.initial_state() core_state = nest.map(lambda t: t.to(device), core_state) logging.info("Start validation") with torch.no_grad(): while len(all_returns) < num_runs: returns = [] while len(returns) < num_episodes: # (action, policy_logits, baseline), core_state observation = nest.map(lambda t: t.to(device), observation) agent_outputs, core_state = model(observation, core_state) action, _, _ = agent_outputs # action = action.to('cpu') # policy_outputs, _ = agent_outputs observation = env.step(action) # observation = env.step(policy_outputs["action"]) if observation["done"].item(): returns.append(observation["episode_return"].item()) logging.info( "Episode ended after %d steps. Return: %.1f", observation["episode_step"].item(), observation["episode_return"].item(), ) logging.info( "Average returns over %i episodes: %.1f", num_episodes, sum(returns) / len(returns) ) all_returns.append(sum(returns) / len(returns)) env.close() import numpy as np mean_valid_return = np.mean(all_returns) logging.info( f"[validation] Average returns over " f"{num_episodes} for {num_runs} runs: {all_returns}") logging.info( f"[validation] Mean return: " f"{mean_valid_return:.1f}, std: {np.std(all_returns):.1f}") model.train() if use_wandb: wandb.log({"validation return": mean_valid_return}) return mean_valid_return # validate in all env def validate(num_episodes=5, num_runs=1): from torchbeast_procgen import procgen_wrappers from torchbeast.core import environment def create_valid_env(env_name): # better always validate on the same levels via rand_seed? return procgen_wrappers.wrap_pytorch( procgen_wrappers.wrap_deepmind( gym.make(env_name, num_levels=flags.valid_num_levels, start_level=flags.valid_start_level, distribution_mode=flags.valid_distribution_mode, rand_seed=None), clip_rewards=False, ) ) if flags.distribution_mode == 'memory': # for multi_env training list_env = list_procgen_env_mem else: list_env = list_procgen_env cross_env_avg = 0. for i in range(flags.multi_env): if flags.multi_env == 1: env_name = flags.env gym_env = create_valid_env(f"{env_name}") is_multi_task = False else: is_multi_task = True env_name = list_env[i] gym_env = create_valid_env(f"procgen:procgen-{env_name}-v0") env = environment.Environment(gym_env) # model.eval() device = flags.learner_device # move to actor device?? which should be less busy? and copy back observation = env.initial() all_returns = [] core_state = model.initial_state() core_state = nest.map(lambda t: t.to(device), core_state) logging.info(f"Start validation on {env_name}") with torch.no_grad(): while len(all_returns) < num_runs: returns = [] while len(returns) < num_episodes: # (action, policy_logits, baseline), core_state observation = nest.map( lambda t: t.to(device), observation) agent_outputs, core_state = model( observation, core_state) action, _, _ = agent_outputs observation = env.step(action) if observation["done"].item(): returns.append( observation["episode_return"].item()) logging.info( "Episode ended after %d steps. 
Return: %.1f",
                                observation["episode_step"].item(),
                                observation["episode_return"].item(),
                            )
                    logging.info(
                        "Average returns over %i episodes: %.1f",
                        num_episodes, sum(returns) / len(returns)
                    )
                    all_returns.append(sum(returns) / len(returns))
            env.close()
            import numpy as np
            if num_runs == 1:
                mean_valid_return = np.mean(returns)
                logging.info(
                    f"[validation, {env_name}] Average returns over "
                    f"{num_episodes} for {num_runs} runs: {returns}")
                logging.info(
                    f"[validation, {env_name}] Mean return: "
                    f"{mean_valid_return:.1f}, std: {np.std(returns):.1f}")
            else:
                mean_valid_return = np.mean(all_returns)
                logging.info(
                    f"[validation, {env_name}] Average returns over "
                    f"{num_episodes} for {num_runs} runs: {all_returns}")
                logging.info(
                    f"[validation, {env_name}] Mean return: "
                    f"{mean_valid_return:.1f}, std: {np.std(all_returns):.1f}")
            if use_wandb:
                wandb.log({f"{env_name} validation return": mean_valid_return})
            cross_env_avg += mean_valid_return

        model.train()
        if is_multi_task:
            cross_env_avg = cross_env_avg / flags.multi_env
            logging.info(
                f"[validation, joint] Cross env avg return {cross_env_avg}")
            if use_wandb:
                wandb.log({"Cross env validation return": cross_env_avg})
        return cross_env_avg

    def format_value(x):
        return f"{x:1.5}" if isinstance(x, float) else str(x)

    try:
        if flags.save_extra_checkpoint > 0:
            saved_extra = False
        last_checkpoint_time = timeit.default_timer()
        while True:
            start_time = timeit.default_timer()
            start_step = stats.get("step", 0)
            if start_step >= flags.total_steps:
                break
            time.sleep(5)
            end_step = stats.get("step", 0)

            if (timeit.default_timer() - last_checkpoint_time
                    > flags.validate_every * 60
                    or start_step % flags.validate_step_every == 0):
                # Validate every `validate_every` minutes, or whenever the
                # step counter hits a multiple of `validate_step_every`.
                if not flags.disable_validation:
                    val_start = timeit.default_timer()
                    mean_valid_return = validate(
                        num_episodes=flags.valid_num_episodes,
                        num_runs=flags.valid_num_runs)
                    val_dur = timeit.default_timer() - val_start
                    logging.info(f"[validation] duration: "
                                 f"{val_dur:.1f} sec.")
                    if use_wandb:
                        wandb.log({"validation time": val_dur})
                    if mean_valid_return > best_valid_return:
                        # save if best
                        best_valid_return = mean_valid_return
                        checkpoint(best_valid_checkpointpath)
                # Always save latest checkpoint
                checkpoint(checkpointpath)
                last_checkpoint_time = timeit.default_timer()

            logging.info(
                "Step %i @ %.1f SPS. Inference batcher size: %i."
                " Learner queue size: %i."
                " Other stats: (%s)",
                end_step,
                (end_step - start_step) / (timeit.default_timer() - start_time),
                inference_batcher.size(),
                learner_queue.size(),
                ", ".join(
                    f"{key} = {format_value(value)}" for key, value in stats.items()
                ),
            )
            if flags.save_extra_checkpoint > 0:
                if saved_extra is False and end_step > flags.save_extra_checkpoint:
                    logging.info(
                        f"Step {end_step} Saving EXTRA checkpoint "
                        f"to {extra_checkpointpath}")
                    torch.save(
                        {
                            "model_state_dict": model.state_dict(),
                            "optimizer_state_dict": optimizer.state_dict(),
                            "scheduler_state_dict": scheduler.state_dict(),
                            "stats": stats,
                            "flags": vars(flags),
                            "best_valid_return": best_valid_return,
                        },
                        extra_checkpointpath,
                    )
                    saved_extra = True
    except KeyboardInterrupt:
        pass  # Close properly.
    else:
        logging.info("Learning finished after %i steps.", stats["step"])
        checkpoint(checkpointpath)

    # Done with learning. Stop all the ongoing work.
inference_batcher.close() learner_queue.close() actorpool_thread.join() for t in learner_threads + inference_threads: t.join() # def test(flags, num_episodes=30, num_runs=5, device='cuda'): def test(flags, num_episodes=200, num_runs=1, device='cuda'): if flags.xpid is None: checkpointpath = "./latest/model.tar" elif flags.eval_extra: checkpointpath = os.path.expandvars( os.path.expanduser( "%s/%s/%s" % (flags.savedir, flags.xpid, "model_extra.tar")) ) elif flags.test_model_name: checkpointpath = os.path.expandvars( os.path.expanduser( "%s/%s/%s" % ( flags.savedir, flags.xpid, flags.test_model_name)) ) else: checkpointpath = os.path.expandvars( os.path.expanduser( "%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar")) ) from torchbeast.core import environment from torchbeast_procgen import procgen_wrappers def create_test_env(env_name, num_levels=0, start_level=0, distribution_mode="hard", rand_seed=None, lock=threading.Lock()): with lock: # Atari isn't threadsafe at construction time. return procgen_wrappers.wrap_pytorch( procgen_wrappers.wrap_deepmind( gym.make(env_name, num_levels=num_levels, start_level=start_level, distribution_mode=distribution_mode, rand_seed=rand_seed), clip_rewards=False, ) ) # gym_env = create_test_env(flags) gym_env = create_test_env(flags.env, num_levels=flags.test_num_levels, start_level=flags.test_start_level, distribution_mode=flags.test_distribution_mode) env = environment.Environment(gym_env) if flags.use_delta: model = DeltaNet( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) elif flags.use_lt: model = LT(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) elif flags.use_delta_rnn: model = FastRNN(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) elif flags.use_rec_delta: model = RecDelta(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) elif flags.use_dd: model = DDNet(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) elif flags.use_sr: model = SRM(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) elif flags.use_psr: model = PseudoSRM(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) elif flags.use_no_carry_sr: model = NoCarrySRM( num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) elif flags.use_smfwp: model = SMFWP(num_actions=flags.num_actions, dim_head=flags.dim_head, hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, num_head=flags.num_head, dropout=flags.dropout) elif flags.use_deep_ff: model = DeeperNet(num_actions=flags.num_actions, use_lstm=flags.use_lstm, 
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff, num_layers=flags.num_layers, dropout=flags.dropout) else: model = Net(num_actions=flags.num_actions, conv_scale=flags.conv_scale, use_lstm=flags.use_lstm) print(model) print(f"# params: " f"{sum(p.numel() for p in model.parameters() if p.requires_grad)}") model = model.to(device) model.eval() checkpoint = torch.load(checkpointpath, map_location=device) model.load_state_dict(checkpoint["model_state_dict"]) observation = env.initial() # returns = [] all_returns = [] core_state = model.initial_state() core_state = nest.map(lambda t: t.to(device), core_state) logging.info("Start eval") with torch.no_grad(): while len(all_returns) < num_runs: returns = [] step_counter = 0 while len(returns) < num_episodes: if flags.mode == "test_render": env.gym_env.render() # (action, policy_logits, baseline), core_state observation = nest.map(lambda t: t.to(device), observation) agent_outputs, core_state = model(observation, core_state) step_counter += 1 if flags.test_no_carry_sr and step_counter == flags.unroll_length: # need to reset state every unroll_length steps step_counter = 0 core_state = model.initial_state() core_state = nest.map(lambda t: t.to(device), core_state) action, _, _ = agent_outputs # action = action.to('cpu') # policy_outputs, _ = agent_outputs observation = env.step(action) # observation = env.step(policy_outputs["action"]) if observation["done"].item(): returns.append(observation["episode_return"].item()) logging.info( "Episode ended after %d steps. Return: %.1f", observation["episode_step"].item(), observation["episode_return"].item(), ) logging.info( "Average returns over %i episodes: %.1f", num_episodes, sum(returns) / len(returns) ) all_returns.append(sum(returns) / len(returns)) env.close() import numpy as np logging.info(f"Average returns over {num_episodes} for {num_runs} runs: {all_returns}") logging.info(f"{flags.env}, Mean return: {np.mean(all_returns):.1f}, std: {np.std(all_returns):.1f}") def main(flags): if not flags.pipes_basename.startswith("unix:"): raise Exception("--pipes_basename has to be of the form unix:/some/path.") if flags.mode == "train": if flags.write_profiler_trace: logging.info("Running with profiler.") with torch.autograd.profiler.profile() as prof: train(flags) filename = "chrome-%s.trace" % time.strftime("%Y%m%d-%H%M%S") logging.info("Writing profiler trace to '%s.gz'", filename) prof.export_chrome_trace(filename) os.system("gzip %s" % filename) else: train(flags) else: test(flags) if __name__ == "__main__": flags = parser.parse_args() main(flags)
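

# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; nothing above calls this):
# documents the tensor shapes that the three loss helpers defined in this
# file expect. `T`, `B` and `A` below are illustrative sizes, not the values
# of any flag.
def _loss_shape_smoke_test():
    T, B, A = 4, 2, 6  # unroll length, batch size, number of actions
    logits = torch.randn(T, B, A)          # learner policy logits
    actions = torch.randint(0, A, (T, B))  # actions taken by the actors
    advantages = torch.randn(T, B)         # e.g. vtrace_returns.pg_advantages
    pg = compute_policy_gradient_loss(logits, actions, advantages)
    ent = compute_entropy_loss(logits)
    base = compute_baseline_loss(advantages)
    # All three helpers reduce to scalars; `learn` combines them (weighted by
    # the entropy/baseline cost flags) before calling backward().
    assert pg.dim() == ent.dim() == base.dim() == 0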
51,532
39.705371
136
py
modern-srwm
modern-srwm-main/supervised_learning/eval_delay_multi_sequential.py
# main file to be executed to evaluate models in sequential multi-task few shot # learning import os import sys import json import time from datetime import datetime import argparse import logging import numpy as np import random import torch from torchmeta_local.utils.data import BatchMetaDataLoader from torchmeta_local.datasets.helpers import omniglot_rgb84x84_norm from torchmeta_local.datasets.helpers import miniimagenet_norm from model_few_shot import ( ConvLSTMModel, ConvDeltaModel, ConvSRWMModel, Res12LSTMModel, Res12DeltaModel, Res12SRWMModel) from utils_few_shot import eval_per_pos_model_delayed_label_multi_sequential parser = argparse.ArgumentParser( description='Sequential multi-task adaptation.') parser.add_argument('--data_dir', type=str, default='./data', help='location of the data corpus') parser.add_argument('--name_dataset', type=str, default='miniimagenet_norm', choices=['miniimagenet_norm']) parser.add_argument('--num_worker', default=12, type=int, help='for dataloader.') parser.add_argument('--work_dir', default='save_models', type=str, help='where to save model ckpt.') parser.add_argument('--load_from', default='save_models/aaa/', type=str, help='dir from where to load model ckpt.') parser.add_argument('--model_type', type=str, default='lstm', choices=['lstm', 'deltanet', 'srwm', 'res12_lstm', 'res12_deltanet', 'res12_srwm'], help='model architecture') parser.add_argument('--seed', default=1, type=int, help='Seed.') parser.add_argument('--valid_seed', default=0, type=int, help='Seed.') parser.add_argument('--test_seed', default=0, type=int, help='Seed.') parser.add_argument('--disable_eval_shuffling', action='store_true', help='disable shuffling of valid/test sets. Only useful ' 'to reproduce old/buggy behavior.') # model hyper-parameters: parser.add_argument('--num_layer', default=1, type=int, help='number of layers. for both LSTM and Trafo.') parser.add_argument('--hidden_size', default=512, type=int, help='hidden size. for both LSTM and Trafo.') parser.add_argument('--n_head', default=8, type=int, help='Transformer number of heads.') parser.add_argument('--ff_factor', default=4, type=int, help='Transformer ff dim to hidden dim ratio.') parser.add_argument('--dropout', default=0.0, type=float, help='dropout rate.') parser.add_argument('--vision_dropout', default=0.0, type=float, help='dropout rate in the vision feat extractor.') parser.add_argument('--srwm_beta_init', default=0.0, type=float, help='beta bias for srwm.') parser.add_argument('--use_input_softmax', action='store_true', help='input softmax for srwm.') # few shot learning setting parser.add_argument('--n_way', default=5, type=int, help='number of possible classes per train/test episode.') parser.add_argument('--k_shot', default=15, type=int, help='number of examples in the `train` part of torchmeta') parser.add_argument('--test_per_class', default=1, type=int, help='param for torchmeta') parser.add_argument('--max_trim', default=None, type=int, help='maximum number of positions to be removed. 
if None, '
                         'computed based on `n_way` and `k_shot`.')

# training hyper-parameters:
parser.add_argument('--total_train_steps', default=100000, type=int,
                    help='Number of training steps to train on')
parser.add_argument('--valid_size', default=100, type=int,
                    help='Number of valid batches to validate on')
parser.add_argument('--test_size', default=100, type=int,
                    help='Number of test batches to test on')
parser.add_argument('--num_test', default=1, type=int,
                    help='Number of times we run test on random test set')
parser.add_argument('--imagenet_first', action='store_true',
                    help='imagenet then omniglot.')
parser.add_argument('--batch_size', default=16, type=int,
                    help='batch size.')
parser.add_argument('--learning_rate', default=1e-3, type=float,
                    help='learning rate.')
parser.add_argument('--grad_cummulate', default=1, type=int,
                    help='number of gradient accumulation steps.')
parser.add_argument('--report_every', default=100, type=int,
                    help='Report log every this many steps (not used).')
parser.add_argument('--validate_every', default=1000, type=int,
                    help='Validate every this many steps (not used).')
parser.add_argument('--clip', default=0.0, type=float,
                    help='global norm clipping threshold.')
# for wandb
parser.add_argument('--project_name', type=str, default=None,
                    help='project name for wandb.')
parser.add_argument('--job_name', type=str, default=None,
                    help='job name for wandb.')
parser.add_argument('--use_wandb', action='store_true',
                    help='use wandb.')

args = parser.parse_args()
model_name = args.model_type

# Set work directory
args.work_dir = os.path.join(args.work_dir, time.strftime('%Y%m%d-%H%M%S'))
if not os.path.exists(args.work_dir):
    os.makedirs(args.work_dir)

work_dir_key = '/'.join(os.path.abspath(args.work_dir).split('/')[-3:])

# logging
log_file_name = f"{args.work_dir}/log.txt"
handlers = [logging.FileHandler(log_file_name), logging.StreamHandler()]
logging.basicConfig(
    level=logging.INFO, format='%(message)s', handlers=handlers)

loginf = logging.info

loginf(f"torch version: {torch.__version__}")
loginf(f"Work dir: {args.work_dir}")

# end wandb

# save args
loginf(f"Command executed: {sys.argv[:]}")
loginf(f"Args: {json.dumps(args.__dict__, indent=2)}")

with open(f'{args.work_dir}/args.txt', 'w') as f:
    json.dump(args.__dict__, f, indent=2)

# set seed
loginf(f"Seed: {args.seed}")
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)

valid_seed = args.valid_seed
test_seed = args.test_seed
loginf(f"Valid seed: {valid_seed}, Test seed: {test_seed}")
shuffled_eval = not args.disable_eval_shuffling

if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True

# set dataset
batch_size = args.batch_size
n_way = args.n_way
k_shot_train = args.k_shot
test_per_class = args.test_per_class

loginf("Dataset/Task: omniglot + miniimagenet_norm")

task_id_to_name = {0: 'omniglot', 1: 'imagenet'}

# Omniglot
omniglot_dataset = omniglot_rgb84x84_norm(
    args.data_dir, ways=n_way, shots=k_shot_train,
    test_shots=test_per_class, meta_train=True,
    download=True, shuffle=True, seed=seed)
omniglot_dataloader = BatchMetaDataLoader(
    omniglot_dataset, batch_size=batch_size // 2,
    num_workers=args.num_worker, pin_memory=True)

omniglot_val_dataset = omniglot_rgb84x84_norm(
    args.data_dir, ways=n_way, shots=k_shot_train,
    test_shots=test_per_class, meta_val=True,
    shuffle=shuffled_eval, seed=valid_seed)
omniglot_val_dataloader = BatchMetaDataLoader(
    omniglot_val_dataset, batch_size=batch_size // 2,
    num_workers=args.num_worker, pin_memory=True)

# Mini-imagenet
imagenet_dataset = miniimagenet_norm( args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_train=True, download=True, shuffle=True, seed=seed) imagenet_dataloader = BatchMetaDataLoader( imagenet_dataset, batch_size=batch_size // 2, num_workers=args.num_worker, pin_memory=True) imagenet_val_dataset = miniimagenet_norm( args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_val=True, shuffle=shuffled_eval, seed=valid_seed) imagenet_val_dataloader = BatchMetaDataLoader( imagenet_val_dataset, batch_size=batch_size // 2, num_workers=args.num_worker, pin_memory=True) val_dataloader = { 'omniglot': omniglot_val_dataloader, 'miniimagenet': imagenet_val_dataloader, } device = 'cuda' # setting model if args.max_trim is None: assert args.k_shot > 6, f'k_shot too small {args.k_shot}' max_trim = args.k_shot - 6 # to see at least 5 shot performance else: max_trim = args.max_trim hidden_size = args.hidden_size num_classes = args.n_way num_layer = args.num_layer n_head = args.n_head dim_head = hidden_size // n_head dim_ff = hidden_size * args.ff_factor dropout_rate = args.dropout vision_dropout = args.vision_dropout is_imagenet = args.name_dataset != 'omniglot' if model_name == 'lstm': # conv lstm loginf("Model: LSTM") model = ConvLSTMModel(hidden_size, num_classes, num_layer=num_layer, vision_dropout=vision_dropout, imagenet=is_imagenet) elif model_name == 'deltanet': loginf("Model: DeltaNet") model = ConvDeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, imagenet=is_imagenet) elif model_name == 'srwm': loginf("Model: Self-Referential learning") model = ConvSRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, imagenet=is_imagenet) elif model_name == 'res12_lstm': loginf("Model: Resnet12 + LSTM") model = Res12LSTMModel(hidden_size=hidden_size, num_layers=num_layer, dropout=dropout_rate, vision_dropout=vision_dropout, num_classes=num_classes, imagenet=is_imagenet) elif model_name == 'res12_deltanet': assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + Deltanet") model = Res12DeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, imagenet=is_imagenet) elif model_name == 'res12_srwm': assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + SRWM") model = Res12SRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, imagenet=is_imagenet) loginf(f"Number of trainable params: {model.num_params()}") loginf(f"{model}") model = model.to(device) best_model_path = os.path.join(args.load_from, 'best_model.pt') lastest_model_path = os.path.join(args.load_from, 'lastest_model.pt') loginf(f"[{datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] Start Eval") loginf(f"Loading model from {best_model_path}") checkpoint = torch.load(best_model_path) model.load_state_dict(checkpoint['model_state_dict']) model.eval() omniglot_test_dataset = omniglot_rgb84x84_norm( 
args.data_dir, ways=n_way, shots=k_shot_train,
    test_shots=test_per_class, meta_test=True, download=True,
    shuffle=shuffled_eval, seed=test_seed)
omniglot_test_dataloader = BatchMetaDataLoader(
    omniglot_test_dataset, batch_size=batch_size // 2,
    num_workers=args.num_worker, pin_memory=True)

imagenet_test_dataset = miniimagenet_norm(
    args.data_dir, ways=n_way, shots=k_shot_train,
    test_shots=test_per_class, meta_test=True, download=True,
    shuffle=shuffled_eval, seed=test_seed)
imagenet_test_dataloader = BatchMetaDataLoader(
    imagenet_test_dataset, batch_size=batch_size // 2,
    num_workers=args.num_worker, pin_memory=True)

test_dataloader = {
    'omniglot': omniglot_test_dataloader,
    'miniimagenet': imagenet_test_dataloader,
}

num_test = args.num_test
test_size = args.test_size

omniglot_first = not args.imagenet_first
test_acc_dict = {}

with torch.no_grad():
    v_total, task_wise_acc, val_acc_dict, per_pos_acc = (
        eval_per_pos_model_delayed_label_multi_sequential(
            model, test_dataloader['omniglot'],
            test_dataloader['miniimagenet'], n_way=n_way,
            k_shot=k_shot_train, num_steps=args.test_size,
            omniglot_first=omniglot_first))

log_str = f"[test {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] "
loginf(log_str)

for key in val_acc_dict.keys():
    log_str = ""
    log_str += f'{task_id_to_name[key]} test total {task_wise_acc[key]:.2f} %, '
    for shot in range(k_shot_train):
        log_str += f"test_{shot}: {val_acc_dict[key][shot]:.2f} %, "
    loginf(log_str)

log_str = 'Position-wise accuracy:'
loginf(log_str)
log_str = ""
for pos in range(k_shot_train * n_way * 2):
    log_str += f"Acc_pos{pos}: {per_pos_acc[pos]:.2f} %, "
loginf(log_str)
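

# ---------------------------------------------------------------------------
# Hedged sketch (illustration only; nothing above calls this): if the stream
# positions are grouped so that consecutive blocks of `n_way` positions share
# a shot index -- an assumption made here purely for the example -- per-shot
# accuracies can be recovered from position-wise accuracies like this:
def _per_shot_from_per_pos(per_pos_acc, n_way, k_shot):
    per_shot = []
    for shot in range(k_shot):
        block = [per_pos_acc[shot * n_way + i] for i in range(n_way)]
        per_shot.append(sum(block) / n_way)
    return per_shot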
13,569
40.371951
83
py
modern-srwm
modern-srwm-main/supervised_learning/layer.py
# Contain basic layers import math import torch import torch.nn as nn import torch.nn.functional as F from fast_weight import fast_weight_delta from self_ref_v0 import self_ref_v0, stateful_self_ref_v0 @torch.jit.script def elu_p1(x): return F.elu(x, 1., False) + 1. @torch.jit.script def sum_norm(x): return x / x.sum(-1, keepdim=True) # A block of residual feed-forward layers in Transformer class TransformerFFlayers(nn.Module): def __init__(self, ff_dim, res_dim, dropout, use_layernorm=True): super(TransformerFFlayers, self).__init__() self.res_dim = res_dim self.ff_dim = ff_dim self.dropout = dropout self.use_layernorm = use_layernorm self.ff_layers = nn.Sequential( nn.Linear(res_dim, ff_dim), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(ff_dim, res_dim), nn.Dropout(dropout), ) if use_layernorm: self.layer_norm = nn.LayerNorm(res_dim) def forward(self, x): out = self.layer_norm(x) if self.use_layernorm else x out = self.ff_layers(out) + x return out # Fast weight layer with feed-forward fast net class FastFFlayer(nn.Module): def __init__(self, num_head, dim_head, in_dim, dropout): super(FastFFlayer, self).__init__() self.num_head = num_head self.dim_head = dim_head self.in_dim = in_dim self.fw_layer = fast_weight_delta self.slow_net = nn.Linear( in_dim, num_head * (3 * dim_head + 1), bias=False) self.layer_norm = nn.LayerNorm(in_dim) self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False) self.drop = nn.Dropout(dropout) def forward(self, x): # x shape: (len, B, n_head * d_head) slen, bsz, _ = x.size() out = self.layer_norm(x) qkvb = self.slow_net(out) qkvb = qkvb.view(slen, bsz, self.num_head, 3 * self.dim_head + 1) head_q, head_k, head_v, head_beta = torch.split( qkvb, (self.dim_head,) * 3 + (1,), -1) head_beta = torch.sigmoid(head_beta) # reshape to (B, heads, len, dim) head_q = head_q.permute(1, 2, 0, 3) head_k = head_k.permute(1, 2, 0, 3) head_v = head_v.permute(1, 2, 0, 3) head_beta = head_beta.permute(1, 2, 0, 3) head_q = elu_p1(head_q) head_k = elu_p1(head_k) # normalize k and q, crucial for stable training. 
head_k = sum_norm(head_k) head_q = sum_norm(head_q) fast_weights = torch.zeros( bsz, self.num_head, self.dim_head, self.dim_head, device=head_k.device) out = self.fw_layer(head_q, head_k, head_v, head_beta, fast_weights) out = out.transpose(1, 2) out = out.reshape(bsz, slen, self.num_head * self.dim_head) out = out.transpose(0, 1) # expect [qlen, B, n_head * d_head] # linear projection out = self.out_linear(out) out = self.drop(out) out = x + out return out # self referential weight matrix layer class SRWMlayer(nn.Module): def __init__(self, num_head, dim_head, in_dim, dropout, use_ln=True, use_input_softmax=False, beta_init=-1.0, stateful=False): super(SRWMlayer, self).__init__() self.num_head = num_head self.dim_head = dim_head self.in_dim = in_dim self.use_ln = use_ln self.use_input_softmax = use_input_softmax self.stateful = stateful if stateful: self.sr_layer = stateful_self_ref_v0 else: self.sr_layer = self_ref_v0 n_head = num_head d_head = dim_head self.W_y = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head), requires_grad=True) self.W_q = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head), requires_grad=True) self.W_k = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head), requires_grad=True) self.w_b = nn.Parameter(torch.Tensor(1, n_head, d_head, 4), requires_grad=True) if use_ln: self.layer_norm = nn.LayerNorm(in_dim) self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False) self.drop = nn.Dropout(dropout) self.reset_parameters(beta_init) def reset_parameters(self, beta_init): std = 1.0 / math.sqrt(self.dim_head) nn.init.normal_(self.W_y, mean=0., std=std) nn.init.normal_(self.W_q, mean=0., std=std) nn.init.normal_(self.W_k, mean=0., std=std) # tried -1 for beta but 0 seems to be better # nn.init.normal_(self.w_b, mean=-5., std=std) nn.init.normal_(self.w_b, mean=beta_init, std=std) def forward(self, h, state=None, get_state=False): # x shape: (len, B, n_head * d_head) slen, bsz, _ = h.size() # out = self.layer_norm(x) x = h.reshape(slen, bsz, self.num_head, self.dim_head) if self.use_input_softmax: x = F.softmax(x, dim=-1) # reshape to (B, heads, len, dim) x = x.permute(1, 2, 0, 3) if state is not None: # state stores the shift from the base weights. W_y_bc, W_q_bc, W_k_bc, w_b_bc = state W_y_bc = W_y_bc + self.W_y.repeat(bsz, 1, 1, 1) W_q_bc = W_q_bc + self.W_q.repeat(bsz, 1, 1, 1) W_k_bc = W_k_bc + self.W_k.repeat(bsz, 1, 1, 1) w_b_bc = w_b_bc + self.w_b.repeat(bsz, 1, 1, 1) else: W_y_bc = self.W_y.repeat(bsz, 1, 1, 1) W_q_bc = self.W_q.repeat(bsz, 1, 1, 1) W_k_bc = self.W_k.repeat(bsz, 1, 1, 1) w_b_bc = self.w_b.repeat(bsz, 1, 1, 1) if self.stateful: out, W_y_bc, W_q_bc, W_k_bc, w_b_bc = self.sr_layer(x, W_y_bc, W_q_bc, W_k_bc, w_b_bc) else: out = self.sr_layer(x, W_y_bc, W_q_bc, W_k_bc, w_b_bc) out = out.transpose(1, 2) out = out.reshape(bsz, slen, self.num_head * self.dim_head) out = out.transpose(0, 1) # expect [qlen, B, n_head * d_head] # linear projection out = self.out_linear(out) out = self.drop(out) if self.use_ln: out = self.layer_norm(h) + out else: out = h + out # out = self.layer_norm(h) + out # compute the new shift (not very efficient; get it better from kernel) # if state is not None and get_state: if get_state: W_y_bc = W_y_bc - self.W_y.repeat(bsz, 1, 1, 1) W_q_bc = W_q_bc - self.W_q.repeat(bsz, 1, 1, 1) W_k_bc = W_k_bc - self.W_k.repeat(bsz, 1, 1, 1) w_b_bc = w_b_bc - self.w_b.repeat(bsz, 1, 1, 1) state = (W_y_bc, W_q_bc, W_k_bc, w_b_bc) return out, state return out
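

# ---------------------------------------------------------------------------
# Hedged usage sketch (not called anywhere in this repo): a minimal forward
# pass through SRWMlayer documenting the expected (len, batch, num_head *
# dim_head) input layout and the carried state. The sizes are illustrative,
# and CUDA is assumed because the underlying self_ref_v0 kernels are GPU-only.
def _srwm_layer_demo(device='cuda'):
    num_head, dim_head, slen, bsz = 2, 8, 5, 3
    in_dim = num_head * dim_head
    layer = SRWMlayer(num_head, dim_head, in_dim,
                      dropout=0.0, stateful=True).to(device)
    x = torch.randn(slen, bsz, in_dim, device=device)
    out, state = layer(x, state=None, get_state=True)
    assert out.shape == (slen, bsz, in_dim)
    # `state` holds the per-example shift of (W_y, W_q, W_k, w_b) from the
    # base weights; feeding it back continues the self-modification across
    # segments instead of restarting from the slow weights.
    out, state = layer(x, state=state, get_state=True)
    return out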
6,957
31.976303
98
py
modern-srwm
modern-srwm-main/supervised_learning/main_few_shot_sync.py
# Main file to be executed to train models for few shot learning in the # synchrous-label setting import os import sys import json import time from datetime import datetime import argparse import logging import numpy as np import random import torch import torch.nn as nn from warmup_lr import WarmupWrapper from torchmeta_local.utils.data import BatchMetaDataLoader from model_few_shot import ( ConvLSTMModel, ConvDeltaModel, ConvSRWMModel, Res12LSTMModel, Res12DeltaModel, Res12SRWMModel) from utils_few_shot import eval_model_label_sync parser = argparse.ArgumentParser( description='N-way K-shot learning based on label synchronous ' 'seq-processing NNs with only predicting (N*K+1)th image.') parser.add_argument('--data_dir', type=str, default='./data', help='location of the data corpus') parser.add_argument('--name_dataset', type=str, default='omniglot', choices=['omniglot', 'miniimagenet', 'omniglot_rgb84x84', 'omniglot_rgb84x84_norm', 'omniglot_norm', 'miniimagenet_norm', 'fc100', 'fc100_norm']) parser.add_argument('--num_worker', default=12, type=int, help='for dataloader.') parser.add_argument('--work_dir', default='save_models', type=str, help='where to save model ckpt.') parser.add_argument('--model_type', type=str, default='lstm', choices=['lstm', 'deltanet', 'srwm', 'res12_lstm', 'res12_deltanet', 'res12_srwm'], help='model architecture') parser.add_argument('--seed', default=1, type=int, help='Seed.') parser.add_argument('--valid_seed', default=0, type=int, help='Seed.') parser.add_argument('--test_seed', default=0, type=int, help='Seed.') parser.add_argument('--disable_eval_shuffling', action='store_true', help='disable shuffling of valid/test sets. Only useful ' 'to reproduce old/buggy behavior.') parser.add_argument('--fixed_valid', action='store_true', help='use fixed validation set.') parser.add_argument('--fixed_test', action='store_true', help='use fixed test set.') parser.add_argument('--total_epoch', default=1, type=int, help='iterate more than one epoch.') parser.add_argument('--train_acc_stop', default=120, type=int, help='stopping based on train acc.') # model hyper-parameters: parser.add_argument('--num_layer', default=1, type=int, help='number of layers. for both LSTM and Trafo.') parser.add_argument('--hidden_size', default=512, type=int, help='hidden size. 
for both LSTM and Trafo.')
parser.add_argument('--n_head', default=8, type=int,
                    help='Transformer number of heads.')
parser.add_argument('--ff_factor', default=4, type=int,
                    help='Transformer ff dim to hidden dim ratio.')
parser.add_argument('--dropout', default=0.0, type=float,
                    help='dropout rate.')
parser.add_argument('--input_dropout', default=0.0, type=float,
                    help='input dropout rate.')
parser.add_argument('--vision_dropout', default=0.0, type=float,
                    help='dropout rate in the vision feat extractor.')
parser.add_argument('--dropout_type', type=str, default='base',
                    choices=['base', 'inblock', '2d', '2d_inblock'])
parser.add_argument('--use_big_res12', action='store_true',
                    help='use big Res-12.')
parser.add_argument('--srwm_beta_init', default=0.0, type=float,
                    help='beta bias for srwm.')
parser.add_argument('--use_input_softmax', action='store_true',
                    help='input softmax for srwm.')

# few shot learning setting
parser.add_argument('--n_way', default=5, type=int,
                    help='number of possible classes per train/test episode.')
parser.add_argument('--k_shot', default=1, type=int,
                    help='number of examples in the `train` part of torchmeta')
parser.add_argument('--test_per_class', default=1, type=int,
                    help='param for torchmeta; number of query examples')

# training hyper-parameters:
parser.add_argument('--total_train_steps', default=100000, type=int,
                    help='Number of training steps to train on')
parser.add_argument('--valid_size', default=100, type=int,
                    help='Number of valid batches to validate on')
parser.add_argument('--test_size', default=100, type=int,
                    help='Number of test batches to test on')
parser.add_argument('--batch_size', default=16, type=int,
                    help='batch size.')
parser.add_argument('--learning_rate', default=1e-3, type=float,
                    help='learning rate.')
parser.add_argument('--warmup_steps', default=5000, type=int)
parser.add_argument('--use_warmup', action='store_true',
                    help='use warmup scheduling.')
parser.add_argument('--grad_cummulate', default=1, type=int,
                    help='number of gradient accumulation steps.')
parser.add_argument('--report_every', default=100, type=int,
                    help='Report training log every this many steps.')
parser.add_argument('--validate_every', default=1000, type=int,
                    help='Run validation every this many steps.')
parser.add_argument('--clip', default=0.0, type=float,
                    help='global norm clipping threshold.')
# for wandb
parser.add_argument('--project_name', type=str, default=None,
                    help='project name for wandb.')
parser.add_argument('--job_name', type=str, default=None,
                    help='job name for wandb.')
parser.add_argument('--use_wandb', action='store_true',
                    help='use wandb.')

args = parser.parse_args()
model_name = args.model_type

# Set work directory
args.work_dir = os.path.join(args.work_dir, time.strftime('%Y%m%d-%H%M%S'))
if not os.path.exists(args.work_dir):
    os.makedirs(args.work_dir)

work_dir_key = '/'.join(os.path.abspath(args.work_dir).split('/')[-3:])

# logging
log_file_name = f"{args.work_dir}/log.txt"
handlers = [logging.FileHandler(log_file_name), logging.StreamHandler()]
logging.basicConfig(
    level=logging.INFO, format='%(message)s', handlers=handlers)

loginf = logging.info

loginf(f"torch version: {torch.__version__}")
loginf(f"Work dir: {args.work_dir}")

# wandb settings
if args.use_wandb:
    # configure wandb.
import wandb use_wandb = True if args.project_name is None: project_name = (os.uname()[1] + datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) else: project_name = args.project_name wandb.init( project=project_name, settings=wandb.Settings(start_method='fork')) # or `settings=wandb.Settings(start_method='thread')` if args.job_name is None: wandb.run.name = f"{os.uname()[1]}//" \ f"{model_name}-{args.name_dataset}//" \ f"seed{args.seed}//" \ f"noshuf{args.disable_eval_shuffling}/" \ f"{args.dropout_type}/id{args.input_dropout}/" \ f"{args.test_per_class}-test_per_cl/" \ f"{args.n_way}way-{args.k_shot}shot/" \ f"L{args.num_layer}/h{args.hidden_size}/" \ f"n{args.n_head}/ff{args.ff_factor}/" \ f"d{args.dropout}/vd{args.vision_dropout}/" \ f"bigres{args.use_big_res12}/b{args.batch_size}/" \ f"lr{args.learning_rate}/warm{args.use_warmup}/" \ f"warmstep{args.warmup_steps}/" \ f"g{args.grad_cummulate}/bias{args.srwm_beta_init}" \ f"softmax{args.use_input_softmax}" \ f"//PATH'{work_dir_key}'//" else: wandb.run.name = f"{os.uname()[1]}//{args.job_name}" config = wandb.config config.host = os.uname()[1] # host node name config.seed = args.seed config.test_per_class = args.test_per_class config.n_way = args.n_way config.k_shot = args.k_shot config.srwm_beta_init = args.srwm_beta_init config.use_input_softmax = args.use_input_softmax config.name_dataset = args.name_dataset config.work_dir = args.work_dir config.model_type = args.model_type config.hidden_size = args.hidden_size config.n_head = args.n_head config.ff_factor = args.ff_factor config.dropout = args.dropout config.vision_dropout = args.vision_dropout config.use_big_res12 = args.use_big_res12 config.batch_size = args.batch_size config.learning_rate = args.learning_rate config.use_warmup = args.use_warmup config.warmup_steps = args.warmup_steps config.grad_cummulate = args.grad_cummulate config.input_dropout = args.input_dropout config.dropout_type = args.dropout_type config.report_every = args.report_every config.disable_eval_shuffling = args.disable_eval_shuffling else: use_wandb = False # end wandb # save args loginf(f"Command executed: {sys.argv[:]}") loginf(f"Args: {json.dumps(args.__dict__, indent=2)}") with open(f'{args.work_dir}/args.txt', 'w') as f: json.dump(args.__dict__, f, indent=2) # set seed loginf(f"Seed: {args.seed}") seed = args.seed torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) valid_seed = args.valid_seed test_seed = args.test_seed loginf(f"Valid seed: {valid_seed}, Test seed: {test_seed}") shuffled_eval = not args.disable_eval_shuffling if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True # set dataset batch_size = args.batch_size n_way = args.n_way k_shot_train = args.k_shot test_per_class = args.test_per_class loginf(f"Dataset/Task: {args.name_dataset}") if args.name_dataset == 'omniglot': from torchmeta_local.datasets.helpers import omniglot as data_cls elif args.name_dataset == 'omniglot_norm': from torchmeta_local.datasets.helpers import omniglot_norm as data_cls elif args.name_dataset == 'miniimagenet': from torchmeta_local.datasets.helpers import miniimagenet as data_cls elif args.name_dataset == 'miniimagenet_norm': # mean/std normalized from torchmeta_local.datasets.helpers import ( miniimagenet_norm as data_cls) elif args.name_dataset == 'omniglot_rgb84x84': from torchmeta_local.datasets.helpers import omniglot_rgb84x84 as data_cls elif args.name_dataset == 'omniglot_rgb84x84_norm': # mean/std normalized from torchmeta_local.datasets.helpers import ( 
omniglot_rgb84x84_norm as data_cls) elif args.name_dataset == 'fc100': from torchmeta_local.datasets.helpers import fc100 as data_cls elif args.name_dataset == 'fc100_norm': from torchmeta_local.datasets.helpers import fc100_norm as data_cls else: assert False, f'Unknown dataset: {args.name_dataset}' dataset = data_cls(args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_train=True, download=True, shuffle=True, seed=seed) dataloader = BatchMetaDataLoader( dataset, batch_size=batch_size, num_workers=args.num_worker, pin_memory=True) val_dataset = data_cls(args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_val=True, shuffle=shuffled_eval, seed=valid_seed) # this does not completely fix the valid set as the order of example is still # randomized. if args.fixed_valid: # https://github.com/tristandeleu/pytorch-meta/issues/132 valid_class_size = len(val_dataset.dataset) # num classes in valid # `dataset` here is torchmeta ClassDataset import itertools from torch.utils.data import Subset cls_indices = np.array(range(valid_class_size)) all_indices = [] for subset in itertools.combinations(cls_indices, args.n_way): all_indices.append(subset) val_total_size = args.valid_size * batch_size val_indices = random.sample(all_indices, val_total_size) val_dataset = Subset(val_dataset, val_indices) val_dataloader = BatchMetaDataLoader( val_dataset, batch_size=batch_size, num_workers=args.num_worker, pin_memory=True) test_dataset = data_cls(args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_test=True, download=True, shuffle=shuffled_eval, seed=test_seed) if args.fixed_test: # https://github.com/tristandeleu/pytorch-meta/issues/132 test_class_size = len(test_dataset.dataset) # num classes in valid # `dataset` here is torchmeta ClassDataset import itertools from torch.utils.data import Subset cls_indices = np.array(range(test_class_size)) all_indices = [] for subset in itertools.combinations(cls_indices, args.n_way): all_indices.append(subset) test_total_size = args.test_size * batch_size test_indices = random.sample(all_indices, test_total_size) test_dataset = Subset(test_dataset, test_indices) test_dataloader = BatchMetaDataLoader( test_dataset, batch_size=batch_size, num_workers=args.num_worker, pin_memory=True) device = 'cuda' # setting model hidden_size = args.hidden_size num_classes = args.n_way num_layer = args.num_layer n_head = args.n_head dim_head = hidden_size // n_head dim_ff = hidden_size * args.ff_factor dropout_rate = args.dropout vision_dropout = args.vision_dropout # is_imagenet = args.name_dataset != 'omniglot' is_imagenet = args.name_dataset not in ['omniglot', 'omniglot_norm'] is_fc100 = False if args.name_dataset in ['fc100', 'fc100_norm']: is_fc100 = True is_imagenet = False if model_name == 'lstm': # conv lstm loginf("Model: LSTM") model = ConvLSTMModel(hidden_size, num_classes, num_layer=num_layer, vision_dropout=vision_dropout, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'deltanet': loginf("Model: DeltaNet") model = ConvDeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'srwm': loginf("Model: Self-Referential learning") model = ConvSRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, 
vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'res12_lstm': loginf("Model: Resnet12 + LSTM") model = Res12LSTMModel(hidden_size=hidden_size, num_layers=num_layer, dropout=dropout_rate, vision_dropout=vision_dropout, use_big=args.use_big_res12, input_dropout=args.input_dropout, dropout_type=args.dropout_type, num_classes=num_classes) elif model_name == 'res12_deltanet': # assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + Deltanet") model = Res12DeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, vision_dropout=vision_dropout, use_big=args.use_big_res12, input_dropout=args.input_dropout, dropout_type=args.dropout_type, num_classes=num_classes) elif model_name == 'res12_srwm': # assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + SRWM") model = Res12SRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_big=args.use_big_res12, use_ln=True, beta_init=args.srwm_beta_init, input_dropout=args.input_dropout, dropout_type=args.dropout_type, use_input_softmax=args.use_input_softmax) loginf(f"Number of trainable params: {model.num_params()}") loginf(f"{model}") model = model.to(device) # Set optimiser learning_rate = args.learning_rate clip = args.clip loginf(f"Learning rate: {learning_rate}") loginf(f"clip at: {clip}") loginf(f"Batch size: {args.batch_size}") loginf(f"Gradient accumulation for {args.grad_cummulate} steps.") loss_fn = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.995), eps=1e-9) loginf(f"{optimizer}") if args.use_warmup: loginf("Using Warmup. Ignoring `learning_rate`.") optimizer = WarmupWrapper(args.hidden_size, args.warmup_steps, optimizer) model.reset_grad() ############ best_model_path = os.path.join(args.work_dir, 'best_model.pt') lastest_model_path = os.path.join(args.work_dir, 'lastest_model.pt') loginf(f"[{datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] Start training") start_time = time.time() interval_start_time = time.time() train_timer = time.time() last_batch_logged = 0 best_total_val_acc = 0.0 best_valid_test_acc = 0.0 best_test_acc = 0.0 # only for monitoring. should not be reported. num_seq = 0 running_loss = 0.0 running_total = 0 running_correct = 0 run_step = 0 offset_step = 0 end_training = False cur_train_acc = 0 for ep in range(args.total_epoch): loginf(f'epoch {ep} ====================') for i, batch in enumerate(dataloader): model.train() state = None train_inputs, train_targets = batch['train'] train_inputs = train_inputs.to(device=device) # (B, len, 1, 28, 28) train_targets = train_targets.to(device=device) # (B, len) # shuffle and reshape train_shape = train_inputs.shape bsz, slen = train_shape[0], train_shape[1] num_seq += bsz train_inputs = train_inputs.transpose(0, 1) # (len, B, 28 * 28) train_targets = train_targets.transpose(0, 1) # (len, B) # same for test (i.e. 
query example) test_inputs, test_targets = batch['test'] test_inputs = test_inputs.to(device=device) # (B, test_len, 28 * 28) test_targets = test_targets.to(device=device) test_inputs = test_inputs.transpose(0, 1) # (test_len, B, 28 * 28) test_targets = test_targets.transpose(0, 1) # (test_len, B) # take only the fist element (randomized already) test_inputs = test_inputs[0].unsqueeze(0) test_targets = test_targets[0].unsqueeze(0) net_input = torch.cat([train_inputs, test_inputs], dim=0) target_labels = torch.cat([train_targets, test_targets], dim=0) target_labels_shape = target_labels.shape assert target_labels_shape[0] == slen + 1 assert target_labels_shape[1] == bsz sync_labels = target_labels[:-1] # does not matter which label to feed for the last position as long # as it's consistently the same one dummy_last_token = torch.zeros_like(sync_labels[0].unsqueeze(0)) label_feedback = torch.cat([sync_labels, dummy_last_token], dim=0) outputs, _ = model(net_input, label_feedback) outputs = outputs[-1] outputs = outputs.reshape(-1, num_classes) target_labels = target_labels[-1].reshape(-1) loss = loss_fn(outputs, target_labels) loss.backward() if i % args.grad_cummulate == 0: if clip > 0: torch.nn.utils.clip_grad_norm_(model.parameters(), clip) optimizer.step() model.reset_grad() # global loss running_loss += loss.item() running_total += target_labels.size(0) model.eval() with torch.no_grad(): _, predicted = outputs.max(-1) bool_correct_pred = (predicted == target_labels) running_correct += bool_correct_pred.sum().item() run_step += 1 if i % args.report_every == 0: cur_train_acc = 100 * running_correct / running_total if use_wandb: wandb.log({ "train_loss": running_loss / run_step, "running_acc": 100 * running_correct / running_total, }) train_elapsed = time.time() - train_timer train_timer = time.time() num_images_per_sec = ( (i + 1 - last_batch_logged) * batch_size * (slen + 1) // train_elapsed) last_batch_logged = i loginf( f'steps: {i + offset_step}, num_seq: {num_seq}, ' f'train_loss: {running_loss / run_step :.3f}, ' f'running_acc: {100 * running_correct / running_total:.2f} % ' f'(elapsed {int(train_elapsed)}s, {int(num_images_per_sec)} ' 'images/s)') running_loss = 0.0 running_total = 0 running_correct = 0 run_step = 0 if i % args.validate_every == 0: # run validation model.eval() with torch.no_grad(): v_total = eval_model_label_sync( model, val_dataloader, num_steps=args.valid_size) test_total = eval_model_label_sync( model, test_dataloader, num_steps=args.test_size) loginf( f"[val {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " f'val total {100 * v_total :.2f} %, ') loginf(f'test acc {100 * test_total :.2f} % ') # debugging if use_wandb: wandb.log({ "val_acc": 100 * v_total, "test_acc": 100 * test_total, # debugging }) if v_total > best_total_val_acc: best_total_val_acc = v_total best_step = i + offset_step # Save the best model loginf("The best model so far.") torch.save({'epoch': best_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'valid_acc': v_total}, best_model_path) loginf("Saved.") if test_total > best_valid_test_acc: best_valid_test_acc = test_total if test_total > best_test_acc: best_test_acc = test_total loginf( f'current best valid_acc {100 * best_total_val_acc :.2f} ' f'%\ncurrent best valid test_acc ' f'{100 * best_valid_test_acc :.2f} %\n' f'(current best test_acc {100 * best_test_acc :.2f} %)') # Save the latest model torch.save({'train_step': i + offset_step, 'model_state_dict': model.state_dict(), 
'optimizer_state_dict': optimizer.state_dict(), 'valid_total_acc': v_total}, lastest_model_path) elapsed = time.time() - interval_start_time loginf(f"Elapsed {elapsed / 60.:.2f} min since last valid.") interval_start_time = time.time() train_timer = time.time() if cur_train_acc > args.train_acc_stop: loginf(f'reached {args.train_acc_stop:.1f} % train accuracy') end_training = True break if i + offset_step > args.total_train_steps: end_training = True loginf(f'reached {args.total_train_steps} steps') break if end_training: break offset_step += i elapsed = time.time() - start_time loginf(f"Finished {i} steps in {elapsed / 60.:.2f} min.") loginf(f"Best validation acc: {100 * best_total_val_acc:.2f} % " f"at step {best_step}") # load the best model and evaluate on the test set del dataloader, dataset, val_dataloader, val_dataset checkpoint = torch.load(best_model_path) model.load_state_dict(checkpoint['model_state_dict']) model.eval() with torch.no_grad(): test_total = eval_model_label_sync( model, test_dataloader, num_steps=args.test_size) loginf( f"[test {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " f'test total {100 * test_total :.2f} %') # eval latest checkpoint = torch.load(lastest_model_path) model.load_state_dict(checkpoint['model_state_dict']) model.eval() with torch.no_grad(): test_total = eval_model_label_sync( model, test_dataloader, num_steps=args.test_size) loginf( f"[test latest {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " f'test total {100 * test_total :.2f} %')
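# --- Added illustration (not part of the original script) ---
# The episode construction above is the part that is easiest to get wrong:
# support labels are fed in sync with the support inputs, and the single
# query position receives a fixed dummy label. A minimal standalone sketch
# of that tensor plumbing, with made-up toy sizes, for illustration only:
def _sketch_sync_label_feedback():
    import torch
    slen, bsz, n_cls = 5, 2, 5  # toy sizes, not the real episode shape
    train_targets = torch.randint(n_cls, (slen, bsz))  # (len, B)
    test_targets = torch.randint(n_cls, (1, bsz))  # one query per episode
    target_labels = torch.cat([train_targets, test_targets], dim=0)
    sync_labels = target_labels[:-1]
    # any constant label works for the query position (here: class 0)
    dummy_last_token = torch.zeros_like(sync_labels[0].unsqueeze(0))
    label_feedback = torch.cat([sync_labels, dummy_last_token], dim=0)
    assert label_feedback.shape == (slen + 1, bsz)
    return label_feedback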
25,438
39.7024
79
py
modern-srwm
modern-srwm-main/supervised_learning/utils_few_shot.py
# Implement evaluation functions import torch # eval function for sync-label case def eval_model_label_sync(model, eval_dataloader, num_steps, device='cuda'): val_running_correct = 0 val_running_total = 0 for val_batch_id, val_batch in enumerate(eval_dataloader): val_inputs, val_targets = val_batch['train'] val_inputs = val_inputs.to(device=device) # (B, len, **) val_targets = val_targets.to(device=device) # (B, len) val_bsz, _ = val_targets.shape val_inputs = val_inputs.transpose(0, 1) val_targets = val_targets.transpose(0, 1) # 'test' part val_test_inputs, val_test_targets = val_batch['test'] val_test_inputs = val_test_inputs.to(device=device) # (B, len, **) val_test_targets = val_test_targets.to(device=device) # (B, len) val_test_inputs = val_test_inputs.transpose(0, 1) val_test_targets = val_test_targets.transpose(0, 1) # take just one element val_test_inputs = val_test_inputs[0].unsqueeze(0) val_test_targets = val_test_targets[0].unsqueeze(0) val_net_input = torch.cat([val_inputs, val_test_inputs], dim=0) val_target_labels = torch.cat([val_targets, val_test_targets], dim=0) with torch.no_grad(): sync_labels = val_target_labels[:-1] dummy_last_token = torch.zeros_like(sync_labels[0].unsqueeze(0)) label_feedback = torch.cat([sync_labels, dummy_last_token], dim=0) outputs, _ = model(val_net_input, label_feedback) outputs = outputs[-1] _, predicted = outputs.max(-1) bool_correct_pred = (predicted == val_target_labels[-1]) val_running_correct += bool_correct_pred.sum().item() val_running_total += val_bsz if val_batch_id > num_steps: break running_correct = val_running_correct / val_running_total return running_correct # eval function for the delayed label case # compute per-shot average accuracies. # hard coded for two tasks def eval_model_delayed_label_multi_sequential( model, eval_dataloader0, eval_dataloader1, num_steps, n_way, k_shot, device='cuda', state=None): running_correct = 0 running_total = 0 task_running_correct = {0: 0., 1: 0.} counts = 0 acc_per_shot = {0: [], 1: []} cnt_per_shot = {0: [], 1: []} for key in acc_per_shot.keys(): for _ in range(k_shot): acc_per_shot[key].append(0) cnt_per_shot[key].append(0) for batch_id, (batch0, batch1) in enumerate(zip(eval_dataloader0, eval_dataloader1)): val_inputs0, val_targets0 = batch0['train'] val_inputs1, val_targets1 = batch1['train'] del batch0['test'], batch1['test'] val_inputs0 = val_inputs0.to(device=device) # (B, len, **) val_targets0 = val_targets0.to(device=device) # (B, len) val_bsz0, val_len0 = val_targets0.shape val_inputs1 = val_inputs1.to(device=device) # (B, len, **) val_targets1 = val_targets1.to(device=device) # (B, len) val_bsz1, val_len1 = val_targets1.shape val_inputs0 = val_inputs0.transpose(0, 1) val_targets0 = val_targets0.transpose(0, 1) val_inputs1 = val_inputs1.transpose(0, 1) val_targets1 = val_targets1.transpose(0, 1) # no trimming needed for eval. 
# contenate along time dimension, alternate order if batch_id % 2 == 0: # ID 0 first net_input = torch.cat([val_inputs0, val_inputs1], dim=0) target_labels = torch.cat([val_targets0, val_targets1], dim=0) else: # miniimagenet first net_input = torch.cat([val_inputs1, val_inputs0], dim=0) target_labels = torch.cat([val_targets1, val_targets0], dim=0) slen, bsz = target_labels.shape delayed_labels = target_labels[:-1] dummy_last_token = torch.zeros_like(delayed_labels[0].unsqueeze(0)) label_feedback = torch.cat([dummy_last_token, delayed_labels], dim=0) outputs, _ = model(net_input, label_feedback, state) _, predicted = outputs.max(-1) bool_correct_pred = (predicted == target_labels) running_correct += bool_correct_pred.sum().item() running_total += slen * bsz if batch_id % 2 == 0: # ID 0 first bool_correct_pred0 = bool_correct_pred[:val_len0] bool_correct_pred1 = bool_correct_pred[val_len0:] else: bool_correct_pred1 = bool_correct_pred[:val_len1] bool_correct_pred0 = bool_correct_pred[val_len1:] task_running_correct[0] += bool_correct_pred0.sum().item() task_running_correct[1] += bool_correct_pred1.sum().item() assert val_bsz0 == val_bsz1 assert val_len0 == val_len1 counts += val_bsz0 * val_len0 # same size val_targets0 = val_targets0.transpose(0, 1) val_targets1 = val_targets1.transpose(0, 1) bool_correct_pred0 = bool_correct_pred0.transpose(0, 1) bool_correct_pred1 = bool_correct_pred1.transpose(0, 1) for b in range(bsz): # task 0 prev_cl_end = 0 _, cnts_uniq = torch.unique( val_targets0[b], sorted=True, return_counts=True) _, indices = torch.sort(val_targets0[b], stable=True) for cl in range(n_way): cl_cnts = cnts_uniq[cl] cl_indices = indices[prev_cl_end:prev_cl_end + cl_cnts] cl_indices_len = len(cl_indices) prev_cl_end += cl_cnts for shot in range(k_shot): if cl_indices_len > shot: acc_per_shot[0][shot] += ( bool_correct_pred0[b][cl_indices[shot]].item()) cnt_per_shot[0][shot] += 1 # task 1 prev_cl_end = 0 _, cnts_uniq = torch.unique( val_targets1[b], sorted=True, return_counts=True) _, indices = torch.sort(val_targets1[b], stable=True) for cl in range(n_way): cl_cnts = cnts_uniq[cl] cl_indices = indices[prev_cl_end:prev_cl_end + cl_cnts] cl_indices_len = len(cl_indices) prev_cl_end += cl_cnts for shot in range(k_shot): if cl_indices_len > shot: acc_per_shot[1][shot] += ( bool_correct_pred1[b][cl_indices[shot]].item()) cnt_per_shot[1][shot] += 1 if batch_id > num_steps: break running_correct = 100 * running_correct / running_total task_running_correct[0] = 100 * task_running_correct[0] / counts task_running_correct[1] = 100 * task_running_correct[1] / counts for key in acc_per_shot.keys(): for shot in range(k_shot): shot_acc = ( 100 * acc_per_shot[key][shot] / cnt_per_shot[key][shot] ) acc_per_shot[key][shot] = shot_acc return running_correct, task_running_correct, acc_per_shot # eval function for the delayed label case # compute per-shot & per-position average accuracies # hard coded for two tasks def eval_per_pos_model_delayed_label_multi_sequential( model, eval_dataloader0, eval_dataloader1, num_steps, n_way, k_shot, device='cuda', state=None, omniglot_first=True): running_correct = 0 running_total = 0 task_running_correct = {0: 0., 1: 0.} counts = 0 acc_per_shot = {0: [], 1: []} # per positions in this case cnt_per_shot = {0: [], 1: []} for key in acc_per_shot.keys(): for _ in range(k_shot): acc_per_shot[key].append(0) cnt_per_shot[key].append(0) acc_per_pos = [] # per positions in this case cnt_per_pos = 0 for _ in range(k_shot * n_way * 2): acc_per_pos.append(0) for batch_id, 
(batch0, batch1) in enumerate(zip(eval_dataloader0, eval_dataloader1)): val_inputs0, val_targets0 = batch0['train'] val_inputs1, val_targets1 = batch1['train'] del batch0['test'], batch1['test'] val_inputs0 = val_inputs0.to(device=device) # (B, len, **) val_targets0 = val_targets0.to(device=device) # (B, len) val_bsz0, val_len0 = val_targets0.shape val_inputs1 = val_inputs1.to(device=device) # (B, len, **) val_targets1 = val_targets1.to(device=device) # (B, len) val_bsz1, val_len1 = val_targets1.shape val_inputs0 = val_inputs0.transpose(0, 1) val_targets0 = val_targets0.transpose(0, 1) val_inputs1 = val_inputs1.transpose(0, 1) val_targets1 = val_targets1.transpose(0, 1) # no trimming needed for eval. # contenate along time dimension, alternate order if omniglot_first: # ID 0 first net_input = torch.cat([val_inputs0, val_inputs1], dim=0) target_labels = torch.cat([val_targets0, val_targets1], dim=0) else: # miniimagenet first net_input = torch.cat([val_inputs1, val_inputs0], dim=0) target_labels = torch.cat([val_targets1, val_targets0], dim=0) slen, bsz = target_labels.shape delayed_labels = target_labels[:-1] dummy_last_token = torch.zeros_like(delayed_labels[0].unsqueeze(0)) label_feedback = torch.cat([dummy_last_token, delayed_labels], dim=0) outputs, _ = model(net_input, label_feedback, state) _, predicted = outputs.max(-1) bool_correct_pred = (predicted == target_labels) running_correct += bool_correct_pred.sum().item() running_total += slen * bsz # per position stats: assert slen == k_shot * n_way * 2 for pos in range(k_shot * n_way * 2): acc_per_pos[pos] += bool_correct_pred[pos].sum().item() cnt_per_pos += bsz if omniglot_first: # ID 0 first bool_correct_pred0 = bool_correct_pred[:val_len0] bool_correct_pred1 = bool_correct_pred[val_len0:] else: bool_correct_pred1 = bool_correct_pred[:val_len1] bool_correct_pred0 = bool_correct_pred[val_len1:] task_running_correct[0] += bool_correct_pred0.sum().item() task_running_correct[1] += bool_correct_pred1.sum().item() assert val_bsz0 == val_bsz1 assert val_len0 == val_len1 counts += val_bsz0 * val_len0 # same size val_targets0 = val_targets0.transpose(0, 1) val_targets1 = val_targets1.transpose(0, 1) bool_correct_pred0 = bool_correct_pred0.transpose(0, 1) bool_correct_pred1 = bool_correct_pred1.transpose(0, 1) for b in range(bsz): # task 0 prev_cl_end = 0 _, cnts_uniq = torch.unique( val_targets0[b], sorted=True, return_counts=True) _, indices = torch.sort(val_targets0[b], stable=True) for cl in range(n_way): cl_cnts = cnts_uniq[cl] cl_indices = indices[prev_cl_end:prev_cl_end + cl_cnts] cl_indices_len = len(cl_indices) prev_cl_end += cl_cnts for shot in range(k_shot): if cl_indices_len > shot: acc_per_shot[0][shot] += ( bool_correct_pred0[b][cl_indices[shot]].item()) cnt_per_shot[0][shot] += 1 # task 1 prev_cl_end = 0 _, cnts_uniq = torch.unique( val_targets1[b], sorted=True, return_counts=True) _, indices = torch.sort(val_targets1[b], stable=True) for cl in range(n_way): cl_cnts = cnts_uniq[cl] cl_indices = indices[prev_cl_end:prev_cl_end + cl_cnts] cl_indices_len = len(cl_indices) prev_cl_end += cl_cnts for shot in range(k_shot): if cl_indices_len > shot: acc_per_shot[1][shot] += ( bool_correct_pred1[b][cl_indices[shot]].item()) cnt_per_shot[1][shot] += 1 if batch_id > num_steps: break running_correct = 100 * running_correct / running_total task_running_correct[0] = 100 * task_running_correct[0] / counts task_running_correct[1] = 100 * task_running_correct[1] / counts for key in acc_per_shot.keys(): for shot in range(k_shot): shot_acc = ( 
100 * acc_per_shot[key][shot] / cnt_per_shot[key][shot] ) acc_per_shot[key][shot] = shot_acc # per position: for pos in range(k_shot * n_way * 2): acc_per_pos[pos] = 100 * acc_per_pos[pos] / cnt_per_pos return running_correct, task_running_correct, acc_per_shot, acc_per_pos
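# --- Added illustration (not part of the original file) ---
# The per-shot bookkeeping above relies on a `torch.unique` plus stable
# `torch.sort` trick to find, for every class, the time step of its k-th
# appearance in a label sequence. A small worked example with a made-up
# label sequence (illustration only, not used by the functions above):
def _sketch_per_shot_indices():
    import torch
    targets = torch.tensor([1, 0, 1, 2, 0, 2])  # toy 3-way, 2-shot sequence
    _, cnts_uniq = torch.unique(targets, sorted=True, return_counts=True)
    _, indices = torch.sort(targets, stable=True)
    prev_cl_end = 0
    for cl in range(3):
        cl_cnts = cnts_uniq[cl]
        cl_indices = indices[prev_cl_end:prev_cl_end + cl_cnts]
        prev_cl_end += cl_cnts
        # cl_indices[k] is the position of class `cl`'s (k+1)-th occurrence:
        # 0 -> [1, 4], 1 -> [0, 2], 2 -> [3, 5]
        print(cl, cl_indices.tolist())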
12,900
37.510448
89
py
modern-srwm
modern-srwm-main/supervised_learning/resnet_impl.py
# File copied from https://github.com/yinboc/few-shot-meta-baseline/blob/master/models/resnet12.py # Used with minor modifications. # ============================================================================= # MIT License # # Copyright (c) 2020 Yinbo Chen # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import torch.nn as nn def conv3x3(in_planes, out_planes): return nn.Conv2d(in_planes, out_planes, 3, padding=1, bias=False) def conv1x1(in_planes, out_planes): return nn.Conv2d(in_planes, out_planes, 1, bias=False) def norm_layer(planes): return nn.BatchNorm2d(planes) class Block(nn.Module): def __init__(self, inplanes, planes, downsample, dropout=0.0): super().__init__() self.relu = nn.LeakyReLU(0.1) self.conv1 = conv3x3(inplanes, planes) self.bn1 = norm_layer(planes) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.conv3 = conv3x3(planes, planes) self.bn3 = norm_layer(planes) self.dropout = nn.Dropout(dropout) self.downsample = downsample self.maxpool = nn.MaxPool2d(2) def forward(self, x): out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) identity = self.downsample(x) out += identity out = self.relu(out) out = self.maxpool(out) out = self.dropout(out) return out class DropInBlock(nn.Module): def __init__(self, inplanes, planes, downsample, dropout=0.0): super().__init__() self.relu = nn.LeakyReLU(0.1) self.conv1 = conv3x3(inplanes, planes) self.bn1 = norm_layer(planes) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.conv3 = conv3x3(planes, planes) self.bn3 = norm_layer(planes) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.downsample = downsample self.maxpool = nn.MaxPool2d(2) def forward(self, x): out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.dropout1(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.dropout2(out) out = self.conv3(out) out = self.bn3(out) identity = self.downsample(x) out += identity out = self.relu(out) out = self.maxpool(out) out = self.dropout3(out) return out class Drop2dBlock(nn.Module): def __init__(self, inplanes, planes, downsample, dropout=0.0): super().__init__() self.relu = nn.LeakyReLU(0.1) self.conv1 = conv3x3(inplanes, planes) self.bn1 = norm_layer(planes) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.conv3 = conv3x3(planes, planes) self.bn3 = 
norm_layer(planes) self.dropout = nn.Dropout2d(dropout) self.downsample = downsample self.maxpool = nn.MaxPool2d(2) def forward(self, x): out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) identity = self.downsample(x) out += identity out = self.relu(out) out = self.maxpool(out) out = self.dropout(out) return out class Drop2dInBlock(nn.Module): def __init__(self, inplanes, planes, downsample, dropout=0.0): super().__init__() self.relu = nn.LeakyReLU(0.1) self.conv1 = conv3x3(inplanes, planes) self.bn1 = norm_layer(planes) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.conv3 = conv3x3(planes, planes) self.bn3 = norm_layer(planes) self.dropout1 = nn.Dropout2d(dropout) self.dropout2 = nn.Dropout2d(dropout) self.dropout3 = nn.Dropout2d(dropout) self.downsample = downsample self.maxpool = nn.MaxPool2d(2) def forward(self, x): out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.dropout1(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.dropout2(out) out = self.conv3(out) out = self.bn3(out) identity = self.downsample(x) out += identity out = self.relu(out) out = self.maxpool(out) out = self.dropout3(out) return out class ResNet12(nn.Module): def __init__(self, channels, dropout, dropout_type='base'): super().__init__() self.inplanes = 3 self.layer1 = self._make_layer(channels[0], dropout, dropout_type) self.layer2 = self._make_layer(channels[1], dropout, dropout_type) self.layer3 = self._make_layer(channels[2], dropout, dropout_type) self.layer4 = self._make_layer(channels[3], dropout, dropout_type) self.out_dim = channels[3] for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, planes, dropout, dropout_type='base'): downsample = nn.Sequential( conv1x1(self.inplanes, planes), norm_layer(planes), ) if dropout_type == 'base': block = Block(self.inplanes, planes, downsample, dropout) elif dropout_type == 'inblock': block = DropInBlock(self.inplanes, planes, downsample, dropout) elif dropout_type == '2d': block = Drop2dBlock(self.inplanes, planes, downsample, dropout) elif dropout_type == '2d_inblock': block = Drop2dInBlock(self.inplanes, planes, downsample, dropout) else: assert False, f"Unknown dropout_type: {dropout_type}" self.inplanes = planes return block def forward(self, x): x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = x.view(x.shape[0], x.shape[1], -1).mean(dim=2) return x def resnet12(dropout=0.0): # output feat dim 512 return ResNet12([64, 128, 256, 512], dropout) def resnet12_base(dropout=0.0, use_big=False, dropout_type='base'): if use_big: return ResNet12([64, 128, 256, 512], dropout, dropout_type) else: # output feat dim 256 return ResNet12([64, 96, 128, 256], dropout, dropout_type)
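# --- Added illustration (not part of the original file) ---
# Each of the four blocks halves the spatial resolution via MaxPool2d(2),
# and the forward pass ends with global average pooling, so an 84x84 RGB
# input (the mini-ImageNet resolution used elsewhere in this repo) yields
# a flat 256-dim feature with the small variant. A quick shape check, with
# a made-up batch size, for illustration only:
def _sketch_feature_shapes():
    import torch
    net = resnet12_base(dropout=0.1, use_big=False)
    net.eval()  # make dropout a no-op for the check
    x = torch.randn(2, 3, 84, 84)  # (batch, channels, height, width)
    feats = net(x)
    # 84 -> 42 -> 21 -> 10 -> 5 spatially, then mean over the 5x5 map
    assert feats.shape == (2, 256)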
8,026
26.968641
98
py
modern-srwm
modern-srwm-main/supervised_learning/warmup_lr.py
# taken from https://nlp.seas.harvard.edu/2018/04/03/attention.html#optimizer class WarmupWrapper: "Optim wrapper that implements rate." def __init__(self, hidden_dim, warmup, optimizer): self.optimizer = optimizer self._step = 0 self.warmup = warmup self.hidden_dim = hidden_dim self._rate = 0 def state_dict(self): """Returns the state of the warmup scheduler as a :class:`dict`. It contains an entry for every variable in self.__dict__ which is not the optimizer. """ return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} def load_state_dict(self, state_dict): """Loads the warmup scheduler's state. Arguments: state_dict (dict): warmup scheduler state. Should be an object returned from a call to :meth:`state_dict`. """ self.__dict__.update(state_dict) def step(self): "Update parameters and rate" self._step += 1 rate = self.rate() for p in self.optimizer.param_groups: p['lr'] = rate self._rate = rate self.optimizer.step() def rate(self, step = None): "Implement `lrate` above" if step is None: step = self._step return (self.hidden_dim ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5)))
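# --- Added illustration (not part of the original file) ---
# Hypothetical usage sketch: wrap an optimizer whose own lr is ignored;
# the effective rate grows linearly for `warmup` steps and then decays
# as step ** -0.5, peaking exactly at step == warmup.
def _sketch_warmup_usage():
    import torch
    model = torch.nn.Linear(8, 8)
    base_opt = torch.optim.Adam(model.parameters(), lr=0.0)
    opt = WarmupWrapper(hidden_dim=512, warmup=4000, optimizer=base_opt)
    peak = 512 ** (-0.5) * 4000 ** (-0.5)
    assert abs(opt.rate(4000) - peak) < 1e-12
    assert opt.rate(400) < peak and opt.rate(40000) < peak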
1,414
31.906977
89
py
modern-srwm
modern-srwm-main/supervised_learning/eval_sync.py
# main file to be executed to evaluate models for few shot learning in the # synchrous-label setting import os import sys import json import time from datetime import datetime import argparse import logging import numpy as np import random import torch from torchmeta_local.utils.data import BatchMetaDataLoader from model_few_shot import ( ConvLSTMModel, ConvDeltaModel, ConvSRWMModel, Res12LSTMModel, Res12DeltaModel, Res12SRWMModel, StatefulConvSRWMModel) from utils_few_shot import eval_model_label_sync parser = argparse.ArgumentParser( description='N-way K-shot learning based on label synchronous ' 'seq-processing NNs with only predicting (N*K+1)th image.') parser.add_argument('--data_dir', type=str, default='./data', help='location of the data corpus') parser.add_argument('--name_dataset', type=str, default='omniglot', choices=['omniglot', 'miniimagenet', 'omniglot_rgb84x84', 'omniglot_rgb84x84_norm', 'omniglot_norm', 'miniimagenet_norm', 'tieredimagenet', 'fc100', 'fc100_norm']) parser.add_argument('--num_worker', default=12, type=int, help='for dataloader.') parser.add_argument('--work_dir', default='save_models', type=str, help='where to save model ckpt.') parser.add_argument('--load_from', default='save_models/aaa/', type=str, help='dir from where to load model ckpt.') parser.add_argument('--load_from_checkpoint', default=None, type=str, help='path from where to load model ckpt.') parser.add_argument('--model_type', type=str, default='lstm', choices=['lstm', 'deltanet', 'srwm', 'res12_lstm', 'res12_deltanet', 'res12_srwm', 'stateful_srwm'], help='model architecture') parser.add_argument('--seed', default=1, type=int, help='Seed.') parser.add_argument('--valid_seed', default=0, type=int, help='Seed.') parser.add_argument('--test_seed', default=0, type=int, help='Seed.') parser.add_argument('--disable_eval_shuffling', action='store_true', help='disable shuffling of valid/test sets. Only useful ' 'to reproduce old/buggy behavior.') parser.add_argument('--fixed_test', action='store_true', help='use fixed test set.') parser.add_argument('--eval_on_valid', action='store_true', help='use fixed test set.') # model hyper-parameters: parser.add_argument('--num_layer', default=1, type=int, help='number of layers. for both LSTM and Trafo.') parser.add_argument('--hidden_size', default=512, type=int, help='hidden size. 
for both LSTM and Trafo.') parser.add_argument('--n_head', default=8, type=int, help='Transformer number of heads.') parser.add_argument('--ff_factor', default=4, type=int, help='Transformer ff dim to hidden dim ratio.') parser.add_argument('--dropout', default=0.0, type=float, help='dropout rate.') parser.add_argument('--vision_dropout', default=0.0, type=float, help='dropout rate in the vision feat extractor.') parser.add_argument('--srwm_beta_init', default=0.0, type=float, help='beta bias for srwm.') parser.add_argument('--use_input_softmax', action='store_true', help='input softmax for srwm.') # few shot learning setting parser.add_argument('--n_way', default=5, type=int, help='number of possible classes per train/test episode.') parser.add_argument('--k_shot', default=1, type=int, help='number of examples in the `train` part of torchmeta') parser.add_argument('--test_per_class', default=1, type=int, help='param for torchmeta') # training hyper-parameters: parser.add_argument('--total_train_steps', default=100000, type=int, help='Number of training steps to train on') parser.add_argument('--valid_size', default=100, type=int, help='Number of valid batches to validate on') parser.add_argument('--test_size', default=100, type=int, help='Number of test batches to test on') parser.add_argument('--num_test', default=1, type=int, help='Number of times we run test on random test set') parser.add_argument('--batch_size', default=16, type=int, help='batch size.') parser.add_argument('--learning_rate', default=1e-3, type=float, help='batch size.') parser.add_argument('--use_warmup', action='store_true', help='use warmup scheduling.') parser.add_argument('--warmup_steps', default=5000, type=int) parser.add_argument('--grad_cummulate', default=1, type=int, help='number of gradient accumulation steps.') parser.add_argument('--report_every', default=100, type=int, help='Report log every this steps (not used).') parser.add_argument('--validate_every', default=1000, type=int, help='Report log every this steps (not used).') parser.add_argument('--clip', default=0.0, type=float, help='global norm clipping threshold.') # for wandb parser.add_argument('--project_name', type=str, default=None, help='project name for wandb.') parser.add_argument('--job_name', type=str, default=None, help='job name for wandb.') parser.add_argument('--use_wandb', action='store_true', help='use wandb.') args = parser.parse_args() model_name = args.model_type # Set work directory args.work_dir = os.path.join(args.work_dir, time.strftime('%Y%m%d-%H%M%S')) if not os.path.exists(args.work_dir): os.makedirs(args.work_dir) work_dir_key = '/'.join(os.path.abspath(args.work_dir).split('/')[-3:]) # logging log_file_name = f"{args.work_dir}/log.txt" handlers = [logging.FileHandler(log_file_name), logging.StreamHandler()] logging.basicConfig( level=logging.INFO, format='%(message)s', handlers=handlers) loginf = logging.info loginf(f"torch version: {torch.__version__}") loginf(f"Work dir: {args.work_dir}") # end wandb # save args loginf(f"Command executed: {sys.argv[:]}") loginf(f"Args: {json.dumps(args.__dict__, indent=2)}") with open(f'{args.work_dir}/args.txt', 'w') as f: json.dump(args.__dict__, f, indent=2) # set seed loginf(f"Seed: {args.seed}") seed = args.seed torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) valid_seed = args.valid_seed test_seed = args.test_seed loginf(f"Valid seed: {valid_seed}, Test seed: {test_seed}") shuffled_eval = not args.disable_eval_shuffling if torch.cuda.is_available(): 
torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True # set dataset batch_size = args.batch_size n_way = args.n_way k_shot_train = args.k_shot test_per_class = args.test_per_class loginf(f"Dataset/Task: {args.name_dataset}") if args.name_dataset == 'omniglot': from torchmeta_local.datasets.helpers import omniglot as data_cls elif args.name_dataset == 'omniglot_norm': from torchmeta_local.datasets.helpers import omniglot_norm as data_cls elif args.name_dataset == 'miniimagenet': from torchmeta_local.datasets.helpers import miniimagenet as data_cls elif args.name_dataset == 'tieredimagenet': from torchmeta_local.datasets.helpers import tieredimagenet as data_cls elif args.name_dataset == 'miniimagenet_norm': # mean/std normalized from torchmeta_local.datasets.helpers import ( miniimagenet_norm as data_cls) elif args.name_dataset == 'omniglot_rgb84x84': from torchmeta_local.datasets.helpers import omniglot_rgb84x84 as data_cls elif args.name_dataset == 'omniglot_rgb84x84_norm': # mean/std normalized from torchmeta_local.datasets.helpers import ( omniglot_rgb84x84_norm as data_cls) elif args.name_dataset == 'fc100': from torchmeta_local.datasets.helpers import fc100 as data_cls elif args.name_dataset == 'fc100_norm': from torchmeta_local.datasets.helpers import fc100_norm as data_cls else: assert False, f'Unknown dataset: {args.name_dataset}' # load test set if args.eval_on_valid: test_dataset = data_cls(args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_val=True, download=True, shuffle=shuffled_eval, seed=test_seed) else: test_dataset = data_cls(args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_test=True, download=True, shuffle=shuffled_eval, seed=test_seed) if args.fixed_test: # https://github.com/tristandeleu/pytorch-meta/issues/132 test_class_size = len(test_dataset.dataset) # num classes in valid # `dataset` here is torchmeta ClassDataset import itertools from torch.utils.data import Subset cls_indices = np.array(range(test_class_size)) all_indices = [] for subset in itertools.combinations(cls_indices, args.n_way): all_indices.append(subset) test_total_size = args.test_size * batch_size test_indices = random.sample(all_indices, test_total_size) test_dataset = Subset(test_dataset, test_indices) test_dataloader = BatchMetaDataLoader( test_dataset, batch_size=batch_size, num_workers=args.num_worker, pin_memory=True) device = 'cuda' # setting model hidden_size = args.hidden_size num_classes = args.n_way num_layer = args.num_layer n_head = args.n_head dim_head = hidden_size // n_head dim_ff = hidden_size * args.ff_factor dropout_rate = args.dropout vision_dropout = args.vision_dropout # is_imagenet = args.name_dataset != 'omniglot' is_imagenet = args.name_dataset not in ['omniglot', 'omniglot_norm'] is_fc100 = False if args.name_dataset in ['fc100', 'fc100_norm']: is_fc100 = True is_imagenet = False if model_name == 'lstm': # conv lstm loginf("Model: LSTM") model = ConvLSTMModel(hidden_size, num_classes, num_layer=num_layer, vision_dropout=vision_dropout, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'deltanet': loginf("Model: DeltaNet") model = ConvDeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'srwm': loginf("Model: Self-Referential learning") model = ConvSRWMModel(hidden_size=hidden_size, num_layers=num_layer, 
num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'stateful_srwm': loginf("Model: Self-Referential learning") model = StatefulConvSRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'res12_lstm': loginf("Model: Resnet12 + LSTM") model = Res12LSTMModel(hidden_size=hidden_size, num_layers=num_layer, dropout=dropout_rate, vision_dropout=vision_dropout, num_classes=num_classes, imagenet=is_imagenet) elif model_name == 'res12_deltanet': # assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + Deltanet") model = Res12DeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, vision_dropout=vision_dropout, num_classes=num_classes, imagenet=is_imagenet) elif model_name == 'res12_srwm': # assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + SRWM") model = Res12SRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, imagenet=is_imagenet) loginf(f"Number of trainable params: {model.num_params()}") loginf(f"{model}") model = model.to(device) # Set optimiser learning_rate = args.learning_rate clip = args.clip ############ best_model_path = os.path.join(args.load_from, 'best_model.pt') lastest_model_path = os.path.join(args.load_from, 'lastest_model.pt') loginf(f"[{datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] Start Eval") # load_from_checkpoint overwrites load_from if args.load_from_checkpoint is not None: best_model_path = args.load_from_checkpoint checkpoint = torch.load(best_model_path) model.load_state_dict(checkpoint['model_state_dict']) model.eval() results = [] num_test = args.num_test test_size = args.test_size for i in range(num_test): with torch.no_grad(): test_total = eval_model_label_sync( model, test_dataloader, num_steps=args.test_size) test_total = 100 * test_total loginf( f"[test {i} {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " f'test total {test_total :.2f} %') results.append(test_total) mean = np.mean(results) std = np.std(results) loginf( f'[{num_test} tests using {batch_size * test_size} samples each] ' f'mean: {mean:.2f}, std: {std:.2f}, 95%-CI {1.96 * std / np.sqrt(num_test):.2f}')
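# --- Added illustration (not part of the original script) ---
# Worked example of the interval reported above: with n independent test
# runs, the 95% confidence half-width for the mean is 1.96 standard
# errors, i.e. 1.96 * std / sqrt(n). Accuracy values below are made up.
def _sketch_confidence_interval():
    import numpy as np
    runs = np.array([61.2, 60.4, 62.1, 61.7, 60.9])  # toy per-run accuracies
    half_width = 1.96 * runs.std() / np.sqrt(len(runs))
    print(f'{runs.mean():.2f} +/- {half_width:.2f}')  # 61.26 +/- 0.52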
14,554
40.467236
80
py
modern-srwm
modern-srwm-main/supervised_learning/main_few_shot_delayed_multi_sequential.py
# main file to be executed to train models in sequential multi-task few shot # learning import os import sys import json import time from datetime import datetime import argparse import logging import numpy as np import random import torch import torch.nn as nn from torchmeta_local.utils.data import BatchMetaDataLoader from torchmeta_local.datasets.helpers import omniglot_rgb84x84_norm from torchmeta_local.datasets.helpers import miniimagenet_norm from model_few_shot import ( ConvLSTMModel, ConvDeltaModel, ConvSRWMModel, Res12LSTMModel, Res12DeltaModel, Res12SRWMModel) from utils_few_shot import eval_model_delayed_label_multi_sequential parser = argparse.ArgumentParser( description='Sequential multi-task adaptation.') parser.add_argument('--data_dir', type=str, default='./data', help='location of the data corpus') parser.add_argument('--name_dataset', type=str, default='miniimagenet_norm', choices=['miniimagenet_norm']) parser.add_argument('--num_worker', default=12, type=int, help='for dataloader.') parser.add_argument('--work_dir', default='save_models', type=str, help='where to save model ckpt.') parser.add_argument('--init_model_from', default=None, type=str, help='e.g. save_models/aaa/best_model.pt.') parser.add_argument('--model_type', type=str, default='lstm', choices=['lstm', 'deltanet', 'srwm', 'res12_lstm', 'res12_deltanet', 'res12_srwm'], help='model architecture') parser.add_argument('--seed', default=1, type=int, help='Seed.') parser.add_argument('--valid_seed', default=0, type=int, help='Seed.') parser.add_argument('--test_seed', default=0, type=int, help='Seed.') parser.add_argument('--disable_eval_shuffling', action='store_true', help='disable shuffling of valid/test sets. Only useful ' 'to reproduce old/buggy behavior.') # model hyper-parameters: parser.add_argument('--num_layer', default=1, type=int, help='number of layers. for both LSTM and Trafo.') parser.add_argument('--hidden_size', default=512, type=int, help='hidden size. for both LSTM and Trafo.') parser.add_argument('--n_head', default=8, type=int, help='Transformer number of heads.') parser.add_argument('--ff_factor', default=4, type=int, help='Transformer ff dim to hidden dim ratio.') parser.add_argument('--dropout', default=0.0, type=float, help='dropout rate.') parser.add_argument('--vision_dropout', default=0.0, type=float, help='dropout rate in the vision feat extractor.') parser.add_argument('--srwm_beta_init', default=0.0, type=float, help='beta bias for srwm.') parser.add_argument('--use_input_softmax', action='store_true', help='input softmax for srwm.') # few shot learning setting parser.add_argument('--n_way', default=5, type=int, help='number of possible classes per train/test episode.') parser.add_argument('--k_shot', default=15, type=int, help='number of examples in the `train` part of torchmeta') parser.add_argument('--test_per_class', default=1, type=int, help='param for torchmeta; number of query examples') parser.add_argument('--max_trim', default=None, type=int, help='maximum number of positions to be removed. 
if None, ' 'computed based on `n_way` and `k_shot`.') # training hyper-parameters: parser.add_argument('--total_train_steps', default=100000, type=int, help='Number of training steps to train on') parser.add_argument('--valid_size', default=100, type=int, help='Number of valid batches to validate on') parser.add_argument('--test_size', default=100, type=int, help='Number of test batches to test on') parser.add_argument('--batch_size', default=16, type=int, help='batch size.') parser.add_argument('--learning_rate', default=1e-3, type=float, help='batch size.') parser.add_argument('--grad_cummulate', default=1, type=int, help='number of gradient accumulation steps.') parser.add_argument('--report_every', default=100, type=int, help='Report log every this steps (not used).') parser.add_argument('--validate_every', default=1000, type=int, help='Report log every this steps (not used).') parser.add_argument('--clip', default=0.0, type=float, help='global norm clipping threshold.') # for wandb parser.add_argument('--project_name', type=str, default=None, help='project name for wandb.') parser.add_argument('--job_name', type=str, default=None, help='job name for wandb.') parser.add_argument('--use_wandb', action='store_true', help='use wandb.') args = parser.parse_args() model_name = args.model_type # Set work directory args.work_dir = os.path.join(args.work_dir, time.strftime('%Y%m%d-%H%M%S')) if not os.path.exists(args.work_dir): os.makedirs(args.work_dir) work_dir_key = '/'.join(os.path.abspath(args.work_dir).split('/')[-3:]) # logging log_file_name = f"{args.work_dir}/log.txt" handlers = [logging.FileHandler(log_file_name), logging.StreamHandler()] logging.basicConfig( level=logging.INFO, format='%(message)s', handlers=handlers) loginf = logging.info loginf(f"torch version: {torch.__version__}") loginf(f"Work dir: {args.work_dir}") # wandb settings if args.use_wandb: # configure wandb. 
import wandb use_wandb = True if args.project_name is None: project_name = (os.uname()[1] + datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) else: project_name = args.project_name wandb.init( project=project_name, settings=wandb.Settings(start_method='fork')) # or `settings=wandb.Settings(start_method='thread')` if args.job_name is None: wandb.run.name = f"{os.uname()[1]}//" \ f"{model_name}-{args.name_dataset}//" \ f"seed{args.seed}//" \ f"{args.test_per_class}-test_per_cl/" \ f"{args.n_way}way-{args.k_shot}shot-" \ f"{args.max_trim}trim/" \ f"L{args.num_layer}/h{args.hidden_size}/" \ f"n{args.n_head}/ff{args.ff_factor}/" \ f"d{args.dropout}/vd{args.vision_dropout}/" \ f"b{args.batch_size}/" \ f"lr{args.learning_rate}/" \ f"g{args.grad_cummulate}/bias{args.srwm_beta_init}" \ f"softmax{args.use_input_softmax}" \ f"//PATH'{work_dir_key}'//" else: wandb.run.name = f"{os.uname()[1]}//{args.job_name}" config = wandb.config config.host = os.uname()[1] # host node name config.seed = args.seed config.test_per_class = args.test_per_class config.n_way = args.n_way config.k_shot = args.k_shot config.max_trim = args.max_trim config.srwm_beta_init = args.srwm_beta_init config.use_input_softmax = args.use_input_softmax config.name_dataset = args.name_dataset config.work_dir = args.work_dir config.model_type = args.model_type config.hidden_size = args.hidden_size config.n_head = args.n_head config.ff_factor = args.ff_factor config.dropout = args.dropout config.vision_dropout = args.vision_dropout config.batch_size = args.batch_size config.learning_rate = args.learning_rate config.grad_cummulate = args.grad_cummulate config.report_every = args.report_every config.disable_eval_shuffling = args.disable_eval_shuffling else: use_wandb = False # end wandb # save args loginf(f"Command executed: {sys.argv[:]}") loginf(f"Args: {json.dumps(args.__dict__, indent=2)}") with open(f'{args.work_dir}/args.txt', 'w') as f: json.dump(args.__dict__, f, indent=2) # set seed loginf(f"Seed: {args.seed}") seed = args.seed torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) valid_seed = args.valid_seed test_seed = args.test_seed loginf(f"Valid seed: {valid_seed}, Test seed: {test_seed}") shuffled_eval = not args.disable_eval_shuffling if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True # set dataset batch_size = args.batch_size n_way = args.n_way k_shot_train = args.k_shot test_per_class = args.test_per_class loginf(f"Dataset/Task: omniglot + miniimagenet_norm") task_id_to_name = {0: 'omniglot', 1: 'imagenet'} # Omniglot omniglot_dataset = omniglot_rgb84x84_norm( args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_train=True, download=True, shuffle=True, seed=seed) omniglot_dataloader = BatchMetaDataLoader( omniglot_dataset, batch_size=batch_size // 2, num_workers=args.num_worker, pin_memory=True) omniglot_val_dataset = omniglot_rgb84x84_norm( args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_val=True, shuffle=shuffled_eval, seed=valid_seed) omniglot_val_dataloader = BatchMetaDataLoader( omniglot_val_dataset, batch_size=batch_size // 2, num_workers=args.num_worker, pin_memory=True) # Mini-imagenet imagenet_dataset = miniimagenet_norm( args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_train=True, download=True, shuffle=True, seed=seed) imagenet_dataloader = BatchMetaDataLoader( imagenet_dataset, batch_size=batch_size // 2, num_workers=args.num_worker, pin_memory=True) 
imagenet_val_dataset = miniimagenet_norm( args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_val=True, shuffle=shuffled_eval, seed=valid_seed) imagenet_val_dataloader = BatchMetaDataLoader( imagenet_val_dataset, batch_size=batch_size // 2, num_workers=args.num_worker, pin_memory=True) val_dataloader = { 'omniglot': omniglot_val_dataloader, 'miniimagenet': imagenet_val_dataloader, } device = 'cuda' # setting model if args.max_trim is None: assert args.k_shot > 6, f'k_shot too small {args.k_shot}' max_trim = args.k_shot - 6 # to see at least 5 shot performance else: max_trim = args.max_trim hidden_size = args.hidden_size num_classes = args.n_way num_layer = args.num_layer n_head = args.n_head dim_head = hidden_size // n_head dim_ff = hidden_size * args.ff_factor dropout_rate = args.dropout vision_dropout = args.vision_dropout is_imagenet = args.name_dataset != 'omniglot' if model_name == 'lstm': # conv lstm loginf("Model: LSTM") model = ConvLSTMModel(hidden_size, num_classes, num_layer=num_layer, vision_dropout=vision_dropout, imagenet=is_imagenet) elif model_name == 'deltanet': loginf("Model: DeltaNet") model = ConvDeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, imagenet=is_imagenet) elif model_name == 'srwm': loginf("Model: Self-Referential learning") model = ConvSRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, imagenet=is_imagenet) elif model_name == 'res12_lstm': loginf("Model: Resnet12 + LSTM") model = Res12LSTMModel(hidden_size=hidden_size, num_layers=num_layer, dropout=dropout_rate, vision_dropout=vision_dropout, num_classes=num_classes, imagenet=is_imagenet) elif model_name == 'res12_deltanet': assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + Deltanet") model = Res12DeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, imagenet=is_imagenet) elif model_name == 'res12_srwm': assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + SRWM") model = Res12SRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, imagenet=is_imagenet) loginf(f"Number of trainable params: {model.num_params()}") loginf(f"{model}") model = model.to(device) # load if needed if args.init_model_from is not None: loginf(f"loading model from {args.init_model_from}") checkpoint = torch.load(args.init_model_from) model.load_state_dict(checkpoint['model_state_dict']) # Set optimiser learning_rate = args.learning_rate clip = args.clip loginf(f"Learning rate: {learning_rate}") loginf(f"clip at: {clip}") loginf(f"Batch size: {args.batch_size}") loginf(f"Gradient accumulation for {args.grad_cummulate} steps.") loss_fn = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.995), eps=1e-9) model.reset_grad() ############ best_model_path = os.path.join(args.work_dir, 'best_model.pt') lastest_model_path = 
os.path.join(args.work_dir, 'lastest_model.pt') loginf(f"[{datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] Start training") start_time = time.time() interval_start_time = time.time() train_timer = time.time() last_batch_logged = 0 acc_per_shot = {0: [], 1: []} cnt_per_shot = {0: [], 1: []} for key in acc_per_shot.keys(): for shot in range(k_shot_train): acc_per_shot[key].append(0) cnt_per_shot[key].append(0) best_total_val_acc = 0.0 num_seq = 0 running_loss = 0.0 running_total = 0 running_correct = 0 run_step = 0 task_running_correct = { 'omniglot': 0., 'miniimagenet': 0. } counts = { 'omniglot': 0., 'miniimagenet': 0. } for i, (omni_batch, imagenet_batch) in enumerate(zip(omniglot_dataloader, imagenet_dataloader)): model.train() state = None # Omniglot om_train_inputs, om_train_targets = omni_batch['train'] im_train_inputs, im_train_targets = imagenet_batch['train'] del omni_batch['test'], imagenet_batch['test'] om_train_inputs = om_train_inputs.to(device=device) om_train_targets = om_train_targets.to(device=device) # (B, len) om_train_inputs = om_train_inputs.transpose(0, 1) # (len, B, **) om_train_targets = om_train_targets.transpose(0, 1) # (len, B) # randomly remove n last positions trim_offset = random.randint(0, max_trim) if trim_offset > 0: om_train_inputs = om_train_inputs[:-trim_offset] om_train_targets = om_train_targets[:-trim_offset] om_len, om_bsz = om_train_targets.shape num_seq += om_bsz # Imagenet im_train_inputs = im_train_inputs.to(device=device) # (B, len, **) im_train_targets = im_train_targets.to(device=device) # (B, len) im_train_inputs = im_train_inputs.transpose(0, 1) # (len, B, **) im_train_targets = im_train_targets.transpose(0, 1) # (len, B) # randomly remove n last positions trim_offset = random.randint(0, max_trim) if trim_offset > 0: im_train_inputs = im_train_inputs[:-trim_offset] im_train_targets = im_train_targets[:-trim_offset] im_len, im_bsz = im_train_targets.shape num_seq += im_bsz # contenate along time dimension, randomize order for each batch order_ = random.randint(0, 1) # 2 is inclusive! 
if order_ == 0: # omniglot first net_input = torch.cat([om_train_inputs, im_train_inputs], dim=0) target_labels = torch.cat([om_train_targets, im_train_targets], dim=0) else: # miniimagenet first net_input = torch.cat([im_train_inputs, om_train_inputs], dim=0) target_labels = torch.cat([im_train_targets, om_train_targets], dim=0) slen, bsz = target_labels.shape assert bsz == im_bsz == om_bsz delayed_labels = target_labels[:-1] dummy_last_token = torch.zeros_like(delayed_labels[0].unsqueeze(0)) label_feedback = torch.cat([dummy_last_token, delayed_labels], dim=0) outputs, _ = model(net_input, label_feedback) outputs = outputs.reshape(slen * bsz, num_classes) target_labels = target_labels.reshape(-1) loss = loss_fn(outputs, target_labels) loss.backward() if i % args.grad_cummulate == 0: if clip > 0: torch.nn.utils.clip_grad_norm_(model.parameters(), clip) optimizer.step() model.reset_grad() # global loss running_loss += loss.item() running_total += slen * bsz model.eval() with torch.no_grad(): _, predicted = outputs.max(-1) bool_correct_pred = (predicted == target_labels) # (slen * B) running_correct += bool_correct_pred.sum().item() target_labels = target_labels.reshape(slen, bsz) bool_correct_pred = bool_correct_pred.reshape(slen, bsz) if order_ == 0: # omniglot first om_bool_correct_pred = bool_correct_pred[:om_len] im_bool_correct_pred = bool_correct_pred[om_len:] else: # imagenet first im_bool_correct_pred = bool_correct_pred[:im_len] om_bool_correct_pred = bool_correct_pred[im_len:] task_running_correct['omniglot'] += om_bool_correct_pred.sum().item() task_running_correct['miniimagenet'] += im_bool_correct_pred.sum().item() counts['omniglot'] += om_len * om_bsz counts['miniimagenet'] += im_len * im_bsz om_train_targets = om_train_targets.transpose(0, 1) # B, len om_bool_correct_pred = om_bool_correct_pred.transpose(0, 1) # B, len im_train_targets = im_train_targets.transpose(0, 1) # B, len im_bool_correct_pred = im_bool_correct_pred.transpose(0, 1) # B, len for b in range(bsz): # omniglot prev_cl_end = 0 _, cnts_uniq = torch.unique( om_train_targets[b], sorted=True, return_counts=True) _, indices = torch.sort(om_train_targets[b], stable=True) cnts_uniq_len = len(cnts_uniq) for cl in range(n_way): if cl < cnts_uniq_len: cl_cnts = cnts_uniq[cl] cl_indices = indices[prev_cl_end:prev_cl_end + cl_cnts] cl_indices_len = len(cl_indices) prev_cl_end += cl_cnts for shot in range(k_shot_train): if cl_indices_len > shot: acc_per_shot[0][shot] += ( om_bool_correct_pred[b][cl_indices[shot]].item()) cnt_per_shot[0][shot] += 1 # imagenet prev_cl_end = 0 _, cnts_uniq = torch.unique( im_train_targets[b], sorted=True, return_counts=True) _, indices = torch.sort(im_train_targets[b], stable=True) cnts_uniq_len = len(cnts_uniq) for cl in range(n_way): if cl < cnts_uniq_len: cl_cnts = cnts_uniq[cl] cl_indices = indices[prev_cl_end:prev_cl_end + cl_cnts] cl_indices_len = len(cl_indices) prev_cl_end += cl_cnts for shot in range(k_shot_train): if cl_indices_len > shot: acc_per_shot[1][shot] += ( im_bool_correct_pred[b][cl_indices[shot]].item()) cnt_per_shot[1][shot] += 1 run_step += 1 if i % args.report_every == 0: om_train_ac = task_running_correct['omniglot'] / counts['omniglot'] im_train_ac = ( task_running_correct['miniimagenet'] / counts['miniimagenet']) if use_wandb: wandb_log = {} wandb_log["train_loss"] = running_loss / run_step wandb_log["running_acc"] = 100 * running_correct / running_total wandb_log["omniglot_train_acc"] = 100 * om_train_ac wandb_log["imagenet_train_acc"] = 100 * im_train_ac for key 
in acc_per_shot.keys(): for shot in range(k_shot_train): if cnt_per_shot[key][shot] > 0: shot_acc = ( 100 * acc_per_shot[key][shot] / cnt_per_shot[key][shot] ) else: shot_acc = 0.0 wandb_log[f"{task_id_to_name[key]}_tr_{shot}"] = shot_acc wandb.log(wandb_log) train_elapsed = time.time() - train_timer train_timer = time.time() num_images_per_sec = ( (i + 1 - last_batch_logged) * batch_size * slen // train_elapsed) last_batch_logged = i log_str = f'steps: {i}, num_seq: {num_seq}, ' log_str += f'train_loss: {running_loss / run_step :.3f}, ' log_str += ( f'running_acc: {100 * running_correct / running_total:.2f} % ') log_str += ( f'(elapsed {int(train_elapsed)}s, {int(num_images_per_sec)} ' + 'images/s)') loginf(log_str) log_str = '' log_str += f'omniglot_train_acc: {100 * om_train_ac:.2f} % ' log_str += f'imagenet_train_acc: {100 * im_train_ac:.2f} % ' loginf(log_str) for key in acc_per_shot.keys(): log_str = f'[{task_id_to_name[key]}] ' for shot in range(k_shot_train): if cnt_per_shot[key][shot] > 0: shot_acc = ( 100 * acc_per_shot[key][shot] / cnt_per_shot[key][shot] ) else: shot_acc = 0 log_str += f"{key}_train_{shot}: {shot_acc:.2f} % " loginf(log_str) running_loss = 0.0 running_total = 0 running_correct = 0 run_step = 0 task_running_correct = { 'omniglot': 0., 'miniimagenet': 0., } counts = { 'omniglot': 0., 'miniimagenet': 0. } acc_per_shot = {0: [], 1: []} cnt_per_shot = {0: [], 1: []} for key in acc_per_shot.keys(): for _ in range(k_shot_train): acc_per_shot[key].append(0) cnt_per_shot[key].append(0) if i % args.validate_every == 0: # run validation model.eval() # val_acc_dict = {} with torch.no_grad(): v_total, task_wise_acc, val_acc_dict = ( eval_model_delayed_label_multi_sequential( model, val_dataloader['omniglot'], val_dataloader['miniimagenet'], n_way=n_way, k_shot=k_shot_train, num_steps=args.valid_size)) if use_wandb: wandb_log = {} wandb_log["val_acc"] = v_total wandb_log["omniglot_val_acc"] = task_wise_acc[0] wandb_log["imagenet_val_acc"] = task_wise_acc[1] for key in val_acc_dict.keys(): for shot in range(k_shot_train): wandb_log[f"{task_id_to_name[key]}_val_{shot}"] = ( val_acc_dict[key][shot]) wandb.log(wandb_log) log_str = f"[val {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " loginf(log_str) for key in val_acc_dict.keys(): log_str = "" log_str += f'{task_id_to_name[key]} val total {task_wise_acc[key]:.2f} %, ' for shot in range(k_shot_train): log_str += f"val_{shot}: {val_acc_dict[key][shot]:.2f} %, " loginf(log_str) if v_total > best_total_val_acc: best_total_val_acc = v_total best_step = i # Save the best model loginf("The best model so far.") torch.save({'epoch': best_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'valid_acc': v_total}, best_model_path) loginf("Saved.") # Save the latest model torch.save({'train_step': i, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'valid_total_acc': v_total}, lastest_model_path) elapsed = time.time() - interval_start_time loginf(f"Elapsed {elapsed / 60.:.2f} min since last valid.") interval_start_time = time.time() train_timer = time.time() if i > args.total_train_steps: break elapsed = time.time() - start_time loginf(f"Finished {i} steps in {elapsed / 60.:.2f} min.") loginf(f"Best one shot validation acc: {100 * best_total_val_acc:.2f} % " f"at step {best_step}") # load the best model and evaluate on the test set del (omniglot_dataset, omniglot_dataloader, omniglot_val_dataset, omniglot_val_dataloader, imagenet_dataset, imagenet_dataloader, 
imagenet_val_dataset, imagenet_val_dataloader) omniglot_test_dataset = omniglot_rgb84x84_norm( args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_test=True, download=True, shuffle=shuffled_eval, seed=test_seed) omniglot_test_dataloader = BatchMetaDataLoader( omniglot_test_dataset, batch_size=batch_size // 2, num_workers=args.num_worker, pin_memory=True) imagenet_test_dataset = miniimagenet_norm( args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_test=True, download=True, shuffle=shuffled_eval, seed=test_seed) imagenet_test_dataloader = BatchMetaDataLoader( imagenet_test_dataset, batch_size=batch_size // 2, num_workers=args.num_worker, pin_memory=True) test_dataloader = { 'omniglot': omniglot_test_dataloader, 'miniimagenet': imagenet_test_dataloader, } checkpoint = torch.load(best_model_path) model.load_state_dict(checkpoint['model_state_dict']) test_acc_dict = {} with torch.no_grad(): v_total, task_wise_acc, val_acc_dict = ( eval_model_delayed_label_multi_sequential( model, test_dataloader['omniglot'], test_dataloader['miniimagenet'], n_way=n_way, k_shot=k_shot_train, num_steps=args.test_size)) log_str = f"[final test {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " loginf(log_str) for key in val_acc_dict.keys(): log_str = "" log_str += f'{task_id_to_name[key]} val total {task_wise_acc[key]:.2f} %, ' for shot in range(k_shot_train): log_str += f"val_{shot}: {val_acc_dict[key][shot]:.2f} %, " loginf(log_str)
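# --- Added illustration (not part of the original script) ---
# In this delayed-label setting the model receives, at step t, the input
# x_t together with the label of step t-1; the first step gets a fixed
# dummy label instead. A minimal sketch of that shift, with toy sizes,
# for illustration only:
def _sketch_delayed_label_feedback():
    import torch
    slen, bsz, n_cls = 4, 2, 5  # made-up sizes
    target_labels = torch.randint(n_cls, (slen, bsz))
    delayed_labels = target_labels[:-1]
    dummy_last_token = torch.zeros_like(delayed_labels[0].unsqueeze(0))
    label_feedback = torch.cat([dummy_last_token, delayed_labels], dim=0)
    # feedback at position t equals the target at position t-1
    assert torch.equal(label_feedback[1:], target_labels[:-1])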
27,474
38.194009
96
py
modern-srwm
modern-srwm-main/supervised_learning/model_few_shot.py
# Implement models for few shot image classification
# NB: the current implementation uses one-hot encoding for label feedback
# (it might make sense to replace it by a regular embedding layer)
import torch
import torch.nn as nn

from layer import FastFFlayer, TransformerFFlayers, SRWMlayer
from resnet_impl import resnet12_base


class BaseModel(nn.Module):
    def __init__(self):
        super().__init__()

    # return number of parameters
    def num_params(self):
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

    def reset_grad(self):
        # More efficient than optimizer.zero_grad() according to:
        # Szymon Migacz "PYTORCH PERFORMANCE TUNING GUIDE" at GTC-21.
        # - doesn't execute memset for every parameter
        # - memory is zeroed-out by the allocator in a more efficient way
        # - backward pass updates gradients with "=" operator (write) (unlike
        # zero_grad() which would result in "+=").
        # In PyT >= 1.7, one can do `model.zero_grad(set_to_none=True)`
        for p in self.parameters():
            p.grad = None

    def print_params(self):
        for p in self.named_parameters():
            print(p)


# Conv4 by Vinyals et al:
# '''
# We used a simple yet powerful CNN as the embedding function – consisting of
# a stack of modules, each of which is a 3×3 convolution with 64 filters
# followed by batch normalization [10], a Relu non-linearity and 2×2
# max-pooling. We resized all the images to 28 × 28 so that, when we stack 4
# modules, the resulting feature map is 1 × 1 × 64, resulting in our embedding
# function f(x).
# '''
class ConvLSTMModel(BaseModel):
    def __init__(self, hidden_size, num_classes, num_layer=1, imagenet=False,
                 fc100=False, vision_dropout=0.0, bn_momentum=0.1):
        super(ConvLSTMModel, self).__init__()

        num_conv_blocks = 4
        if imagenet:  # mini-imagenet
            input_channels = 3
            out_num_channel = 32
            self.conv_feature_final_size = 32 * 5 * 5  # (B, 32, 5, 5)
        elif fc100:
            input_channels = 3
            out_num_channel = 32
            self.conv_feature_final_size = 32 * 2 * 2  # (B, 32, 2, 2)
        else:  # omniglot
            input_channels = 1
            out_num_channel = 64
            self.conv_feature_final_size = 64  # final feat shape (B, 64, 1, 1)

        self.input_channels = input_channels
        self.num_classes = num_classes

        list_conv_layers = []
        for i in range(num_conv_blocks):
            conv_block = []
            conv_block.append(
                nn.Conv2d(
                    in_channels=input_channels,
                    out_channels=out_num_channel,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                )
            )
            conv_block.append(nn.BatchNorm2d(
                out_num_channel, momentum=bn_momentum))
            conv_block.append(nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
            conv_block.append(nn.Dropout(vision_dropout))
            conv_block.append(nn.ReLU(inplace=True))
            list_conv_layers.append(nn.Sequential(*conv_block))
            input_channels = out_num_channel

        self.conv_layers = nn.ModuleList(list_conv_layers)

        self.rnn = nn.LSTM(self.conv_feature_final_size + num_classes,
                           hidden_size, num_layers=num_layer)
        self.out_layer = nn.Linear(hidden_size, num_classes)

    def forward(self, x, fb, state=None):
        # Assume input of shape (len, B, 1, 28, 28)
        slen, bsz, _, hs, ws = x.shape
        x = x.reshape(slen * bsz, self.input_channels, hs, ws)

        for conv_layer in self.conv_layers:
            x = conv_layer(x)
        x = x.reshape(slen, bsz, self.conv_feature_final_size)

        # alternatively use token embedding
        emb = torch.nn.functional.one_hot(fb, num_classes=self.num_classes)
        out = torch.cat([x, emb], dim=-1)

        out, _ = self.rnn(out, state)
        out = self.out_layer(out)

        return out, None


class ConvDeltaModel(BaseModel):
    def __init__(self, hidden_size, num_classes, num_layers, num_head,
                 dim_head, dim_ff, dropout, vision_dropout=0.0,
                 imagenet=False, fc100=False, bn_momentum=0.1):
        super(ConvDeltaModel, self).__init__()

        num_conv_blocks = 4
        if imagenet:  # mini-imagenet
            input_channels = 3
            out_num_channel = 32
            self.conv_feature_final_size = 32 * 5 * 5  # (B, 32, 5, 5)
        elif fc100:
            input_channels = 3
            out_num_channel = 32
            self.conv_feature_final_size = 32 * 2 * 2  # (B, 32, 2, 2)
        else:  # omniglot
            input_channels = 1
            out_num_channel = 64
            self.conv_feature_final_size = 64  # final feat shape (B, 64, 1, 1)

        self.input_channels = input_channels
        self.num_classes = num_classes

        list_conv_layers = []
        for _ in range(num_conv_blocks):
            conv_block = []
            conv_block.append(
                nn.Conv2d(
                    in_channels=input_channels,
                    out_channels=out_num_channel,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                )
            )
            conv_block.append(nn.BatchNorm2d(
                out_num_channel, momentum=bn_momentum))
            conv_block.append(nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
            conv_block.append(nn.Dropout(vision_dropout))
            conv_block.append(nn.ReLU(inplace=True))
            list_conv_layers.append(nn.Sequential(*conv_block))
            input_channels = out_num_channel

        self.conv_layers = nn.ModuleList(list_conv_layers)

        self.input_proj = nn.Linear(
            self.conv_feature_final_size + num_classes, hidden_size)

        layers = []
        for _ in range(num_layers):  # each "layer" consists of two sub-layers
            layers.append(
                FastFFlayer(num_head, dim_head, hidden_size, dropout))
            layers.append(
                TransformerFFlayers(dim_ff, hidden_size, dropout))
        self.layers = nn.Sequential(*layers)

        self.out_layer = nn.Linear(hidden_size, num_classes)

    def forward(self, x, fb, state=None):
        # Assume input of shape (len, B, 1, 28, 28)
        slen, bsz, _, hs, ws = x.shape
        x = x.reshape(slen * bsz, self.input_channels, hs, ws)

        for conv_layer in self.conv_layers:
            x = conv_layer(x)
        x = x.reshape(slen, bsz, self.conv_feature_final_size)

        emb = torch.nn.functional.one_hot(fb, num_classes=self.num_classes)
        out = torch.cat([x, emb], dim=-1)

        out = self.input_proj(out)
        out = self.layers(out)
        out = self.out_layer(out)

        return out, None


class ConvSRWMModel(BaseModel):
    def __init__(self, hidden_size, num_classes, num_layers, num_head,
                 dim_head, dim_ff, dropout, vision_dropout=0.0, use_ln=True,
                 use_input_softmax=False, beta_init=0., imagenet=False,
                 fc100=False, bn_momentum=0.1):
        super(ConvSRWMModel, self).__init__()

        num_conv_blocks = 4
        if imagenet:  # mini-imagenet
            input_channels = 3
            out_num_channel = 32
            self.conv_feature_final_size = 32 * 5 * 5  # (B, 32, 5, 5)
        elif fc100:
            input_channels = 3
            out_num_channel = 32
            self.conv_feature_final_size = 32 * 2 * 2  # (B, 32, 2, 2)
        else:  # omniglot
            input_channels = 1
            out_num_channel = 64
            self.conv_feature_final_size = 64  # final feat shape (B, 64, 1, 1)

        self.input_channels = input_channels
        self.num_classes = num_classes

        list_conv_layers = []
        for _ in range(num_conv_blocks):
            conv_block = []
            conv_block.append(
                nn.Conv2d(
                    in_channels=input_channels,
                    out_channels=out_num_channel,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                )
            )
            conv_block.append(nn.BatchNorm2d(
                out_num_channel, momentum=bn_momentum))
            conv_block.append(nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
            conv_block.append(nn.Dropout(vision_dropout))
            conv_block.append(nn.ReLU(inplace=True))
            list_conv_layers.append(nn.Sequential(*conv_block))
            input_channels = out_num_channel

        self.conv_layers = nn.ModuleList(list_conv_layers)

        self.input_proj = nn.Linear(
            self.conv_feature_final_size + num_classes, hidden_size)

        layers = []
        for _ in range(num_layers):  # each "layer" consists of two sub-layers
            layers.append(
                SRWMlayer(num_head, dim_head, hidden_size, dropout, use_ln,
                          use_input_softmax, beta_init))
            layers.append(
                TransformerFFlayers(dim_ff, hidden_size, dropout))
        self.layers = nn.Sequential(*layers)

        self.out_layer = nn.Linear(hidden_size, num_classes)

    def forward(self, x, fb, state=None):
        # Assume input of shape (len, B, 1, 28, 28)
        slen, bsz, _, hs, ws = x.shape
        x = x.reshape(slen * bsz, self.input_channels, hs, ws)

        for conv_layer in self.conv_layers:
            x = conv_layer(x)
        x = x.reshape(slen, bsz, self.conv_feature_final_size)

        emb = torch.nn.functional.one_hot(fb, num_classes=self.num_classes)
        out = torch.cat([x, emb], dim=-1)

        out = self.input_proj(out)
        out = self.layers(out)
        out = self.out_layer(out)

        return out, None


# For bootstrapped training
class StatefulConvSRWMModel(BaseModel):
    def __init__(self, hidden_size, num_classes, num_layers, num_head,
                 dim_head, dim_ff, dropout, vision_dropout=0.0, emb_dim=10,
                 use_ln=True, use_input_softmax=False, beta_init=0.,
                 imagenet=False, fc100=False, bn_momentum=0.1,
                 input_dropout=0.0, dropout_type='base'):
        super().__init__()

        num_conv_blocks = 4
        if imagenet:  # mini-imagenet
            input_channels = 3
            out_num_channel = 32
            self.conv_feature_final_size = 32 * 5 * 5  # (B, 32, 5, 5)
        elif fc100:
            input_channels = 3
            out_num_channel = 32
            self.conv_feature_final_size = 32 * 2 * 2  # (B, 32, 2, 2)
        else:  # omniglot
            input_channels = 1
            out_num_channel = 64
            self.conv_feature_final_size = 64  # final feat shape (B, 64, 1, 1)

        self.input_channels = input_channels
        self.num_classes = num_classes

        list_conv_layers = []
        for _ in range(num_conv_blocks):
            conv_block = []
            conv_block.append(
                nn.Conv2d(
                    in_channels=input_channels,
                    out_channels=out_num_channel,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                )
            )
            conv_block.append(nn.BatchNorm2d(
                out_num_channel, momentum=bn_momentum))
            conv_block.append(nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
            if '2d' in dropout_type:
                conv_block.append(nn.Dropout2d(vision_dropout))
            else:
                conv_block.append(nn.Dropout(vision_dropout))
            conv_block.append(nn.ReLU(inplace=True))
            list_conv_layers.append(nn.Sequential(*conv_block))
            input_channels = out_num_channel

        self.conv_layers = nn.ModuleList(list_conv_layers)

        self.input_proj = nn.Linear(
            self.conv_feature_final_size + num_classes, hidden_size)

        # self.input_layer_norm = nn.LayerNorm(self.conv_feature_final_size)

        fw_layers = []
        ff_layers = []
        self.num_layers = num_layers
        for _ in range(num_layers):  # each "layer" consists of two sub-layers
            fw_layers.append(
                SRWMlayer(num_head, dim_head, hidden_size, dropout, use_ln,
                          use_input_softmax, beta_init, stateful=True))
            ff_layers.append(
                TransformerFFlayers(dim_ff, hidden_size, dropout))
        self.fw_layers = nn.ModuleList(fw_layers)
        self.ff_layers = nn.ModuleList(ff_layers)

        self.activation = nn.ReLU(inplace=True)
        self.out_layer = nn.Linear(hidden_size, num_classes)

        if dropout_type == 'base':
            self.input_drop = nn.Dropout(input_dropout)
        else:
            self.input_drop = nn.Dropout2d(input_dropout)

    # return clone of input state
    def clone_state(self, state):
        Wy_states, Wq_states, Wk_states, wb_states = state

        Wy_state_list = []
        Wq_state_list = []
        Wk_state_list = []
        wb_state_list = []

        for i in range(self.num_layers):
            Wy_state_list.append(Wy_states[i].clone())
            Wq_state_list.append(Wq_states[i].clone())
            Wk_state_list.append(Wk_states[i].clone())
            wb_state_list.append(wb_states[i].clone())

        Wy_state_tuple = tuple(Wy_state_list)
        Wq_state_tuple = tuple(Wq_state_list)
        Wk_state_tuple = tuple(Wk_state_list)
        wb_state_tuple = tuple(wb_state_list)

        state_tuple = (
            Wy_state_tuple, Wq_state_tuple, Wk_state_tuple, wb_state_tuple)

        return state_tuple

    def forward(self, x, fb, state=None):
        # Assume input of shape (len, B, 1, 28, 28)
        slen, bsz, _, hs, ws = x.shape
        x = x.reshape(slen * bsz, self.input_channels, hs, ws)
        x = self.input_drop(x)

        for conv_layer in self.conv_layers:
            x = conv_layer(x)
        x = x.reshape(slen, bsz, self.conv_feature_final_size)

        emb = torch.nn.functional.one_hot(fb, num_classes=self.num_classes)
        out = torch.cat([x, emb], dim=-1)

        out = self.input_proj(out)

        # forward main layers
        Wy_state_list = []
        Wq_state_list = []
        Wk_state_list = []
        wb_state_list = []

        if state is not None:
            Wy_states, Wq_states, Wk_states, wb_states = state

        for i in range(self.num_layers):
            if state is not None:
                out, out_state = self.fw_layers[i](
                    out,
                    state=(Wy_states[i].squeeze(0),
                           Wq_states[i].squeeze(0),
                           Wk_states[i].squeeze(0),
                           wb_states[i].squeeze(0)),
                    get_state=True)
            else:
                out, out_state = self.fw_layers[i](
                    out, get_state=True)
            # no cloning here. We do it outside where needed
            Wy_state_list.append(out_state[0].unsqueeze(0))
            Wq_state_list.append(out_state[1].unsqueeze(0))
            Wk_state_list.append(out_state[2].unsqueeze(0))
            wb_state_list.append(out_state[3].unsqueeze(0))

            out = self.ff_layers[i](out)

        out = self.out_layer(out)

        Wy_state_tuple = tuple(Wy_state_list)
        Wq_state_tuple = tuple(Wq_state_list)
        Wk_state_tuple = tuple(Wk_state_list)
        wb_state_tuple = tuple(wb_state_list)

        state_tuple = (
            Wy_state_tuple, Wq_state_tuple, Wk_state_tuple, wb_state_tuple)

        return out, state_tuple


class Res12LSTMModel(BaseModel):
    def __init__(self, hidden_size, num_classes, num_layers, dropout,
                 vision_dropout=0.0, use_big=False, input_dropout=0.0,
                 dropout_type='base'):
        super(Res12LSTMModel, self).__init__()

        self.stem_resnet12 = resnet12_base(
            vision_dropout, use_big, dropout_type)

        self.input_channels = 3
        self.num_classes = num_classes
        if use_big:
            self.conv_feature_final_size = 512
        else:
            self.conv_feature_final_size = 256

        self.input_drop = nn.Dropout(input_dropout)

        self.rnn = nn.LSTM(self.conv_feature_final_size + num_classes,
                           hidden_size, num_layers=num_layers,
                           dropout=dropout)
        self.out_layer = nn.Linear(hidden_size, num_classes)

    def forward(self, x, fb, state=None):
        # Assume input of shape (len, B, 1, 28, 28)
        slen, bsz, _, hs, ws = x.shape
        x = x.reshape(slen * bsz, self.input_channels, hs, ws)
        x = self.input_drop(x)

        x = self.stem_resnet12(x)
        x = x.reshape(slen, bsz, self.conv_feature_final_size)

        emb = torch.nn.functional.one_hot(fb, num_classes=self.num_classes)
        out = torch.cat([x, emb], dim=-1)

        out, _ = self.rnn(out, state)
        out = self.out_layer(out)

        return out, None


class Res12DeltaModel(BaseModel):
    def __init__(self, hidden_size, num_classes, num_layers, num_head,
                 dim_head, dim_ff, dropout, vision_dropout=0.0, use_big=False,
                 input_dropout=0.0, dropout_type='base'):
        super(Res12DeltaModel, self).__init__()

        self.stem_resnet12 = resnet12_base(
            vision_dropout, use_big, dropout_type)

        self.input_channels = 3
        self.num_classes = num_classes
        if use_big:
            self.conv_feature_final_size = 512
        else:
            self.conv_feature_final_size = 256

        self.input_drop = nn.Dropout(input_dropout)

        self.input_proj = nn.Linear(
            self.conv_feature_final_size + num_classes, hidden_size)

        layers = []
        for _ in range(num_layers):  # each "layer" consists of two sub-layers
            layers.append(
                FastFFlayer(num_head, dim_head, hidden_size, dropout))
            layers.append(
                TransformerFFlayers(dim_ff, hidden_size, dropout))
        self.layers = nn.Sequential(*layers)

        self.out_layer = nn.Linear(hidden_size, num_classes)

    def forward(self, x, fb, state=None):
        # Assume input of shape (len, B, 1, 28, 28)
        slen, bsz, _, hs, ws = x.shape
        x = x.reshape(slen * bsz, self.input_channels, hs, ws)
        x = self.input_drop(x)

        x = self.stem_resnet12(x)
        x = x.reshape(slen, bsz, self.conv_feature_final_size)

        emb = torch.nn.functional.one_hot(fb, num_classes=self.num_classes)
        out = torch.cat([x, emb], dim=-1)

        out = self.input_proj(out)
        out = self.layers(out)
        out = self.out_layer(out)

        return out, None


class Res12SRWMModel(BaseModel):
    def __init__(self, hidden_size, num_classes, num_layers, num_head,
                 dim_head, dim_ff, dropout, vision_dropout=0.0, use_big=False,
                 use_ln=True, use_input_softmax=False, input_dropout=0.0,
                 dropout_type='base', beta_init=0.):
        super(Res12SRWMModel, self).__init__()

        self.stem_resnet12 = resnet12_base(
            vision_dropout, use_big, dropout_type)

        self.input_channels = 3
        self.num_classes = num_classes
        if use_big:
            self.conv_feature_final_size = 512
        else:
            self.conv_feature_final_size = 256

        self.input_drop = nn.Dropout(input_dropout)

        self.input_proj = nn.Linear(
            self.conv_feature_final_size + num_classes, hidden_size)

        layers = []
        for _ in range(num_layers):  # each "layer" consists of two sub-layers
            layers.append(
                SRWMlayer(num_head, dim_head, hidden_size, dropout, use_ln,
                          use_input_softmax, beta_init))
            layers.append(
                TransformerFFlayers(dim_ff, hidden_size, dropout))
        self.layers = nn.Sequential(*layers)

        self.out_layer = nn.Linear(hidden_size, num_classes)

    def forward(self, x, fb, state=None):
        # Assume input of shape (len, B, 1, 28, 28)
        slen, bsz, _, hs, ws = x.shape
        x = x.reshape(slen * bsz, self.input_channels, hs, ws)
        x = self.input_drop(x)

        x = self.stem_resnet12(x)
        x = x.reshape(slen, bsz, self.conv_feature_final_size)

        emb = torch.nn.functional.one_hot(fb, num_classes=self.num_classes)
        out = torch.cat([x, emb], dim=-1)

        out = self.input_proj(out)
        out = self.layers(out)
        out = self.out_layer(out)

        return out, None
20,639
34.895652
79
py
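All of the models above share one interface: an image sequence of shape (len, B, C, H, W) plus integer label feedback of shape (len, B) go in, logits of shape (len, B, num_classes) come out. A minimal smoke test of that contract, shown here as a sketch (it assumes `model_few_shot.py` and its local `layer`/`resnet_impl` dependencies are importable; the sizes are illustrative, not from the original):

import torch

from model_few_shot import ConvLSTMModel

# Omniglot-style dummy episode: 5-way, sequence of 6 greyscale 28x28 images.
slen, bsz, n_way = 6, 2, 5
model = ConvLSTMModel(hidden_size=64, num_classes=n_way)

images = torch.randn(slen, bsz, 1, 28, 28)     # (len, B, 1, 28, 28)
labels = torch.randint(0, n_way, (slen, bsz))  # label feedback, (len, B)

logits, _ = model(images, labels)
assert logits.shape == (slen, bsz, n_way)      # (len, B, num_classes)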
modern-srwm
modern-srwm-main/supervised_learning/main_few_shot_sync_bootstrapping.py
# Main file to be executed to train models for few shot learning in the # synchrous-label setting import os import sys import json import time import hashlib from datetime import datetime import argparse import logging import numpy as np import random import torch import torch.nn as nn import torch.nn.functional as F from warmup_lr import WarmupWrapper from torchmeta_local.utils.data import BatchMetaDataLoader from model_few_shot import ( ConvLSTMModel, ConvDeltaModel, ConvSRWMModel, Res12LSTMModel, Res12DeltaModel, Res12SRWMModel, StatefulConvSRWMModel) from utils_few_shot import eval_model_label_sync parser = argparse.ArgumentParser( description='N-way K-shot learning based on label synchronous ' 'seq-processing NNs with only predicting (N*K+1)th image.') parser.add_argument('--data_dir', type=str, default='./data', help='location of the data corpus') parser.add_argument('--name_dataset', type=str, default='omniglot', choices=['omniglot', 'miniimagenet', 'omniglot_rgb84x84', 'omniglot_rgb84x84_norm', 'omniglot_norm', 'miniimagenet_norm', 'tieredimagenet', 'fc100', 'fc100_norm']) parser.add_argument('--num_worker', default=12, type=int, help='for dataloader.') parser.add_argument('--work_dir', default='save_models', type=str, help='where to save model ckpt.') parser.add_argument('--model_type', type=str, default='lstm', choices=['lstm', 'deltanet', 'srwm', 'res12_lstm', 'res12_deltanet', 'res12_srwm', 'stateful_srwm'], help='model architecture') parser.add_argument('--seed', default=1, type=int, help='Seed.') parser.add_argument('--valid_seed', default=0, type=int, help='Seed.') parser.add_argument('--test_seed', default=0, type=int, help='Seed.') parser.add_argument('--disable_eval_shuffling', action='store_true', help='disable shuffling of valid/test sets. Only useful ' 'to reproduce old/buggy behavior.') parser.add_argument('--fixed_valid', action='store_true', help='use fixed validation set.') parser.add_argument('--fixed_test', action='store_true', help='use fixed test set.') parser.add_argument('--total_epoch', default=1, type=int, help='iterate more than one epoch.') parser.add_argument('--train_acc_stop', default=120, type=int, help='stopping based on train acc.') # model hyper-parameters: parser.add_argument('--num_layer', default=1, type=int, help='number of layers. for both LSTM and Trafo.') parser.add_argument('--hidden_size', default=512, type=int, help='hidden size. 
for both LSTM and Trafo.') parser.add_argument('--n_head', default=8, type=int, help='Transformer number of heads.') parser.add_argument('--ff_factor', default=4, type=int, help='Transformer ff dim to hidden dim ratio.') parser.add_argument('--dropout', default=0.0, type=float, help='dropout rate.') parser.add_argument('--input_dropout', default=0.0, type=float, help='input dropout rate.') parser.add_argument('--vision_dropout', default=0.0, type=float, help='dropout rate in the vision feat extractor.') parser.add_argument('--dropout_type', type=str, default='base', choices=['base', 'inblock', '2d', '2d_inblock']) parser.add_argument('--use_big_res12', action='store_true', help='use big Res-12.') parser.add_argument('--srwm_beta_init', default=0.0, type=float, help='beta bias for srwm.') parser.add_argument('--use_input_softmax', action='store_true', help='input softmax for srwm.') # few shot learning setting parser.add_argument('--n_way', default=5, type=int, help='number of possible classes per train/test episode.') parser.add_argument('--k_shot', default=1, type=int, help='number of examples in the `train` part of torchmeta') parser.add_argument('--num_future_shot', default=5, type=int, help='number of extra examples for bootstrapping') parser.add_argument('--k_target_shot', default=1, type=int, help='number of examples in the `train` part of torchmeta') parser.add_argument('--test_per_class', default=1, type=int, help='param for torchmeta') parser.add_argument('--main_loss_scaler', default=1, type=float) parser.add_argument('--bstp_loss_scaler', default=1, type=float) parser.add_argument('--future_loss_scaler', default=1, type=float) # training hyper-parameters: parser.add_argument('--total_train_steps', default=100000, type=int, help='Number of training steps to train on') parser.add_argument('--valid_size', default=100, type=int, help='Number of valid batches to validate on') parser.add_argument('--test_size', default=100, type=int, help='Number of test batches to test on') parser.add_argument('--batch_size', default=16, type=int, help='batch size.') parser.add_argument('--learning_rate', default=1e-3, type=float, help='batch size.') parser.add_argument('--warmup_steps', default=5000, type=int) parser.add_argument('--use_warmup', action='store_true', help='use warmup scheduling.') parser.add_argument('--grad_cummulate', default=1, type=int, help='number of gradient accumulation steps.') parser.add_argument('--report_every', default=100, type=int, help='Report log every this steps (not used).') parser.add_argument('--validate_every', default=1000, type=int, help='Report log every this steps (not used).') parser.add_argument('--clip', default=0.0, type=float, help='global norm clipping threshold.') parser.add_argument('--use_kl_loss', action='store_true', help='kl loss for bootstrapping.') parser.add_argument('--job_id', default=0, type=int) # for wandb parser.add_argument('--project_name', type=str, default=None, help='project name for wandb.') parser.add_argument('--job_name', type=str, default=None, help='job name for wandb.') parser.add_argument('--use_wandb', action='store_true', help='use wandb.') args = parser.parse_args() model_name = args.model_type exp_str = '' for arg_key in vars(args): exp_str += str(getattr(args, arg_key)) + '-' # taken from https://stackoverflow.com/questions/16008670/how-to-hash-a-string-into-8-digits exp_hash = str(int(hashlib.sha1(exp_str.encode("utf-8")).hexdigest(), 16) % (10 ** 8)) job_id = args.job_id # Set work directory args.work_dir = 
os.path.join( args.work_dir, f"{job_id}-{exp_hash}-{time.strftime('%Y%m%d-%H%M%S')}") if not os.path.exists(args.work_dir): os.makedirs(args.work_dir) work_dir_key = '/'.join(os.path.abspath(args.work_dir).split('/')[-3:]) # logging log_file_name = f"{args.work_dir}/log.txt" handlers = [logging.FileHandler(log_file_name), logging.StreamHandler()] logging.basicConfig( level=logging.INFO, format='%(message)s', handlers=handlers) loginf = logging.info loginf(f"torch version: {torch.__version__}") loginf(f"Work dir: {args.work_dir}") # wandb settings if args.use_wandb: # configure wandb. import wandb use_wandb = True if args.project_name is None: project_name = (os.uname()[1] + datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) else: project_name = args.project_name wandb.init( project=project_name, settings=wandb.Settings(start_method='fork')) # or `settings=wandb.Settings(start_method='thread')` if args.job_name is None: wandb.run.name = f"{os.uname()[1]}//" \ f"{model_name}-{args.name_dataset}//" \ f"seed{args.seed}//" \ f"noshuf{args.disable_eval_shuffling}/" \ f"{args.dropout_type}/id{args.input_dropout}/" \ f"{args.test_per_class}-test_per_cl/" \ f"{args.n_way}way-{args.k_shot}shot/" \ f"L{args.num_layer}/h{args.hidden_size}/" \ f"n{args.n_head}/ff{args.ff_factor}/" \ f"d{args.dropout}/vd{args.vision_dropout}/" \ f"bigres{args.use_big_res12}/b{args.batch_size}/" \ f"lr{args.learning_rate}/warm{args.use_warmup}/" \ f"warmstep{args.warmup_steps}/" \ f"g{args.grad_cummulate}/bias{args.srwm_beta_init}" \ f"softmax{args.use_input_softmax}" \ f"//PATH'{work_dir_key}'//" else: wandb.run.name = f"{os.uname()[1]}//{args.job_name}" config = wandb.config config.host = os.uname()[1] # host node name config.seed = args.seed config.test_per_class = args.test_per_class config.n_way = args.n_way config.k_shot = args.k_shot config.num_future_shot = args.num_future_shot config.srwm_beta_init = args.srwm_beta_init config.use_input_softmax = args.use_input_softmax config.name_dataset = args.name_dataset config.work_dir = args.work_dir config.model_type = args.model_type config.hidden_size = args.hidden_size config.n_head = args.n_head config.ff_factor = args.ff_factor config.dropout = args.dropout config.vision_dropout = args.vision_dropout config.use_big_res12 = args.use_big_res12 config.batch_size = args.batch_size config.learning_rate = args.learning_rate config.use_warmup = args.use_warmup config.warmup_steps = args.warmup_steps config.grad_cummulate = args.grad_cummulate config.input_dropout = args.input_dropout config.dropout_type = args.dropout_type config.report_every = args.report_every config.disable_eval_shuffling = args.disable_eval_shuffling else: use_wandb = False # end wandb # save args loginf(f"Command executed: {sys.argv[:]}") loginf(f"Args: {json.dumps(args.__dict__, indent=2)}") with open(f'{args.work_dir}/args.txt', 'w') as f: json.dump(args.__dict__, f, indent=2) # set seed loginf(f"Seed: {args.seed}") seed = args.seed torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) valid_seed = args.valid_seed test_seed = args.test_seed loginf(f"Valid seed: {valid_seed}, Test seed: {test_seed}") shuffled_eval = not args.disable_eval_shuffling if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True # torch.backends.cudnn.benchmark = False # set dataset batch_size = args.batch_size n_way = args.n_way k_shot_train = args.k_shot num_future_shot = args.num_future_shot test_per_class = args.test_per_class loginf(f"Dataset/Task: {args.name_dataset}") 
if args.name_dataset == 'omniglot': from torchmeta_local.datasets.helpers import omniglot as data_cls elif args.name_dataset == 'omniglot_norm': from torchmeta_local.datasets.helpers import omniglot_norm as data_cls elif args.name_dataset == 'miniimagenet': from torchmeta_local.datasets.helpers import miniimagenet as data_cls elif args.name_dataset == 'tieredimagenet': from torchmeta_local.datasets.helpers import tieredimagenet as data_cls elif args.name_dataset == 'miniimagenet_norm': # mean/std normalized from torchmeta_local.datasets.helpers import ( miniimagenet_norm as data_cls) elif args.name_dataset == 'omniglot_rgb84x84': from torchmeta_local.datasets.helpers import omniglot_rgb84x84 as data_cls elif args.name_dataset == 'omniglot_rgb84x84_norm': # mean/std normalized from torchmeta_local.datasets.helpers import ( omniglot_rgb84x84_norm as data_cls) elif args.name_dataset == 'fc100': from torchmeta_local.datasets.helpers import fc100 as data_cls elif args.name_dataset == 'fc100_norm': from torchmeta_local.datasets.helpers import fc100_norm as data_cls else: assert False, f'Unknown dataset: {args.name_dataset}' num_samples_per_class={ 'train': k_shot_train, 'future': num_future_shot, 'final_query': test_per_class} # `num_samples_per_class` override `test_shots` below dataset = data_cls(args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_train=True, download=True, shuffle=True, seed=seed, num_samples_per_class=num_samples_per_class) dataloader = BatchMetaDataLoader( dataset, batch_size=batch_size, num_workers=args.num_worker, pin_memory=True) val_dataset = data_cls(args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_val=True, shuffle=shuffled_eval, seed=valid_seed) # this does not completely fix the valid set as the order of example is still # randomized. 
if args.fixed_valid: # https://github.com/tristandeleu/pytorch-meta/issues/132 valid_class_size = len(val_dataset.dataset) # num classes in valid # `dataset` here is torchmeta ClassDataset import itertools from torch.utils.data import Subset cls_indices = np.array(range(valid_class_size)) all_indices = [] for subset in itertools.combinations(cls_indices, args.n_way): all_indices.append(subset) val_total_size = args.valid_size * batch_size val_indices = random.sample(all_indices, val_total_size) val_dataset = Subset(val_dataset, val_indices) val_dataloader = BatchMetaDataLoader( val_dataset, batch_size=batch_size, num_workers=args.num_worker, pin_memory=True) test_dataset = data_cls(args.data_dir, ways=n_way, shots=k_shot_train, test_shots=test_per_class, meta_test=True, download=True, shuffle=shuffled_eval, seed=test_seed) if args.fixed_test: # https://github.com/tristandeleu/pytorch-meta/issues/132 test_class_size = len(test_dataset.dataset) # num classes in valid # `dataset` here is torchmeta ClassDataset import itertools from torch.utils.data import Subset cls_indices = np.array(range(test_class_size)) all_indices = [] for subset in itertools.combinations(cls_indices, args.n_way): all_indices.append(subset) test_total_size = args.test_size * batch_size test_indices = random.sample(all_indices, test_total_size) test_dataset = Subset(test_dataset, test_indices) test_dataloader = BatchMetaDataLoader( test_dataset, batch_size=batch_size, num_workers=args.num_worker, pin_memory=True) device = 'cuda' # setting model hidden_size = args.hidden_size num_classes = args.n_way num_layer = args.num_layer n_head = args.n_head dim_head = hidden_size // n_head dim_ff = hidden_size * args.ff_factor dropout_rate = args.dropout vision_dropout = args.vision_dropout # is_imagenet = args.name_dataset != 'omniglot' is_imagenet = args.name_dataset not in ['omniglot', 'omniglot_norm'] is_fc100 = False if args.name_dataset in ['fc100', 'fc100_norm']: is_fc100 = True is_imagenet = False if model_name == 'lstm': # conv lstm loginf("Model: LSTM") model = ConvLSTMModel(hidden_size, num_classes, num_layer=num_layer, vision_dropout=vision_dropout, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'deltanet': loginf("Model: DeltaNet") model = ConvDeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'srwm': loginf("Model: Self-Referential learning") model = ConvSRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'stateful_srwm': loginf("Model: Self-Referential learning") model = StatefulConvSRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_ln=True, beta_init=args.srwm_beta_init, use_input_softmax=args.use_input_softmax, input_dropout=args.input_dropout, dropout_type=args.dropout_type, imagenet=is_imagenet, fc100=is_fc100) elif model_name == 'res12_lstm': loginf("Model: Resnet12 + LSTM") model = Res12LSTMModel(hidden_size=hidden_size, num_layers=num_layer, dropout=dropout_rate, vision_dropout=vision_dropout, 
use_big=args.use_big_res12, input_dropout=args.input_dropout, dropout_type=args.dropout_type, num_classes=num_classes) elif model_name == 'res12_deltanet': # assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + Deltanet") model = Res12DeltaModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, vision_dropout=vision_dropout, use_big=args.use_big_res12, input_dropout=args.input_dropout, dropout_type=args.dropout_type, num_classes=num_classes) elif model_name == 'res12_srwm': # assert is_imagenet, 'Mainly for Imagenet' loginf("Model: Resnet12 + SRWM") model = Res12SRWMModel(hidden_size=hidden_size, num_layers=num_layer, num_head=n_head, dim_head=dim_head, dim_ff=dim_ff, dropout=dropout_rate, num_classes=num_classes, vision_dropout=vision_dropout, use_big=args.use_big_res12, use_ln=True, beta_init=args.srwm_beta_init, input_dropout=args.input_dropout, dropout_type=args.dropout_type, use_input_softmax=args.use_input_softmax) loginf(f"Number of trainable params: {model.num_params()}") loginf(f"{model}") model = model.to(device) # Set optimiser learning_rate = args.learning_rate clip = args.clip loginf(f"Learning rate: {learning_rate}") loginf(f"clip at: {clip}") loginf(f"Batch size: {args.batch_size}") loginf(f"Gradient accumulation for {args.grad_cummulate} steps.") ce_loss_fn = nn.CrossEntropyLoss() mse_loss_fn = nn.MSELoss() if args.use_kl_loss: kl_loss = nn.KLDivLoss( reduction="batchmean", log_target=True) # TODO double check options optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.995), eps=1e-9) loginf(f"{optimizer}") if args.use_warmup: loginf("Using Warmup. Ignoring `learning_rate`.") optimizer = WarmupWrapper(args.hidden_size, args.warmup_steps, optimizer) model.reset_grad() ############ best_model_path = os.path.join(args.work_dir, 'best_model.pt') lastest_model_path = os.path.join(args.work_dir, 'lastest_model.pt') loginf(f"[{datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] Start training") start_time = time.time() interval_start_time = time.time() train_timer = time.time() last_batch_logged = 0 best_val_first_shot_acc = 0.0 best_valid_test_first_shot_acc = 0.0 best_test_first_shot_acc = 0.0 num_seq = 0 running_loss = 0.0 running_fsl_loss = 0 running_future_loss = 0 running_bstp_loss = 0 running_total = 0 running_correct = 0 running_correct_future = 0 run_step = 0 offset_step = 0 end_training = False cur_train_acc = 0 main_loss_scaler = args.main_loss_scaler bstp_loss_scaler = args.bstp_loss_scaler future_loss_scaler = args.future_loss_scaler for ep in range(args.total_epoch): loginf(f'epoch {ep} ====================') for i, batch in enumerate(dataloader): model.train() state = None train_inputs, train_targets = batch['train'] train_inputs = train_inputs.to(device=device) # (B, len, 1, 28, 28) train_targets = train_targets.to(device=device) # (B, len) # shuffle and reshape train_shape = train_inputs.shape bsz, slen = train_shape[0], train_shape[1] num_seq += bsz train_inputs = train_inputs.transpose(0, 1) # (len, B, 28 * 28) train_targets = train_targets.transpose(0, 1) # (len, B) # same for future part future_inputs, future_targets = batch['future'] future_inputs = future_inputs.to(device=device) # (B, test_len, 28 * 28) future_targets = future_targets.to(device=device) future_inputs = future_inputs.transpose(0, 1) # (test_len, B, 28 * 28) future_targets = future_targets.transpose(0, 1) # (test_len, B) # and for query query_inputs, query_targets = batch['final_query'] 
query_inputs = query_inputs.to(device=device) # (B, test_len, 28 * 28) query_targets = query_targets.to(device=device) query_inputs = query_inputs.transpose(0, 1) # (test_len, B, 28 * 28) query_targets = query_targets.transpose(0, 1) # (test_len, B) # already shuffled. just take the first one. query_inputs = query_inputs[0].unsqueeze(0) query_targets = query_targets[0].unsqueeze(0) # forward the support set images to get the final weights _, support_states = model(train_inputs, train_targets) # forward the query for the main K-shot learning: dummy_last_token = torch.zeros_like(query_targets) # copy state: copy_support_states1 = model.clone_state(support_states) outputs, _ = model( query_inputs, dummy_last_token, state=copy_support_states1) # compute bootstrap loss copy_support_states2 = model.clone_state(support_states) _, future_states = model( future_inputs, future_targets, state=copy_support_states2) # copy state: copy_future_states = model.clone_state(future_states) future_outputs, _ = model( query_inputs, dummy_last_token, state=copy_future_states) # Compute all losses, there are 3 # 1. main few-shot learning loss query_targets = query_targets.reshape(-1) outputs = outputs.reshape(-1, num_classes) main_few_shot_loss = ce_loss_fn(outputs, query_targets) # 2. future more-shot learning loss future_outputs = future_outputs.reshape(-1, num_classes) future_few_shot_loss = ce_loss_fn(future_outputs, query_targets) # 3. bootstrapping loss # MSE on weights or distillation (maybe add an option to use both) if args.use_kl_loss: outputs = F.log_softmax(outputs, dim=-1) future_outputs = F.log_softmax(future_outputs, dim=-1) bstp_loss = kl_loss(outputs, future_outputs.detach()) else: bstp_loss = 0 Wy_support_states, Wq_support_states, Wk_support_states, wb_support_states = support_states Wy_future_states, Wq_future_states, Wk_future_states, wb_future_states = future_states for k in range(model.num_layers): bstp_loss += mse_loss_fn(Wy_support_states[k], Wy_future_states[k].detach()) bstp_loss += mse_loss_fn(Wq_support_states[k], Wq_future_states[k].detach()) bstp_loss += mse_loss_fn(Wk_support_states[k], Wk_future_states[k].detach()) bstp_loss += mse_loss_fn(wb_support_states[k], wb_future_states[k].detach()) # Remove detach loss = (main_loss_scaler * main_few_shot_loss + bstp_loss_scaler * bstp_loss + future_loss_scaler * future_few_shot_loss) loss.backward() if i % args.grad_cummulate == 0: if clip > 0: torch.nn.utils.clip_grad_norm_(model.parameters(), clip) optimizer.step() model.reset_grad() # global loss running_loss += loss.item() running_fsl_loss += main_few_shot_loss.item() running_future_loss += future_few_shot_loss.item() running_bstp_loss += bstp_loss.item() running_total += query_targets.size(0) model.eval() with torch.no_grad(): _, predicted = outputs.max(-1) _, future_predicted = future_outputs.max(-1) bool_correct_pred = (predicted == query_targets) bool_correct_pred_future = (future_predicted == query_targets) running_correct += bool_correct_pred.sum().item() running_correct_future += bool_correct_pred_future.sum().item() run_step += 1 if i % args.report_every == 0: cur_train_acc = 100 * running_correct / running_total if use_wandb: wandb.log({ "train_total_loss": running_loss / run_step, "running_few_shot_loss": running_fsl_loss / run_step, "running_more_shot_loss": running_future_loss / run_step, "running_bootstrap_loss": running_bstp_loss / run_step, "running_few_shot_acc": 100 * running_correct / running_total, "running_more_shot_acc": 100 * running_correct_future / 
running_total, }) train_elapsed = time.time() - train_timer train_timer = time.time() num_images_per_sec = ( (i + 1 - last_batch_logged) * batch_size * (slen + 1) // train_elapsed) last_batch_logged = i loginf(f'steps: {i + offset_step}, num_seq: {num_seq}, ' f'train_total_loss: {running_loss / run_step :.3f}, ' f'few_shot_loss: {running_fsl_loss / run_step :.3f}, ' f'more_shot_loss: {running_future_loss / run_step :.3f}, ' f'bootstrap_loss: {running_bstp_loss / run_step :.3f}, ' f'few_shot_acc: {100 * running_correct / running_total:.2f} % ' f'more_shot_acc: {100 * running_correct_future / running_total:.2f} % ' f'(elapsed {int(train_elapsed)}s, {int(num_images_per_sec)} ' 'images/s)') running_loss = 0 running_fsl_loss = 0 running_future_loss = 0 running_bstp_loss = 0 running_total = 0 running_correct = 0 running_correct_future = 0 run_step = 0 if i % args.validate_every == 0: # run validation model.eval() with torch.no_grad(): v_total = eval_model_label_sync( model, val_dataloader, num_steps=args.valid_size) test_total = eval_model_label_sync( model, test_dataloader, num_steps=args.test_size) loginf( f"[val {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " f'val total {100 * v_total :.2f} %, ') loginf(f'test acc {100 * test_total :.2f} % ') # debugging if use_wandb: wandb.log({ "val_acc": 100 * v_total, "test_acc": 100 * test_total, # debugging }) if v_total > best_val_first_shot_acc: best_val_first_shot_acc = v_total best_step = i + offset_step # Save the best model loginf("The best model so far.") torch.save({'epoch': best_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'valid_acc': v_total}, best_model_path) loginf("Saved.") if test_total > best_valid_test_first_shot_acc: best_valid_test_first_shot_acc = test_total if test_total > best_test_first_shot_acc: best_test_first_shot_acc = test_total loginf( f'current best valid_acc {100 * best_val_first_shot_acc :.2f} ' f'%\ncurrent best valid test_acc ' f'{100 * best_valid_test_first_shot_acc :.2f} %\n' f'current best test_acc {100 * best_test_first_shot_acc :.2f} ') # Save the latest model torch.save({'train_step': i + offset_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'valid_total_acc': v_total}, lastest_model_path) elapsed = time.time() - interval_start_time loginf(f"Elapsed {elapsed / 60.:.2f} min since last valid.") interval_start_time = time.time() train_timer = time.time() if cur_train_acc > args.train_acc_stop: loginf(f'reached {args.train_acc_stop:.1f} % train accuracy') end_training = True break if i + offset_step > args.total_train_steps: end_training = True loginf(f'reached {args.total_train_steps} steps') break if end_training: break offset_step += i elapsed = time.time() - start_time loginf(f"Finished {i} steps in {elapsed / 60.:.2f} min.") loginf(f"Best one shot validation acc: {100 * best_val_first_shot_acc:.2f} % " f"at step {best_step}") # load the best model and evaluate on the test set del dataloader, dataset, val_dataloader, val_dataset checkpoint = torch.load(best_model_path) model.load_state_dict(checkpoint['model_state_dict']) model.eval() with torch.no_grad(): test_total = eval_model_label_sync( model, test_dataloader, num_steps=args.test_size) loginf( f"[test {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " f'test total {100 * test_total :.2f} %') # eval latest checkpoint = torch.load(lastest_model_path) model.load_state_dict(checkpoint['model_state_dict']) model.eval() with torch.no_grad(): test_total = 
eval_model_label_sync( model, test_dataloader, num_steps=args.test_size) loginf( f"[test latest {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " f'test total {100 * test_total :.2f} %') # final eval checkpoint = torch.load(best_model_path) model.load_state_dict(checkpoint['model_state_dict']) model.eval() results = [] num_test = args.num_test test_size = 1000 for i in range(num_test): with torch.no_grad(): test_total = eval_model_label_sync( model, test_dataloader, num_steps=args.test_size) test_total = 100 * test_total loginf( f"[test {i} {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}] " f'test total {test_total :.2f} %') results.append(test_total) mean = np.mean(results) std = np.std(results) loginf( f'[{num_test} tests using {batch_size * test_size} samples each] ' f'mean: {mean:.2f}, std: {std:.2f}, 95%-CI {1.96 * std / num_test:.2f}')
32,042
40.028169
103
py
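The training loop above combines three terms: a main few-shot loss on the query after the support set, a "more-shot" loss after the extra future examples, and a bootstrapping term that pulls the post-support fast-weight state towards the (detached) post-future state. The MSE variant of that last term can be isolated as a small helper; `bootstrap_mse` below is a hypothetical name for a sketch, with the nested-tuple state layout of `StatefulConvSRWMModel` (one `(Wy, Wq, Wk, wb)` tuple of per-layer tensors per state):

import torch.nn.functional as F


def bootstrap_mse(support_state, future_state):
    """MSE between two SRWM state tuples; the future state is the target.

    Each state is a tuple (Wy, Wq, Wk, wb), where every element is itself
    a tuple with one tensor per SRWM layer, as returned by the stateful
    model's forward. The target side is detached, as in the loop above.
    """
    loss = 0.
    for support_mats, future_mats in zip(support_state, future_state):
        for w_s, w_f in zip(support_mats, future_mats):
            loss = loss + F.mse_loss(w_s, w_f.detach())
    return loss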
modern-srwm
modern-srwm-main/supervised_learning/self_ref_v0/__init__.py
# Adaptation of the original code from # https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py # Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/ # Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>, # Apoorv Vyas <avyas@idiap.ch> # Modifications Copyright (c) 2021- Kazuki Irie import os import torch import torch.nn.functional as F from torch.utils.cpp_extension import load # Just in time import # https://pytorch.org/tutorials/advanced/cpp_extens dirname = os.path.dirname(__file__) filename = os.path.join(dirname, 'self_ref_v0.cu') fwd_cuda = load( extra_cuda_cflags=['--ftemplate-depth=1024'], name="self_ref_forward", sources=[filename], verbose=True) bwd_cuda = load( extra_cuda_cflags=['--ftemplate-depth=1024'], name="self_ref_backward", sources=[filename], verbose=True) self_ref_fwd_cuda = fwd_cuda.self_ref_forward self_ref_bwd_cuda = bwd_cuda.self_ref_backward class SelfRefv0(torch.autograd.Function): dot = { "cuda": self_ref_fwd_cuda } dot_backward = { "cuda": self_ref_bwd_cuda } @staticmethod def forward(ctx, x, W_y, W_q, W_k, w_b): # Shape of x: (B, len, D) # Shape of W_q: (n_head, D, E) where n_head * E = D (typically) device = x.device N, H, L, E = x.shape assert W_y.shape == (N, H, E, E), "Reshape/unsqueeze if needed." assert W_q.shape == (N, H, E, E), "Reshape/unsqueeze if needed." assert W_k.shape == (N, H, E, E), "Reshape/unsqueeze if needed." assert w_b.shape == (N, H, E, 4), "Reshape/unsqueeze if needed." out = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) # y q_main = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) k_main = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) beta_main = torch.zeros((N, H, L, 4), device=device, dtype=x.dtype) y_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) q_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) k_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) beta_diff = torch.zeros((N, H, L, 4), device=device, dtype=x.dtype) SelfRefv0.dot[device.type]( x, W_y, W_q, W_k, w_b, q_main, k_main, beta_main, y_diff, q_diff, k_diff, beta_diff, out ) ctx.save_for_backward( x, q_main, k_main, beta_main, y_diff, q_diff, k_diff, beta_diff, W_y, W_q, W_k, w_b) return out @staticmethod def backward(ctx, grad_out): # Extract the saved tensors (x, q, k, beta, y_diff, q_diff, k_diff, beta_diff, W_y, W_q, W_k, w_b) = ctx.saved_tensors # Allocate memory for the gradients grad_x = torch.zeros_like(x) grad_W_y = torch.zeros_like(W_y) grad_W_q = torch.zeros_like(W_q) grad_W_k = torch.zeros_like(W_k) grad_w_b = torch.zeros_like(w_b) # Compute the gradients SelfRefv0.dot_backward[x.device.type]( x, q, k, beta, y_diff, q_diff, k_diff, beta_diff, grad_out, W_y, W_q, W_k, w_b, grad_x, grad_W_y, grad_W_q, grad_W_k, grad_w_b ) return grad_x, grad_W_y, grad_W_q, grad_W_k, grad_w_b class StatefulSelfRefv0(torch.autograd.Function): dot = { "cuda": self_ref_fwd_cuda } dot_backward = { "cuda": self_ref_bwd_cuda } @staticmethod def forward(ctx, x, W_y, W_q, W_k, w_b): # Shape of x: (B, len, D) # Shape of W_q: (n_head, D, E) where n_head * E = D (typically) device = x.device N, H, L, E = x.shape assert W_y.shape == (N, H, E, E), "Reshape/unsqueeze if needed." assert W_q.shape == (N, H, E, E), "Reshape/unsqueeze if needed." assert W_k.shape == (N, H, E, E), "Reshape/unsqueeze if needed." assert w_b.shape == (N, H, E, 4), "Reshape/unsqueeze if needed." 
out = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) # y q_main = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) k_main = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) beta_main = torch.zeros((N, H, L, 4), device=device, dtype=x.dtype) y_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) q_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) k_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) beta_diff = torch.zeros((N, H, L, 4), device=device, dtype=x.dtype) SelfRefv0.dot[device.type]( x, W_y, W_q, W_k, w_b, q_main, k_main, beta_main, y_diff, q_diff, k_diff, beta_diff, out ) ctx.save_for_backward( x, q_main, k_main, beta_main, y_diff, q_diff, k_diff, beta_diff, W_y, W_q, W_k, w_b) return out, W_y, W_q, W_k, w_b @staticmethod def backward(ctx, grad_out, grad_W_y, grad_W_q, grad_W_k, grad_w_b): # Extract the saved tensors (x, q, k, beta, y_diff, q_diff, k_diff, beta_diff, W_y, W_q, W_k, w_b) = ctx.saved_tensors # Allocate memory for the gradients grad_x = torch.zeros_like(x) # Compute the gradients SelfRefv0.dot_backward[x.device.type]( x, q, k, beta, y_diff, q_diff, k_diff, beta_diff, grad_out, W_y, W_q, W_k, w_b, grad_x, grad_W_y, grad_W_q, grad_W_k, grad_w_b ) return grad_x, grad_W_y, grad_W_q, grad_W_k, grad_w_b # Alias the autograd functions to python style snake case naming self_ref_v0 = SelfRefv0.apply stateful_self_ref_v0 = StatefulSelfRefv0.apply if __name__ == '__main__': import torch import torch.nn.functional as F torch.manual_seed(111) # Tests pass if the relative difference compared with # the corresponding torch autograd computation # is smaller than a threshold. # Ideally should be tested with double... rel_threshold = 1e-3 # from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py def max_relative_error(a, b, eps=1e-6): return float(torch.abs((b - a) / (torch.abs(b) + eps)).max().item()) print('##########################') print('# Test forward pass') print('##########################') # bsz, n_head, slen, d_head = 3, 5, 11, 8 bsz, n_head, slen, d_head = 3, 5, 11, 8 v_dim = d_head * 3 + 4 print(f"value dim: {v_dim}") Wy0 = torch.cuda.FloatTensor( bsz, n_head, d_head, d_head, device='cuda').uniform_(-1., 1.) Wq0 = torch.cuda.FloatTensor( bsz, n_head, d_head, d_head, device='cuda').uniform_(-1., 1.) Wk0 = torch.cuda.FloatTensor( bsz, n_head, d_head, d_head, device='cuda').uniform_(-1., 1.) wb0 = torch.cuda.FloatTensor( bsz, n_head, d_head, 4, device='cuda').uniform_(-1., 1.) 
x0 = torch.rand(bsz, n_head, slen, d_head, device='cuda') W_y1 = torch.zeros( bsz, n_head, d_head, d_head, requires_grad=True, device='cuda') W_q1 = torch.zeros( bsz, n_head, d_head, d_head, requires_grad=True, device='cuda') W_k1 = torch.zeros( bsz, n_head, d_head, d_head, requires_grad=True, device='cuda') w_b1 = torch.zeros( bsz, n_head, d_head, 4, requires_grad=True, device='cuda') W_y1 = Wy0.detach().clone().requires_grad_(True) W_q1 = Wq0.detach().clone().requires_grad_(True) W_k1 = Wk0.detach().clone().requires_grad_(True) w_b1 = wb0.detach().clone().requires_grad_(True) W_y2_slow = torch.zeros( bsz, n_head, d_head, d_head, requires_grad=True, device='cuda') W_q2_slow = torch.zeros( bsz, n_head, d_head, d_head, requires_grad=True, device='cuda') W_k2_slow = torch.zeros( bsz, n_head, d_head, d_head, requires_grad=True, device='cuda') w_b2_slow = torch.zeros( bsz, n_head, d_head, 4, requires_grad=True, device='cuda') W_y2_slow = Wy0.detach().clone().requires_grad_(True) W_q2_slow = Wq0.detach().clone().requires_grad_(True) W_k2_slow = Wk0.detach().clone().requires_grad_(True) w_b2_slow = wb0.detach().clone().requires_grad_(True) x1 = torch.zeros( bsz, n_head, slen, d_head, requires_grad=True, device='cuda') x1 = x0.detach().clone().requires_grad_(True) print("Forwarding custom kernel...") # softmax done inside self_ref_v0 --> not anymore, adding the line below x1 = F.softmax(x1, dim=-1) out1 = self_ref_v0(x1, W_y1, W_q1, W_k1, w_b1) print("done.") x2 = torch.zeros( bsz, n_head, slen, d_head, requires_grad=True, device='cuda') # apply softmax here x2 = F.softmax(x0.detach(), dim=-1).clone().requires_grad_(True) x2 = x2.permute(2, 0, 1, 3) # (len, B, H, dim) x2 = x2.reshape(slen, bsz * n_head, d_head) # (len, B*H, dim) W_y2 = W_y2_slow.view(bsz * n_head, d_head, d_head) W_q2 = W_q2_slow.view(bsz * n_head, d_head, d_head) W_k2 = W_k2_slow.view(bsz * n_head, d_head, d_head) w_b2 = w_b2_slow.view(bsz * n_head, d_head, 4) out_list = [] # out = x2[0] # (B * H, D) print("Forwarding PyTorch code...") for pos in range(slen): out = x2[pos].unsqueeze(1) # out = F.softmax(x2[pos], dim=-1).unsqueeze(1) # out: (B * H, 1, D) # W2: (B * H, D, v_dim) # bmm (b,n,M) x (b,M,p) -> (b,n,p) # ykqb before squeeze: (B * H, 1, v_dim) y = torch.bmm(out, W_y2).squeeze(1) out_t = y.reshape(bsz, n_head, d_head) out_list.append(out_t.clone()) if pos < slen - 1: # no need to update weights at the last time step q = torch.bmm(out, W_q2).squeeze(1) k = torch.bmm(out, W_k2).squeeze(1) beta = torch.bmm(out, w_b2).squeeze(1) beta = torch.sigmoid(beta) beta_y, beta_q, beta_k, beta_beta = torch.split( beta, [1, 1, 1, 1], dim=-1) k = F.softmax(k, dim=-1) q = F.softmax(q, dim=-1) # retrieve currently stored value y_old = torch.bmm(k.unsqueeze(1), W_y2).squeeze(1) q_old = torch.bmm(k.unsqueeze(1), W_q2).squeeze(1) k_old = torch.bmm(k.unsqueeze(1), W_k2).squeeze(1) beta_old = torch.bmm(k.unsqueeze(1), w_b2).squeeze(1) y_new = torch.bmm(q.unsqueeze(1), W_y2).squeeze(1) q_new = torch.bmm(q.unsqueeze(1), W_q2).squeeze(1) k_new = torch.bmm(q.unsqueeze(1), W_k2).squeeze(1) beta_new = torch.bmm(q.unsqueeze(1), w_b2).squeeze(1) # update all weights y_insert = beta_y * (y_new - y_old) q_insert = beta_q * (q_new - q_old) k_insert = beta_k * (k_new - k_old) beta_insert = beta_beta * (beta_new - beta_old) W_y2 = W_y2.clone() + torch.bmm( k.unsqueeze(2), y_insert.unsqueeze(1)) W_q2 = W_q2.clone() + torch.bmm( k.unsqueeze(2), q_insert.unsqueeze(1)) W_k2 = W_k2.clone() + torch.bmm( k.unsqueeze(2), k_insert.unsqueeze(1)) w_b2 = w_b2.clone() + 
torch.bmm( k.unsqueeze(2), beta_insert.unsqueeze(1)) print("done.") out2 = torch.stack(out_list) out2 = out2.view(slen, bsz, n_head, d_head) out1 = out1.permute(2, 0, 1, 3) for s in range(slen): for b in range(bsz): for h in range(n_head): print(f"s={s}, b={b}, h={h}") print(f"out: {out1[s][b][h]}") print(f"ref: {out2[s][b][h]}") assert max_relative_error( out1[s][b][h], out2[s][b][h]) < rel_threshold print("pass!") print("==> Forward pass test done.") print('##########################') print('# Test Backward pass') print('##########################') # grad loss1 = out1.sum() W_y1.retain_grad() W_q1.retain_grad() W_k1.retain_grad() w_b1.retain_grad() x1.retain_grad() loss1.backward() loss2 = out2.sum() W_y2_slow.retain_grad() W_q2_slow.retain_grad() W_k2_slow.retain_grad() w_b2_slow.retain_grad() x2.retain_grad() loss2.backward() print('##########################') print('# Gradients input') print('##########################') x2_grad = x2.grad.reshape(slen, bsz, n_head, d_head) x2_grad = x2_grad.permute(1, 2, 0, 3) for s in reversed(range(slen)): for b in range(bsz): for h in range(n_head): print(f"s={s}, b={b}, h={h}") print(f"grad x out: {x1.grad[b][h][s]}") print(f"grad x ref: {x2_grad[b][h][s]}") assert max_relative_error( x1.grad[b][h][s], x2_grad[b][h][s]) < rel_threshold print("pass!") print('##########################') print('# Gradients weights') print('##########################') W_y2_grad = W_y2_slow.grad.reshape(bsz, n_head, d_head, d_head) W_q2_grad = W_q2_slow.grad.reshape(bsz, n_head, d_head, d_head) W_k2_grad = W_k2_slow.grad.reshape(bsz, n_head, d_head, d_head) w_b2_grad = w_b2_slow.grad.reshape(bsz, n_head, d_head, 4) print('##########################') print('# Gradient Wy') print('##########################') for b in range(bsz): for h in range(n_head): for d in range(d_head): print(f"b={b} h={h} d={d} ------------------------") print(f"grad Wy out: {W_y1.grad[b][h][d]}") print(f"grad Wy ref: {W_y2_grad[b][h][d]}") assert max_relative_error( W_y1.grad[b][h][d], W_y2_grad[b][h][d]) < rel_threshold print("pass!") print('##########################') print('# Gradient Wq') print('##########################') for b in range(bsz): for h in range(n_head): for d in range(d_head): print(f"b={b} h={h} d={d} ------------------------") print(f"grad Wq out: {W_q1.grad[b][h][d]}") print(f"grad Wq ref: {W_q2_grad[b][h][d]}") assert max_relative_error( W_q1.grad[b][h][d], W_q2_grad[b][h][d]) < rel_threshold print("pass!") print('##########################') print('# Gradient Wk') print('##########################') for b in range(bsz): for h in range(n_head): for d in range(d_head): print(f"b={b} h={h} d={d} ------------------------") print(f"grad Wk out: {W_k1.grad[b][h][d]}") print(f"grad Wk ref: {W_k2_grad[b][h][d]}") assert max_relative_error( W_k1.grad[b][h][d], W_k2_grad[b][h][d]) < rel_threshold print("pass!") print('##########################') print('# Gradient wb') print('##########################') for b in range(bsz): for h in range(n_head): for d in range(d_head): print(f"b={b} h={h} d={d} ------------------------") print(f"grad wb out: {w_b1.grad[b][h][d]}") print(f"grad wb ref: {w_b2_grad[b][h][d]}") assert max_relative_error( w_b1.grad[b][h][d], w_b2_grad[b][h][d]) < rel_threshold print("pass!") print("==> All tests pass!")
16,123
32.945263
113
py
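The slow reference loop in the test above spells out the self-referential rule; one time step of it can be written as plain PyTorch. The sketch below (with the hypothetical name `srwm_step`, tensors flattened to `(B*H, ...)` as in the test code) generates y/q/k/beta from the current weights, reads the values currently stored under the key, and writes a beta-gated rank-one correction into all four weight matrices:

import torch
import torch.nn.functional as F


def srwm_step(x_t, W_y, W_q, W_k, w_b):
    """One self-referential update step. x_t: (B*H, dim), already softmaxed."""
    x_t = x_t.unsqueeze(1)                                 # (B*H, 1, dim)
    y = torch.bmm(x_t, W_y).squeeze(1)                     # output
    q = F.softmax(torch.bmm(x_t, W_q).squeeze(1), dim=-1)  # self-gen. query
    k = F.softmax(torch.bmm(x_t, W_k).squeeze(1), dim=-1)  # self-gen. key
    beta = torch.sigmoid(torch.bmm(x_t, w_b).squeeze(1))   # 4 learning rates

    new_weights = []
    for i, W in enumerate((W_y, W_q, W_k, w_b)):
        old = torch.bmm(k.unsqueeze(1), W).squeeze(1)      # stored at key k
        new = torch.bmm(q.unsqueeze(1), W).squeeze(1)      # read at query q
        delta = beta[:, i:i + 1] * (new - old)
        new_weights.append(
            W + torch.bmm(k.unsqueeze(2), delta.unsqueeze(1)))  # rank-one
    return y, tuple(new_weights)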
modern-srwm
modern-srwm-main/supervised_learning/fast_weight/__init__.py
# Adaptation of the original code from
# https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
# Modifications Copyright (c) 2021 Kazuki Irie

import os
import torch
from torch.utils.cpp_extension import load
# Just in time import
# https://pytorch.org/tutorials/advanced/cpp_extens

dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'fast_weight_cuda.cu')

mod_causal_dot_product_cuda = load(
    name="fast_weight_forward", sources=[filename], verbose=True)
mod_causal_dot_backward_cuda = load(
    name="fast_weight_backward", sources=[filename], verbose=True)

causal_dot_product_cuda = mod_causal_dot_product_cuda.fast_weight_forward
causal_dot_backward_cuda = mod_causal_dot_backward_cuda.fast_weight_backward


class DeltaFastWeight(torch.autograd.Function):
    """Fast weight using the delta update rule."""

    dot = {
        "cuda": causal_dot_product_cuda
    }
    dot_backward = {
        "cuda": causal_dot_backward_cuda
    }

    @staticmethod
    def forward(ctx, Q, K, V, beta, W):
        # NB: use `!=` here; `is not` on a string literal only compares
        # object identity and raises a SyntaxWarning on recent Pythons.
        assert Q.device.type != "cpu"
        assert K.device.type != "cpu"
        assert V.device.type != "cpu"
        assert beta.device.type != "cpu"
        assert W.device.type != "cpu"

        # Create the output tensor
        device = Q.device
        N, H, L, E = Q.shape
        _, _, _, M = V.shape

        out = torch.zeros((N, H, L, M), device=device, dtype=Q.dtype)
        V_old = torch.zeros((N, H, L, M), device=device, dtype=Q.dtype)
        V_insert = torch.zeros((N, H, L, M), device=device, dtype=Q.dtype)

        DeltaFastWeight.dot[device.type](
            Q.data,
            K.data,
            V.data,
            beta.data,
            V_old,
            V_insert,
            W,
            out
        )
        ctx.save_for_backward(Q, K, V, beta, V_old, V_insert, W)

        return out

    @staticmethod
    def backward(ctx, grad_out):
        # Extract the saved tensors
        Q, K, V, beta, V_old, V_insert, W = ctx.saved_tensors

        # Allocate memory for the gradients
        grad_Q = torch.zeros_like(Q)
        grad_K = torch.zeros_like(K)
        grad_V = torch.zeros_like(V)
        grad_beta = torch.zeros_like(beta)

        assert Q.device.type != "cpu"

        # Compute the gradients
        DeltaFastWeight.dot_backward[Q.device.type](
            Q.data,
            K.data,
            V.data,
            beta.data,
            V_old.data,
            V_insert.data,
            grad_out,
            W.data,
            grad_Q,
            grad_K,
            grad_V,
            grad_beta
        )

        return grad_Q, grad_K, grad_V, grad_beta, None


# Alias the autograd functions to python style snake case naming
fast_weight_delta = DeltaFastWeight.apply


if __name__ == '__main__':
    import torch
    torch.manual_seed(111)

    # Tests pass if the relative difference compared with
    # the corresponding torch autograd computation
    # is smaller than a threshold.

    # Ideally should be tested with double...
    rel_threshold = 1e-3

    # from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py
    def max_relative_error(a, b, eps=1e-6):
        return torch.abs((b - a) / (torch.abs(b) + eps)).max().item()

    print('##########################')
    print('# Test forward pass')
    print('##########################')

    bsz, n_head, slen, d_head = 3, 5, 11, 64
    v_dim = 64

    # (B, H, len, dim)
    q0 = torch.rand(bsz, n_head, slen, d_head, device='cuda')
    k0 = torch.rand(bsz, n_head, slen, d_head, device='cuda')
    v0 = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
    beta0 = torch.sigmoid(torch.rand(bsz, n_head, slen, 1, device='cuda'))

    q0 = q0 / q0.sum(dim=-1, keepdim=True)
    k0 = k0 / k0.sum(dim=-1, keepdim=True)

    q1 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    k1 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    v1 = torch.zeros(
        bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
    beta1 = torch.zeros(
        bsz, n_head, slen, 1, requires_grad=True, device='cuda')

    q1.data = q0.data
    k1.data = k0.data
    v1.data = v0.data
    beta1.data = beta0.data

    W1 = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')

    print("Forwarding custom kernel...")
    out1 = fast_weight_delta(q1, k1, v1, beta1, W1)
    print("done.")

    # compute using torch
    q2 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    k2 = torch.zeros(
        bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
    v2 = torch.zeros(
        bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
    beta2 = torch.zeros(
        bsz, n_head, slen, 1, requires_grad=True, device='cuda')

    q2.data = q0.data
    k2.data = k0.data
    v2.data = v0.data
    beta2.data = beta0.data

    # (len, B, H, dim)
    q_2 = q2.permute(2, 0, 1, 3)
    slen, bsz, n_head, d_head = q_2.shape
    q_2 = q_2.reshape(slen, bsz * n_head, d_head)

    k_2 = k2.permute(2, 0, 1, 3)
    k_2 = k_2.reshape(slen, bsz * n_head, d_head)

    v_2 = v2.permute(2, 0, 1, 3)
    v_2 = v_2.reshape(slen, bsz * n_head, v_dim)

    beta_2 = beta2.permute(2, 0, 1, 3)
    beta_2 = beta_2.reshape(slen, bsz * n_head, 1)

    W = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')

    out_list = []

    print("Forwarding PyTorch code...")
    for pos in range(slen):
        v_old = torch.bmm(W, k_2[pos].unsqueeze(2)).squeeze()
        v_insert = beta_2[pos] * (v_2[pos] - v_old)
        W = W + torch.bmm(v_insert.unsqueeze(2), k_2[pos].unsqueeze(1))
        out_t = torch.bmm(W, q_2[pos].unsqueeze(2)).squeeze()
        out_list.append(out_t.clone())
    print("done.")

    out2 = torch.stack(out_list)
    out2 = out2.view(slen, bsz, n_head, v_dim)
    out1 = out1.permute(2, 0, 1, 3)

    for s in range(slen):
        for b in range(bsz):
            for h in range(n_head):
                print(f"out1: {out1[s][b][h]}")
                print(f"out2: {out2[s][b][h]}")
                assert max_relative_error(
                    out1[s][b][h], out2[s][b][h]) < rel_threshold
                print("pass!")

    print('##########################')
    print('# Test Backward pass')
    print('##########################')

    # grad
    loss1 = out1.sum()
    q1.retain_grad()
    k1.retain_grad()
    v1.retain_grad()
    beta1.retain_grad()
    loss1.backward()

    loss2 = out2.sum()
    q2.retain_grad()
    k2.retain_grad()
    v2.retain_grad()
    beta2.retain_grad()
    loss2.backward()

    for s in range(slen):
        for b in reversed(range(bsz)):
            for h in range(n_head):
                print(f"s={s}, b={b}, h={h}")
                print(f"grad query1: {q1.grad[b][h][s]}")
                print(f"grad query2: {q2.grad[b][h][s]}")
                assert max_relative_error(
                    q1.grad[b][h][s], q2.grad[b][h][s]) < rel_threshold
                print("pass!")

                print(f"grad key1: {k1.grad[b][h][s]}")
                print(f"grad key2: {k2.grad[b][h][s]}")
                assert max_relative_error(
                    k1.grad[b][h][s], k2.grad[b][h][s]) < rel_threshold
                print("pass!")

                print(f"grad values1: {v1.grad[b][h][s]}")
                print(f"grad values2: {v2.grad[b][h][s]}")
                assert max_relative_error(
                    v1.grad[b][h][s], v2.grad[b][h][s]) < rel_threshold
                print("pass!")

                print(f"grad beta1: {beta1.grad[b][h][s]}")
                print(f"grad beta2: {beta2.grad[b][h][s]}")
                assert max_relative_error(
                    beta1.grad[b][h][s], beta2.grad[b][h][s]) < rel_threshold
                print("pass!")

    print("All tests pass.")
8,121
30.48062
113
py
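As a sanity reference for `fast_weight_delta`, the recurrence the kernel computes can be reproduced in a few lines of plain PyTorch. This is a minimal, CPU-friendly sketch distilled from the test loop above; `delta_rule_reference` is a hypothetical helper name, not part of the repository:

import torch

def delta_rule_reference(q, k, v, beta):
    """Sequential delta rule: W <- W + beta * (v - W k) k^T, readout W q.

    q, k: (B, len, d_head); v: (B, len, v_dim); beta: (B, len, 1).
    Returns the per-step readouts with shape (B, len, v_dim).
    """
    bsz, slen, d_head = q.shape
    v_dim = v.shape[-1]
    W = q.new_zeros(bsz, v_dim, d_head)
    outs = []
    for t in range(slen):
        v_old = torch.bmm(W, k[:, t].unsqueeze(2)).squeeze(2)   # (B, v_dim)
        v_insert = beta[:, t] * (v[:, t] - v_old)               # (B, v_dim)
        W = W + torch.bmm(v_insert.unsqueeze(2), k[:, t].unsqueeze(1))
        outs.append(torch.bmm(W, q[:, t].unsqueeze(2)).squeeze(2))
    return torch.stack(outs, dim=1)

# Example (arbitrary shapes; normalized q/k as in the test above):
q = torch.softmax(torch.randn(2, 7, 16), dim=-1)
k = torch.softmax(torch.randn(2, 7, 16), dim=-1)
v = torch.randn(2, 7, 16)
beta = torch.sigmoid(torch.randn(2, 7, 1))
out = delta_rule_reference(q, k, v, beta)   # (2, 7, 16)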
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/version.py
VERSION = '1.8.0'
17
17
17
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/__init__.py
from torchmeta_local import datasets
# from torchmeta_local import modules
# from torchmeta_local import toy
from torchmeta_local import transforms
from torchmeta_local import utils

# from torchmeta_local.version import VERSION as __version__
244
29.625
60
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/omniglot.py
import os import json import glob import h5py from PIL import Image, ImageOps from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset from torchvision.datasets.utils import list_dir, download_url from torchmeta_local.datasets.utils import get_asset class Omniglot(CombinationMetaDataset): """ The Omniglot dataset [1]. A dataset of 1623 handwritten characters from 50 different alphabets. Parameters ---------- root : string Root directory where the dataset folder `omniglot` exists. num_classes_per_task : int Number of classes per tasks. This corresponds to "N" in "N-way" classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test` if all three are set to `False`. use_vinyals_split : bool (default: `True`) If set to `True`, the dataset uses the splits defined in [3]. If `False`, then the meta-train split corresponds to `images_background`, and the meta-test split corresponds to `images_evaluation` (raises an error when calling the meta-validation split). transform : callable, optional A function/transform that takes a `PIL` image, and returns a transformed version. See also `torchvision.transforms`. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. See also `torchvision.transforms`. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`. class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. E.g. `torchmeta_local.transforms.HorizontalFlip()`. download : bool (default: `False`) If `True`, downloads the zip files and processes the dataset in the root directory (under the `omniglot` folder). If the dataset is already available, this does not download/process the dataset again. Notes ----- The dataset is downloaded from the original [Omniglot repository] (https://github.com/brendenlake/omniglot). The meta train/validation/test splits used in [3] are taken from [this repository] (https://github.com/jakesnell/prototypical-networks). These splits are over 1028/172/423 classes (characters). References ---------- .. [1] Lake, B. M., Salakhutdinov, R., and Tenenbaum, J. B. (2015). Human-level concept learning through probabilistic program induction. Science, 350(6266), 1332-1338 (http://www.sciencemag.org/content/350/6266/1332.short) .. [2] Lake, B. M., Salakhutdinov, R., and Tenenbaum, J. B. (2019). The Omniglot Challenge: A 3-Year Progress Report (https://arxiv.org/abs/1902.03477) .. [3] Vinyals, O., Blundell, C., Lillicrap, T. and Wierstra, D. (2016). Matching Networks for One Shot Learning. 
In Advances in Neural Information Processing Systems (pp. 3630-3638) (https://arxiv.org/abs/1606.04080) """ def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, use_vinyals_split=True, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False): dataset = OmniglotClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, use_vinyals_split=use_vinyals_split, transform=transform, meta_split=meta_split, class_augmentations=class_augmentations, download=download) super(Omniglot, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class OmniglotClassDataset(ClassDataset): folder = 'omniglot' download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python' zips_md5 = { 'images_background': '68d2efa1b9178cc56df9314c21c6e718', 'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811' } filename = 'data.hdf5' filename_labels = '{0}{1}_labels.json' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, use_vinyals_split=True, transform=None, class_augmentations=None, download=False): super(OmniglotClassDataset, self).__init__(meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations) if self.meta_val and (not use_vinyals_split): raise ValueError('Trying to use the meta-validation without the ' 'Vinyals split. You must set `use_vinyals_split=True` to use ' 'the meta-validation split.') self.root = os.path.join(os.path.expanduser(root), self.folder) self.use_vinyals_split = use_vinyals_split self.transform = transform self.split_filename = os.path.join(self.root, self.filename) self.split_filename_labels = os.path.join(self.root, self.filename_labels.format('vinyals_' if use_vinyals_split else '', self.meta_split)) self._data = None self._labels = None if download: self.download() if not self._check_integrity(): raise RuntimeError('Omniglot integrity check failed') self._num_classes = len(self.labels) def __getitem__(self, index): character_name = '/'.join(self.labels[index % self.num_classes]) data = self.data[character_name] transform = self.get_transform(index, self.transform) target_transform = self.get_target_transform(index) return OmniglotDataset(index, data, character_name, transform=transform, target_transform=target_transform) @property def num_classes(self): return self._num_classes @property def data(self): if self._data is None: self._data = h5py.File(self.split_filename, 'r') return self._data @property def labels(self): if self._labels is None: with open(self.split_filename_labels, 'r') as f: self._labels = json.load(f) return self._labels def _check_integrity(self): return (os.path.isfile(self.split_filename) and os.path.isfile(self.split_filename_labels)) def close(self): if self._data is not None: self._data.close() self._data = None def download(self): import zipfile import shutil if self._check_integrity(): return for name in self.zips_md5: zip_filename = '{0}.zip'.format(name) filename = os.path.join(self.root, zip_filename) if os.path.isfile(filename): continue url = '{0}/{1}'.format(self.download_url_prefix, zip_filename) download_url(url, self.root, zip_filename, self.zips_md5[name]) with zipfile.ZipFile(filename, 'r') as f: f.extractall(self.root) filename = os.path.join(self.root, self.filename) with h5py.File(filename, 'w') as f: for name in self.zips_md5: 
group = f.create_group(name) alphabets = list_dir(os.path.join(self.root, name)) characters = [(name, alphabet, character) for alphabet in alphabets for character in list_dir(os.path.join(self.root, name, alphabet))] split = 'train' if name == 'images_background' else 'test' labels_filename = os.path.join(self.root, self.filename_labels.format('', split)) with open(labels_filename, 'w') as f_labels: labels = sorted(characters) json.dump(labels, f_labels) for _, alphabet, character in characters: filenames = glob.glob(os.path.join(self.root, name, alphabet, character, '*.png')) dataset = group.create_dataset('{0}/{1}'.format(alphabet, character), (len(filenames), 105, 105), dtype='uint8') for i, char_filename in enumerate(filenames): image = Image.open(char_filename, mode='r').convert('L') dataset[i] = ImageOps.invert(image) shutil.rmtree(os.path.join(self.root, name)) for split in ['train', 'val', 'test']: filename = os.path.join(self.root, self.filename_labels.format( 'vinyals_', split)) data = get_asset(self.folder, '{0}.json'.format(split), dtype='json') with open(filename, 'w') as f: labels = sorted([('images_{0}'.format(name), alphabet, character) for (name, alphabets) in data.items() for (alphabet, characters) in alphabets.items() for character in characters]) json.dump(labels, f) class OmniglotDataset(Dataset): def __init__(self, index, data, character_name, transform=None, target_transform=None): super(OmniglotDataset, self).__init__(index, transform=transform, target_transform=target_transform) self.data = data self.character_name = character_name def __len__(self): return len(self.data) def __getitem__(self, index): image = Image.fromarray(self.data[index]) target = self.character_name if self.transform is not None: image = self.transform(image) if self.target_transform is not None: target = self.target_transform(target) return (image, target)
10,994
40.334586
92
py
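A typical few-shot episode pipeline around this Omniglot class, assuming `torchmeta_local` mirrors upstream torchmeta's `Categorical`, `ClassSplitter`, and `BatchMetaDataLoader` (those names are taken from upstream and are not confirmed by this file):

from torchvision.transforms import Compose, Resize, ToTensor
from torchmeta_local.datasets import Omniglot
from torchmeta_local.transforms import Categorical, ClassSplitter
from torchmeta_local.utils.data import BatchMetaDataLoader

# 5-way, 1-shot episodes with 15 query images per class.
dataset = Omniglot('data', num_classes_per_task=5, meta_train=True,
                   transform=Compose([Resize(28), ToTensor()]),
                   target_transform=Categorical(num_classes=5),
                   download=True)
dataset = ClassSplitter(dataset, shuffle=True,
                        num_train_per_class=1, num_test_per_class=15)
loader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)

batch = next(iter(loader))
support_x, support_y = batch['train']   # (16, 5, 1, 28, 28), (16, 5)
query_x, query_y = batch['test']        # (16, 75, 1, 28, 28), (16, 75)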
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/miniimagenet.py
import os import pickle from PIL import Image import h5py import json from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset # QKFIX: See torchmeta_local.datasets.utils for more informations from torchmeta_local.datasets.utils import download_file_from_google_drive class MiniImagenet(CombinationMetaDataset): """ The Mini-Imagenet dataset, introduced in [1]. This dataset contains images of 100 different classes from the ILSVRC-12 dataset (Imagenet challenge). The meta train/validation/test splits are taken from [2] for reproducibility. Parameters ---------- root : string Root directory where the dataset folder `miniimagenet` exists. num_classes_per_task : int Number of classes per tasks. This corresponds to "N" in "N-way" classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test` if all three are set to `False`. transform : callable, optional A function/transform that takes a `PIL` image, and returns a transformed version. See also `torchvision.transforms`. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. See also `torchvision.transforms`. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`. class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. E.g. `torchmeta_local.transforms.HorizontalFlip()`. download : bool (default: `False`) If `True`, downloads the pickle files and processes the dataset in the root directory (under the `miniimagenet` folder). If the dataset is already available, this does not download/process the dataset again. Notes ----- The dataset is downloaded from [this repository] (https://github.com/renmengye/few-shot-ssl-public/). The meta train/ validation/test splits are over 64/16/20 classes. References ---------- .. [1] Vinyals, O., Blundell, C., Lillicrap, T. and Wierstra, D. (2016). Matching Networks for One Shot Learning. In Advances in Neural Information Processing Systems (pp. 3630-3638) (https://arxiv.org/abs/1606.04080) .. [2] Ravi, S. and Larochelle, H. (2016). Optimization as a Model for Few-Shot Learning. 
(https://openreview.net/forum?id=rJY0-Kcll) """ def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False): dataset = MiniImagenetClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download) super(MiniImagenet, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class MiniImagenetClassDataset(ClassDataset): folder = 'miniimagenet' # Google Drive ID from https://github.com/renmengye/few-shot-ssl-public gdrive_id = '16V_ZlkW4SsnNDtnGmaBRq2OoPmUOc5mY' gz_filename = 'mini-imagenet.tar.gz' gz_md5 = 'b38f1eb4251fb9459ecc8e7febf9b2eb' pkl_filename = 'mini-imagenet-cache-{0}.pkl' filename = '{0}_data.hdf5' filename_labels = '{0}_labels.json' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False): super(MiniImagenetClassDataset, self).__init__(meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations) self.root = os.path.join(os.path.expanduser(root), self.folder) self.transform = transform self.split_filename = os.path.join(self.root, self.filename.format(self.meta_split)) self.split_filename_labels = os.path.join(self.root, self.filename_labels.format(self.meta_split)) self._data = None self._labels = None if download: self.download() if not self._check_integrity(): raise RuntimeError('MiniImagenet integrity check failed') self._num_classes = len(self.labels) def __getitem__(self, index): class_name = self.labels[index % self.num_classes] data = self.data[class_name] transform = self.get_transform(index, self.transform) target_transform = self.get_target_transform(index) return MiniImagenetDataset(index, data, class_name, transform=transform, target_transform=target_transform) @property def num_classes(self): return self._num_classes @property def data(self): if self._data is None: self._data_file = h5py.File(self.split_filename, 'r') self._data = self._data_file['datasets'] return self._data @property def labels(self): if self._labels is None: with open(self.split_filename_labels, 'r') as f: self._labels = json.load(f) return self._labels def _check_integrity(self): return (os.path.isfile(self.split_filename) and os.path.isfile(self.split_filename_labels)) def close(self): if self._data_file is not None: self._data_file.close() self._data_file = None self._data = None def download(self): import tarfile if self._check_integrity(): return download_file_from_google_drive(self.gdrive_id, self.root, self.gz_filename, md5=self.gz_md5) filename = os.path.join(self.root, self.gz_filename) with tarfile.open(filename, 'r') as f: f.extractall(self.root) for split in ['train', 'val', 'test']: filename = os.path.join(self.root, self.filename.format(split)) if os.path.isfile(filename): continue pkl_filename = os.path.join(self.root, self.pkl_filename.format(split)) if not os.path.isfile(pkl_filename): raise IOError() with open(pkl_filename, 'rb') as f: data = pickle.load(f) images, classes = data['image_data'], data['class_dict'] with h5py.File(filename, 'w') as f: group = f.create_group('datasets') for name, indices in classes.items(): group.create_dataset(name, data=images[indices]) 
labels_filename = os.path.join(self.root, self.filename_labels.format(split)) with open(labels_filename, 'w') as f: labels = sorted(list(classes.keys())) json.dump(labels, f) if os.path.isfile(pkl_filename): os.remove(pkl_filename) class MiniImagenetDataset(Dataset): def __init__(self, index, data, class_name, transform=None, target_transform=None): super(MiniImagenetDataset, self).__init__(index, transform=transform, target_transform=target_transform) self.data = data self.class_name = class_name def __len__(self): return self.data.shape[0] def __getitem__(self, index): image = Image.fromarray(self.data[index]) target = self.class_name if self.transform is not None: image = self.transform(image) if self.target_transform is not None: target = self.target_transform(target) return (image, target)
8,994
38.279476
92
py
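Alternatively, single episodes can be sampled directly by passing a `dataset_transform` and calling `sample_task()`, as in upstream torchmeta; a sketch under that assumption (shapes in the comments follow the 84x84 RGB images this class stores):

from torchvision.transforms import Compose, Resize, ToTensor
from torchmeta_local.datasets import MiniImagenet
from torchmeta_local.transforms import Categorical, ClassSplitter

dataset = MiniImagenet('data', num_classes_per_task=5, meta_val=True,
                       transform=Compose([Resize(84), ToTensor()]),
                       target_transform=Categorical(num_classes=5),
                       dataset_transform=ClassSplitter(
                           shuffle=True,
                           num_train_per_class=5,
                           num_test_per_class=15),
                       download=True)

task = dataset.sample_task()      # one random 5-way, 5-shot episode
image, label = task['train'][0]   # (3, 84, 84) tensor, int label in [0, 5)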
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/triplemnist.py
import numpy as np from PIL import Image import os import io import json import glob import h5py from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset # QKFIX: See torchmeta_local.datasets.utils for more informations from torchmeta_local.datasets.utils import download_file_from_google_drive from torchmeta_local.datasets.utils import get_asset class TripleMNIST(CombinationMetaDataset): """ The Triple MNIST dataset, introduced in [1]. This dataset is based on the MNIST dataset [2]. It consists of sampled images from MNIST that are put together to create images with multiple digits. It contains 1,000,000 images from 1000 different classes (1000 images per class, for the numbers 000 to 999). Parameters ---------- root : string Root directory where the dataset folder `triplemnist` exists. num_classes_per_task : int Number of classes per tasks. This corresponds to "N" in "N-way" classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test` if all three are set to `False`. transform : callable, optional A function/transform that takes a `PIL` image, and returns a transformed version. See also `torchvision.transforms`. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. See also `torchvision.transforms`. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`. class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. E.g. `torchmeta_local.transforms.HorizontalFlip()`. download : bool (default: `False`) If `True`, downloads the pickle files and processes the dataset in the root directory (under the `triplemnist` folder). If the dataset is already available, this does not download/process the dataset again. Notes ----- The dataset is downloaded from the Multi-digit MNIST repository [1](https://github.com/shaohua0116/MultiDigitMNIST). The dataset contains images (MNIST triple digits) from 1000 classes, for the numbers 000 to 999. The meta train/validation/test splits are 640/160/200 classes. The splits are taken from [1]. References ---------- .. [1] Sun, S. (2019). Multi-digit MNIST for Few-shot Learning. (https://github.com/shaohua0116/MultiDigitMNIST) .. [2] LeCun, Y., Cortes, C., and Burges, CJ. (2010). MNIST Handwritten Digit Database. 
(http://yann.lecun.com/exdb/mnist) """ def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False): dataset = TripleMNISTClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download) super(TripleMNIST, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class TripleMNISTClassDataset(ClassDataset): folder = 'triplemnist' # Google Drive ID from https://github.com/shaohua0116/MultiDigitMNIST gdrive_id = '1xqyW289seXYaDSqD2jaBPMKVAAjPP9ee' zip_filename = 'triple_mnist_seed_123_image_size_84_84.zip' zip_md5 = '9508b047f9fbb834c02bc13ef44245da' filename = '{0}_data.hdf5' filename_labels = '{0}_labels.json' image_folder = 'triple_mnist_seed_123_image_size_84_84' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False): super(TripleMNISTClassDataset, self).__init__(meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations) self.root = os.path.join(os.path.expanduser(root), self.folder) self.transform = transform self.split_filename = os.path.join(self.root, self.filename.format(self.meta_split)) self.split_filename_labels = os.path.join(self.root, self.filename_labels.format(self.meta_split)) self._data_file = None self._data = None self._labels = None if download: self.download() if not self._check_integrity(): raise RuntimeError('Triple MNIST integrity check failed') self._num_classes = len(self.labels) def __getitem__(self, index): label = self.labels[index % self.num_classes] data = self.data[label] transform = self.get_transform(index, self.transform) target_transform = self.get_target_transform(index) return TripleMNISTDataset(index, data, label, transform=transform, target_transform=target_transform) @property def num_classes(self): return self._num_classes @property def data(self): if self._data is None: self._data_file = h5py.File(self.split_filename, 'r') self._data = self._data_file['datasets'] return self._data @property def labels(self): if self._labels is None: with open(self.split_filename_labels, 'r') as f: self._labels = json.load(f) return self._labels def _check_integrity(self): return (os.path.isfile(self.split_filename) and os.path.isfile(self.split_filename_labels)) def close(self): if self._data_file is not None: self._data_file.close() self._data_file = None self._data = None def download(self): import zipfile import shutil import glob from tqdm import tqdm if self._check_integrity(): return zip_filename = os.path.join(self.root, self.zip_filename) if not os.path.isfile(zip_filename): download_file_from_google_drive(self.gdrive_id, self.root, self.zip_filename, md5=self.zip_md5) zip_foldername = os.path.join(self.root, self.image_folder) if not os.path.isdir(zip_foldername): with zipfile.ZipFile(zip_filename, 'r') as f: for member in tqdm(f.infolist(), desc='Extracting '): try: f.extract(member, self.root) except zipfile.BadZipFile: print('Error: Zip file is corrupted') for split in ['train', 'val', 'test']: filename = os.path.join(self.root, self.filename.format(split)) if os.path.isfile(filename): continue labels = get_asset(self.folder, '{0}.json'.format(split)) 
labels_filename = os.path.join(self.root, self.filename_labels.format(split)) with open(labels_filename, 'w') as f: json.dump(labels, f) image_folder = os.path.join(zip_foldername, split) with h5py.File(filename, 'w') as f: group = f.create_group('datasets') dtype = h5py.special_dtype(vlen=np.uint8) for i, label in enumerate(tqdm(labels, desc=filename)): images = glob.glob(os.path.join(image_folder, label, '*.png')) images.sort() dataset = group.create_dataset(label, (len(images),), dtype=dtype) for i, image in enumerate(images): with open(image, 'rb') as f: array = bytearray(f.read()) dataset[i] = np.asarray(array, dtype=np.uint8) if os.path.isdir(zip_foldername): shutil.rmtree(zip_foldername) class TripleMNISTDataset(Dataset): def __init__(self, index, data, label, transform=None, target_transform=None): super(TripleMNISTDataset, self).__init__(index, transform=transform, target_transform=target_transform) self.data = data self.label = label def __len__(self): return len(self.data) def __getitem__(self, index): image = Image.open(io.BytesIO(self.data[index])).convert('RGB') target = self.label if self.transform is not None: image = self.transform(image) if self.target_transform is not None: target = self.target_transform(target) return (image, target)
10,053
38.582677
85
py
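The download step above stores each class as a variable-length uint8 HDF5 dataset holding raw PNG bytes, which `TripleMNISTDataset.__getitem__` decodes with PIL. A minimal sketch of reading one image back by hand (the path is illustrative; it follows the `{split}_data.hdf5` naming under the dataset root):

import io
import h5py
from PIL import Image

with h5py.File('data/triplemnist/train_data.hdf5', 'r') as f:
    group = f['datasets']
    label = next(iter(group))            # a class name such as '000'
    png_bytes = bytes(group[label][0])   # raw PNG payload of the first image
    image = Image.open(io.BytesIO(png_bytes)).convert('RGB')
    print(label, image.size)             # 84x84 three-digit image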
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/cub.py
import numpy as np from PIL import Image import os import io import json import glob import h5py from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset # QKFIX: See torchmeta_local.datasets.utils for more informations from torchmeta_local.datasets.utils import download_file_from_google_drive from torchmeta_local.datasets.utils import get_asset class CUB(CombinationMetaDataset): """ The Caltech-UCSD Birds dataset, introduced in [1]. This dataset is based on images from 200 species of birds from the Caltech-UCSD Birds dataset [2]. Parameters ---------- root : string Root directory where the dataset folder `cub` exists. num_classes_per_task : int Number of classes per tasks. This corresponds to "N" in "N-way" classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test` if all three are set to `False`. transform : callable, optional A function/transform that takes a `PIL` image, and returns a transformed version. See also `torchvision.transforms`. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. See also `torchvision.transforms`. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`. class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. E.g. `torchmeta_local.transforms.HorizontalFlip()`. download : bool (default: `False`) If `True`, downloads the pickle files and processes the dataset in the root directory (under the `cub` folder). If the dataset is already available, this does not download/process the dataset again. Notes ----- The dataset is downloaded from [2]. The dataset contains images from 200 classes. The meta train/validation/test splits are over 100/50/50 classes. The splits are taken from [3] ([code](https://github.com/wyharveychen/CloserLookFewShot) for reproducibility). References ---------- .. [1] Hilliard, N., Phillips, L., Howland, S., Yankov, A., Corley, C. D., Hodas, N. O. (2018). Few-Shot Learning with Metric-Agnostic Conditional Embeddings. (https://arxiv.org/abs/1802.04376) .. [2] Wah, C., Branson, S., Welinder, P., Perona, P., Belongie, S. (2011). The Caltech-UCSD Birds-200-2011 Dataset (http://www.vision.caltech.edu/visipedia/CUB-200-2011.html) .. [3] Chen, W., Liu, Y. and Kira, Z. and Wang, Y. and Huang, J. (2019). A Closer Look at Few-shot Classification. 
International Conference on Learning Representations (https://openreview.net/forum?id=HkxLXnAcFQ) """ def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False): dataset = CUBClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download) super(CUB, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class CUBClassDataset(ClassDataset): folder = 'cub' # # Google Drive ID from http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz gdrive_id = '1hbzc_P1FuxMkcabkgn9ZKinBwW683j45' tgz_filename = 'CUB_200_2011.tgz' tgz_md5 = '97eceeb196236b17998738112f37df78' image_folder = 'CUB_200_2011/images' filename = '{0}_data.hdf5' filename_labels = '{0}_labels.json' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False): super(CUBClassDataset, self).__init__(meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations) self.root = os.path.join(os.path.expanduser(root), self.folder) self.transform = transform self.split_filename = os.path.join(self.root, self.filename.format(self.meta_split)) self.split_filename_labels = os.path.join(self.root, self.filename_labels.format(self.meta_split)) self._data_file = None self._data = None self._labels = None if download: self.download() if not self._check_integrity(): raise RuntimeError('CUB integrity check failed') self._num_classes = len(self.labels) def __getitem__(self, index): label = self.labels[index % self.num_classes] data = self.data[label] transform = self.get_transform(index, self.transform) target_transform = self.get_target_transform(index) return CUBDataset(index, data, label, transform=transform, target_transform=target_transform) @property def num_classes(self): return self._num_classes @property def data(self): if self._data is None: self._data_file = h5py.File(self.split_filename, 'r') self._data = self._data_file['datasets'] return self._data @property def labels(self): if self._labels is None: with open(self.split_filename_labels, 'r') as f: self._labels = json.load(f) return self._labels def _check_integrity(self): return (os.path.isfile(self.split_filename) and os.path.isfile(self.split_filename_labels)) def close(self): if self._data_file is not None: self._data_file.close() self._data_file = None self._data = None def download(self): import tarfile import shutil import glob from tqdm import tqdm if self._check_integrity(): return download_file_from_google_drive(self.gdrive_id, self.root, self.tgz_filename, md5=self.tgz_md5) tgz_filename = os.path.join(self.root, self.tgz_filename) with tarfile.open(tgz_filename, 'r') as f: f.extractall(self.root) image_folder = os.path.join(self.root, self.image_folder) for split in ['train', 'val', 'test']: filename = os.path.join(self.root, self.filename.format(split)) if os.path.isfile(filename): continue labels = get_asset(self.folder, '{0}.json'.format(split)) labels_filename = os.path.join(self.root, self.filename_labels.format(split)) with open(labels_filename, 'w') as f: json.dump(labels, f) with h5py.File(filename, 'w') as f: group = f.create_group('datasets') dtype = 
h5py.special_dtype(vlen=np.uint8) for i, label in enumerate(tqdm(labels, desc=filename)): images = glob.glob(os.path.join(image_folder, label, '*.jpg')) images.sort() dataset = group.create_dataset(label, (len(images),), dtype=dtype) for i, image in enumerate(images): with open(image, 'rb') as f: array = bytearray(f.read()) dataset[i] = np.asarray(array, dtype=np.uint8) tar_folder, _ = os.path.splitext(tgz_filename) if os.path.isdir(tar_folder): shutil.rmtree(tar_folder) attributes_filename = os.path.join(self.root, 'attributes.txt') if os.path.isfile(attributes_filename): os.remove(attributes_filename) class CUBDataset(Dataset): def __init__(self, index, data, label, transform=None, target_transform=None): super(CUBDataset, self).__init__(index, transform=transform, target_transform=target_transform) self.data = data self.label = label def __len__(self): return len(self.data) def __getitem__(self, index): image = Image.open(io.BytesIO(self.data[index])).convert('RGB') target = self.label if self.transform is not None: image = self.transform(image) if self.target_transform is not None: target = self.target_transform(target) return (image, target)
9,765
38.861224
103
py
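Class augmentations, as referenced in the docstring (`torchmeta_local.transforms.HorizontalFlip()`), add transformed copies of every class as new classes. A sketch, assuming `HorizontalFlip` is exported alongside `Categorical` and `ClassSplitter` as in upstream torchmeta:

from torchvision.transforms import Compose, Resize, ToTensor
from torchmeta_local.datasets import CUB
from torchmeta_local.transforms import (Categorical, ClassSplitter,
                                        HorizontalFlip)

# Each augmentation turns every class into an extra, transformed class,
# effectively doubling the 100 meta-train classes here.
dataset = CUB('data', num_classes_per_task=5, meta_train=True,
              transform=Compose([Resize((84, 84)), ToTensor()]),
              target_transform=Categorical(num_classes=5),
              class_augmentations=[HorizontalFlip()],
              dataset_transform=ClassSplitter(shuffle=True,
                                              num_train_per_class=1,
                                              num_test_per_class=15),
              download=True)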
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/pascal5i.py
""" ;========================================== ; Title: Pascal-5i Dataset for Few-shot Object Segmentation ; Author: Mennatullah Siam ; Company: Huawei Technologies ; Date: 18 March 2020 ;========================================== """ import os import json import glob import h5py from PIL import Image, ImageOps from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset from torchvision.datasets.utils import list_dir, download_url from torchmeta_local.datasets.utils import get_asset import numpy as np class Pascal5i(CombinationMetaDataset): """ Pascal5i dataset [1]. A dataset for few-shot object segmentation supporting 4 folds each fold has 15 training classes and 5 testing classes. Using Preprocessed Masks from [2] Parameters ---------- root : string Root directory where the dataset folder `omniglot` exists. num_classes_per_task : int Number of classes per tasks. This corresponds to "N" in "N-way" classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, and `meta_test` if all three are set to `False`. transform : callable, optional A function/transform that takes a `PIL` image, and returns a transformed version. See also `torchvision.transforms`. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`. class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. E.g. `torchmeta_local.transforms.HorizontalFlip()`. download : bool (default: `False`) If `True`, downloads the zip files and processes the dataset in the root directory (under the `omniglot` folder). If the dataset is already available, this does not download/process the dataset again. fold : int (default: 0) Fold number ranges between 0-3 that controls training(15) and testing(5) classes. Notes ----- Currently Only 1-way is supported References ---------- .. [1] Shaban, Amirreza, et al. "One-shot learning for semantic segmentation." arXiv preprint arXiv:1709.03410 (2017). .. [2] Zhang, Chi, et al. "Canet: Class-agnostic segmentation networks with iterative refinement and attentive few-shot learning." Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2019. 
""" def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False, fold=0): dataset = Pascal5iClassDataset(root, meta_train=meta_train, meta_test=meta_test, transform=transform, meta_split=meta_split, class_augmentations=class_augmentations, download=download, fold=fold) super(Pascal5i, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class Pascal5iClassDataset(ClassDataset): folder = 'pascal5i' downloads = [ { 'url' : 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar', 'filename' : 'VOCtrainval_11-May-2012.tar', 'md5' : '6cd6e144f989b92b3379bac3b3de84fd' }, { 'url' : 'https://github.com/icoz69/CaNet/raw/master/Binary_map_aug.zip', 'filename': 'Binary_map_aug.zip', 'md5': None }, { 'url' : 'https://raw.github.com/NVIDIA/DIGITS/master/examples/semantic-segmentation/pascal-voc-classes.txt', 'filename' : 'pascal-voc-classes.txt', 'md5' : None } ] split_filename_labels = 'pascal-voc-classes.txt' def __init__(self, root, meta_train=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False, fold=0): super(Pascal5iClassDataset, self).__init__(meta_train=meta_train, meta_val=False, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations) self.root = os.path.join(os.path.expanduser(root), self.folder) self.transform = transform self.fold = fold self._data = None self._labels = None self._masks = None if download: self.download() self._num_classes = len(self.labels) def __getitem__(self, index): class_name = self.labels[index % self.num_classes] data, masks = self.data[0][class_name], self.data[1][class_name] transform = self.get_transform(index, self.transform) target_transform = self.get_target_transform(index) class_id = self.read_labels().index(class_name) return PascalDataset(index, (data, masks), class_id, transform=transform, target_transform=target_transform) @property def num_classes(self): return self._num_classes def load_dict_per_class(self): new_exist_class_list = {} if self.meta_split == 'train': fold_list=[0, 1, 2, 3] fold_list.remove(self.fold) else: fold_list = [self.fold] for fold in fold_list: f = open(os.path.join(self.root, 'Binary_map_aug', self.meta_split, 'split%1d_%s.txt'%(fold, self.meta_split))) while True: item = f.readline() if item == '': break img_name = item[:11] cat = int(item[13:15]) if cat not in new_exist_class_list: new_exist_class_list[cat] = [] new_exist_class_list[cat].append(img_name) images = {} masks = {} classes_names = self.read_labels() for k, v in new_exist_class_list.items(): cname = classes_names[k] for path in v: fname = os.path.join(self.root, 'VOCdevkit/VOC2012/JPEGImages', path + '.jpg') if cname not in images: images[cname] = [] images[cname].append(fname) fname = os.path.join(self.root, 'Binary_map_aug', self.meta_split, str(k), path + '.png') if cname not in masks: masks[cname] = [] masks[cname].append(fname) return images, masks @property def data(self): if self._data is None: self._data, self._masks = self.load_dict_per_class() return self._data, self._masks def read_labels(self, fold=None): labels = [] if fold is not None: if self.meta_train: in_classes = set(range(21)) - \ set(range(fold*5+1, (fold+1)*5+1)) else: in_classes = set(range(fold*5+1, (fold+1)*5+1)) else: in_classes = set(range(21)) with open(os.path.join(self.root, 
self.split_filename_labels), 'r') as f: for it, line in enumerate(f): if line.strip() == '': break if it in in_classes: labels.append(line.strip()) return labels @property def labels(self): if self._labels is None: self._labels = self.read_labels(self.fold) return self._labels[1:] def download(self): import zipfile import tarfile import shutil for dload in self.downloads: filename = os.path.join(self.root, dload['filename']) if os.path.isfile(filename): continue download_url(dload['url'], self.root, dload['filename'], dload['md5']) if 'zip' in dload['filename']: with zipfile.ZipFile(filename, 'r') as f: f.extractall(self.root) elif 'tar' in dload['filename']: with tarfile.open(filename, 'r') as f: f.extractall(self.root) class PascalDataset(Dataset): def __init__(self, index, data, class_id, transform=None, target_transform=None): super(PascalDataset, self).__init__(index, transform=transform, target_transform=target_transform) self.data, self.masks = data self.class_id = class_id def __len__(self): return len(self.data) def __getitem__(self, index): image = Image.open(self.data[index]) mask = Image.open(self.masks[index]) target = self.class_id if self.transform is not None: image, mask = self.transform(image, mask) return (image, mask, target)
9,715
35.80303
116
py
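Pascal-5i tasks yield `(image, mask, class_id)` triples rather than `(image, target)` pairs, and per the docstring only 1-way tasks are supported. A usage sketch, assuming `Pascal5i` is exported from `torchmeta_local.datasets` like the other classes:

from torchmeta_local.datasets import Pascal5i
from torchmeta_local.transforms import ClassSplitter

# Fold 0 holds out classes 1-5 for meta-test and trains on the other 15.
dataset = Pascal5i('data', num_classes_per_task=1, meta_train=True, fold=0,
                   dataset_transform=ClassSplitter(shuffle=True,
                                                   num_train_per_class=1,
                                                   num_test_per_class=1),
                   download=True)

task = dataset.sample_task()
image, mask, class_id = task['train'][0]  # PIL image, PIL binary mask, int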
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/letter.py
import numpy as np import os import json import h5py from tqdm import tqdm from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset from torchmeta_local.datasets.utils import get_asset class Letter(CombinationMetaDataset): """The Letter Image Recognition Dataset """ def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False): """ Letter Image Recognition Data [1]: open-ml-id: 6 https://archive.ics.uci.edu/ml/datasets/Letter+Recognition - 01-01-1991 The objective is to identify each of a large number of black-and-white rectangular pixel displays as one of the 26 capital letters in the English alphabet. The character images were based on 20 different fonts and each letter within these 20 fonts was randomly distorted to produce a file of 20,000 unique stimuli. Each stimulus was converted into 16 primitive numerical attributes (statistical moments and edge counts) which were then scaled to fit into a range of integer values from 0 through 15. We typically train on the first 16000 items and then use the resulting model to predict the letter category for the remaining 4000. See the article cited above for more details. Parameters ---------- root : string Root directory where the dataset folder `letter` exists. num_classes_per_task : int Number of classes per tasks. This corresponds to "N" in "N-way" classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test` if all three are set to `False`. transform : callable, optional A function/transform that takes a numpy array or a pytorch array (depending when the transforms is applied), and returns a transformed version. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`. class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. download : bool (default: `False`) If `True`, downloads the original files and processes the dataset in the root directory (under the `letter` folder). If the dataset is already available, this does not download/process the dataset again. References ----- [1] P. W. Frey and D. J. Slate. "Letter Recognition Using Holland-style Adaptive Classifiers". 
Machine Learning 6(2), 1991 """ dataset = LetterClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download) super(Letter, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class LetterClassDataset(ClassDataset): open_ml_id = 6 open_ml_url = 'https://www.openml.org/d/' + str(open_ml_id) dataset_name = "letter" folder = "letter" filename = '{0}_data.hdf5' filename_labels = '{0}_labels.json' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False): super(LetterClassDataset, self).__init__(meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations) self.root = os.path.join(os.path.expanduser(root), self.folder) self.transform = transform self.split_filename = os.path.join(self.root, self.filename.format(self.meta_split)) self.split_filename_labels = os.path.join(self.root, self.filename_labels.format(self.meta_split)) self._data_file = None self._data = None self._labels = None if download: self.download() if not self._check_integrity(): raise RuntimeError('Letter integrity check failed') self._num_classes = len(self.labels) def __getitem__(self, index): label = self.labels[index % self.num_classes] data = self.data[label] transform = self.get_transform(index, self.transform) target_transform = self.get_target_transform(index) return LetterDataset(index, data, label, transform=transform, target_transform=target_transform) @property def num_classes(self): return self._num_classes @property def data(self): if self._data is None: self._data_file = h5py.File(self.split_filename, 'r') self._data = self._data_file['datasets'] return self._data @property def labels(self): if self._labels is None: with open(self.split_filename_labels, 'r') as f: self._labels = json.load(f) return self._labels def _check_integrity(self): return (os.path.isfile(self.split_filename) and os.path.isfile(self.split_filename_labels)) def close(self): if self._data is not None: self._data.close() self._data = None def download(self): if self._check_integrity(): return from sklearn.datasets import fetch_openml data = fetch_openml(data_id=self.open_ml_id) features = data.data targets = data.target os.makedirs(self.root, exist_ok=True) # for each meta-data-split, get the labels, then check which data-point belongs to the set (via a mask). # then, retrieve the features and targets belonging to the set. Then create hdf5 file for these features. for s, split in enumerate(['train', 'val', 'test']): labels_assets_split = get_asset(self.folder, '{0}.json'.format(split)) is_in_split = [t in labels_assets_split for t in targets] features_split = features.loc[is_in_split] targets_split = targets.loc[is_in_split] assert targets_split.shape[0] == features_split.shape[0] unique_targets_split = np.unique(targets_split) if len(labels_assets_split) > unique_targets_split.shape[0]: print(f"unique set of labels ({(unique_targets_split.shape[0])}) is smaller than set of labels " f"given by assets ({len(labels_assets_split)}). Proceeding with unique set of labels.") # write unique targets to json file. 
labels_filename = os.path.join(self.root, self.filename_labels.format(split)) with open(labels_filename, 'w') as f: json.dump(unique_targets_split.tolist(), f) # write data (features and class labels) filename = os.path.join(self.root, self.filename.format(split)) with h5py.File(filename, 'w') as f: group = f.create_group('datasets') for i, label in enumerate(tqdm(unique_targets_split, desc=filename)): data_class = features_split.loc[targets_split == label] group.create_dataset(label, data=data_class) class LetterDataset(Dataset): def __init__(self, index, data, label, transform=None, target_transform=None): super(LetterDataset, self).__init__(index, transform=transform, target_transform=target_transform) self.data = data self.label = label def __len__(self): return len(self.data) def __getitem__(self, index): features = self.data[index, :] target = self.label if self.transform is not None: features = self.transform(features) if self.target_transform is not None: target = self.target_transform(target) return features, target def create_asset(root='data', num_split=None, numpy_seed=42): """This methods creates the assets of the letter dataset. These are the meta-dataset splits from the original data. Only run this method in case you want to create new assets. Once created, copy the assets to this directory: torchmeta_local.datasets.assets.letter. You can also manually change the assets.""" # number of classes per split: train, valid, test (26 classes in total) if num_split is None: num_split = {"train": 15, "val": 5, "test": 6} num_classes = 0 for key in num_split: num_classes += num_split[key] from sklearn.datasets import fetch_openml data = fetch_openml(data_id=LetterClassDataset.open_ml_id) unique_targets = np.unique(data.target) num_unique_targets = len(unique_targets) assert num_classes == num_unique_targets # split unique labels randomly np.random.seed(numpy_seed) perm = np.random.permutation(num_unique_targets) targets_split = {'train': [unique_targets[i] for i in perm[:num_split['train']]], 'val': [unique_targets[i] for i in perm[num_split['train']: num_split['train'] + num_split['val']]], 'test': [unique_targets[i] for i in perm[num_split['train'] + num_split['val']:]]} # write splits root_path = os.path.join(os.path.expanduser(root), LetterClassDataset.folder) for split in ["train", "val", "test"]: asset_filename = os.path.join(root_path, "{0}.json".format(split)) with open(asset_filename, 'w') as f: json.dump(targets_split[split], f)
11,355
41.691729
121
py
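Unlike the image datasets above, Letter tasks yield 16-dimensional feature vectors. A sketch under the same upstream-API assumptions (note that `download=True` here requires scikit-learn, since the class fetches the data with `fetch_openml`):

from torchmeta_local.datasets import Letter
from torchmeta_local.transforms import Categorical, ClassSplitter

dataset = Letter('data', num_classes_per_task=5, meta_train=True,
                 target_transform=Categorical(num_classes=5),
                 dataset_transform=ClassSplitter(shuffle=True,
                                                 num_train_per_class=5,
                                                 num_test_per_class=10),
                 download=True)

task = dataset.sample_task()
features, label = task['train'][0]   # (16,) feature vector, int in [0, 5)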
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/tieredimagenet.py
import numpy as np
from PIL import Image
import h5py
import json
import os
import io
import pickle

from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset
# QKFIX: See torchmeta_local.datasets.utils for more information
from torchmeta_local.datasets.utils import download_file_from_google_drive


class TieredImagenet(CombinationMetaDataset):
    """
    The Tiered-Imagenet dataset, introduced in [1]. This dataset contains images
    of 608 different classes from the ILSVRC-12 dataset (Imagenet challenge).

    Parameters
    ----------
    root : string
        Root directory where the dataset folder `tieredimagenet` exists.

    num_classes_per_task : int
        Number of classes per tasks. This corresponds to "N" in "N-way"
        classification.

    meta_train : bool (default: `False`)
        Use the meta-train split of the dataset. If set to `True`, then the
        arguments `meta_val` and `meta_test` must be set to `False`. Exactly
        one of these three arguments must be set to `True`.

    meta_val : bool (default: `False`)
        Use the meta-validation split of the dataset. If set to `True`, then
        the arguments `meta_train` and `meta_test` must be set to `False`.
        Exactly one of these three arguments must be set to `True`.

    meta_test : bool (default: `False`)
        Use the meta-test split of the dataset. If set to `True`, then the
        arguments `meta_train` and `meta_val` must be set to `False`. Exactly
        one of these three arguments must be set to `True`.

    meta_split : string in {'train', 'val', 'test'}, optional
        Name of the split to use. This overrides the arguments `meta_train`,
        `meta_val` and `meta_test` if all three are set to `False`.

    transform : callable, optional
        A function/transform that takes a `PIL` image, and returns a
        transformed version. See also `torchvision.transforms`.

    target_transform : callable, optional
        A function/transform that takes a target, and returns a transformed
        version. See also `torchvision.transforms`.

    dataset_transform : callable, optional
        A function/transform that takes a dataset (ie. a task), and returns a
        transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`.

    class_augmentations : list of callable, optional
        A list of functions that augment the dataset with new classes. These
        classes are transformations of existing classes. E.g.
        `torchmeta_local.transforms.HorizontalFlip()`.

    download : bool (default: `False`)
        If `True`, downloads the pickle files and processes the dataset in the
        root directory (under the `tieredimagenet` folder). If the dataset is
        already available, this does not download/process the dataset again.

    Notes
    -----
    The dataset is downloaded from [this repository]
    (https://github.com/renmengye/few-shot-ssl-public/). The dataset contains
    images from 34 categories. The meta train/validation/test splits are over
    20/6/8 categories. Each category contains between 10 and 30 classes. The
    splits over categories (instead of over classes) ensure that all the
    training classes are sufficiently distinct from the test classes (unlike
    Mini-Imagenet).

    References
    ----------
    .. [1] Ren, M., Triantafillou, E., Ravi, S., Snell, J., Swersky, K.,
           Tenenbaum, J.B., Larochelle, H. and Zemel, R.S. (2018).
           Meta-learning for semi-supervised few-shot classification.
           International Conference on Learning Representations.
           (https://arxiv.org/abs/1803.00676)
    """
    def __init__(self, root, num_classes_per_task=None, meta_train=False,
                 meta_val=False, meta_test=False, meta_split=None,
                 transform=None, target_transform=None, dataset_transform=None,
                 class_augmentations=None, download=False):
        dataset = TieredImagenetClassDataset(root, meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            transform=transform, class_augmentations=class_augmentations,
            download=download)
        super(TieredImagenet, self).__init__(dataset, num_classes_per_task,
            target_transform=target_transform,
            dataset_transform=dataset_transform)


class TieredImagenetClassDataset(ClassDataset):
    folder = 'tieredimagenet'
    # Google Drive ID from https://github.com/renmengye/few-shot-ssl-public
    gdrive_id = '1g1aIDy2Ar_MViF2gDXFYDBTR-HYecV07'
    tar_filename = 'tiered-imagenet.tar'
    tar_md5 = 'e07e811b9f29362d159a9edd0d838c62'
    tar_folder = 'tiered-imagenet'

    filename = '{0}_data.hdf5'
    filename_labels = '{0}_labels.json'

    def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
                 meta_split=None, transform=None, class_augmentations=None,
                 download=False):
        super(TieredImagenetClassDataset, self).__init__(meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            class_augmentations=class_augmentations)

        self.root = os.path.join(os.path.expanduser(root), self.folder)
        self.transform = transform

        self._data_file = None
        self._data = None
        self._labels = None

        self.split_filename = os.path.join(self.root,
            self.filename.format(self.meta_split))
        self.split_filename_labels = os.path.join(self.root,
            self.filename_labels.format(self.meta_split))

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError('TieredImagenet integrity check failed')
        self._num_classes = len(self.labels)

    @property
    def data(self):
        if self._data is None:
            self._data_file = h5py.File(self.split_filename, 'r')
            self._data = self._data_file['datasets']
        return self._data

    @property
    def labels(self):
        if self._labels is None:
            with open(self.split_filename_labels, 'r') as f:
                self._labels = json.load(f)
        return self._labels

    def __getitem__(self, index):
        specific_class_name = self.labels[index % self.num_classes]
        data = self.data[specific_class_name]
        general_class_name = data.attrs['label_general']
        transform = self.get_transform(index, self.transform)
        target_transform = self.get_target_transform(index)

        return TieredImagenetDataset(index, data, general_class_name,
            specific_class_name, transform=transform,
            target_transform=target_transform)

    @property
    def num_classes(self):
        return self._num_classes

    def close(self):
        if self._data_file is not None:
            self._data_file.close()
            self._data_file = None
            self._data = None

    def _check_integrity(self):
        return (os.path.isfile(self.split_filename)
            and os.path.isfile(self.split_filename_labels))

    def download(self):
        import tarfile
        import shutil
        from tqdm import tqdm

        if self._check_integrity():
            return

        download_file_from_google_drive(self.gdrive_id, self.root,
            self.tar_filename, md5=self.tar_md5)

        filename = os.path.join(self.root, self.tar_filename)
        with tarfile.open(filename, 'r') as f:
            f.extractall(self.root)
        tar_folder = os.path.join(self.root, self.tar_folder)

        for split in ['train', 'val', 'test']:
            filename = os.path.join(self.root, self.filename.format(split))
            if os.path.isfile(filename):
                continue

            images_filename = os.path.join(tar_folder,
                '{0}_images_png.pkl'.format(split))
            if not os.path.isfile(images_filename):
                raise IOError('Images file not found: {}'.format(images_filename))
            with open(images_filename, 'rb') as f:
                images = pickle.load(f, encoding='bytes')

            labels_filename = os.path.join(tar_folder,
                '{0}_labels.pkl'.format(split))
            if not os.path.isfile(labels_filename):
                raise IOError('Labels file not found: {}'.format(labels_filename))
            with open(labels_filename, 'rb') as f:
                labels = pickle.load(f, encoding='latin1')

            labels_str = labels['label_specific_str']
            general_labels_str = labels['label_general_str']
            general_labels = labels['label_general']
            with open(os.path.join(self.root,
                    self.filename_labels.format(split)), 'w') as f:
                json.dump(labels_str, f)

            with h5py.File(filename, 'w') as f:
                group = f.create_group('datasets')
                dtype = h5py.special_dtype(vlen=np.uint8)
                for i, label in enumerate(tqdm(labels_str, desc=filename)):
                    indices, = np.where(labels['label_specific'] == i)
                    dataset = group.create_dataset(label, (len(indices),),
                                                   dtype=dtype)
                    general_idx = general_labels[indices[0]]
                    dataset.attrs['label_general'] = (general_labels_str[general_idx]
                        if general_idx < len(general_labels_str) else '')
                    dataset.attrs['label_specific'] = label
                    for j, k in enumerate(indices):
                        dataset[j] = np.squeeze(images[k])

        if os.path.isdir(tar_folder):
            shutil.rmtree(tar_folder)


class TieredImagenetDataset(Dataset):
    def __init__(self, index, data, general_class_name, specific_class_name,
                 transform=None, target_transform=None):
        super(TieredImagenetDataset, self).__init__(index, transform=transform,
            target_transform=target_transform)
        self.data = data
        self.general_class_name = general_class_name
        self.specific_class_name = specific_class_name

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        image = Image.open(io.BytesIO(self.data[index]))
        target = (self.general_class_name, self.specific_class_name)

        if self.transform is not None:
            image = self.transform(image)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return (image, target)
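
# --- A minimal usage sketch (editor's addition, not part of the original
# file), in the spirit of the `if __name__ == '__main__'` test blocks used
# elsewhere in this repository. `Categorical` and `ClassSplitter` are the
# transforms imported from `torchmeta_local.transforms` in helpers_tabular.py;
# `BatchMetaDataLoader` is assumed to mirror torchmeta's loader of the same name.
if __name__ == '__main__':
    from torchvision.transforms import Compose, Resize, ToTensor
    from torchmeta_local.transforms import Categorical, ClassSplitter
    from torchmeta_local.utils.data import BatchMetaDataLoader

    # 5-way Tiered-Imagenet meta-train tasks: 1 support and 15 query images
    # per class.
    dataset = TieredImagenet('data', num_classes_per_task=5,
                             transform=Compose([Resize(84), ToTensor()]),
                             target_transform=Categorical(5),
                             meta_train=True, download=True)
    dataset = ClassSplitter(dataset, shuffle=True,
                            num_train_per_class=1, num_test_per_class=15)
    loader = BatchMetaDataLoader(dataset, batch_size=4, num_workers=2)
    for batch in loader:
        inputs, targets = batch['train']
        print('Support inputs:', inputs.shape)  # e.g. (4, 5, 3, 84, 84)
        break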
10,425
40.373016
93
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/tcga.py
import os
import json
import h5py
import numpy as np
import torch
import copy

from ordered_set import OrderedSet
from torchmeta_local.utils.data import Task, MetaDataset
from torchmeta_local.datasets.utils import get_asset


class TCGA(MetaDataset):
    """
    The TCGA dataset [1]. A dataset of classification tasks over the values of
    an attribute, based on the gene expression data from patients diagnosed
    with specific types of cancer. This dataset is based on data from the
    Cancer Genome Atlas Program from the National Cancer Institute.

    Parameters
    ----------
    root : string
        Root directory where the dataset folder `tcga` exists.

    meta_train : bool (default: `False`)
        Use the meta-train split of the dataset. If set to `True`, then the
        arguments `meta_val` and `meta_test` must be set to `False`. Exactly
        one of these three arguments must be set to `True`.

    meta_val : bool (default: `False`)
        Use the meta-validation split of the dataset. If set to `True`, then
        the arguments `meta_train` and `meta_test` must be set to `False`.
        Exactly one of these three arguments must be set to `True`.

    meta_test : bool (default: `False`)
        Use the meta-test split of the dataset. If set to `True`, then the
        arguments `meta_train` and `meta_val` must be set to `False`. Exactly
        one of these three arguments must be set to `True`.

    meta_split : string in {'train', 'val', 'test'}, optional
        Name of the split to use. This overrides the arguments `meta_train`,
        `meta_val` and `meta_test` if all three are set to `False`.

    min_samples_per_class : int (default: 5)
        Minimum number of samples per class in each classification task. This
        filters tasks for which the amount of data for one of the classes is
        too small.

    transform : callable, optional
        A function/transform that takes a gene-expression vector (numpy
        array), and returns a transformed version.

    target_transform : callable, optional
        A function/transform that takes a target, and returns a transformed
        version. See also `torchvision.transforms`.

    dataset_transform : callable, optional
        A function/transform that takes a dataset (ie. a task), and returns a
        transformed version of it. E.g. `transforms.ClassSplitter()`.

    download : bool (default: `False`)
        If `True`, downloads the files and processes the dataset in the root
        directory (under the `tcga` folder). If the dataset is already
        available, this does not download/process the dataset again.

    chunksize : int (default: 100)
        Size of the chunks to be processed when reading the CSV file. This is
        only used while downloading and converting the dataset to HDF5.

    preload : bool (default: `True`)
        Opens the gene expression dataset and keeps a reference to it in
        memory. This decreases the loading time of individual tasks.

    Notes
    -----
    A task is the combination of a cancer type and an attribute. The data is
    the gene expression of patients diagnosed with the cancer defined by the
    task. It consists of a vector of size `(20530,)`. The task is to classify
    the patients according to the attribute given by the task definition. The
    meta train/validation/test splits are over 137/29/29 tasks (ie. types of
    cancer). However, the number of tasks depends on the minimum number of
    samples per class specified by `min_samples_per_class`.

    References
    ----------
    .. [1] Samiei, M., Wurfl, T., Deleu, T., Weiss, M., Dutil, F., Fevens, T.,
           Boucher, G., Lemieux, S., and Cohen, J. P. (2019). The TCGA
           Meta-Dataset Clinical Benchmark. (https://arxiv.org/abs/1910.08636)
    """
    folder = 'tcga'
    clinical_matrix_url = 'https://tcga.xenahubs.net/download/TCGA.{0}.sampleMap/{0}_clinicalMatrix.gz'
    clinical_matrix_filename, _ = os.path.splitext(os.path.basename(clinical_matrix_url))
    gene_expression_filename = 'TCGA_HiSeqV2.hdf5'
    gene_expression_torrent = 'e4081b995625f9fc599ad860138acf7b6eb1cf6f'
    filename_tasks = '{0}_labels.json'

    _task_variables = None
    _cancers = None

    def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
                 meta_split=None, min_samples_per_class=5, transform=None,
                 target_transform=None, dataset_transform=None, download=False,
                 chunksize=100, preload=True):
        super(TCGA, self).__init__(meta_train, meta_val, meta_test, meta_split,
            target_transform=target_transform,
            dataset_transform=dataset_transform)
        self.root = os.path.join(os.path.expanduser(root), self.folder)
        self.min_samples_per_class = min_samples_per_class

        self.transform = transform
        self._all_sample_ids = None
        self._gene_ids = None
        self._tasks = None

        if download:
            self.download(chunksize)

        self.preloaded = False
        self.gene_expression_data = None
        self.gene_expression_file = None
        if preload:
            self._preload_gene_expression_data()
            self.preloaded = True

        self.task_ids = self.get_task_ids()
        self.split_filename_tasks = os.path.join(self.root,
            self.filename_tasks.format(self.meta_split))

    def __len__(self):
        return len(self.task_ids)

    @property
    def gene_expression_path(self):
        filename = os.path.join(self.root, self.gene_expression_filename)
        if not os.path.isfile(filename):
            raise IOError('Gene expression data not found at {}'.format(filename))
        return filename

    @property
    def tasks(self):
        if self._tasks is None:
            with open(self.split_filename_tasks, 'r') as f:
                self._tasks = [task for task in json.load(f)
                               if tuple(task) in self.task_ids]
        return self._tasks

    @property
    def cancers(self):
        if self._cancers is None:
            self._cancers = get_cancers()
        return self._cancers

    @property
    def task_variables(self):
        if self._task_variables is None:
            self._task_variables = frozenset(get_task_variables())
        return self._task_variables

    @property
    def gene_ids(self):
        if self._gene_ids is None:
            gene_ids_file = os.path.join(self.root, 'gene_ids.json')
            if not os.path.isfile(gene_ids_file):
                raise IOError('Gene id data not found at {}'.format(gene_ids_file))
            with open(gene_ids_file, 'r') as f:
                self._gene_ids = set(json.load(f))
        return self._gene_ids

    @property
    def all_sample_ids(self):
        if self._all_sample_ids is None:
            all_sample_ids_file = os.path.join(self.root, 'all_sample_ids.json')
            if not os.path.isfile(all_sample_ids_file):
                raise IOError('All sample id data not found at {}'.format(all_sample_ids_file))
            with open(all_sample_ids_file, 'r') as f:
                all_sample_ids = json.load(f)
            self._all_sample_ids = dict((v, k)
                for (k, v) in enumerate(all_sample_ids))
        return self._all_sample_ids

    def get_processed_filename(self, cancer):
        processed_folder = os.path.join(self.root, 'clinicalMatrices', 'processed')
        filename = '{0}.tsv'.format(self.clinical_matrix_filename.format(cancer))
        filepath = os.path.join(processed_folder, filename)
        if not os.path.isfile(filepath):
            raise IOError('Clinical matrix file not found at {}'.format(filepath))
        return filepath

    def __getitem__(self, index):
        import pandas as pd

        label, cancer = self.tasks[index]
        filename = self.get_processed_filename(cancer)
        dataframe = pd.read_csv(filename, sep='\t', index_col=0, header=0)
        labels = dataframe[label].dropna().astype('category')
        labels = labels[self.task_ids[(label, cancer)]]

        if self.gene_expression_file is not None:
            data = self.gene_expression_data[labels.index]
        else:
            with h5py.File(self.gene_expression_path, 'r') as f:
                data = f['expression_data'][labels.index]

        task = TCGATask((label, cancer), data, labels.cat.codes.tolist(),
            labels.cat.categories.tolist(), transform=self.transform,
            target_transform=self.target_transform)

        if self.dataset_transform is not None:
            task = self.dataset_transform(task)

        return task

    def _preload_gene_expression_data(self):
        self.gene_expression_file = h5py.File(self.gene_expression_path, 'r')
        self.gene_expression_data = self.gene_expression_file['expression_data']

    def _process_clinical_matrices(self):
        import pandas as pd

        clinical_matrices_folder = os.path.join(self.root, 'clinicalMatrices')
        processed_folder = os.path.join(clinical_matrices_folder, 'processed')
        if not os.path.exists(processed_folder):
            os.makedirs(processed_folder)

        col_in_task_variables = lambda col: (col == 'sampleID') or (col in self.task_variables)

        for cancer in self.cancers:
            filename = self.clinical_matrix_filename.format(cancer)
            filepath = os.path.join(clinical_matrices_folder,
                '{0}.tsv'.format(filename))
            processed = os.path.join(processed_folder, '{0}.tsv'.format(filename))

            if not os.path.isfile(processed):
                raw_df = pd.read_csv(filepath, sep='\t', index_col=0,
                    header=0, usecols=col_in_task_variables)
                dataframe = raw_df[raw_df.index.isin(self.all_sample_ids)]
                dataframe.index = dataframe.index.map(
                    lambda index: self.all_sample_ids[index])
                dataframe.index.names = ['index']
                dataframe = dataframe.sort_index(axis=0)
                dataframe.to_csv(processed, sep='\t')
        return True

    def get_task_ids(self):
        tasks = get_task_id_splits(self.meta_split)
        task_ids = dict()

        for task_id in tasks:
            indices, counts = tasks[task_id]
            enough_samples = all(count > self.min_samples_per_class
                                 for count in counts.values())
            if enough_samples:
                task_id = tuple(task_id.split('|', 1))
                task_ids[task_id] = indices

        return task_ids

    def download(self, chunksize=100):
        try:
            import gzip
            import shutil
            import pandas as pd
            from six.moves import urllib
            import academictorrents as at
        except ImportError as exception:
            raise ImportError('{0}. To use the TCGA dataset, you need to '
                'install the necessary dependencies with '
                '`pip install torchmeta_local[tcga]`.'.format(str(exception)))

        clinical_matrices_folder = os.path.join(self.root, 'clinicalMatrices')
        if not os.path.exists(clinical_matrices_folder):
            os.makedirs(clinical_matrices_folder)

        for cancer in self.cancers:
            filename = self.clinical_matrix_filename.format(cancer)
            rawpath = os.path.join(clinical_matrices_folder,
                '{0}.gz'.format(filename))
            filepath = os.path.join(clinical_matrices_folder,
                '{0}.tsv'.format(filename))

            if os.path.isfile(filepath):
                continue

            if not os.path.exists(rawpath):
                print('Downloading `{0}.gz`...'.format(filename))
                url = self.clinical_matrix_url.format(cancer)
                urllib.request.urlretrieve(url, rawpath)

            print('Extracting `{0}.gz`...'.format(filename))
            with gzip.open(rawpath, 'rb') as gzf:
                with open(filepath, 'wb') as f:
                    shutil.copyfileobj(gzf, f)

        gene_expression_file = os.path.join(self.root,
            self.gene_expression_filename)
        if not os.path.isfile(gene_expression_file):
            from tqdm import tqdm

            print('Downloading `{0}` using `academictorrents`...'.format(
                self.gene_expression_filename))
            csv_file = at.get(self.gene_expression_torrent, datastore=self.root)
            print('Downloaded to: `{0}`'.format(csv_file))

            print('Converting TCGA CSV dataset to HDF5. This may take a while, '
                  'but only happens on the first run.')
            reader = pd.read_csv(csv_file, compression='gzip', sep='\t',
                header=0, index_col=0, chunksize=chunksize)
            shape = (10459, 20530)

            with tqdm(total=shape[1]) as pbar:
                with h5py.File(gene_expression_file, 'w') as f:
                    dataset = f.create_dataset('expression_data',
                        shape=shape, dtype='f4')
                    gene_ids = []
                    for idx, chunk in enumerate(reader):
                        slice_ = slice(idx * chunksize, (idx + 1) * chunksize)
                        dataset[:, slice_] = chunk.T
                        gene_ids.extend(chunk.index)
                        pbar.update(chunk.shape[0])
                    all_sample_ids = chunk.columns.tolist()

            gene_ids_file = os.path.join(self.root, 'gene_ids.json')
            with open(gene_ids_file, 'w') as f:
                json.dump(gene_ids, f)

            all_sample_ids_file = os.path.join(self.root, 'all_sample_ids.json')
            with open(all_sample_ids_file, 'w') as f:
                json.dump(all_sample_ids, f)

            if os.path.isfile(csv_file):
                os.remove(csv_file)

            print('Done')

        self._process_clinical_matrices()

        # Create label files
        for split in ['train', 'val', 'test']:
            filename = os.path.join(self.root, self.filename_tasks.format(split))
            data = get_asset(self.folder, '{0}.json'.format(split), dtype='json')

            with open(filename, 'w') as f:
                labels = sorted([key.split('|', 1) for key in data])
                json.dump(labels, f)

        # Clean up
        for cancer in self.cancers:
            filename = self.clinical_matrix_filename.format(cancer)
            rawpath = os.path.join(clinical_matrices_folder,
                '{0}.gz'.format(filename))
            if os.path.isfile(rawpath):
                os.remove(rawpath)

    def close(self):
        if self.preloaded:
            self.gene_expression_file.close()
            self.gene_expression_data = None
            self.gene_expression_file = None
            self.preloaded = False

    def open(self):
        if not self.preloaded:
            self._preload_gene_expression_data()
            self.preloaded = True


class TCGATask(Task):
    @classmethod
    def from_id(cls, root, task_id, transform=None, target_transform=None):
        import pandas as pd
        root = os.path.join(os.path.expanduser(root), TCGA.folder)
        gene_filepath = os.path.join(root, TCGA.gene_expression_filename)
        if not os.path.isfile(gene_filepath):
            raise IOError('Gene expression data not found at {}'.format(gene_filepath))

        label, cancer = task_id

        processed_folder = os.path.join(root, 'clinicalMatrices', 'processed')
        filename = '{0}.tsv'.format(TCGA.clinical_matrix_filename.format(cancer))
        filepath = os.path.join(processed_folder, filename)
        if not os.path.isfile(filepath):
            raise IOError('Clinical matrix file not found at {}'.format(filepath))

        dataframe = pd.read_csv(filepath, sep='\t', index_col=0, header=0)
        labels = dataframe[label].dropna().astype('category')

        with h5py.File(gene_filepath, 'r') as f:
            data = f['expression_data'][labels.index]

        return cls(task_id, data, labels.cat.codes.tolist(),
            labels.cat.categories.tolist(), transform=transform,
            target_transform=target_transform)

    def __init__(self, task_id, data, labels, categories, transform=None,
                 target_transform=None):
        super(TCGATask, self).__init__(task_id, len(categories),
            transform=transform, target_transform=target_transform)
        self.id = task_id
        self.data = data
        self.labels = labels
        self.categories = categories

    @property
    def input_size(self):
        return len(self.data[0])

    def __len__(self):
        return len(self.labels)

    def __iter__(self):
        for index in range(len(self)):
            yield self[index]

    def __getitem__(self, index):
        sample = self.data[index]
        target = self.labels[index]

        if self.transform is not None:
            sample = self.transform(sample)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return (sample, target)


def _assign_samples(tcga_metadataset):
    import pandas as pd
    import munkres

    blacklist = []
    sample_to_task_assignment = {}
    for cancer in get_cancers():
        filename = tcga_metadataset.get_processed_filename(cancer)
        dataframe = pd.read_csv(filename, sep='\t', index_col=0, header=0)
        dataframe = dataframe.drop(blacklist, errors='ignore')
        permutation = dataframe.index[torch.randperm(len(dataframe.index))]
        dataframe = dataframe.reindex(permutation)
        labels = dataframe.notna()
        labels = labels.applymap(lambda x: 1. if x else munkres.DISALLOWED)
        all_disallowed = labels.apply(
            lambda x: True if all(x == munkres.DISALLOWED) else False, axis=1)
        labels = labels.drop(labels[all_disallowed].index)

        matrix = labels.values
        shape = matrix.shape
        # The +5 allows for some slack in the assignment,
        # which is necessary for the used implementation to converge on BRCA
        repeats = int(np.ceil(shape[0] / shape[1])) + 5
        expanded_matrix = np.tile(matrix, (1, repeats))

        indices = munkres.Munkres().compute(expanded_matrix)
        mapped_indices = [(a, b % shape[1]) for a, b in indices]

        for index, mapped_index in mapped_indices:
            sample_to_task_assignment.setdefault(
                (dataframe.columns[mapped_index], cancer), []).append(
                dataframe.index[index])

        blacklist.extend(dataframe.index.tolist())

    return sample_to_task_assignment


def _expand_sample_usage(meta_dataset, all_allowed_samples, additional_samples):
    expanded_metadataset = {}
    all_samples_of_metadataset = OrderedSet()
    for key, value in meta_dataset.items():
        all_samples_of_metadataset.update(value)
    all_samples_of_metadataset.update(additional_samples)

    used_additional_samples = OrderedSet()
    for key in meta_dataset.keys():
        allowed_samples = set(all_allowed_samples[key])
        intersection = allowed_samples.intersection(all_samples_of_metadataset)
        expanded_metadataset[key] = list(intersection)
        used_additional_samples.update(additional_samples.intersection(intersection))

    return expanded_metadataset, used_additional_samples


def _split_tcga(tcga_metadataset, counts):
    all_allowed_samples = tcga_metadataset.task_ids
    # We first uniquely assign every sample to a task
    sample_to_task_assignment = _assign_samples(tcga_metadataset)

    keys = [i for i in all_allowed_samples.keys()]
    difference = set(sample_to_task_assignment.keys()).difference(set(keys))

    unassigned_samples = OrderedSet()
    for key in difference:
        unassigned_samples.update(sample_to_task_assignment[key])

    # Second we split the metadataset with a torch-based random sample
    permutation = torch.randperm(len(keys)).numpy()

    metadatasets = []
    start = 0
    end = 0
    for count in counts:
        end += count
        current_keys = [keys[index] for index in permutation[start:end]]
        metadatasets.append({key: sample_to_task_assignment[key]
                             for key in current_keys})
        start = end

    expanded_metadatasets = [None] * len(metadatasets)
    order = np.argsort([len(metadataset) for metadataset in metadatasets])

    # Finally we expand the tasks by reusing samples wherever possible in the sets
    blacklist = OrderedSet()
    for i in order:
        additional_samples = unassigned_samples.difference(blacklist)
        expanded_metadataset, used_additional_samples = _expand_sample_usage(
            metadatasets[i], all_allowed_samples, additional_samples)
        expanded_metadatasets[i] = expanded_metadataset
        blacklist.update(used_additional_samples)

    tcga_metadatasets = []
    preloaded = tcga_metadataset.preloaded
    tcga_metadataset.close()
    for metadataset in expanded_metadatasets:
        current_tcga_metadataset = copy.deepcopy(tcga_metadataset)
        current_tcga_metadataset.task_ids = metadataset
        if preloaded:
            current_tcga_metadataset.open()
        tcga_metadatasets.append(current_tcga_metadataset)

    return tcga_metadatasets


def get_cancers():
    return get_asset(TCGA.folder, 'cancers.json', dtype='json')


def get_task_variables():
    return get_asset(TCGA.folder, 'task_variables.json', dtype='json')


def get_task_id_splits(meta_split):
    return get_asset(TCGA.folder, '{}.json'.format(meta_split), dtype='json')
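
# --- A minimal usage sketch (editor's addition, not part of the original
# file): load the meta-train TCGA tasks and inspect one of them. Assumes the
# optional dependencies used by `download` (pandas, academictorrents, ...) are
# installed and the asset files are available.
if __name__ == '__main__':
    dataset = TCGA('data', meta_train=True, download=True, preload=False)
    print('Number of tasks: {0}'.format(len(dataset)))
    task = dataset[0]  # a TCGATask for one (attribute, cancer-type) pair
    print('Task {0}: {1} samples, input size {2}'.format(
        task.id, len(task), task.input_size))
    sample, target = task[0]  # gene-expression vector and class index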
21,312
39.289225
104
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/utils.py
import os
import json


def get_asset_path(*args):
    basedir = os.path.dirname(__file__)
    return os.path.join(basedir, 'assets', *args)


def get_asset(*args, dtype=None):
    filename = get_asset_path(*args)
    if not os.path.isfile(filename):
        raise IOError('{} not found'.format(filename))

    if dtype is None:
        _, dtype = os.path.splitext(filename)
        dtype = dtype[1:]

    if dtype == 'json':
        with open(filename, 'r') as f:
            data = json.load(f)
    else:
        raise NotImplementedError()

    return data

# QKFIX: The current version of `download_file_from_google_drive` (as of
# torchvision==0.8.1) is inconsistent, and a temporary fix has been added to
# the bleeding-edge version of Torchvision. The temporary fix removes the
# behaviour of `_quota_exceeded`, whenever the quota has been exceeded for the
# file to be downloaded. As a consequence, this means that there is currently
# no protection against exceeded quotas. If you get an integrity error in
# Torchmeta (e.g. "MiniImagenet integrity check failed" for MiniImagenet),
# then this means that the quota has been exceeded for this dataset.
# See also: https://github.com/tristandeleu/pytorch-meta/issues/54
#
# See also: https://github.com/pytorch/vision/issues/2992
#
# The following functions are taken from
# https://github.com/pytorch/vision/blob/cd0268cd408d19d91f870e36fdffd031085abe13/torchvision/datasets/utils.py

from torchvision.datasets.utils import (check_integrity, _get_confirm_token,
    _save_response_content)


def _quota_exceeded(response: "requests.models.Response"):
    return False  # See https://github.com/pytorch/vision/issues/2992 for details.
    # return "Google Drive - Quota exceeded" in response.text


def download_file_from_google_drive(file_id, root, filename=None, md5=None):
    """Download a file from Google Drive and place it in root.

    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
    import requests
    url = "https://docs.google.com/uc?export=download"

    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.path.join(root, filename)

    os.makedirs(root, exist_ok=True)

    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        session = requests.Session()

        response = session.get(url, params={'id': file_id}, stream=True)
        token = _get_confirm_token(response)

        if token:
            params = {'id': file_id, 'confirm': token}
            response = session.get(url, params=params, stream=True)

        if _quota_exceeded(response):
            msg = (
                f"The daily quota of the file {filename} is exceeded and it "
                f"can't be downloaded. This is a limitation of Google Drive "
                f"and can only be overcome by trying again later."
            )
            raise RuntimeError(msg)

        _save_response_content(response, fpath)
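
# --- A small usage sketch (editor's addition, not part of the original
# file). `get_asset` resolves files shipped under
# torchmeta_local/datasets/assets/; the call below mirrors the one made in
# doublemnist.py and assumes that asset file is present.
if __name__ == '__main__':
    train_labels = get_asset('doublemnist', 'train.json', dtype='json')
    print('{0} meta-train classes for DoubleMNIST'.format(len(train_labels)))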
3,330
36.852273
111
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/helpers_tabular.py
import warnings

from torchmeta_local.datasets import Letter, PlantsTexture, PlantsShape, PlantsMargin, Bach
from torchmeta_local.transforms import Categorical, ClassSplitter
from torchmeta_local.transforms.tabular_transforms import NumpyToTorch

__all__ = [
    'letter',
    'plants_texture',
    'plants_shape',
    'plants_margin',
    'bach'
]


def helper_with_default_tabular(klass, folder, shots, ways, shuffle=True,
                                test_shots=None, seed=None, defaults=None,
                                **kwargs):
    """
    Parameters
    ----------
    klass : CombinationMetaDataset
        The class corresponding to the meta-dataset, e.g., Covertype.

    folder : string
        Root directory where the dataset folder exists, e.g.,
        `covertype_task_id_2118`.

    shots : int
        Number of (training) examples per class in each task. This corresponds
        to `k` in `k-shot` classification.

    ways : int
        Number of classes per task. This corresponds to `N` in `N-way`
        classification.

    shuffle : bool (default: `True`)
        Shuffle the examples when creating the tasks.

    test_shots : int, optional
        Number of test examples per class in each task. If `None`, then the
        number of test examples is equal to the number of training examples
        per class.

    seed : int, optional
        Random seed to be used in the meta-dataset.

    kwargs
        Additional arguments passed to the `klass` meta-dataset class.

    Returns
    -------
    klass
        The meta-dataset with ClassSplitter applied, e.g., Covertype.
    """
    if defaults is None:
        defaults = {}

    if 'num_classes_per_task' in kwargs:
        warnings.warn('Both arguments `ways` and `num_classes_per_task` were '
                      'set in the helper function for the number of classes per '
                      'task. Ignoring the argument `ways`.', stacklevel=2)
        ways = kwargs['num_classes_per_task']
    if 'transform' not in kwargs:
        kwargs['transform'] = defaults.get('transform', NumpyToTorch())
    if 'target_transform' not in kwargs:
        kwargs['target_transform'] = defaults.get('target_transform',
                                                  Categorical(ways))
    if 'class_augmentations' not in kwargs:
        kwargs['class_augmentations'] = defaults.get('class_augmentations', None)
    if test_shots is None:
        test_shots = shots

    dataset = klass(folder, num_classes_per_task=ways, **kwargs)
    dataset = ClassSplitter(dataset, shuffle=shuffle,
                            num_train_per_class=shots,
                            num_test_per_class=test_shots)
    dataset.seed(seed)

    return dataset


def letter(folder: str, shots: int, ways: int, shuffle: bool = True,
           test_shots: int = None, seed: int = None, **kwargs) -> Letter:
    """Wrapper that creates a meta-dataset for the Letter tabular dataset.

    Notes
    -----
    Letter has 26 classes in total with default splits train/val/test : 15/5/6.

    See also
    --------
    `datasets.Letter` : CombinationMetaDataset for the Letter dataset.
    """
    return helper_with_default_tabular(Letter, folder, shots, ways,
                                       shuffle=shuffle, test_shots=test_shots,
                                       seed=seed, defaults=None, **kwargs)


def plants_texture(folder: str, shots: int, ways: int, shuffle: bool = True,
                   test_shots: int = None, seed: int = None,
                   **kwargs) -> PlantsTexture:
    """Wrapper that creates a meta-dataset for the PlantsTexture tabular dataset.

    Notes
    -----
    PlantsTexture has 100 classes in total with default splits
    train/val/test : 70/15/15.

    See also
    --------
    `datasets.PlantsTexture` : CombinationMetaDataset for the PlantsTexture dataset.
    """
    return helper_with_default_tabular(PlantsTexture, folder, shots, ways,
                                       shuffle=shuffle, test_shots=test_shots,
                                       seed=seed, defaults=None, **kwargs)


def plants_shape(folder: str, shots: int, ways: int, shuffle: bool = True,
                 test_shots: int = None, seed: int = None,
                 **kwargs) -> PlantsShape:
    """Wrapper that creates a meta-dataset for the PlantsShape tabular dataset.

    Notes
    -----
    PlantsShape has 100 classes in total with default splits
    train/val/test : 70/15/15.

    See also
    --------
    `datasets.PlantsShape` : CombinationMetaDataset for the PlantsShape dataset.
    """
    return helper_with_default_tabular(PlantsShape, folder, shots, ways,
                                       shuffle=shuffle, test_shots=test_shots,
                                       seed=seed, defaults=None, **kwargs)


def plants_margin(folder: str, shots: int, ways: int, shuffle: bool = True,
                  test_shots: int = None, seed: int = None,
                  **kwargs) -> PlantsMargin:
    """Wrapper that creates a meta-dataset for the PlantsMargin tabular dataset.

    Notes
    -----
    PlantsMargin has 100 classes in total with default splits
    train/val/test : 70/15/15.

    See also
    --------
    `datasets.PlantsMargin` : CombinationMetaDataset for the PlantsMargin dataset.
    """
    return helper_with_default_tabular(PlantsMargin, folder, shots, ways,
                                       shuffle=shuffle, test_shots=test_shots,
                                       seed=seed, defaults=None, **kwargs)


def bach(folder: str, shots: int, ways: int, shuffle: bool = True,
         test_shots: int = None, min_num_samples_per_class: int = None,
         seed: int = None, **kwargs) -> Bach:
    """Wrapper that creates a meta-dataset for the Bach tabular dataset.

    Notes
    -----
    Bach has 101 classes in total with default splits
    train/val/test : 70/15/15.  # Todo change

    See also
    --------
    `datasets.Bach` : CombinationMetaDataset for the Bach dataset.
    """
    if min_num_samples_per_class is None:
        if test_shots is None:
            min_num_samples_per_class = int(2 * shots)
        else:
            min_num_samples_per_class = int(test_shots + shots)

    return helper_with_default_tabular(Bach, folder, shots, ways,
                                       shuffle=shuffle, test_shots=test_shots,
                                       seed=seed, defaults=None,
                                       min_num_samples_per_class=min_num_samples_per_class,
                                       **kwargs)
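
# --- A minimal usage sketch (editor's addition, not part of the original
# file). `sample_task()` is assumed to be provided by the MetaDataset base
# class, as in torchmeta; everything else uses the helpers defined above.
if __name__ == '__main__':
    # 5-way, 1-shot Letter tasks with 15 test examples per class.
    dataset = letter('data', shots=1, ways=5, test_shots=15, seed=0,
                     download=True)
    task = dataset.sample_task()
    inputs, target = task['train'][0]
    print('First support example: {0}, label: {1}'.format(inputs.shape, target))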
6,290
34.744318
101
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/bach.py
import numpy as np
import os
import json
import h5py

from tqdm import tqdm

from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset
from torchmeta_local.datasets.utils import get_asset


class Bach(CombinationMetaDataset):
    """The Bach dataset"""
    def __init__(self, root, num_classes_per_task=None, meta_train=False,
                 meta_val=False, meta_test=False, meta_split=None,
                 transform=None, target_transform=None, dataset_transform=None,
                 class_augmentations=None, download=False,
                 process_features=True, min_num_samples_per_class=1):
        """
        Bach Choral Harmony dataset [1], [2]
        open-ml-id: 4552
        https://archive.ics.uci.edu/ml/datasets/Bach+Choral+Harmony

        Abstract: The data set is composed of 60 chorales (5665 events) by
        J.S. Bach (1685-1750). Each event of each chorale is labelled using 1
        among 101 chord labels and described through 14 features.

        Data Set Information: Pitch classes information has been extracted
        from MIDI sources downloaded from
        (JSB Chorales)[http://www.jsbchorales.net/]. Meter information has
        been computed through the Meter program, which is part of the Melisma
        music analyser (Melisma)[http://www.link.cs.cmu.edu/music-analysis/].
        Chord labels have been manually annotated by a human expert.

        Attribute Information:

        1. Choral ID: corresponding to the file names from
           (Bach Central)[http://www.bachcentral.com/].
        2. Event number: index (starting from 1) of the event inside the chorale.
        3-14. Pitch classes: YES/NO depending on whether a given pitch is
           present. Pitch classes/attribute correspondence is as follows:
           C -> 3, C#/Db -> 4, D -> 5, ..., B -> 14
        15. Bass: Pitch class of the bass note.
        16. Meter: integers from 1 to 5. Lower numbers denote less accented
            events, higher numbers denote more accented events.
        17. Chord label: Chord resonating during the given event.

        Notes
        -----
        The features V1 and V2 are dropped during the processing. V1 is the
        Choral ID. V2 is the event number of the event inside the chorale.

        Parameters
        ----------
        root : string
            Root directory where the dataset folder `bach` exists.

        num_classes_per_task : int
            Number of classes per tasks. This corresponds to "N" in "N-way"
            classification.

        meta_train : bool (default: `False`)
            Use the meta-train split of the dataset. If set to `True`, then
            the arguments `meta_val` and `meta_test` must be set to `False`.
            Exactly one of these three arguments must be set to `True`.

        meta_val : bool (default: `False`)
            Use the meta-validation split of the dataset. If set to `True`,
            then the arguments `meta_train` and `meta_test` must be set to
            `False`. Exactly one of these three arguments must be set to
            `True`.

        meta_test : bool (default: `False`)
            Use the meta-test split of the dataset. If set to `True`, then
            the arguments `meta_train` and `meta_val` must be set to `False`.
            Exactly one of these three arguments must be set to `True`.

        meta_split : string in {'train', 'val', 'test'}, optional
            Name of the split to use. This overrides the arguments
            `meta_train`, `meta_val` and `meta_test` if all three are set to
            `False`.

        transform : callable, optional
            A function/transform that takes a numpy array or a pytorch array
            (depending on when the transform is applied), and returns a
            transformed version.

        target_transform : callable, optional
            A function/transform that takes a target, and returns a
            transformed version.

        dataset_transform : callable, optional
            A function/transform that takes a dataset (ie. a task), and
            returns a transformed version of it. E.g.
            `torchmeta_local.transforms.ClassSplitter()`.

        class_augmentations : list of callable, optional
            A list of functions that augment the dataset with new classes.
            These classes are transformations of existing classes.

        download : bool (default: `False`)
            If `True`, downloads the original files and processes the dataset
            in the root directory (under the `bach` folder). If the dataset
            is already available, this does not download/process the dataset
            again.

        process_features : bool (default: `True`)
            If `True`, normalizes each numeric feature f according to
            (f - lower) / (upper - lower), where upper and lower are the max
            and min values of feature f in the meta-train dataset, and also
            one-hot encodes the categorical features.

        min_num_samples_per_class : int (default: 1)
            Minimum number of samples per class that need to be present for
            the class to be used.

        References
        ----------
        [1] D. P. Radicioni and R. Esposito. Advances in Music Information
            Retrieval, chapter BREVE: an HMPerceptron-Based Chord Recognition
            System. Studies in Computational Intelligence, Zbigniew W. Ras
            and Alicja Wieczorkowska (Editors), Springer, 2010.

        [2] Esposito, R. and Radicioni, D. P., CarpeDiem: Optimizing the
            Viterbi Algorithm and Applications to Supervised Sequential
            Learning, Journal of Machine Learning Research,
            10(Aug):1851-1880, 2009.
        """
        dataset = BachClassDataset(root, meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            transform=transform, class_augmentations=class_augmentations,
            download=download, process_features=process_features,
            min_num_samples_per_class=min_num_samples_per_class)
        super(Bach, self).__init__(dataset, num_classes_per_task,
            target_transform=target_transform,
            dataset_transform=dataset_transform)


class BachClassDataset(ClassDataset):
    open_ml_id = 4552
    open_ml_url = 'https://www.openml.org/d/' + str(open_ml_id)
    dataset_name = "bach"
    folder = "bach"

    filename = '{0}_data.hdf5'
    filename_labels = '{0}_labels.json'
    filename_meta_data = 'meta_data.json'

    def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
                 meta_split=None, transform=None, class_augmentations=None,
                 download=False, process_features=True,
                 min_num_samples_per_class=None):
        super(BachClassDataset, self).__init__(meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            class_augmentations=class_augmentations)

        self.root = os.path.join(os.path.expanduser(root), self.folder)
        self.transform = transform

        self.split_filename = os.path.join(self.root,
            self.filename.format(self.meta_split))
        self.split_filename_labels = os.path.join(self.root,
            self.filename_labels.format(self.meta_split))
        self.split_filename_meta_data = os.path.join(self.root,
            self.filename_meta_data)

        self._data_file = None
        self._data = None
        self._labels = None
        self._meta_data = None
        self._lower_upper = None
        self._categories = None

        if download:
            self.download(process_features, min_num_samples_per_class)

        if min_num_samples_per_class != self.meta_data["min_num_data_per_class"]:
            raise ValueError("min_num_samples_per_class given ({0}) does not "
                "match existing value ({1}).".format(
                    min_num_samples_per_class,
                    self.meta_data["min_num_data_per_class"]))

        if not self._check_integrity():
            raise RuntimeError('Bach integrity check failed')
        self._num_classes = len(self.labels)

    def __getitem__(self, index):
        label = self.labels[index % self.num_classes]
        data = self.data[label]
        transform = self.get_transform(index, self.transform)
        target_transform = self.get_target_transform(index)

        return BachDataset(index, data, label, transform=transform,
                           target_transform=target_transform)

    @property
    def num_classes(self):
        return self._num_classes

    @property
    def data(self):
        if self._data is None:
            self._data_file = h5py.File(self.split_filename, 'r')
            self._data = self._data_file['datasets']
        return self._data

    @property
    def labels(self):
        if self._labels is None:
            with open(self.split_filename_labels, 'r') as f:
                self._labels = json.load(f)
        return self._labels

    @property
    def meta_data(self):
        if self._meta_data is None:
            with open(self.split_filename_meta_data, 'r') as f:
                self._meta_data = json.load(f)
        return self._meta_data

    @property
    def lower_upper(self):
        if self._lower_upper is None:
            self._lower_upper = {
                "lower": self.meta_data["lower"],
                "upper": self.meta_data["upper"],
                "feature_names_numerical": self.meta_data["feature_names_numerical"]}
        return self._lower_upper

    @property
    def categories(self):
        if self._categories is None:
            self._categories = {
                "categories": self.meta_data["categories"],
                "feature_names_categorical": self.meta_data["feature_names_categorical"]}
        return self._categories

    def _check_integrity(self):
        return (os.path.isfile(self.split_filename)
            and os.path.isfile(self.split_filename_labels))

    def close(self):
        # Close the underlying HDF5 file handle (h5py groups themselves
        # cannot be closed).
        if self._data_file is not None:
            self._data_file.close()
            self._data_file = None
            self._data = None

    def download(self, process_features, min_num_samples_per_class):
        if self._check_integrity():
            return

        from sklearn.datasets import fetch_openml

        data = fetch_openml(data_id=self.open_ml_id)
        features = data.data
        targets = data.target
        feature_names = np.array(data.feature_names)

        os.makedirs(self.root, exist_ok=True)

        # drop V1 and V2. V1 is the index of the choral, and V2 is the event
        # number: index (starting from 1) of the event inside the chorale.
        features_to_drop = np.array(['V1', 'V2'])
        idx_drop = [np.where(feature_names == v)[0][0] for v in features_to_drop]
        idx_keep = np.array([True] * feature_names.shape[0])
        for i in idx_drop:
            idx_keep[i] = False
        features = features[:, idx_keep]
        feature_names = feature_names[idx_keep]

        # get categorical feature names
        feature_names_cat = []
        for v in feature_names:
            if v in data.categories.keys():
                feature_names_cat.append(v)
        feature_names_cat = np.array(feature_names_cat)

        # get numerical feature names
        feature_names_num = []
        for fname in feature_names:
            if fname not in feature_names_cat:
                feature_names_num.append(fname)
        feature_names_num = np.array(feature_names_num)

        assert feature_names_num.shape[0] + feature_names_cat.shape[0] == len(feature_names)

        is_categorical = np.array([feature_name in feature_names_cat
                                   for feature_name in feature_names])
        is_numerical = np.array([feature_name in feature_names_num
                                 for feature_name in feature_names])

        # get categories
        categories = []
        for i in range(feature_names_cat.shape[0]):
            categories_i = np.unique(features[:, is_categorical][:, i])
            categories.append(categories_i.tolist())

        if process_features:
            from sklearn.preprocessing import OneHotEncoder
            ohe = OneHotEncoder(categories=categories, sparse=False, dtype=float)

        # for each meta-data-split, get the labels, then check which data-point
        # belongs to the set (via a mask). Then, retrieve the features and
        # targets belonging to the set and create an hdf5 file for these features.
        for s, split in enumerate(['train', 'val', 'test']):
            targets_assets_split = get_asset(self.folder, '{0}.json'.format(split))
            is_in_split = [t in targets_assets_split for t in targets]
            features_split = features[is_in_split, :]
            targets_split = targets[is_in_split]
            assert targets_split.shape[0] == features_split.shape[0]
            unique_targets_split = np.unique(targets_split)

            # first we check how many data-points are associated with each
            # class. If it is less than the threshold,
            # min_num_samples_per_class, then we discard the whole class.
            num_dat_per_class = []
            for label in unique_targets_split:
                num_dat_per_class.append(
                    features_split[targets_split == label, :].shape[0])
            num_dat_per_class = np.array(num_dat_per_class)

            # remove labels which have fewer data-points associated with them
            # than the threshold min_num_samples_per_class.
            classes_to_keep = num_dat_per_class >= min_num_samples_per_class
            unique_targets_with_enough_data_split = unique_targets_split[classes_to_keep]
            if unique_targets_with_enough_data_split.shape[0] < unique_targets_split.shape[0]:
                print("split ({2}): number of unique targets with enough data "
                      "({0}) is smaller than number of unique targets in "
                      "assets ({1})".format(
                          unique_targets_with_enough_data_split.shape[0],
                          unique_targets_split.shape[0], split))

            # write unique targets to json file.
            labels_filename = os.path.join(self.root,
                self.filename_labels.format(split))
            with open(labels_filename, 'w') as f:
                json.dump(unique_targets_with_enough_data_split.tolist(), f)

            # get pre-processing stats from the meta-train split
            if split == 'train':
                # numerical features
                lower, upper = np.zeros(features.shape[1]), np.ones(features.shape[1])
                if process_features:
                    # per-feature lower / upper bounds
                    lower = np.min(features[:, is_numerical], axis=0)
                    upper = np.max(features[:, is_numerical], axis=0)
                self._lower_upper = {'lower': lower.tolist(),
                                     'upper': upper.tolist(),
                                     'features_names': feature_names_num.tolist()}

            # apply pre-processing of features
            if process_features:
                features_split_num = np.true_divide(
                    (features_split[:, is_numerical] - lower), (upper - lower))
                features_split_cat = ohe.fit_transform(
                    features_split[:, is_categorical])
                features_split = np.hstack([features_split_num,
                                            features_split_cat])

            # write data (features and class labels)
            filename = os.path.join(self.root, self.filename.format(split))
            with h5py.File(filename, 'w') as f:
                group = f.create_group('datasets')
                for i, label in enumerate(tqdm(
                        unique_targets_with_enough_data_split, desc=filename)):
                    data_class = features_split[targets_split == label, :]
                    group.create_dataset(label, data=data_class)

        # store meta-data of the dataset (not the meta-dataset). Extend this
        # dictionary if you want to store more meta-data of the meta-dataset.
        meta_data = {"min_num_data_per_class": min_num_samples_per_class,
                     "lower": lower.tolist(),
                     "upper": upper.tolist(),
                     "feature_names_numerical": feature_names_num.tolist(),
                     "feature_names_categorical": feature_names_cat.tolist(),
                     "categories": categories,
                     "dropped_features": features_to_drop.tolist()}
        with open(self.split_filename_meta_data, 'w') as f:
            json.dump(meta_data, f)


class BachDataset(Dataset):
    def __init__(self, index, data, label, transform=None, target_transform=None):
        super(BachDataset, self).__init__(index, transform=transform,
                                          target_transform=target_transform)
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        features = self.data[index, :]
        target = self.label

        if self.transform is not None:
            features = self.transform(features)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return features, target


def create_asset(root='data', fractions=None, seed=42):
    """This method creates the assets of the Bach dataset. These are the
    meta-dataset splits from the original data. Only run this method in case
    you want to create new assets. Once created, copy the assets to this
    directory: torchmeta_local.datasets.assets.bach. You can also manually
    change the assets."""
    # split fractions: train, valid, test
    if fractions is None:
        fractions = [0.6, 0.2, 0.2]
    assert sum(fractions) == 1

    from sklearn.datasets import fetch_openml
    data = fetch_openml(data_id=BachClassDataset.open_ml_id)
    unique_targets = np.unique(data.target)
    num_unique_targets = len(unique_targets)
    num_split = [int(f * num_unique_targets) for f in fractions]
    num_split[2] = num_unique_targets - num_split[0] - num_split[1]
    assert sum(num_split) == num_unique_targets

    # split unique labels randomly
    np.random.seed(seed)
    perm = np.random.permutation(num_unique_targets)
    targets_split = {'train': [unique_targets[i] for i in perm[:num_split[0]]],
                     'val': [unique_targets[i] for i in
                             perm[num_split[0]: num_split[0] + num_split[1]]],
                     'test': [unique_targets[i] for i in
                              perm[num_split[0] + num_split[1]:]]}

    # write splits
    root_path = os.path.join(os.path.expanduser(root), BachClassDataset.folder)
    for split in ["train", "val", "test"]:
        asset_filename = os.path.join(root_path, "{0}.json".format(split))
        with open(asset_filename, 'w') as f:
            json.dump(targets_split[split], f)
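
# --- A minimal usage sketch (editor's addition, not part of the original
# file): build the Bach class dataset directly. `min_num_samples_per_class`
# must match the value used when the HDF5 files were first created (see the
# check in `BachClassDataset.__init__`).
if __name__ == '__main__':
    class_dataset = BachClassDataset('data', meta_train=True, download=True,
                                     min_num_samples_per_class=2)
    print('{0} chord-label classes in the meta-train split'.format(
        class_dataset.num_classes))
    class_dataset.close()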
18,777
43.709524
118
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/one_hundred_plants_shape.py
import numpy as np
import os
import json
import h5py

from tqdm import tqdm

from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset
from torchmeta_local.datasets.utils import get_asset


class PlantsShape(CombinationMetaDataset):
    """The PlantsShape dataset"""
    def __init__(self, root, num_classes_per_task=None, meta_train=False,
                 meta_val=False, meta_test=False, meta_split=None,
                 transform=None, target_transform=None, dataset_transform=None,
                 class_augmentations=None, download=False,
                 process_features=False):
        """
        One-hundred plant species leaves dataset (Class = Shape) [1], [2], [3]
        open-ml-id: 1492
        https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set - 2010

        (a) Original owners of colour Leaves Samples: James Cope, Thibaut
        Beghin, Paolo Remagnino, Sarah Barman. The colour images are not
        included. The Leaves were collected in the Royal Botanic Gardens,
        Kew, UK. email: james.cope@kingston.ac.uk

        (b) This dataset consists of work carried out by James Cope, Charles
        Mallah, and James Orwell. Donor of database Charles Mallah:
        charles.mallah@kingston.ac.uk; James Cope: james.cope@kingston.ac.uk

        The original data directory contains the binary images (masks) of the
        leaf samples (colour images not included). There are three features
        for each image: Shape, Margin and Texture. For each feature, a 64
        element vector is given per leaf sample. These vectors are taken as a
        contiguous descriptor (for shape) or histograms (for texture and
        margin). So, there are three different files, one for each feature
        problem:

        * 'data_Sha_64.txt' -> prediction based on shape [dataset provided here]
        * 'data_Tex_64.txt' -> prediction based on texture
        * 'data_Mar_64.txt' -> prediction based on margin

        Each row has a 64-element feature vector followed by the Class label.
        There is a total of 1600 samples with 16 samples per leaf class
        (100 classes), and no missing values. Three 64 element feature
        vectors per sample.

        Parameters
        ----------
        root : string
            Root directory where the dataset folder
            `one_hundred_plants_shape` exists.

        num_classes_per_task : int
            Number of classes per tasks. This corresponds to "N" in "N-way"
            classification.

        meta_train : bool (default: `False`)
            Use the meta-train split of the dataset. If set to `True`, then
            the arguments `meta_val` and `meta_test` must be set to `False`.
            Exactly one of these three arguments must be set to `True`.

        meta_val : bool (default: `False`)
            Use the meta-validation split of the dataset. If set to `True`,
            then the arguments `meta_train` and `meta_test` must be set to
            `False`. Exactly one of these three arguments must be set to
            `True`.

        meta_test : bool (default: `False`)
            Use the meta-test split of the dataset. If set to `True`, then
            the arguments `meta_train` and `meta_val` must be set to `False`.
            Exactly one of these three arguments must be set to `True`.

        meta_split : string in {'train', 'val', 'test'}, optional
            Name of the split to use. This overrides the arguments
            `meta_train`, `meta_val` and `meta_test` if all three are set to
            `False`.

        transform : callable, optional
            A function/transform that takes a numpy array or a pytorch array
            (depending on when the transform is applied), and returns a
            transformed version.

        target_transform : callable, optional
            A function/transform that takes a target, and returns a
            transformed version.

        dataset_transform : callable, optional
            A function/transform that takes a dataset (ie. a task), and
            returns a transformed version of it. E.g.
            `torchmeta_local.transforms.ClassSplitter()`.

        class_augmentations : list of callable, optional
            A list of functions that augment the dataset with new classes.
            These classes are transformations of existing classes.

        download : bool (default: `False`)
            If `True`, downloads the original files and processes the dataset
            in the root directory (under the `one_hundred_plants_shape`
            folder). If the dataset is already available, this does not
            download/process the dataset again.

        process_features : bool (default: `False`)
            If `True`, normalizes each feature f according to
            (f - mean) / (std + 1e-10), where mean and std are the mean and
            standard deviation of the feature f of the meta-train dataset.

        References
        ----------
        [1] Charles Mallah, James Cope, James Orwell. Plant Leaf
            Classification Using Probabilistic Integration of Shape, Texture
            and Margin Features. Signal Processing, Pattern Recognition and
            Applications, in press.

        [2] J. Cope, P. Remagnino, S. Barman, and P. Wilkin. Plant texture
            classification using gabor co-occurrences. Advances in Visual
            Computing, pages 669-677, 2010.

        [3] T. Beghin, J. Cope, P. Remagnino, and S. Barman. Shape and
            texture based plant leaf classification. In: Advanced Concepts
            for Intelligent Vision Systems, pages 345-353. Springer, 2010.
        """
        dataset = PlantsShapeClassDataset(root, meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            transform=transform, class_augmentations=class_augmentations,
            download=download, normalize=process_features)
        super(PlantsShape, self).__init__(dataset, num_classes_per_task,
            target_transform=target_transform,
            dataset_transform=dataset_transform)


class PlantsShapeClassDataset(ClassDataset):
    open_ml_id = 1492
    open_ml_url = 'https://www.openml.org/d/' + str(open_ml_id)
    dataset_name = "one_hundred_plants_shape"
    folder = "one_hundred_plants_shape"

    filename = '{0}_data.hdf5'
    filename_labels = '{0}_labels.json'
    filename_mean_std = 'features_mean_std.json'

    def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
                 meta_split=None, transform=None, class_augmentations=None,
                 download=False, normalize=True):
        super(PlantsShapeClassDataset, self).__init__(meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            class_augmentations=class_augmentations)

        self.root = os.path.join(os.path.expanduser(root), self.folder)
        self.transform = transform

        self.split_filename = os.path.join(self.root,
            self.filename.format(self.meta_split))
        self.split_filename_labels = os.path.join(self.root,
            self.filename_labels.format(self.meta_split))
        self.split_filename_mean_std = os.path.join(self.root,
            self.filename_mean_std)

        self._data_file = None
        self._data = None
        self._labels = None
        self._mean_std = None

        if download:
            self.download(normalize)

        if not self._check_integrity():
            raise RuntimeError('PlantsShape integrity check failed')
        self._num_classes = len(self.labels)

    def __getitem__(self, index):
        label = self.labels[index % self.num_classes]
        data = self.data[label]
        transform = self.get_transform(index, self.transform)
        target_transform = self.get_target_transform(index)

        return PlantsShapeDataset(index, data, label, transform=transform,
                                  target_transform=target_transform)

    @property
    def num_classes(self):
        return self._num_classes

    @property
    def data(self):
        if self._data is None:
            self._data_file = h5py.File(self.split_filename, 'r')
            self._data = self._data_file['datasets']
        return self._data

    @property
    def labels(self):
        if self._labels is None:
            with open(self.split_filename_labels, 'r') as f:
                self._labels = json.load(f)
        return self._labels

    @property
    def mean_std(self):
        if self._mean_std is None:
            with open(self.split_filename_mean_std, 'r') as f:
                self._mean_std = json.load(f)
        return self._mean_std

    def _check_integrity(self):
        return (os.path.isfile(self.split_filename)
            and os.path.isfile(self.split_filename_labels))

    def close(self):
        # Close the underlying HDF5 file handle (h5py groups themselves
        # cannot be closed).
        if self._data_file is not None:
            self._data_file.close()
            self._data_file = None
            self._data = None

    def download(self, normalize):
        if self._check_integrity():
            return

        from sklearn.datasets import fetch_openml

        data = fetch_openml(data_id=self.open_ml_id)
        features = data.data
        targets = data.target

        os.makedirs(self.root, exist_ok=True)

        # for each meta-data-split, get the labels, then check which data-point
        # belongs to the set (via a mask). Then, retrieve the features and
        # targets belonging to the set and create an hdf5 file for these features.
        for s, split in enumerate(['train', 'val', 'test']):
            targets_assets_split = get_asset(self.folder, '{0}.json'.format(split))
            is_in_split = [t in targets_assets_split for t in targets]
            features_split = features.loc[is_in_split]
            targets_split = targets.loc[is_in_split]
            assert targets_split.shape[0] == features_split.shape[0]
            unique_targets_split = np.sort(np.unique(targets_split))
            if len(targets_assets_split) > unique_targets_split.shape[0]:
                print(f"unique set of labels ({(unique_targets_split.shape[0])}) "
                      f"is smaller than set of labels given by assets "
                      f"({len(targets_assets_split)}). Proceeding with unique "
                      f"set of labels.")

            # write unique targets to json file.
            labels_filename = os.path.join(self.root,
                self.filename_labels.format(split))
            with open(labels_filename, 'w') as f:
                json.dump(unique_targets_split.tolist(), f)

            # normalize to zero mean and standard deviation 1 with stats from
            # the 'train' split only
            if split == 'train':
                mean, std = np.zeros(features.shape[1]), np.ones(features.shape[1])
                if normalize:
                    mean = np.mean(features_split, axis=0)
                    std = np.std(features_split, axis=0)
                self._mean_std = {'mean': mean.tolist(),
                                  'std': std.tolist()}
                mean_std_filename = os.path.join(self.root,
                    self.filename_mean_std)
                with open(mean_std_filename, 'w') as f:
                    json.dump(self._mean_std, f)

            mean_std = self.mean_std
            mean = np.array(mean_std['mean'])
            std = np.array(mean_std['std'])
            features_split = (features_split - mean) / (std + 1e-10)

            # write data (features and class labels)
            filename = os.path.join(self.root, self.filename.format(split))
            with h5py.File(filename, 'w') as f:
                group = f.create_group('datasets')
                for i, label in enumerate(tqdm(unique_targets_split,
                                               desc=filename)):
                    data_class = features_split.loc[targets_split == label]
                    group.create_dataset(label, data=data_class)


class PlantsShapeDataset(Dataset):
    def __init__(self, index, data, label, transform=None, target_transform=None):
        super(PlantsShapeDataset, self).__init__(index, transform=transform,
            target_transform=target_transform)
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        features = self.data[index, :]
        target = self.label

        if self.transform is not None:
            features = self.transform(features)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return features, target


def create_asset(root='data', fractions=None, seed=42):
    """This method creates the assets of the PlantsShape dataset. These are
    the meta-dataset splits from the original data. Only run this method in
    case you want to create new assets. Once created, copy the assets to this
    directory: torchmeta_local.datasets.assets.one_hundred_plants_shape. You
    can also manually change the assets."""
    # split fractions: train, valid, test
    if fractions is None:
        fractions = [0.7, 0.15, 0.15]
    assert sum(fractions) == 1

    from sklearn.datasets import fetch_openml
    data = fetch_openml(data_id=PlantsShapeClassDataset.open_ml_id)
    unique_targets = np.unique(data.target)
    num_unique_targets = len(unique_targets)
    num_split = [int(f * num_unique_targets) for f in fractions]
    num_split[1] = num_unique_targets - num_split[0] - num_split[2]
    assert sum(num_split) == num_unique_targets

    # split unique labels randomly
    np.random.seed(seed)
    perm = np.random.permutation(num_unique_targets)
    targets_split = {'train': [unique_targets[i] for i in perm[:num_split[0]]],
                     'val': [unique_targets[i] for i in
                             perm[num_split[0]: num_split[0] + num_split[1]]],
                     'test': [unique_targets[i] for i in
                              perm[num_split[0] + num_split[1]:]]}

    # write splits
    root_path = os.path.join(os.path.expanduser(root),
                             PlantsShapeClassDataset.folder)
    for split in ["train", "val", "test"]:
        asset_filename = os.path.join(root_path, "{0}.json".format(split))
        with open(asset_filename, 'w') as f:
            json.dump(targets_split[split], f)
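
# --- A minimal usage sketch (editor's addition, not part of the original
# file): the PlantsShape meta-dataset built with the same helper used for the
# other tabular datasets (see helpers_tabular.py in this package).
if __name__ == '__main__':
    from torchmeta_local.datasets.helpers_tabular import plants_shape

    # 5-way, 5-shot PlantsShape tasks with 10 test examples per class.
    dataset = plants_shape('data', shots=5, ways=5, test_shots=10,
                           seed=0, download=True)
    print('PlantsShape meta-dataset ready: {0}'.format(dataset))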
14,340
43.676012
121
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/doublemnist.py
import numpy as np
from PIL import Image
import os
import io
import json
import glob
import h5py

from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset
# QKFIX: See torchmeta_local.datasets.utils for more information
from torchmeta_local.datasets.utils import download_file_from_google_drive
from torchmeta_local.datasets.utils import get_asset


class DoubleMNIST(CombinationMetaDataset):
    """
    The Double MNIST dataset, introduced in [1]. This dataset is based on the
    MNIST dataset [2]. It consists of sampled images from MNIST that are put
    together to create images with multiple digits. It contains 100,000 images
    from 100 different classes (1000 images per class, for the numbers 00 to 99).

    Parameters
    ----------
    root : string
        Root directory where the dataset folder `doublemnist` exists.

    num_classes_per_task : int
        Number of classes per tasks. This corresponds to "N" in "N-way"
        classification.

    meta_train : bool (default: `False`)
        Use the meta-train split of the dataset. If set to `True`, then the
        arguments `meta_val` and `meta_test` must be set to `False`. Exactly
        one of these three arguments must be set to `True`.

    meta_val : bool (default: `False`)
        Use the meta-validation split of the dataset. If set to `True`, then
        the arguments `meta_train` and `meta_test` must be set to `False`.
        Exactly one of these three arguments must be set to `True`.

    meta_test : bool (default: `False`)
        Use the meta-test split of the dataset. If set to `True`, then the
        arguments `meta_train` and `meta_val` must be set to `False`. Exactly
        one of these three arguments must be set to `True`.

    meta_split : string in {'train', 'val', 'test'}, optional
        Name of the split to use. This overrides the arguments `meta_train`,
        `meta_val` and `meta_test` if all three are set to `False`.

    transform : callable, optional
        A function/transform that takes a `PIL` image, and returns a
        transformed version. See also `torchvision.transforms`.

    target_transform : callable, optional
        A function/transform that takes a target, and returns a transformed
        version. See also `torchvision.transforms`.

    dataset_transform : callable, optional
        A function/transform that takes a dataset (ie. a task), and returns a
        transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`.

    class_augmentations : list of callable, optional
        A list of functions that augment the dataset with new classes. These
        classes are transformations of existing classes. E.g.
        `torchmeta_local.transforms.HorizontalFlip()`.

    download : bool (default: `False`)
        If `True`, downloads the pickle files and processes the dataset in the
        root directory (under the `doublemnist` folder). If the dataset is
        already available, this does not download/process the dataset again.

    Notes
    -----
    The dataset is downloaded from the Multi-digit MNIST repository
    [1](https://github.com/shaohua0116/MultiDigitMNIST). The dataset contains
    images (MNIST double digits) from 100 classes, for the numbers 00 to 99.
    The meta train/validation/test splits are 64/16/20 classes. The splits
    are taken from [1].

    References
    ----------
    .. [1] Sun, S. (2019). Multi-digit MNIST for Few-shot Learning.
           (https://github.com/shaohua0116/MultiDigitMNIST)

    .. [2] LeCun, Y., Cortes, C., and Burges, CJ. (2010). MNIST Handwritten
           Digit Database. (http://yann.lecun.com/exdb/mnist)
    """
    def __init__(self, root, num_classes_per_task=None, meta_train=False,
                 meta_val=False, meta_test=False, meta_split=None,
                 transform=None, target_transform=None, dataset_transform=None,
                 class_augmentations=None, download=False):
        dataset = DoubleMNISTClassDataset(root, meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            transform=transform, class_augmentations=class_augmentations,
            download=download)
        super(DoubleMNIST, self).__init__(dataset, num_classes_per_task,
            target_transform=target_transform,
            dataset_transform=dataset_transform)


class DoubleMNISTClassDataset(ClassDataset):
    folder = 'doublemnist'
    # Google Drive ID from https://github.com/shaohua0116/MultiDigitMNIST
    gdrive_id = '1MqQCdLt9TVE3joAMw4FwJp_B8F-htrAo'
    zip_filename = 'double_mnist_seed_123_image_size_64_64.zip'
    zip_md5 = '6d8b185c0cde155eb39d0e3615ab4f23'

    filename = '{0}_data.hdf5'
    filename_labels = '{0}_labels.json'

    image_folder = 'double_mnist_seed_123_image_size_64_64'

    def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
                 meta_split=None, transform=None, class_augmentations=None,
                 download=False):
        super(DoubleMNISTClassDataset, self).__init__(meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            class_augmentations=class_augmentations)

        self.root = os.path.join(os.path.expanduser(root), self.folder)
        self.transform = transform

        self.split_filename = os.path.join(self.root,
            self.filename.format(self.meta_split))
        self.split_filename_labels = os.path.join(self.root,
            self.filename_labels.format(self.meta_split))

        self._data_file = None
        self._data = None
        self._labels = None

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError('Double MNIST integrity check failed')
        self._num_classes = len(self.labels)

    def __getitem__(self, index):
        label = self.labels[index % self.num_classes]
        data = self.data[label]
        transform = self.get_transform(index, self.transform)
        target_transform = self.get_target_transform(index)

        return DoubleMNISTDataset(index, data, label, transform=transform,
                                  target_transform=target_transform)

    @property
    def num_classes(self):
        return self._num_classes

    @property
    def data(self):
        if self._data is None:
            self._data_file = h5py.File(self.split_filename, 'r')
            self._data = self._data_file['datasets']
        return self._data

    @property
    def labels(self):
        if self._labels is None:
            with open(self.split_filename_labels, 'r') as f:
                self._labels = json.load(f)
        return self._labels

    def _check_integrity(self):
        return (os.path.isfile(self.split_filename)
            and os.path.isfile(self.split_filename_labels))

    def close(self):
        if self._data_file is not None:
            self._data_file.close()
            self._data_file = None
            self._data = None

    def download(self):
        import zipfile
        import shutil
        from tqdm import tqdm

        if self._check_integrity():
            return

        zip_filename = os.path.join(self.root, self.zip_filename)
        if not os.path.isfile(zip_filename):
            download_file_from_google_drive(self.gdrive_id, self.root,
                self.zip_filename, md5=self.zip_md5)

        zip_foldername = os.path.join(self.root, self.image_folder)
        if not os.path.isdir(zip_foldername):
            with zipfile.ZipFile(zip_filename, 'r') as f:
                for member in tqdm(f.infolist(), desc='Extracting '):
                    try:
                        f.extract(member, self.root)
                    except zipfile.BadZipFile:
                        print('Error: Zip file is corrupted')

        for split in ['train', 'val', 'test']:
            filename = os.path.join(self.root, self.filename.format(split))
            if os.path.isfile(filename):
                continue

            labels = get_asset(self.folder, '{0}.json'.format(split))
            labels_filename = os.path.join(self.root,
                self.filename_labels.format(split))
            with open(labels_filename, 'w') as f:
                json.dump(labels, f)

            image_folder = os.path.join(zip_foldername, split)

            with h5py.File(filename, 'w') as f:
                group = f.create_group('datasets')
                dtype = h5py.special_dtype(vlen=np.uint8)
                for i, label in enumerate(tqdm(labels, desc=filename)):
                    images = glob.glob(os.path.join(image_folder, label,
                                                    '*.png'))
                    images.sort()
                    dataset = group.create_dataset(label, (len(images),),
                                                   dtype=dtype)
                    for j, image in enumerate(images):
                        with open(image, 'rb') as image_file:
                            array = bytearray(image_file.read())
                            dataset[j] = np.asarray(array, dtype=np.uint8)

        if os.path.isdir(zip_foldername):
            shutil.rmtree(zip_foldername)


class DoubleMNISTDataset(Dataset):
    def __init__(self, index, data, label, transform=None,
                 target_transform=None):
        super(DoubleMNISTDataset, self).__init__(index, transform=transform,
            target_transform=target_transform)
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        image = Image.open(io.BytesIO(self.data[index])).convert('RGB')
        target = self.label

        if self.transform is not None:
            image = self.transform(image)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return (image, target)
10,042
38.53937
85
py
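The DoubleMNIST class above is a CombinationMetaDataset, so it is normally wrapped with ClassSplitter to produce N-way K-shot episodes. The snippet below is a minimal usage sketch, assuming BatchMetaDataLoader is exposed under torchmeta_local.utils.data as in upstream torchmeta; the data root, episode sizes, and batch size are illustrative.

from torchmeta_local.datasets import DoubleMNIST
from torchmeta_local.transforms import Categorical, ClassSplitter
from torchmeta_local.utils.data import BatchMetaDataLoader
from torchvision.transforms import ToTensor

# 5-way episodes with 1 support and 15 query images per class.
dataset = DoubleMNIST('data', num_classes_per_task=5, meta_train=True,
                      transform=ToTensor(),
                      target_transform=Categorical(num_classes=5),
                      download=True)
dataset = ClassSplitter(dataset, shuffle=True,
                        num_train_per_class=1, num_test_per_class=15)
dataloader = BatchMetaDataLoader(dataset, batch_size=4)

for batch in dataloader:
    inputs, targets = batch['train']  # expected: (4, 5, 3, 64, 64) and (4, 5)
    break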
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/__init__.py
from torchmeta_local.datasets.triplemnist import TripleMNIST
from torchmeta_local.datasets.doublemnist import DoubleMNIST
from torchmeta_local.datasets.cub import CUB
from torchmeta_local.datasets.cifar100 import CIFARFS, FC100
from torchmeta_local.datasets.miniimagenet import MiniImagenet
from torchmeta_local.datasets.omniglot import Omniglot
from torchmeta_local.datasets.tieredimagenet import TieredImagenet
from torchmeta_local.datasets.tcga import TCGA
from torchmeta_local.datasets.pascal5i import Pascal5i
from torchmeta_local.datasets.letter import Letter
from torchmeta_local.datasets.one_hundred_plants_texture import PlantsTexture
from torchmeta_local.datasets.one_hundred_plants_shape import PlantsShape
from torchmeta_local.datasets.one_hundred_plants_margin import PlantsMargin
from torchmeta_local.datasets.bach import Bach
from torchmeta_local.datasets import helpers
from torchmeta_local.datasets import helpers_tabular

__all__ = [
    # image data
    'TCGA',
    'Omniglot',
    'MiniImagenet',
    'TieredImagenet',
    'CIFARFS',
    'FC100',
    'CUB',
    'DoubleMNIST',
    'TripleMNIST',
    'Pascal5i',
    'helpers',
    # tabular data
    'Letter',
    'PlantsTexture',
    'PlantsShape',
    'PlantsMargin',
    'Bach',
    'helpers_tabular'
]
1,277
30.95
77
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/one_hundred_plants_texture.py
import numpy as np import os import json import h5py from tqdm import tqdm from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset from torchmeta_local.datasets.utils import get_asset class PlantsTexture(CombinationMetaDataset): """The PlantsTexture dataset """ def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False, process_features=False): """ One-hundred plant species leaves dataset (Class = Texture) [1], [2], [3] open-ml-id: 1493 https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set) - 2010 (a) Original owners of colour Leaves Samples: James Cope, Thibaut Beghin, Paolo Remagnino, Sarah Barman. The colour images are not included. The Leaves were collected in the Royal Botanic Gardens, Kew, UK. email: james.cope@kingston.ac.uk (b) This dataset consists of work carried out by James Cope, Charles Mallah, and James Orwell. Donor of database Charles Mallah: charles.mallah@kingston.ac.uk; James Cope: james.cope@kingston.ac.uk The original data directory contains the binary images (masks) of the leaf samples (colour images not included). There are three features for each image: Shape, Margin and Texture. For each feature, a 64 element vector is given per leaf sample. These vectors are taken as a contiguous descriptor (for shape) or histograms (for texture and margin). So, there are three different files, one for each feature problem: * 'data_Sha_64.txt' -> prediction based on shape * 'data_Tex_64.txt' -> prediction based on texture [dataset provided here] * 'data_Mar_64.txt' -> prediction based on margin Each row has a 64-element feature vector followed by the Class label. There is a total of 1600 samples with 16 samples per leaf class (100 classes), and no missing values. Three 64 element feature vectors per sample. Parameters ---------- root : string Root directory where the dataset folder `one_hundred_plants_texture` exists. num_classes_per_task : int Number of classes per tasks. This corresponds to "N" in "N-way" classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test` if all three are set to `False`. transform : callable, optional A function/transform that takes a numpy array or a pytorch array (depending when the transforms is applied), and returns a transformed version. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`. 
class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. download : bool (default: `False`) If `True`, downloads the original files and processes the dataset in the root directory (under the `one_hundred_plants_texture' folder). If the dataset is already available, this does not download/process the dataset again. process_features : bool (default: `False`) If `True`, normalizes each feature f with (f-lower) / (upper - lower) where upper and lower are the min and max values of feature f of the meta-train dataset. References ----- [1] Charles Mallah, James Cope, James Orwell. Plant Leaf Classification Using Probabilistic Integration of Shape, Texture and Margin Features. Signal Processing, Pattern Recognition and Applications, in press. [2] J. Cope, P. Remagnino, S. Barman, and P. Wilkin. Plant texture classification using gabor co-occurrences. Advances in Visual Computing, pages 699-677, 2010. [3] T. Beghin, J. Cope, P. Remagnino, and S. Barman. Shape and texture based plant leaf classification. In: Advanced Concepts for Intelligent Vision Systems, pages 345-353. Springer, 2010. """ dataset = PlantsTextureClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download, normalize=process_features) super(PlantsTexture, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class PlantsTextureClassDataset(ClassDataset): open_ml_id = 1493 open_ml_url = 'https://www.openml.org/d/' + str(open_ml_id) dataset_name = "one_hundred_plants_texture" folder = "one_hundred_plants_texture" filename = '{0}_data.hdf5' filename_labels = '{0}_labels.json' filename_lower_upper = 'features_lower_upper.json' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False, normalize=True): super(PlantsTextureClassDataset, self).__init__(meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations) self.root = os.path.join(os.path.expanduser(root), self.folder) self.transform = transform self.split_filename = os.path.join(self.root, self.filename.format(self.meta_split)) self.split_filename_labels = os.path.join(self.root, self.filename_labels.format(self.meta_split)) self.split_filename_lower_upper = os.path.join(self.root, self.filename_lower_upper) self._data_file = None self._data = None self._labels = None self._lower_upper = None if download: self.download(normalize) if not self._check_integrity(): raise RuntimeError('PlantsTexture integrity check failed') self._num_classes = len(self.labels) def __getitem__(self, index): label = self.labels[index % self.num_classes] data = self.data[label] transform = self.get_transform(index, self.transform) target_transform = self.get_target_transform(index) return PlantsTextureDataset(index, data, label, transform=transform, target_transform=target_transform) @property def num_classes(self): return self._num_classes @property def data(self): if self._data is None: self._data_file = h5py.File(self.split_filename, 'r') self._data = self._data_file['datasets'] return self._data @property def labels(self): if self._labels is None: with open(self.split_filename_labels, 'r') as f: self._labels = json.load(f) return self._labels @property def 
lower_upper(self): if self._lower_upper is None: with open(self.split_filename_lower_upper, 'r') as f: self._lower_upper = json.load(f) return self._lower_upper def _check_integrity(self): return (os.path.isfile(self.split_filename) and os.path.isfile(self.split_filename_labels)) def close(self): if self._data is not None: self._data.close() self._data = None def download(self, normalize): if self._check_integrity(): return from sklearn.datasets import fetch_openml data = fetch_openml(data_id=self.open_ml_id) features = data.data targets = data.target os.makedirs(self.root, exist_ok=True) # for each meta-data-split, get the labels, then check which data-point belongs to the set (via a mask). # then, retrieve the features and targets belonging to the set. Then create hdf5 file for these features. for s, split in enumerate(['train', 'val', 'test']): targets_assets_split = get_asset(self.folder, '{0}.json'.format(split)) is_in_split = [t in targets_assets_split for t in targets] features_split = features.loc[is_in_split] targets_split = targets.loc[is_in_split] assert targets_split.shape[0] == features_split.shape[0] unique_targets_split = np.unique(targets_split) if len(targets_assets_split) > unique_targets_split.shape[0]: print(f"unique set of labels ({(unique_targets_split.shape[0])}) is smaller than set of labels " f"given by assets ({len(targets_assets_split)}). Proceeding with unique set of labels.") # write unique targets to json file. labels_filename = os.path.join(self.root, self.filename_labels.format(split)) with open(labels_filename, 'w') as f: json.dump(unique_targets_split.tolist(), f) # normalize between 0 and 1 with stats from 'train' split only if split == 'train': lower, upper = np.zeros(features.shape[1]), np.ones(features.shape[1]) if normalize: lower = np.min(features_split, axis=0) upper = np.max(features_split, axis=0) self._lower_upper = {'lower': lower.tolist(), 'upper': upper.tolist()} lower_upper_filename = os.path.join(self.root, self.filename_lower_upper) with open(lower_upper_filename, 'w') as f: json.dump(self._lower_upper, f) lower_upper = self.lower_upper lower = np.array(lower_upper['lower']) upper = np.array(lower_upper['upper']) features_split = np.true_divide((features_split - lower), (upper - lower)) # write data (features and class labels) filename = os.path.join(self.root, self.filename.format(split)) with h5py.File(filename, 'w') as f: group = f.create_group('datasets') for i, label in enumerate(tqdm(unique_targets_split, desc=filename)): data_class = features_split.loc[targets_split == label] group.create_dataset(label, data=data_class) class PlantsTextureDataset(Dataset): def __init__(self, index, data, label, transform=None, target_transform=None): super(PlantsTextureDataset, self).__init__(index, transform=transform, target_transform=target_transform) self.data = data self.label = label def __len__(self): return len(self.data) def __getitem__(self, index): features = self.data[index, :] target = self.label if self.transform is not None: features = self.transform(features) if self.target_transform is not None: target = self.target_transform(target) return features, target def create_asset(root='data', fractions=None, seed=42): """This methods creates the assets of the PlantsTexture dataset. These are the meta-dataset splits from the original data. Only run this method in case you want to create new assets. Once created, copy the assets to this directory: torchmeta_local.datasets.assets.one_hundred_plants_texture. 
You can also manually change the assets."""
    # split fractions: train, valid, test
    if fractions is None:
        fractions = [0.7, 0.15, 0.15]
    assert sum(fractions) == 1

    from sklearn.datasets import fetch_openml
    data = fetch_openml(data_id=PlantsTextureClassDataset.open_ml_id)
    unique_targets = np.unique(data.target)
    num_unique_targets = len(unique_targets)

    num_split = [int(f * num_unique_targets) for f in fractions]
    num_split[1] = num_unique_targets - num_split[0] - num_split[2]
    assert sum(num_split) == num_unique_targets

    # split unique labels randomly
    np.random.seed(seed)
    perm = np.random.permutation(num_unique_targets)
    targets_split = {'train': [unique_targets[i] for i in perm[:num_split[0]]],
                     'val': [unique_targets[i] for i in perm[num_split[0]: num_split[0] + num_split[1]]],
                     'test': [unique_targets[i] for i in perm[num_split[0] + num_split[1]:]]}

    # write splits
    root_path = os.path.join(os.path.expanduser(root),
                             PlantsTextureClassDataset.folder)
    for split in ["train", "val", "test"]:
        asset_filename = os.path.join(root_path, "{0}.json".format(split))
        with open(asset_filename, 'w') as f:
            json.dump(targets_split[split], f)
14,441
44.13125
123
py
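The process_features flag documented above reduces to per-feature min-max scaling with statistics computed on the meta-train split only; val/test features can therefore fall outside [0, 1]. A minimal sketch of that transform (array names and sizes are illustrative):

import numpy as np

def minmax_from_train(train_features, features):
    # lower/upper come from the meta-train split, mirroring the 'train'
    # branch of PlantsTextureClassDataset.download above.
    lower = np.min(train_features, axis=0)
    upper = np.max(train_features, axis=0)
    return np.true_divide(features - lower, upper - lower)

train = np.random.rand(1120, 64)  # e.g. 70 classes x 16 samples, 64 features
val = np.random.rand(240, 64)
val_scaled = minmax_from_train(train, val)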
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/one_hundred_plants_margin.py
import numpy as np import os import json import h5py from tqdm import tqdm from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset from torchmeta_local.datasets.utils import get_asset class PlantsMargin(CombinationMetaDataset): """The PlantsMargin dataset """ def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False, process_features=False): """ One-hundred plant species leaves dataset (Class = Margin) [1], [2], [3] open-ml-id: 1491 https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set) - 2010 (a) Original owners of colour Leaves Samples: James Cope, Thibaut Beghin, Paolo Remagnino, Sarah Barman. The colour images are not included. The Leaves were collected in the Royal Botanic Gardens, Kew, UK. email: james.cope@kingston.ac.uk (b) This dataset consists of work carried out by James Cope, Charles Mallah, and James Orwell. Donor of database Charles Mallah: charles.mallah@kingston.ac.uk; James Cope: james.cope@kingston.ac.uk The original data directory contains the binary images (masks) of the leaf samples (colour images not included). There are three features for each image: Shape, Margin and Texture. For each feature, a 64 element vector is given per leaf sample. These vectors are taken as a contiguous descriptor (for shape) or histograms (for texture and margin). So, there are three different files, one for each feature problem: * 'data_Sha_64.txt' -> prediction based on shape * 'data_Tex_64.txt' -> prediction based on texture * 'data_Mar_64.txt' -> prediction based on margin [dataset provided here] Each row has a 64-element feature vector followed by the Class label. There is a total of 1600 samples with 16 samples per leaf class (100 classes), and no missing values. Three 64 element feature vectors per sample. Parameters ---------- root : string Root directory where the dataset folder `one_hundred_plants_margin` exists. num_classes_per_task : int Number of classes per tasks. This corresponds to "N" in "N-way" classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test` if all three are set to `False`. transform : callable, optional A function/transform that takes a numpy array or a pytorch array (depending when the transforms is applied), and returns a transformed version. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`. 
class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. download : bool (default: `False`) If `True`, downloads the original files and processes the dataset in the root directory (under the `one_hundred_plants_margin' folder). If the dataset is already available, this does not download/process the dataset again. process_features : bool (default: `False`) If `True`, normalizes each feature f with (f-lower) / (upper - lower) where upper and lower are the min and max values of feature f of the meta-train dataset. References ----- [1] Charles Mallah, James Cope, James Orwell. Plant Leaf Classification Using Probabilistic Integration of Shape, Texture and Margin Features. Signal Processing, Pattern Recognition and Applications, in press. [2] J. Cope, P. Remagnino, S. Barman, and P. Wilkin. Plant texture classification using gabor co-occurrences. Advances in Visual Computing, pages 699-677, 2010. [3] T. Beghin, J. Cope, P. Remagnino, and S. Barman. Shape and texture based plant leaf classification. In: Advanced Concepts for Intelligent Vision Systems, pages 345-353. Springer, 2010. """ dataset = PlantsMarginClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download, normalize=process_features) super(PlantsMargin, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class PlantsMarginClassDataset(ClassDataset): open_ml_id = 1491 open_ml_url = 'https://www.openml.org/d/' + str(open_ml_id) dataset_name = "one_hundred_plants_margin" folder = "one_hundred_plants_margin" filename = '{0}_data.hdf5' filename_labels = '{0}_labels.json' filename_lower_upper = 'features_lower_upper.json' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False, normalize=True): super(PlantsMarginClassDataset, self).__init__(meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations) self.root = os.path.join(os.path.expanduser(root), self.folder) self.transform = transform self.split_filename = os.path.join(self.root, self.filename.format(self.meta_split)) self.split_filename_labels = os.path.join(self.root, self.filename_labels.format(self.meta_split)) self.split_filename_lower_upper = os.path.join(self.root, self.filename_lower_upper) self._data_file = None self._data = None self._labels = None self._lower_upper = None if download: self.download(normalize) if not self._check_integrity(): raise RuntimeError('PlantsMargin integrity check failed') self._num_classes = len(self.labels) def __getitem__(self, index): label = self.labels[index % self.num_classes] data = self.data[label] transform = self.get_transform(index, self.transform) target_transform = self.get_target_transform(index) return PlantsMarginDataset(index, data, label, transform=transform, target_transform=target_transform) @property def num_classes(self): return self._num_classes @property def data(self): if self._data is None: self._data_file = h5py.File(self.split_filename, 'r') self._data = self._data_file['datasets'] return self._data @property def labels(self): if self._labels is None: with open(self.split_filename_labels, 'r') as f: self._labels = json.load(f) return self._labels @property def 
lower_upper(self): if self._lower_upper is None: with open(self.split_filename_lower_upper, 'r') as f: self._lower_upper = json.load(f) return self._lower_upper['lower'], self._lower_upper['upper'] def _check_integrity(self): return (os.path.isfile(self.split_filename) and os.path.isfile(self.split_filename_labels)) def close(self): if self._data is not None: self._data.close() self._data = None def download(self, normalize): if self._check_integrity(): return from sklearn.datasets import fetch_openml data = fetch_openml(data_id=self.open_ml_id) features = data.data targets = data.target os.makedirs(self.root, exist_ok=True) # for each meta-data-split, get the labels, then check which data-point belongs to the set (via a mask). # then, retrieve the features and targets belonging to the set. Then create hdf5 file for these features. for s, split in enumerate(['train', 'val', 'test']): targets_assets_split = get_asset(self.folder, '{0}.json'.format(split)) is_in_split = [t in targets_assets_split for t in targets] features_split = features.loc[is_in_split] targets_split = targets.loc[is_in_split] assert targets_split.shape[0] == features_split.shape[0] unique_targets_split = np.unique(targets_split) if len(targets_assets_split) > unique_targets_split.shape[0]: print(f"unique set of labels ({(unique_targets_split.shape[0])}) is smaller than set of labels " f"given by assets ({len(targets_assets_split)}). Proceeding with unique set of labels.") # write unique targets to json file. labels_filename = os.path.join(self.root, self.filename_labels.format(split)) with open(labels_filename, 'w') as f: json.dump(unique_targets_split.tolist(), f) # normalize between 0 and 1 with stats from 'train' split only if split == 'train': lower, upper = np.zeros(features.shape[1]), np.ones(features.shape[1]) if normalize: lower = np.min(features_split, axis=0) upper = np.max(features_split, axis=0) self._lower_upper = {'lower': lower.tolist(), 'upper': upper.tolist()} lower_upper_filename = os.path.join(self.root, self.filename_lower_upper) with open(lower_upper_filename, 'w') as f: json.dump(self._lower_upper, f) lower, upper = self.lower_upper lower = np.array(lower) upper = np.array(upper) features_split = np.true_divide((features_split - lower), (upper - lower)) # write data (features and class labels) filename = os.path.join(self.root, self.filename.format(split)) with h5py.File(filename, 'w') as f: group = f.create_group('datasets') for i, label in enumerate(tqdm(unique_targets_split, desc=filename)): data_class = features_split.loc[targets_split == label] group.create_dataset(label, data=data_class) class PlantsMarginDataset(Dataset): def __init__(self, index, data, label, transform=None, target_transform=None): super(PlantsMarginDataset, self).__init__(index, transform=transform, target_transform=target_transform) self.data = data self.label = label def __len__(self): return len(self.data) def __getitem__(self, index): features = self.data[index, :] target = self.label if self.transform is not None: features = self.transform(features) if self.target_transform is not None: target = self.target_transform(target) return features, target def create_asset(root='data', fractions=None, seed=42): """This methods creates the assets of the PlantsMargin dataset. These are the meta-dataset splits from the original data. Only run this method in case you want to create new assets. Once created, copy the assets to this directory: torchmeta_local.datasets.assets.one_hundred_plants_margin. 
You can also manually change the assets."""
    # split fractions: train, valid, test
    if fractions is None:
        fractions = [0.7, 0.15, 0.15]
    assert sum(fractions) == 1

    from sklearn.datasets import fetch_openml
    data = fetch_openml(data_id=PlantsMarginClassDataset.open_ml_id)
    unique_targets = np.unique(data.target)
    num_unique_targets = len(unique_targets)

    num_split = [int(f * num_unique_targets) for f in fractions]
    num_split[1] = num_unique_targets - num_split[0] - num_split[2]
    assert sum(num_split) == num_unique_targets

    # split unique labels randomly
    np.random.seed(seed)
    perm = np.random.permutation(num_unique_targets)
    targets_split = {'train': [unique_targets[i] for i in perm[:num_split[0]]],
                     'val': [unique_targets[i] for i in perm[num_split[0]: num_split[0] + num_split[1]]],
                     'test': [unique_targets[i] for i in perm[num_split[0] + num_split[1]:]]}

    # write splits
    root_path = os.path.join(os.path.expanduser(root),
                             PlantsMarginClassDataset.folder)
    for split in ["train", "val", "test"]:
        asset_filename = os.path.join(root_path, "{0}.json".format(split))
        with open(asset_filename, 'w') as f:
            json.dump(targets_split[split], f)
14,418
44.059375
122
py
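PlantsTexture, PlantsShape, and PlantsMargin all write the same HDF5 layout: a single 'datasets' group containing one dataset per class label. The sketch below reads one split back directly, bypassing the meta-dataset wrapper; the root path follows the filename templates above but is otherwise illustrative.

import json
import h5py

root = 'data/one_hundred_plants_margin'
with open('{0}/train_labels.json'.format(root), 'r') as f:
    labels = json.load(f)

with h5py.File('{0}/train_data.hdf5'.format(root), 'r') as f:
    group = f['datasets']
    features = group[labels[0]][()]  # (num_samples_in_class, 64) array
    print(labels[0], features.shape)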
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/helpers.py
import warnings from torchmeta_local.datasets import ( Omniglot, MiniImagenet, TieredImagenet, CIFARFS, FC100, CUB, DoubleMNIST, TripleMNIST, Pascal5i) from torchmeta_local.transforms import ( Categorical, ClassSplitter, Rotation, SegmentationPairTransform) from torchvision.transforms import ( Compose, Resize, CenterCrop, ToTensor, Lambda, Normalize) __all__ = [ 'omniglot', 'omniglot_norm', 'omniglot_rgb84x84', 'omniglot_rgb84x84_norm', 'miniimagenet', 'miniimagenet_norm', 'tieredimagenet', 'cifar_fs', 'fc100', 'fc100_norm', 'cub', 'doublemnist', 'triplemnist' ] def helper_with_default(klass, folder, shots, ways, shuffle=True, test_shots=None, seed=None, defaults={}, num_samples_per_class=None, **kwargs): if 'num_classes_per_task' in kwargs: warnings.warn('Both arguments `ways` and `num_classes_per_task` were ' 'set in the helper function for the number of classes per task. ' 'Ignoring the argument `ways`.', stacklevel=2) ways = kwargs['num_classes_per_task'] if 'transform' not in kwargs: kwargs['transform'] = defaults.get('transform', ToTensor()) if 'target_transform' not in kwargs: kwargs['target_transform'] = defaults.get('target_transform', Categorical(ways)) if 'class_augmentations' not in kwargs: kwargs['class_augmentations'] = defaults.get('class_augmentations', None) if test_shots is None: test_shots = shots dataset = klass(folder, num_classes_per_task=ways, **kwargs) if num_samples_per_class is not None: dataset = ClassSplitter( dataset, shuffle=shuffle, num_samples_per_class=num_samples_per_class) else: dataset = ClassSplitter(dataset, shuffle=shuffle, num_train_per_class=shots, num_test_per_class=test_shots) dataset.seed(seed) return dataset def omniglot(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Omniglot dataset. Parameters ---------- folder : string Root directory where the dataset folder `omniglot` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `Omniglot` class. See also -------- `datasets.Omniglot` : Meta-dataset for the Omniglot dataset. """ defaults = { 'transform': Compose([Resize(28), ToTensor()]), 'class_augmentations': [Rotation([90, 180, 270])] } return helper_with_default(Omniglot, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs) def omniglot_norm(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Omniglot dataset. Parameters ---------- folder : string Root directory where the dataset folder `omniglot` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. 
If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `Omniglot` class. See also -------- `datasets.Omniglot` : Meta-dataset for the Omniglot dataset. """ norm_params = {'mean': [0.922], 'std': [0.084]} defaults = { 'transform': Compose( [Resize(28), ToTensor(), Normalize(**norm_params)]), 'class_augmentations': [Rotation([90, 180, 270])] } return helper_with_default(Omniglot, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs) def omniglot_rgb84x84(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Omniglot dataset. Parameters ---------- folder : string Root directory where the dataset folder `omniglot` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `Omniglot` class. See also -------- `datasets.Omniglot` : Meta-dataset for the Omniglot dataset. """ defaults = { 'transform': Compose( [Resize(84), ToTensor(), Lambda(lambda x: x.repeat(3, 1, 1))]), 'class_augmentations': [Rotation([90, 180, 270])] } return helper_with_default(Omniglot, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs) def omniglot_rgb84x84_norm(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Omniglot dataset. Parameters ---------- folder : string Root directory where the dataset folder `omniglot` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `Omniglot` class. See also -------- `datasets.Omniglot` : Meta-dataset for the Omniglot dataset. """ norm_params = {'mean': [0.922, 0.922, 0.922], 'std': [0.084, 0.084, 0.084]} defaults = { 'transform': Compose( [Resize(84), ToTensor(), Lambda(lambda x: x.repeat(3, 1, 1)), Normalize(**norm_params)]), 'class_augmentations': [Rotation([90, 180, 270])] } return helper_with_default(Omniglot, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs) def miniimagenet(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Mini-Imagenet dataset. Parameters ---------- folder : string Root directory where the dataset folder `miniimagenet` exists. shots : int Number of (training) examples per class in each task. 
This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `MiniImagenet` class. See also -------- `datasets.MiniImagenet` : Meta-dataset for the Mini-Imagenet dataset. """ defaults = { 'transform': Compose([Resize(84), ToTensor()]) } return helper_with_default(MiniImagenet, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs) def miniimagenet_norm(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Mini-Imagenet dataset. Parameters ---------- folder : string Root directory where the dataset folder `miniimagenet` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `MiniImagenet` class. See also -------- `datasets.MiniImagenet` : Meta-dataset for the Mini-Imagenet dataset. """ # params taken from https://github.com/yinboc/few-shot-meta-baseline/blob/master/datasets/mini_imagenet.py norm_params = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]} defaults = { 'transform': Compose( [Resize(84), ToTensor(), Normalize(**norm_params)]) } return helper_with_default(MiniImagenet, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs) def tieredimagenet(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Tiered-Imagenet dataset. Parameters ---------- folder : string Root directory where the dataset folder `tieredimagenet` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `TieredImagenet` class. See also -------- `datasets.TieredImagenet` : Meta-dataset for the Tiered-Imagenet dataset. """ defaults = { 'transform': Compose([Resize(84), ToTensor()]) } return helper_with_default(TieredImagenet, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs) def cifar_fs(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the CIFAR-FS dataset. 
Parameters ---------- folder : string Root directory where the dataset folder `cifar100` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `CIFARFS` class. See also -------- `datasets.cifar100.CIFARFS` : Meta-dataset for the CIFAR-FS dataset. """ return helper_with_default(CIFARFS, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults={}, **kwargs) def fc100(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the FC100 dataset. Parameters ---------- folder : string Root directory where the dataset folder `cifar100` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `FC100` class. See also -------- `datasets.cifar100.FC100` : Meta-dataset for the FC100 dataset. """ return helper_with_default(FC100, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults={}, **kwargs) def fc100_norm(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the FC100 dataset. Parameters ---------- folder : string Root directory where the dataset folder `cifar100` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `FC100` class. See also -------- `datasets.cifar100.FC100` : Meta-dataset for the FC100 dataset. """ norm_params = {'mean': [0.507, 0.487, 0.441], 'std': [0.267, 0.256, 0.276]} defaults = { 'transform': Compose( [ToTensor(), Normalize(**norm_params)]) } return helper_with_default(FC100, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs) def cub(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Caltech-UCSD Birds dataset. Parameters ---------- folder : string Root directory where the dataset folder `cub` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. 
ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `CUB` class. See also -------- `datasets.cub.CUB` : Meta-dataset for the Caltech-UCSD Birds dataset. """ image_size = 84 defaults = { 'transform': Compose([ Resize(int(image_size * 1.5)), CenterCrop(image_size), ToTensor() ]) } return helper_with_default(CUB, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs) def doublemnist(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Double MNIST dataset. Parameters ---------- folder : string Root directory where the dataset folder `doublemnist` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `DoubleMNIST` class. See also -------- `datasets.doublemnist.DoubleMNIST` : Meta-dataset for the Double MNIST dataset. """ return helper_with_default(DoubleMNIST, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults={}, **kwargs) def triplemnist(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Triple MNIST dataset. Parameters ---------- folder : string Root directory where the dataset folder `triplemnist` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `TripleMNIST` class. See also -------- `datasets.triplemnist.TripleMNIST` : Meta-dataset for the Triple MNIST dataset. """ return helper_with_default(TripleMNIST, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults={}, **kwargs) def pascal5i(folder, shots, ways=1, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the PASCAL-VOC dataset. Parameters ---------- folder : string Root directory where the dataset folder `omniglot` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. 
Only supports 1-way currently shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `Omniglot` class. """ defaults = { 'transform': SegmentationPairTransform(500), 'class_augmentations': [] } return helper_with_default(Pascal5i, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs)
22,150
31.962798
110
py
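Since every helper above funnels into helper_with_default, building a standard benchmark is a single call; defaults such as transform can still be overridden through kwargs, and passing num_classes_per_task overrides ways with a warning. A minimal sketch follows, with the root path and episode sizes illustrative and BatchMetaDataLoader assumed under torchmeta_local.utils.data as in upstream torchmeta.

from torchmeta_local.datasets.helpers import miniimagenet
from torchmeta_local.utils.data import BatchMetaDataLoader

# 5-way 5-shot episodes with 15 query shots, from the meta-validation split.
dataset = miniimagenet('data', shots=5, ways=5, meta_val=True,
                       test_shots=15, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, shuffle=True)

for batch in dataloader:
    support_x, support_y = batch['train']  # expected: (16, 25, 3, 84, 84), (16, 25)
    query_x, query_y = batch['test']       # expected: (16, 75, 3, 84, 84), (16, 75)
    break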
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/cifar100/base.py
import numpy as np import os import json import h5py from PIL import Image from torchvision.datasets.utils import check_integrity, download_url from torchmeta_local.utils.data import Dataset, ClassDataset class CIFAR100ClassDataset(ClassDataset): folder = 'cifar100' subfolder = None download_url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' gz_folder = 'cifar-100-python' gz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' files_md5 = { 'train': '16019d7e3df5f24257cddd939b257f8d', 'test': 'f0ef6b0ae62326f3e7ffdfab6717acfc', 'meta': '7973b15100ade9c7d40fb424638fde48' } filename = 'data.hdf5' filename_labels = '{0}_labels.json' filename_fine_names = 'fine_names.json' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False): super(CIFAR100ClassDataset, self).__init__(meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, class_augmentations=class_augmentations) if self.subfolder is None: raise ValueError() self.root = os.path.join(os.path.expanduser(root), self.folder) self.transform = transform self.split_filename_labels = os.path.join(self.root, self.subfolder, self.filename_labels.format(self.meta_split)) self._data = None self._labels = None if download: self.download() if not self._check_integrity(): raise RuntimeError('CIFAR100 integrity check failed') self._num_classes = len(self.labels) def __getitem__(self, index): coarse_label_name, fine_label_name = self.labels[index % self.num_classes] data = self.data['{0}/{1}'.format(coarse_label_name, fine_label_name)] transform = self.get_transform(index, self.transform) target_transform = self.get_target_transform(index) return CIFAR100Dataset(index, data, coarse_label_name, fine_label_name, transform=transform, target_transform=target_transform) @property def num_classes(self): return self._num_classes @property def data(self): if self._data is None: self._data = h5py.File(os.path.join(self.root, self.filename), 'r') return self._data @property def labels(self): if self._labels is None: with open(self.split_filename_labels, 'r') as f: self._labels = json.load(f) return self._labels def _check_integrity(self): return (self._check_integrity_data() and os.path.isfile(self.split_filename_labels) and os.path.isfile(os.path.join(self.root, self.filename_fine_names))) def _check_integrity_data(self): return os.path.isfile(os.path.join(self.root, self.filename)) def close(self): if self._data is not None: self._data.close() self._data = None def download(self): import tarfile import pickle import shutil if self._check_integrity_data(): return gz_filename = '{0}.tar.gz'.format(self.gz_folder) download_url(self.download_url, self.root, filename=gz_filename, md5=self.gz_md5) with tarfile.open(os.path.join(self.root, gz_filename), 'r:gz') as tar: tar.extractall(path=self.root) train_filename = os.path.join(self.root, self.gz_folder, 'train') check_integrity(train_filename, self.files_md5['train']) with open(train_filename, 'rb') as f: data = pickle.load(f, encoding='bytes') images = data[b'data'] fine_labels = data[b'fine_labels'] coarse_labels = data[b'coarse_labels'] test_filename = os.path.join(self.root, self.gz_folder, 'test') check_integrity(test_filename, self.files_md5['test']) with open(test_filename, 'rb') as f: data = pickle.load(f, encoding='bytes') images = np.concatenate((images, data[b'data']), axis=0) fine_labels = np.concatenate((fine_labels, data[b'fine_labels']), axis=0) coarse_labels = 
np.concatenate((coarse_labels, data[b'coarse_labels']), axis=0) images = images.reshape((-1, 3, 32, 32)) images = images.transpose((0, 2, 3, 1)) meta_filename = os.path.join(self.root, self.gz_folder, 'meta') check_integrity(meta_filename, self.files_md5['meta']) with open(meta_filename, 'rb') as f: data = pickle.load(f, encoding='latin1') fine_label_names = data['fine_label_names'] coarse_label_names = data['coarse_label_names'] filename = os.path.join(self.root, self.filename) fine_names = dict() with h5py.File(filename, 'w') as f: for i, coarse_name in enumerate(coarse_label_names): group = f.create_group(coarse_name) fine_indices = np.unique(fine_labels[coarse_labels == i]) for j in fine_indices: dataset = group.create_dataset(fine_label_names[j], data=images[fine_labels == j]) fine_names[coarse_name] = [fine_label_names[j] for j in fine_indices] filename_fine_names = os.path.join(self.root, self.filename_fine_names) with open(filename_fine_names, 'w') as f: json.dump(fine_names, f) gz_folder = os.path.join(self.root, self.gz_folder) if os.path.isdir(gz_folder): shutil.rmtree(gz_folder) if os.path.isfile('{0}.tar.gz'.format(gz_folder)): os.remove('{0}.tar.gz'.format(gz_folder)) class CIFAR100Dataset(Dataset): def __init__(self, index, data, coarse_label_name, fine_label_name, transform=None, target_transform=None): super(CIFAR100Dataset, self).__init__(index, transform=transform, target_transform=target_transform) self.data = data self.coarse_label_name = coarse_label_name self.fine_label_name = fine_label_name def __len__(self): return self.data.shape[0] def __getitem__(self, index): image = Image.fromarray(self.data[index]) target = (self.coarse_label_name, self.fine_label_name) if self.transform is not None: image = self.transform(image) if self.target_transform is not None: target = self.target_transform(target) return (image, target)
6,667
36.886364
91
py
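CIFAR100ClassDataset above indexes classes as (coarse, fine) pairs and stores the images under 'coarse/fine' paths inside a single data.hdf5. A sketch of inspecting that layout directly (the root path is illustrative; the shapes are what the download code above should produce, i.e. 20 superclasses, 5 fine classes each, 600 images per fine class after merging train and test):

import json
import h5py

with h5py.File('data/cifar100/data.hdf5', 'r') as f:
    coarse_names = list(f.keys())                 # 20 superclass groups
    fine_names = list(f[coarse_names[0]].keys())  # 5 fine classes each
    images = f[coarse_names[0]][fine_names[0]][()]
    print(images.shape, images.dtype)             # (600, 32, 32, 3) uint8

with open('data/cifar100/fine_names.json', 'r') as f:
    mapping = json.load(f)  # {coarse_name: [fine names], ...}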
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/cifar100/cifar_fs.py
import os import json from torchmeta_local.datasets.cifar100.base import CIFAR100ClassDataset from torchmeta_local.datasets.utils import get_asset from torchmeta_local.utils.data import ClassDataset, CombinationMetaDataset class CIFARFS(CombinationMetaDataset): """ The CIFAR-FS dataset, introduced in [1]. This dataset contains images of 100 different classes from the CIFAR100 dataset [2]. Parameters ---------- root : string Root directory where the dataset folder `cifar100` exists. num_classes_per_task : int Number of classes per tasks. This corresponds to `N` in `N-way` classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test` if all three are set to `False`. transform : callable, optional A function/transform that takes a `PIL` image, and returns a transformed version. See also `torchvision.transforms`. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. See also `torchvision.transforms`. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `transforms.ClassSplitter()`. class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. E.g. `transforms.HorizontalFlip()`. download : bool (default: `False`) If `True`, downloads the pickle files and processes the dataset in the root directory (under the `cifar100` folder). If the dataset is already available, this does not download/process the dataset again. Notes ----- The meta train/validation/test splits are over 64/16/20 classes from the CIFAR100 dataset. References ---------- .. [1] Bertinetto L., Henriques J. F., Torr P. H.S., Vedaldi A. (2019). Meta-learning with differentiable closed-form solvers. In International Conference on Learning Representations (https://arxiv.org/abs/1805.08136) .. [2] Krizhevsky A. (2009). Learning Multiple Layers of Features from Tiny Images. 
(https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf) """ def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False): dataset = CIFARFSClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download) super(CIFARFS, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class CIFARFSClassDataset(CIFAR100ClassDataset): subfolder = 'cifar-fs' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False): super(CIFARFSClassDataset, self).__init__(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download) def download(self): if self._check_integrity(): return super(CIFARFSClassDataset, self).download() subfolder = os.path.join(self.root, self.subfolder) if not os.path.exists(subfolder): os.makedirs(subfolder) for split in ['train', 'val', 'test']: split_filename_labels = os.path.join(subfolder, self.filename_labels.format(split)) if os.path.isfile(split_filename_labels): continue data = get_asset(self.folder, self.subfolder, '{0}.json'.format(split), dtype='json') with open(split_filename_labels, 'w') as f: json.dump(data, f)
5,157
42.344538
85
py
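A quick usage sketch for the `CIFARFS` class above. The root path, way/shot counts, and batch size are illustrative, and `download=True` assumes network access:

from torchmeta_local.datasets.cifar100.cifar_fs import CIFARFS
from torchmeta_local.transforms import Categorical, ClassSplitter
from torchmeta_local.utils.data import BatchMetaDataLoader
from torchvision.transforms import ToTensor

# 5-way tasks, each split into 5 support and 15 query examples per class.
dataset = CIFARFS('data', num_classes_per_task=5, meta_train=True,
                  transform=ToTensor(),
                  target_transform=Categorical(num_classes=5),
                  dataset_transform=ClassSplitter(shuffle=True,
                                                  num_train_per_class=5,
                                                  num_test_per_class=15),
                  download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=4)
batch = next(iter(dataloader))
inputs, targets = batch['train']  # shapes (4, 25, 3, 32, 32) and (4, 25)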
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/cifar100/__init__.py
from torchmeta_local.datasets.cifar100.cifar_fs import CIFARFS from torchmeta_local.datasets.cifar100.fc100 import FC100 __all__ = ['CIFARFS', 'FC100']
153
29.8
62
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/datasets/cifar100/fc100.py
import os import json from torchmeta_local.datasets.cifar100.base import CIFAR100ClassDataset from torchmeta_local.datasets.utils import get_asset from torchmeta_local.utils.data import ClassDataset, CombinationMetaDataset class FC100(CombinationMetaDataset): """ The Fewshot-CIFAR100 dataset, introduced in [1]. This dataset contains images of 100 different classes from the CIFAR100 dataset [2]. Parameters ---------- root : string Root directory where the dataset folder `cifar100` exists. num_classes_per_task : int Number of classes per task. This corresponds to `N` in `N-way` classification. meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test` if all three are set to `False`. transform : callable, optional A function/transform that takes a `PIL` image, and returns a transformed version. See also `torchvision.transforms`. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. See also `torchvision.transforms`. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `transforms.ClassSplitter()`. class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. E.g. `transforms.HorizontalFlip()`. download : bool (default: `False`) If `True`, downloads the pickle files and processes the dataset in the root directory (under the `cifar100` folder). If the dataset is already available, this does not download/process the dataset again. Notes ----- The meta train/validation/test splits are over 12/4/4 superclasses from the CIFAR100 dataset. The meta train/validation/test splits contain 60/20/20 classes. References ---------- .. [1] Oreshkin B. N., Rodriguez P., Lacoste A. (2018). TADAM: Task dependent adaptive metric for improved few-shot learning. In Advances in Neural Information Processing Systems (https://arxiv.org/abs/1805.10123) .. [2] Krizhevsky A. (2009). Learning Multiple Layers of Features from Tiny Images. 
(https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf) """ def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False): dataset = FC100ClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download) super(FC100, self).__init__(dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform) class FC100ClassDataset(CIFAR100ClassDataset): subfolder = 'fc100' def __init__(self, root, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, class_augmentations=None, download=False): super(FC100ClassDataset, self).__init__(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download) def download(self): if self._check_integrity(): return super(FC100ClassDataset, self).download() subfolder = os.path.join(self.root, self.subfolder) if not os.path.exists(subfolder): os.makedirs(subfolder) filename_fine_names = os.path.join(self.root, self.filename_fine_names) with open(filename_fine_names, 'r') as f: fine_names = json.load(f) for split in ['train', 'val', 'test']: split_filename_labels = os.path.join(subfolder, self.filename_labels.format(split)) if os.path.isfile(split_filename_labels): continue data = get_asset(self.folder, self.subfolder, '{0}.json'.format(split), dtype='json') with open(split_filename_labels, 'w') as f: labels = [[coarse_name, fine_name] for coarse_name in data for fine_name in fine_names[coarse_name]] json.dump(labels, f)
5,526
42.865079
85
py
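`FC100` follows the same interface as `CIFARFS`; a minimal sketch of sampling a single task directly (the path and way count are illustrative):

from torchmeta_local.datasets.cifar100.fc100 import FC100
from torchmeta_local.transforms import Categorical

dataset = FC100('data', num_classes_per_task=5, meta_val=True,
                target_transform=Categorical(num_classes=5), download=True)
task = dataset.sample_task()   # a ConcatTask over 5 classes from the meta-val superclasses
image, label = task[0]         # a PIL image and an integer label in [0, 5)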
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/utils/__init__.py
from torchmeta_local.utils import data # from torchmeta_local.utils.gradient_based import gradient_update_parameters # from torchmeta_local.utils.metrics import hardness_metric # from torchmeta_local.utils.prototype import get_num_samples, get_prototypes, prototypical_loss # from torchmeta_local.utils.matching import pairwise_cosine_similarity, matching_log_probas, matching_probas, matching_loss # from torchmeta_local.utils.r2d2 import ridge_regression __all__ = [ 'data', # 'gradient_update_parameters', # 'hardness_metric', # 'get_num_samples', # 'get_prototypes', # 'prototypical_loss', # 'pairwise_cosine_similarity', # 'matching_log_probas', # 'matching_probas', # 'matching_loss', # 'ridge_regression' ]
749
34.714286
124
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/utils/data/sampler.py
import random import warnings from itertools import combinations from torch.utils.data.sampler import SequentialSampler, RandomSampler from torchmeta_local.utils.data.dataset import CombinationMetaDataset __all__ = ['CombinationSequentialSampler', 'CombinationRandomSampler'] class CombinationSequentialSampler(SequentialSampler): def __init__(self, data_source): if not isinstance(data_source, CombinationMetaDataset): raise TypeError('Expected `data_source` to be an instance of ' '`CombinationMetaDataset`, but found ' '{0}'.format(type(data_source))) super(CombinationSequentialSampler, self).__init__(data_source) def __iter__(self): num_classes = len(self.data_source.dataset) num_classes_per_task = self.data_source.num_classes_per_task return combinations(range(num_classes), num_classes_per_task) class CombinationRandomSampler(RandomSampler): def __init__(self, data_source): if not isinstance(data_source, CombinationMetaDataset): raise TypeError('Expected `data_source` to be an instance of ' '`CombinationMetaDataset`, but found ' '{0}'.format(type(data_source))) # Temporarily disable the warning if the length of the # dataset exceeds the machine precision. This avoids getting this # warning shown with MetaDataLoader, even though MetaDataLoader itself # does not use the length of the dataset. with warnings.catch_warnings(): warnings.simplefilter('ignore') super(CombinationRandomSampler, self).__init__(data_source, replacement=True) def __iter__(self): num_classes = len(self.data_source.dataset) num_classes_per_task = self.data_source.num_classes_per_task for _ in combinations(range(num_classes), num_classes_per_task): yield tuple(random.sample(range(num_classes), num_classes_per_task))
2,094
45.555556
80
py
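The samplers above yield tuples of class indices rather than single integers. A small sketch, assuming `dataset` is any `CombinationMetaDataset` instance (such as the `CIFARFS` example earlier):

from torchmeta_local.utils.data.sampler import CombinationRandomSampler

sampler = CombinationRandomSampler(dataset)
index = next(iter(sampler))  # e.g. (12, 3, 47, 0, 9): one N-way combination of class indices
task = dataset[index]        # the same tuple-indexing used by MetaDataLoader internally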
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/utils/data/task.py
import random from torch.utils.data import ConcatDataset, Subset from torch.utils.data import Dataset as Dataset_ from torchvision.transforms import Compose __all__ = ['Dataset', 'Task', 'ConcatTask', 'SubsetTask'] class Dataset(Dataset_): def __init__(self, index, transform=None, target_transform=None): self.index = index self.transform = transform self.target_transform = target_transform def target_transform_append(self, transform): if transform is None: return if self.target_transform is None: self.target_transform = transform else: self.target_transform = Compose([self.target_transform, transform]) def __hash__(self): return hash(self.index) class Task(Dataset): """Base class for a classification task. Parameters ---------- num_classes : int Number of classes for the classification task. """ def __init__(self, index, num_classes, transform=None, target_transform=None): super(Task, self).__init__(index, transform=transform, target_transform=target_transform) self.num_classes = num_classes class ConcatTask(Task, ConcatDataset): def __init__(self, datasets, num_classes, target_transform=None): index = tuple(task.index for task in datasets) Task.__init__(self, index, num_classes) ConcatDataset.__init__(self, datasets) for task in self.datasets: task.target_transform_append(target_transform) def __getitem__(self, index): return ConcatDataset.__getitem__(self, index) class SubsetTask(Task, Subset): def __init__(self, dataset, indices, num_classes=None, target_transform=None): if num_classes is None: num_classes = dataset.num_classes Task.__init__(self, dataset.index, num_classes) # Change: randomize the order of indices to avoid same classes # to be grouped within the sequence. random.shuffle(indices) Subset.__init__(self, dataset, indices) self.dataset.target_transform_append(target_transform) def __getitem__(self, index): return Subset.__getitem__(self, index) def __hash__(self): return hash((self.index, tuple(self.indices)))
2,352
32.140845
79
py
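Note the repo-specific change in `SubsetTask` above: the provided indices are shuffled in place, so same-class examples are interleaved within a task sequence. A minimal sketch, assuming `task` is a `ConcatTask` with 20 examples per class over 5 classes:

from torchmeta_local.utils.data.task import SubsetTask

support = SubsetTask(task, list(range(100)), num_classes=5)
# Unlike upstream torchmeta, iterating `support` now visits the 100 examples
# in a random order, which matters for sequence-processing meta-learners.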
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/utils/data/dataloader.py
from collections import OrderedDict from torch.utils.data import DataLoader from torch.utils.data.dataloader import default_collate from torch.utils.data.dataset import Dataset as TorchDataset from torchmeta_local.utils.data.dataset import CombinationMetaDataset from torchmeta_local.utils.data.sampler import (CombinationSequentialSampler, CombinationRandomSampler) class BatchMetaCollate(object): def __init__(self, collate_fn): super().__init__() self.collate_fn = collate_fn def collate_task(self, task): if isinstance(task, TorchDataset): return self.collate_fn([task[idx] for idx in range(len(task))]) elif isinstance(task, OrderedDict): return OrderedDict([(key, self.collate_task(subtask)) for (key, subtask) in task.items()]) else: raise NotImplementedError() def __call__(self, batch): return self.collate_fn([self.collate_task(task) for task in batch]) def no_collate(batch): return batch class MetaDataLoader(DataLoader): def __init__(self, dataset, batch_size=1, shuffle=True, sampler=None, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None): if collate_fn is None: collate_fn = no_collate if isinstance(dataset, CombinationMetaDataset) and (sampler is None): if shuffle: sampler = CombinationRandomSampler(dataset) else: sampler = CombinationSequentialSampler(dataset) shuffle = False super(MetaDataLoader, self).__init__(dataset, batch_size=batch_size, shuffle=shuffle, sampler=sampler, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn) class BatchMetaDataLoader(MetaDataLoader): def __init__(self, dataset, batch_size=1, shuffle=True, sampler=None, num_workers=0, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None): collate_fn = BatchMetaCollate(default_collate) super(BatchMetaDataLoader, self).__init__(dataset, batch_size=batch_size, shuffle=shuffle, sampler=sampler, batch_sampler=None, num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn)
2,613
39.84375
88
py
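`BatchMetaCollate` recursively collates each task (or each split of a task, when the dataset_transform returns an `OrderedDict`) into stacked tensors. A usage sketch, assuming `dataset` was built with a `ClassSplitter` as in the examples above:

from torchmeta_local.utils.data import BatchMetaDataLoader

dataloader = BatchMetaDataLoader(dataset, batch_size=16, shuffle=True, num_workers=2)
for batch in dataloader:
    train_inputs, train_targets = batch['train']  # (16, N*K, C, H, W), (16, N*K)
    test_inputs, test_targets = batch['test']
    break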
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/utils/data/dataset.py
import sys import numpy as np import warnings from copy import deepcopy from itertools import combinations from ordered_set import OrderedSet from torchvision.transforms import Compose from torchmeta_local.utils.data.task import ConcatTask from torchmeta_local.transforms import FixedCategory, Categorical, DefaultTargetTransform from torchmeta_local.transforms.utils import wrap_transform __all__ = ['ClassDataset', 'MetaDataset', 'CombinationMetaDataset'] class ClassDataset(object): """Base class for a dataset of classes. Each item from a `ClassDataset` is a dataset containing examples from the same class. Parameters ---------- meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test`. class_augmentations : list of callable, optional A list of functions that augment the dataset with new classes. These classes are transformations of existing classes. E.g. `transforms.HorizontalFlip()`. """ def __init__(self, meta_train=False, meta_val=False, meta_test=False, meta_split=None, class_augmentations=None): if meta_train + meta_val + meta_test == 0: if meta_split is None: raise ValueError('The meta-split is undefined. Use either the ' 'argument `meta_train=True` (or `meta_val`/`meta_test`), or ' 'the argument `meta_split="train"` (or "val"/"test").') elif meta_split not in ['train', 'val', 'test']: raise ValueError('Unknown meta-split name `{0}`. The meta-split ' 'must be in [`train`, `val`, `test`].'.format(meta_split)) meta_train = (meta_split == 'train') meta_val = (meta_split == 'val') meta_test = (meta_split == 'test') elif meta_train + meta_val + meta_test > 1: raise ValueError('Multiple arguments among `meta_train`, `meta_val` ' 'and `meta_test` are set to `True`. Exactly one must be set to ' '`True`.') self.meta_train = meta_train self.meta_val = meta_val self.meta_test = meta_test self._meta_split = meta_split if class_augmentations is not None: if not isinstance(class_augmentations, list): raise TypeError('Unknown type for `class_augmentations`. ' 'Expected `list`, got `{0}`.'.format(type(class_augmentations))) unique_augmentations = OrderedSet() for augmentations in class_augmentations: for transform in augmentations: if transform in unique_augmentations: warnings.warn('The class augmentation `{0}` already ' 'exists in the list of class augmentations (`{1}`). 
' 'To avoid any duplicate, this transformation is ' 'ignored.'.format(transform, repr(transform)), UserWarning, stacklevel=2) unique_augmentations.add(transform) class_augmentations = list(unique_augmentations) else: class_augmentations = [] self.class_augmentations = class_augmentations def get_class_augmentation(self, index): transform_index = (index // self.num_classes) - 1 if transform_index < 0: return None return self.class_augmentations[transform_index] def get_transform(self, index, transform=None): class_transform = self.get_class_augmentation(index) if class_transform is None: return transform if transform is None: return class_transform return Compose([class_transform, transform]) def get_target_transform(self, index): class_transform = self.get_class_augmentation(index) return FixedCategory(class_transform) @property def meta_split(self): if self._meta_split is None: if self.meta_train: self._meta_split = 'train' elif self.meta_val: self._meta_split = 'val' elif self.meta_test: self._meta_split = 'test' else: raise NotImplementedError() return self._meta_split def __getitem__(self, index): raise NotImplementedError() @property def num_classes(self): raise NotImplementedError() def __len__(self): return self.num_classes * (len(self.class_augmentations) + 1) class MetaDataset(object): """Base class for a meta-dataset. Parameters ---------- meta_train : bool (default: `False`) Use the meta-train split of the dataset. If set to `True`, then the arguments `meta_val` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_val : bool (default: `False`) Use the meta-validation split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_test` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_test : bool (default: `False`) Use the meta-test split of the dataset. If set to `True`, then the arguments `meta_train` and `meta_val` must be set to `False`. Exactly one of these three arguments must be set to `True`. meta_split : string in {'train', 'val', 'test'}, optional Name of the split to use. This overrides the arguments `meta_train`, `meta_val` and `meta_test`. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. See also `torchvision.transforms`. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `transforms.ClassSplitter()`. """ def __init__(self, meta_train=False, meta_val=False, meta_test=False, meta_split=None, target_transform=None, dataset_transform=None): if meta_train + meta_val + meta_test == 0: if meta_split is None: raise ValueError('The meta-split is undefined. Use either the ' 'argument `meta_train=True` (or `meta_val`/`meta_test`), or ' 'the argument `meta_split="train"` (or "val"/"test").') elif meta_split not in ['train', 'val', 'test']: raise ValueError('Unknown meta-split name `{0}`. The meta-split ' 'must be in [`train`, `val`, `test`].'.format(meta_split)) meta_train = (meta_split == 'train') meta_val = (meta_split == 'val') meta_test = (meta_split == 'test') elif meta_train + meta_val + meta_test > 1: raise ValueError('Multiple arguments among `meta_train`, `meta_val` ' 'and `meta_test` are set to `True`. 
Exactly one must be set to ' '`True`.') self.meta_train = meta_train self.meta_val = meta_val self.meta_test = meta_test self._meta_split = meta_split self.target_transform = target_transform self.dataset_transform = dataset_transform self.seed() @property def meta_split(self): if self._meta_split is None: if self.meta_train: self._meta_split = 'train' elif self.meta_val: self._meta_split = 'val' elif self.meta_test: self._meta_split = 'test' else: raise NotImplementedError() return self._meta_split def seed(self, seed=None): self.np_random = np.random.RandomState(seed=seed) # Seed the dataset transform _seed_dataset_transform(self.dataset_transform, seed=seed) def __iter__(self): for index in range(len(self)): yield self[index] def sample_task(self): index = self.np_random.randint(len(self)) return self[index] def __getitem__(self, index): raise NotImplementedError() def __len__(self): raise NotImplementedError() class CombinationMetaDataset(MetaDataset): """Base class for a meta-dataset, where the classification tasks are over multiple classes from a `ClassDataset`. Parameters ---------- dataset : `ClassDataset` instance A dataset of classes. Each item of `dataset` is a dataset, containing all the examples from the same class. num_classes_per_task : int Number of classes per task. This corresponds to `N` in `N-way` classification. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. See also `torchvision.transforms`. dataset_transform : callable, optional A function/transform that takes a dataset (ie. a task), and returns a transformed version of it. E.g. `transforms.ClassSplitter()`. """ def __init__(self, dataset, num_classes_per_task, target_transform=None, dataset_transform=None): if not isinstance(num_classes_per_task, int): raise TypeError('Unknown type for `num_classes_per_task`. Expected ' '`int`, got `{0}`.'.format(type(num_classes_per_task))) self.dataset = dataset self.num_classes_per_task = num_classes_per_task # If no target_transform, then use a default target transform that # is well behaved for the `default_collate` function (assign class # augmentations to integers). if target_transform is None: target_transform = DefaultTargetTransform(dataset.class_augmentations) super(CombinationMetaDataset, self).__init__(meta_train=dataset.meta_train, meta_val=dataset.meta_val, meta_test=dataset.meta_test, meta_split=dataset.meta_split, target_transform=target_transform, dataset_transform=dataset_transform) def __iter__(self): num_classes = len(self.dataset) for index in combinations(range(num_classes), self.num_classes_per_task): yield self[index] def sample_task(self): index = self.np_random.choice(len(self.dataset), size=self.num_classes_per_task, replace=False) return self[tuple(index)] def __getitem__(self, index): if isinstance(index, int): raise ValueError('The index of a `CombinationMetaDataset` must be ' 'a tuple of integers, and not an integer. For example, call ' '`dataset[({0})]` to get a task with classes from 0 to {1} ' '(got `{2}`).'.format(', '.join([str(idx) for idx in range(self.num_classes_per_task)]), self.num_classes_per_task - 1, index)) assert len(index) == self.num_classes_per_task datasets = [self.dataset[i] for i in index] # Use deepcopy on `Categorical` target transforms, to avoid any side # effect across tasks. 
task = ConcatTask(datasets, self.num_classes_per_task, target_transform=wrap_transform(self.target_transform, self._copy_categorical, transform_type=Categorical)) if self.dataset_transform is not None: task = self.dataset_transform(task) return task def _copy_categorical(self, transform): assert isinstance(transform, Categorical) transform.reset() if transform.num_classes is None: transform.num_classes = self.num_classes_per_task return deepcopy(transform) def __len__(self): num_classes, length = len(self.dataset), 1 for i in range(1, self.num_classes_per_task + 1): length *= (num_classes - i + 1) / i if length > sys.maxsize: warnings.warn('The number of possible tasks in {0} is ' 'combinatorially large (equal to C({1}, {2})), and exceeds ' 'machine precision. Setting the length of the dataset to the ' 'maximum integer value, which undervalues the actual number of ' 'possible tasks in the dataset. Therefore the value returned by ' '`len(dataset)` should not be trusted as being representative ' 'of the true number of tasks.'.format(self, len(self.dataset), self.num_classes_per_task), UserWarning, stacklevel=2) length = sys.maxsize return int(length) def _seed_dataset_transform(transform, seed=None): if isinstance(transform, Compose): for subtransform in transform.transforms: _seed_dataset_transform(subtransform, seed=seed) elif hasattr(transform, 'seed'): transform.seed(seed=seed)
13,749
41.701863
89
py
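Two details of `CombinationMetaDataset` worth illustrating: tasks are indexed by tuples of class indices, and `__len__` counts all N-way class combinations (capped at `sys.maxsize`). A sketch, assuming a 5-way `dataset`:

from math import comb

task = dataset[(0, 1, 2, 3, 4)]       # one task over exactly these five classes
print(len(dataset))                   # approximately comb(len(dataset.dataset), 5);
print(comb(len(dataset.dataset), 5))  # the float-product computation may round slightly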
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/utils/data/wrappers.py
import numpy as np import io from PIL import Image from torch.utils.data import Dataset from torchmeta_local.utils.data.dataset import CombinationMetaDataset class NonEpisodicWrapper(Dataset): """Non-episodic wrapper to convert a CombinationMetaDataset into a standard PyTorch Dataset, compatible with (non-episodic) training. Parameters ---------- dataset : `CombinationMetaDataset` instance The meta-dataset to be wrapped around. target_transform : callable, optional A function/transform that takes a target, and returns a transformed version. See also `torchvision.transforms`. """ def __init__(self, dataset, target_transform=None): super(NonEpisodicWrapper, self).__init__() if not isinstance(dataset, CombinationMetaDataset): raise ValueError('`NonEpisodicWrapper` can only be wrapped around a ' '`CombinationMetaDataset`. The dataset `{0}` is not an instance ' 'of `CombinationMetaDataset`.'.format(dataset)) self.dataset = dataset self.target_transform = target_transform class_dataset = self.dataset.dataset self._labels, pointer = [], 0 self._offsets = np.zeros((class_dataset.num_classes,), dtype=np.int_) for index, label in enumerate(class_dataset.labels): if isinstance(label, list): label = '/'.join(label) num_samples = len(class_dataset.data[label]) self._labels.append(label) self._offsets[index] = pointer pointer += num_samples self._num_samples = pointer def __getitem__(self, index): class_dataset = self.dataset.dataset class_index = np.maximum(np.searchsorted(self._offsets, index % self.num_samples, side='left') - 1, 0) offset = (index % self.num_samples) - self._offsets[class_index] label = self._labels[class_index] array = class_dataset.data[label][offset] image = (Image.open(io.BytesIO(array)) if array.ndim < 2 else Image.fromarray(array)) class_augmented_index = (class_dataset.num_classes * (index // self.num_samples) + class_index) transform = class_dataset.get_transform(class_augmented_index, class_dataset.transform) if transform is not None: image = transform(image) class_transform = class_dataset.get_class_augmentation(class_augmented_index) label = (label, index // self.num_samples) if self.target_transform is not None: label = self.target_transform(label) return image, label @property def num_samples(self): return self._num_samples @property def num_classes(self): num_augmentations = len(self.dataset.dataset.class_augmentations) return len(self._labels) * (num_augmentations + 1) def __len__(self): num_augmentations = len(self.dataset.dataset.class_augmentations) return self.num_samples * (num_augmentations + 1)
3,110
36.481928
85
py
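A sketch of `NonEpisodicWrapper`, which flattens a `CombinationMetaDataset` back into a plain sample-level dataset; here no target_transform is passed, so labels come back as (class_name, augmentation_index) pairs:

from torchmeta_local.utils.data import NonEpisodicWrapper

flat = NonEpisodicWrapper(dataset)         # dataset: a CombinationMetaDataset
image, (class_name, aug_index) = flat[0]
print(flat.num_samples, flat.num_classes)  # total samples and (augmented) class count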
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/utils/data/__init__.py
from torchmeta_local.utils.data.dataloader import MetaDataLoader, BatchMetaDataLoader from torchmeta_local.utils.data.dataset import ClassDataset, MetaDataset, CombinationMetaDataset from torchmeta_local.utils.data.sampler import CombinationSequentialSampler, CombinationRandomSampler from torchmeta_local.utils.data.task import Dataset, Task, ConcatTask, SubsetTask from torchmeta_local.utils.data.wrappers import NonEpisodicWrapper __all__ = [ 'MetaDataLoader', 'BatchMetaDataLoader', 'ClassDataset', 'MetaDataset', 'CombinationMetaDataset', 'CombinationSequentialSampler', 'CombinationRandomSampler', 'Dataset', 'Task', 'ConcatTask', 'SubsetTask', 'NonEpisodicWrapper' ]
723
33.47619
101
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/transforms/augmentations.py
import torchvision.transforms.functional as F class Rotation(object): def __init__(self, angle, resample=False, expand=False, center=None): super(Rotation, self).__init__() if isinstance(angle, (list, tuple)): self._angles = angle self.angle = None else: self._angles = [angle] self.angle = angle if angle % 360 == 0: import warnings warnings.warn('Applying a rotation of {0} degrees (`{1}`) as a ' 'class augmentation on a dataset is equivalent to the original ' 'dataset.'.format(angle, self), UserWarning, stacklevel=2) self.resample = resample self.expand = expand self.center = center def __iter__(self): return iter(Rotation(angle, resample=self.resample, expand=self.expand, center=self.center) for angle in self._angles) def __call__(self, image): if self.angle is None: raise ValueError('The value of the angle is unspecified.') # QKFIX: Explicitly compute the pixel fill value due to an # incompatibility between Torchvision 0.5 and Pillow 7.0.0 # https://github.com/pytorch/vision/issues/1759#issuecomment-583826810 # Will be fixed in Torchvision 0.6 fill = tuple([0] * len(image.getbands())) return F.rotate(image, self.angle % 360, self.resample, self.expand, self.center, fill=fill) def __hash__(self): return hash(repr(self)) def __eq__(self, other): if (self.angle is None) or (other.angle is None): return self._angles == other._angles return (self.angle % 360) == (other.angle % 360) def __repr__(self): if self.angle is None: return 'Rotation({0})'.format(', '.join(map(str, self._angles))) else: return 'Rotation({0})'.format(self.angle % 360) def __str__(self): if self.angle is None: return 'Rotation({0})'.format(', '.join(map(str, self._angles))) else: return 'Rotation({0})'.format(self.angle) class HorizontalFlip(object): def __iter__(self): return iter([HorizontalFlip()]) def __call__(self, image): return F.hflip(image) def __repr__(self): return 'HorizontalFlip()' class VerticalFlip(object): def __iter__(self): return iter([VerticalFlip()]) def __call__(self, image): return F.vflip(image) def __repr__(self): return 'VerticalFlip()'
2,589
33.078947
84
py
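`Rotation` is iterable so that a single `Rotation([90, 180, 270])` expands into three fixed-angle transforms, each acting as a new virtual class. A sketch (the dataset root path is illustrative):

from torchmeta_local.datasets.cifar100.cifar_fs import CIFARFS
from torchmeta_local.transforms import Rotation

augmentations = [Rotation([90, 180, 270])]
dataset = CIFARFS('data', num_classes_per_task=5, meta_train=True,
                  class_augmentations=augmentations, download=True)
print(len(dataset.dataset))  # 4x the number of base classes: original + three rotations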
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/transforms/utils.py
from torchvision.transforms import Compose from torchmeta_local.utils.data.task import Task def apply_wrapper(wrapper, task_or_dataset=None): if task_or_dataset is None: return wrapper from torchmeta_local.utils.data import MetaDataset if isinstance(task_or_dataset, Task): return wrapper(task_or_dataset) elif isinstance(task_or_dataset, MetaDataset): if task_or_dataset.dataset_transform is None: dataset_transform = wrapper else: dataset_transform = Compose([ task_or_dataset.dataset_transform, wrapper]) task_or_dataset.dataset_transform = dataset_transform return task_or_dataset else: raise NotImplementedError() def wrap_transform(transform, fn, transform_type=None): if (transform_type is None) or isinstance(transform, transform_type): return fn(transform) elif isinstance(transform, Compose): return Compose([wrap_transform(subtransform, fn, transform_type) for subtransform in transform.transforms]) else: return transform
1,101
35.733333
73
py
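`wrap_transform` applies a function to every transform of a given type inside a (possibly nested) `Compose`, leaving the rest untouched; this is the mechanism `CombinationMetaDataset` uses to deep-copy `Categorical` transforms per task. A small sketch:

from copy import deepcopy
from torchvision.transforms import Compose
from torchmeta_local.transforms import Categorical
from torchmeta_local.transforms.utils import wrap_transform

transform = Compose([Categorical(num_classes=5)])
per_task = wrap_transform(transform, deepcopy, transform_type=Categorical)
# `per_task` is a new Compose whose Categorical is an independent copy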
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/transforms/target_transforms.py
from torchvision.transforms import Compose, Resize, ToTensor import PIL class SegmentationPairTransform(object): def __init__(self, target_size): self.image_transform = Compose([Resize((target_size, target_size)), ToTensor()]) self.mask_transform = Compose([Resize((target_size, target_size), interpolation=PIL.Image.NEAREST), ToTensor()]) def __call__(self, image, mask): image = self.image_transform(image) mask = self.mask_transform(mask) return image, mask class TargetTransform(object): def __call__(self, target): raise NotImplementedError() def __repr__(self): return str(self.__class__.__name__) class DefaultTargetTransform(TargetTransform): def __init__(self, class_augmentations): super(DefaultTargetTransform, self).__init__() self.class_augmentations = class_augmentations self._augmentations = dict((augmentation, i + 1) for (i, augmentation) in enumerate(class_augmentations)) self._augmentations[None] = 0 def __call__(self, target): assert isinstance(target, tuple) and len(target) == 2 label, augmentation = target return (label, self._augmentations[augmentation])
1,325
34.837838
88
py
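A sketch of `SegmentationPairTransform`, which resizes an image/mask pair to a common size while using nearest-neighbor interpolation for the mask so label values are not blended:

from PIL import Image
from torchmeta_local.transforms import SegmentationPairTransform

pair_transform = SegmentationPairTransform(64)
image, mask = Image.new('RGB', (100, 80)), Image.new('L', (100, 80))
image_t, mask_t = pair_transform(image, mask)  # both tensors of spatial size 64x64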
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/transforms/categorical.py
import torch from torchmeta_local.transforms.utils import apply_wrapper from collections import defaultdict from torchmeta_local.transforms.target_transforms import TargetTransform class Categorical(TargetTransform): """Target transform to return labels in `[0, num_classes)`. Parameters ---------- num_classes : int, optional Number of classes. If `None`, then the number of classes is inferred from the number of individual labels encountered. Examples -------- >>> dataset = Omniglot('data', num_classes_per_task=5, meta_train=True) >>> task = dataset.sample_task() >>> task[0] (<PIL.Image.Image image mode=L size=105x105 at 0x11EC797F0>, ('images_evaluation/Glagolitic/character12', None)) >>> dataset = Omniglot('data', num_classes_per_task=5, meta_train=True, ... target_transform=Categorical(5)) >>> task = dataset.sample_task() >>> task[0] (<PIL.Image.Image image mode=L size=105x105 at 0x11ED3F668>, 2) """ def __init__(self, num_classes=None): super(Categorical, self).__init__() self.num_classes = num_classes self._classes = None self._labels = None def reset(self): self._classes = None self._labels = None @property def classes(self): if self._classes is None: self._classes = defaultdict(None) if self.num_classes is None: default_factory = lambda: len(self._classes) else: default_factory = lambda: self.labels[len(self._classes)] self._classes.default_factory = default_factory if (self.num_classes is not None) and (len(self._classes) > self.num_classes): raise ValueError('The number of individual labels ({0}) is greater ' 'than the number of classes defined by `num_classes` ' '({1}).'.format(len(self._classes), self.num_classes)) return self._classes @property def labels(self): if (self._labels is None) and (self.num_classes is not None): # TODO: Replace torch.randperm with seed-friendly counterpart self._labels = torch.randperm(self.num_classes).tolist() return self._labels def __call__(self, target): return self.classes[target] def __repr__(self): return '{0}({1})'.format(self.__class__.__name__, self.num_classes or '') class FixedCategory(object): def __init__(self, transform=None): self.transform = transform def __call__(self, index): return (index, self.transform) def __repr__(self): return ('{0}({1})'.format(self.__class__.__name__, self.transform))
2,710
33.316456
86
py
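Beyond the docstring example, note that `Categorical(num_classes=N)` maps each newly encountered label to an entry of a random permutation of [0, N), and that the mapping is stateful: `reset()` must be called (as `CombinationMetaDataset._copy_categorical` does) before reusing it on a new task. A sketch:

from torchmeta_local.transforms import Categorical

categorical = Categorical(num_classes=3)
print(categorical('dog'), categorical('cat'), categorical('dog'))  # e.g. 2 0 2
categorical.reset()  # forget the label mapping before the next task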
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/transforms/splitters.py
import torch import numpy as np from collections import OrderedDict, defaultdict from torchmeta_local.utils.data.task import Task, ConcatTask, SubsetTask from torchmeta_local.transforms.utils import apply_wrapper __all__ = ['Splitter', 'ClassSplitter', 'WeightedClassSplitter'] class Splitter(object): def __init__(self, splits, random_state_seed): self.splits = splits self.random_state_seed = random_state_seed self.seed(random_state_seed) def seed(self, seed): self.np_random = np.random.RandomState(seed=seed) def get_indices(self, task): if isinstance(task, ConcatTask): indices = self.get_indices_concattask(task) elif isinstance(task, Task): indices = self.get_indices_task(task) else: raise ValueError('The task must be of type `ConcatTask` or `Task`, ' 'Got type `{0}`.'.format(type(task))) return indices def get_indices_task(self, task): raise NotImplementedError('Method `get_indices_task` must be ' 'implemented in classes inherited from `Splitter`.') def get_indices_concattask(self, task): raise NotImplementedError('Method `get_indices_concattask` must be ' 'implemented in classes inherited from `Splitter`.') def _get_class_indices(self, task): class_indices = defaultdict(list) if task.num_classes is None: # Regression task class_indices['regression'] = range(len(task)) else: for index in range(len(task)): sample = task[index] if (not isinstance(sample, tuple)) or (len(sample) < 2): raise ValueError('In order to split the dataset in train/' 'test splits, `Splitter` must access the targets. Each ' 'sample from a task must be a tuple with at least 2 ' 'elements, with the last one being the target.') class_indices[sample[-1]].append(index) if len(class_indices) != task.num_classes: raise ValueError('The number of classes detected in `Splitter` ' '({0}) is different from the property `num_classes` ({1}) ' 'in task `{2}`.'.format(len(class_indices), task.num_classes, task)) return class_indices def __call__(self, task): indices = self.get_indices(task) return OrderedDict([(split, SubsetTask(task, indices[split])) for split in self.splits]) def __len__(self): return len(self.splits) class ClassSplitter_(Splitter): def __init__(self, shuffle=True, num_samples_per_class=None, num_train_per_class=None, num_test_per_class=None, num_support_per_class=None, num_query_per_class=None, random_state_seed=0): """ Transforms a dataset into train/test splits for few-shot learning tasks, based on a fixed number of samples per class for each split. This is a dataset transformation to be applied as a `dataset_transform` in a `MetaDataset`. Parameters ---------- shuffle : bool (default: `True`) Shuffle the data in the dataset before the split. num_samples_per_class : dict, optional Dictionary containing the names of the splits (as keys) and the corresponding number of samples per class in each split (as values). If not `None`, then the arguments `num_train_per_class`, `num_test_per_class`, `num_support_per_class` and `num_query_per_class` are ignored. num_train_per_class : int, optional Number of samples per class in the training split. This corresponds to the number of "shots" in "k-shot learning". If not `None`, this creates an item `train` for each task. num_test_per_class : int, optional Number of samples per class in the test split. If not `None`, this creates an item `test` for each task. num_support_per_class : int, optional Alias for `num_train_per_class`. If `num_train_per_class` is not `None`, then this argument is ignored. If not `None`, this creates an item `support` for each task. 
num_query_per_class : int, optional Alias for `num_test_per_class`. If `num_test_per_class` is not `None`, then this argument is ignored. If not `None`, this creates an item `query` for each task. random_state_seed : int, optional seed of the np.RandomState. Defaults to '0'. Examples -------- >>> transform = ClassSplitter(num_samples_per_class={ ... 'train': 5, 'test': 15}) >>> dataset = Omniglot('data', num_classes_per_task=5, ... dataset_transform=transform, meta_train=True) >>> task = dataset.sample_task() >>> task.keys() ['train', 'test'] >>> len(task['train']), len(task['test']) (25, 75) """ self.shuffle = shuffle if num_samples_per_class is None: num_samples_per_class = OrderedDict() if num_train_per_class is not None: num_samples_per_class['train'] = num_train_per_class elif num_support_per_class is not None: num_samples_per_class['support'] = num_support_per_class if num_test_per_class is not None: num_samples_per_class['test'] = num_test_per_class elif num_query_per_class is not None: num_samples_per_class['query'] = num_query_per_class assert len(num_samples_per_class) > 0 self._min_samples_per_class = sum(num_samples_per_class.values()) super(ClassSplitter_, self).__init__(num_samples_per_class, random_state_seed) def get_indices_task(self, task): all_class_indices = self._get_class_indices(task) indices = OrderedDict([(split, []) for split in self.splits]) for i, (name, class_indices) in enumerate(all_class_indices.items()): num_samples = len(class_indices) if num_samples < self._min_samples_per_class: raise ValueError('The number of samples for class `{0}` ({1}) ' 'is smaller than the minimum number of samples per class ' 'required by `ClassSplitter` ({2}).'.format(name, num_samples, self._min_samples_per_class)) if self.shuffle: seed = (hash(task) + i + self.random_state_seed) % (2 ** 32) dataset_indices = np.random.RandomState(seed).permutation(num_samples) else: dataset_indices = np.arange(num_samples) ptr = 0 for split, num_split in self.splits.items(): split_indices = dataset_indices[ptr:ptr + num_split] if self.shuffle: self.np_random.shuffle(split_indices) indices[split].extend([class_indices[idx] for idx in split_indices]) ptr += num_split return indices def get_indices_concattask(self, task): indices = OrderedDict([(split, []) for split in self.splits]) cum_size = 0 for dataset in task.datasets: num_samples = len(dataset) if num_samples < self._min_samples_per_class: raise ValueError('The number of samples for one class ({0}) ' 'is smaller than the minimum number of samples per class ' 'required by `ClassSplitter` ({1}).'.format(num_samples, self._min_samples_per_class)) if self.shuffle: seed = (hash(task) + hash(dataset) + self.random_state_seed) % (2 ** 32) dataset_indices = np.random.RandomState(seed).permutation(num_samples) else: dataset_indices = np.arange(num_samples) ptr = 0 for split, num_split in self.splits.items(): split_indices = dataset_indices[ptr:ptr + num_split] if self.shuffle: self.np_random.shuffle(split_indices) indices[split].extend(split_indices + cum_size) ptr += num_split cum_size += num_samples return indices class WeightedClassSplitter_(Splitter): def __init__(self, shuffle=True, min_num_samples=1, max_num_samples=None, weights=None, train_weights=None, test_weights=None, support_weights=None, query_weights=None, force_equal_per_class=False, random_state_seed=0): """ Transforms a dataset into train/test splits for few-shot learning tasks. 
The number of samples per class is proportional to the number of samples per class in the original dataset. This is a dataset transformation to be applied as a `dataset_transform` in a `MetaDataset`. Parameters ---------- shuffle : bool (default: `True`) Shuffle the data in the dataset before the split. min_num_samples : int or dict, optional (default: 1) Minimum number of samples per class. max_num_samples : int or dict, optional Maximum number of samples per class. weights : dict, optional Dictionary containing the names of the splits (as keys) and the corresponding proportions of samples per class in each split (as values). If not `None`, then the arguments `train_weights`, `test_weights`, `support_weights` and `query_weights` are ignored. train_weights : float, optional Proportion of samples from each class in the training split. If not `None`, this creates an item `train` for each task. test_weights : float, optional Proportion of samples from each class in the test split. If not `None`, this creates an item `test` for each task. support_weights : float, optional Alias for `train_weights`. If `train_weights` is not `None`, then this argument is ignored. If not `None`, this creates an item `support` for each task. query_weights : float, optional Alias for `test_weights`. If `test_weights` is not `None`, then this argument is ignored. If not `None`, this creates an item `query` for each task. force_equal_per_class : bool (default: `False`) If `True`, then the number of samples per class is equal for each class; this is then proportional to the number of samples in the class with the minimum number of samples. random_state_seed : int, optional seed of the np.RandomState. Defaults to '0'. """ self.shuffle = shuffle self.force_equal_per_class = force_equal_per_class if weights is None: weights = OrderedDict() if train_weights is not None: weights['train'] = train_weights elif support_weights is not None: weights['support'] = support_weights if test_weights is not None: weights['test'] = test_weights elif query_weights is not None: weights['query'] = query_weights assert len(weights) > 0 assert sum(weights.values()) <= 1. if (min_num_samples is None) or isinstance(min_num_samples, int): if min_num_samples is None: min_num_samples = 0 self.min_num_samples = OrderedDict([(split, min_num_samples) for split in weights]) elif isinstance(min_num_samples, dict): self.min_num_samples = OrderedDict(min_num_samples) else: raise NotImplementedError('Argument `min_num_samples` in ' '`WeightedClassSplitter` must be of type `dict` or `int`. Got ' 'type `{0}`.'.format(type(min_num_samples))) if max_num_samples is None: self.max_num_samples = None elif isinstance(max_num_samples, int): self.max_num_samples = OrderedDict([(split, max_num_samples) for split in weights]) elif isinstance(max_num_samples, dict): self.max_num_samples = OrderedDict(max_num_samples) else: raise NotImplementedError('Argument `max_num_samples` in ' '`WeightedClassSplitter` must be of type `dict` or `int`. Got ' 'type `{0}`.'.format(type(max_num_samples))) self._min_samples_per_class = sum(self.min_num_samples.values()) super(WeightedClassSplitter_, self).__init__(weights, random_state_seed) def get_indices_task(self, task): all_class_indices = self._get_class_indices(task) indices = OrderedDict([(split, []) for split in self.splits]) min_samples = min([len(class_indices) for class_indices in all_class_indices.values()]) if min_samples < self._min_samples_per_class: raise ValueError('The smallest number of samples in a class ({0}) ' 'is smaller than the minimum number of samples per class ' 'required by `WeightedClassSplitter` ({1}).'.format( min_samples, self._min_samples_per_class)) for i, class_indices in enumerate(all_class_indices.values()): num_samples = (min_samples if self.force_equal_per_class else len(class_indices)) if self.shuffle: seed = (hash(task) + i + self.random_state_seed) % (2 ** 32) dataset_indices = np.random.RandomState(seed).permutation(num_samples) else: dataset_indices = np.arange(num_samples) ptr = 0 for split, weight in self.splits.items(): num_split = max(self.min_num_samples[split], int(weight * num_samples)) if self.max_num_samples is not None: num_split = min(self.max_num_samples[split], num_split) split_indices = dataset_indices[ptr:ptr + num_split] if self.shuffle: self.np_random.shuffle(split_indices) indices[split].extend([class_indices[idx] for idx in split_indices]) ptr += num_split return indices def get_indices_concattask(self, task): indices = OrderedDict([(split, []) for split in self.splits]) cum_size = 0 min_samples = min([len(dataset) for dataset in task.datasets]) if min_samples < self._min_samples_per_class: raise ValueError('The smallest number of samples in a class ({0}) ' 'is smaller than the minimum number of samples per class ' 'required by `WeightedClassSplitter` ({1}).'.format( min_samples, self._min_samples_per_class)) for dataset in task.datasets: num_samples = (min_samples if self.force_equal_per_class else len(dataset)) if self.shuffle: seed = (hash(task) + hash(dataset) + self.random_state_seed) % (2 ** 32) dataset_indices = np.random.RandomState(seed).permutation(num_samples) else: dataset_indices = np.arange(num_samples) ptr = 0 for split, weight in self.splits.items(): num_split = max(self.min_num_samples[split], int(weight * num_samples)) split_indices = dataset_indices[ptr:ptr + num_split] if self.shuffle: self.np_random.shuffle(split_indices) indices[split].extend(split_indices + cum_size) ptr += num_split cum_size += num_samples return indices def ClassSplitter(task=None, *args, **kwargs): return apply_wrapper(ClassSplitter_(*args, **kwargs), task) def WeightedClassSplitter(task=None, *args, **kwargs): return apply_wrapper(WeightedClassSplitter_(*args, **kwargs), task)
16,290
43.149051
88
py
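A usage sketch for `WeightedClassSplitter`, splitting each class of a task roughly 70/30 into support and query while guaranteeing at least one example per class in each split (`task` is any `ConcatTask` or `Task` instance):

from torchmeta_local.transforms import WeightedClassSplitter

splitter = WeightedClassSplitter(shuffle=True, min_num_samples=1,
                                 weights={'train': 0.7, 'test': 0.3})
splits = splitter(task)  # OrderedDict of SubsetTasks
support, query = splits['train'], splits['test']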
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/transforms/__init__.py
from torchmeta_local.transforms.categorical import Categorical, FixedCategory from torchmeta_local.transforms.augmentations import Rotation, HorizontalFlip, VerticalFlip from torchmeta_local.transforms.splitters import Splitter, ClassSplitter, WeightedClassSplitter from torchmeta_local.transforms.target_transforms import TargetTransform, DefaultTargetTransform, SegmentationPairTransform from torchmeta_local.transforms.tabular_transforms import NumpyToTorch
461
76
123
py
modern-srwm
modern-srwm-main/supervised_learning/torchmeta_local/transforms/tabular_transforms.py
import torch import numpy as np class NumpyToTorch: """Convert a numpy.ndarray to a PyTorch tensor.""" def __call__(self, numpy_array: np.ndarray) -> torch.Tensor: """ Parameters ---------- numpy_array : np.ndarray the numpy array Returns ------- torch.Tensor converted torch tensor with the same values as the numpy array """ return torch.from_numpy(numpy_array).contiguous() def __repr__(self): return self.__class__.__name__ + '()'
555
22.166667
73
py
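A one-line sketch of the transform above:

import numpy as np
from torchmeta_local.transforms import NumpyToTorch

features = NumpyToTorch()(np.zeros((4, 8), dtype=np.float32))  # torch.float32 tensor, shape (4, 8)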
auto-attack
auto-attack-master/setup.py
import setuptools with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() setuptools.setup( name="autoattack", version="0.1", author="Francesco Croce, Matthias Hein", author_email="francesco.croce@uni-tuebingen.de", description="This package provides the implementation of AutoAttack.", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/fra31/auto-attack", packages=setuptools.find_packages(), classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", ], )
703
27.16
74
py
auto-attack
auto-attack-master/autoattack/utils_tf.py
import tensorflow as tf import numpy as np import torch class ModelAdapter(): def __init__(self, logits, x, y, sess, num_classes=10): self.logits = logits self.sess = sess self.x_input = x self.y_input = y self.num_classes = num_classes # gradients of logits if num_classes <= 10: self.grads = [None] * num_classes for cl in range(num_classes): self.grads[cl] = tf.gradients(self.logits[:, cl], self.x_input)[0] # cross-entropy loss self.xent = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=self.logits, labels=self.y_input) self.grad_xent = tf.gradients(self.xent, self.x_input)[0] # dlr loss self.dlr = dlr_loss(self.logits, self.y_input, num_classes=self.num_classes) self.grad_dlr = tf.gradients(self.dlr, self.x_input)[0] # targeted dlr loss self.y_target = tf.placeholder(tf.int64, shape=[None]) self.dlr_target = dlr_loss_targeted(self.logits, self.y_input, self.y_target, num_classes=self.num_classes) self.grad_target = tf.gradients(self.dlr_target, self.x_input)[0] self.la = tf.placeholder(tf.int64, shape=[None]) self.la_target = tf.placeholder(tf.int64, shape=[None]) la_mask = tf.one_hot(self.la, self.num_classes) la_target_mask = tf.one_hot(self.la_target, self.num_classes) la_logit = tf.reduce_sum(la_mask * self.logits, axis=1) la_target_logit = tf.reduce_sum(la_target_mask * self.logits, axis=1) self.diff_logits = la_target_logit - la_logit self.grad_diff_logits = tf.gradients(self.diff_logits, self.x_input)[0] def predict(self, x): x2 = np.moveaxis(x.cpu().numpy(), 1, 3) y = self.sess.run(self.logits, {self.x_input: x2}) return torch.from_numpy(y).cuda() def grad_logits(self, x): x2 = np.moveaxis(x.cpu().numpy(), 1, 3) logits, g2 = self.sess.run([self.logits, self.grads], {self.x_input: x2}) g2 = np.moveaxis(np.array(g2), 0, 1) g2 = np.transpose(g2, (0, 1, 4, 2, 3)) return torch.from_numpy(logits).cuda(), torch.from_numpy(g2).cuda() def get_grad_diff_logits_target(self, x, y=None, y_target=None): la = y.cpu().numpy() la_target = y_target.cpu().numpy() x2 = np.moveaxis(x.cpu().numpy(), 1, 3) dl, g2 = self.sess.run([self.diff_logits, self.grad_diff_logits], {self.x_input: x2, self.la: la, self.la_target: la_target}) g2 = np.transpose(np.array(g2), (0, 3, 1, 2)) return torch.from_numpy(dl).cuda(), torch.from_numpy(g2).cuda() def get_logits_loss_grad_xent(self, x, y): x2 = np.moveaxis(x.cpu().numpy(), 1, 3) y2 = y.clone().cpu().numpy() logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.xent, self.grad_xent], {self.x_input: x2, self.y_input: y2}) grad_val = np.moveaxis(grad_val, 3, 1) return torch.from_numpy(logits_val).cuda(), torch.from_numpy(loss_indiv_val).cuda(), torch.from_numpy(grad_val).cuda() def get_logits_loss_grad_dlr(self, x, y): x2 = np.moveaxis(x.cpu().numpy(), 1, 3) y2 = y.clone().cpu().numpy() logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.dlr, self.grad_dlr], {self.x_input: x2, self.y_input: y2}) grad_val = np.moveaxis(grad_val, 3, 1) return torch.from_numpy(logits_val).cuda(), torch.from_numpy(loss_indiv_val).cuda(), torch.from_numpy(grad_val).cuda() def get_logits_loss_grad_target(self, x, y, y_target): x2 = np.moveaxis(x.cpu().numpy(), 1, 3) y2 = y.clone().cpu().numpy() y_targ = y_target.clone().cpu().numpy() logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.dlr_target, self.grad_target], {self.x_input: x2, self.y_input: y2, self.y_target: y_targ}) grad_val = np.moveaxis(grad_val, 3, 1) return torch.from_numpy(logits_val).cuda(), torch.from_numpy(loss_indiv_val).cuda(), 
torch.from_numpy(grad_val).cuda() def dlr_loss(x, y, num_classes=10): x_sort = tf.contrib.framework.sort(x, axis=1) y_onehot = tf.one_hot(y, num_classes) ### TODO: adapt to the case when the point is already misclassified loss = -(x_sort[:, -1] - x_sort[:, -2]) / (x_sort[:, -1] - x_sort[:, -3] + 1e-12) return loss def dlr_loss_targeted(x, y, y_target, num_classes=10): x_sort = tf.contrib.framework.sort(x, axis=1) y_onehot = tf.one_hot(y, num_classes) y_target_onehot = tf.one_hot(y_target, num_classes) loss = -(tf.reduce_sum(x * y_onehot, axis=1) - tf.reduce_sum(x * y_target_onehot, axis=1)) / (x_sort[:, -1] - .5 * x_sort[:, -3] - .5 * x_sort[:, -4] + 1e-12) return loss
4,883
45.514286
171
py
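For readers more at home in PyTorch, here is a transcription sketch of the `dlr_loss` function above (same formula, with the same caveat from the TODO that it assumes the point is not already misclassified):

import torch

def dlr_loss_pt(logits, y, num_classes=10):
    # -(z_[1] - z_[2]) / (z_[1] - z_[3] + 1e-12), with z_[k] the k-th largest logit;
    # like the TF version above, y is accepted but unused by this untargeted variant
    z_sort = logits.sort(dim=1)[0]
    return -(z_sort[:, -1] - z_sort[:, -2]) / (z_sort[:, -1] - z_sort[:, -3] + 1e-12)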
auto-attack
auto-attack-master/autoattack/autoattack.py
import math import time import numpy as np import torch from .other_utils import Logger from autoattack import checks from autoattack.state import EvaluationState class AutoAttack(): def __init__(self, model, norm='Linf', eps=.3, seed=None, verbose=True, attacks_to_run=[], version='standard', is_tf_model=False, device='cuda', log_path=None): self.model = model self.norm = norm assert norm in ['Linf', 'L2', 'L1'] self.epsilon = eps self.seed = seed self.verbose = verbose self.attacks_to_run = attacks_to_run self.version = version self.is_tf_model = is_tf_model self.device = device self.logger = Logger(log_path) if version in ['standard', 'plus', 'rand'] and attacks_to_run != []: raise ValueError("attacks_to_run will be overridden unless you use version='custom'") if not self.is_tf_model: from .autopgd_base import APGDAttack self.apgd = APGDAttack(self.model, n_restarts=5, n_iter=100, verbose=False, eps=self.epsilon, norm=self.norm, eot_iter=1, rho=.75, seed=self.seed, device=self.device, logger=self.logger) from .fab_pt import FABAttack_PT self.fab = FABAttack_PT(self.model, n_restarts=5, n_iter=100, eps=self.epsilon, seed=self.seed, norm=self.norm, verbose=False, device=self.device) from .square import SquareAttack self.square = SquareAttack(self.model, p_init=.8, n_queries=5000, eps=self.epsilon, norm=self.norm, n_restarts=1, seed=self.seed, verbose=False, device=self.device, resc_schedule=False) from .autopgd_base import APGDAttack_targeted self.apgd_targeted = APGDAttack_targeted(self.model, n_restarts=1, n_iter=100, verbose=False, eps=self.epsilon, norm=self.norm, eot_iter=1, rho=.75, seed=self.seed, device=self.device, logger=self.logger) else: from .autopgd_base import APGDAttack self.apgd = APGDAttack(self.model, n_restarts=5, n_iter=100, verbose=False, eps=self.epsilon, norm=self.norm, eot_iter=1, rho=.75, seed=self.seed, device=self.device, is_tf_model=True, logger=self.logger) from .fab_tf import FABAttack_TF self.fab = FABAttack_TF(self.model, n_restarts=5, n_iter=100, eps=self.epsilon, seed=self.seed, norm=self.norm, verbose=False, device=self.device) from .square import SquareAttack self.square = SquareAttack(self.model.predict, p_init=.8, n_queries=5000, eps=self.epsilon, norm=self.norm, n_restarts=1, seed=self.seed, verbose=False, device=self.device, resc_schedule=False) from .autopgd_base import APGDAttack_targeted self.apgd_targeted = APGDAttack_targeted(self.model, n_restarts=1, n_iter=100, verbose=False, eps=self.epsilon, norm=self.norm, eot_iter=1, rho=.75, seed=self.seed, device=self.device, is_tf_model=True, logger=self.logger) if version in ['standard', 'plus', 'rand']: self.set_version(version) def get_logits(self, x): if not self.is_tf_model: return self.model(x) else: return self.model.predict(x) def get_seed(self): return time.time() if self.seed is None else self.seed def run_standard_evaluation(self, x_orig, y_orig, bs=250, return_labels=False, state_path=None): if state_path is not None and state_path.exists(): state = EvaluationState.from_disk(state_path) if set(self.attacks_to_run) != state.attacks_to_run: raise ValueError("The state was created with a different set of attacks " "to run. 
You are probably using the wrong state file.") if self.verbose: self.logger.log("Restored state from {}".format(state_path)) self.logger.log("Since the state has been restored, **only** " "the adversarial examples from the current run " "are going to be returned.") else: state = EvaluationState(set(self.attacks_to_run), path=state_path) state.to_disk() if self.verbose and state_path is not None: self.logger.log("Created state in {}".format(state_path)) attacks_to_run = list(filter(lambda attack: attack not in state.run_attacks, self.attacks_to_run)) if self.verbose: self.logger.log('using {} version including {}.'.format(self.version, ', '.join(attacks_to_run))) if state.run_attacks: self.logger.log('{} was/were already run.'.format(', '.join(state.run_attacks))) # checks on type of defense if self.version != 'rand': checks.check_randomized(self.get_logits, x_orig[:bs].to(self.device), y_orig[:bs].to(self.device), bs=bs, logger=self.logger) n_cls = checks.check_range_output(self.get_logits, x_orig[:bs].to(self.device), logger=self.logger) checks.check_dynamic(self.model, x_orig[:bs].to(self.device), self.is_tf_model, logger=self.logger) checks.check_n_classes(n_cls, self.attacks_to_run, self.apgd_targeted.n_target_classes, self.fab.n_target_classes, logger=self.logger) with torch.no_grad(): # calculate accuracy n_batches = int(np.ceil(x_orig.shape[0] / bs)) if state.robust_flags is None: robust_flags = torch.zeros(x_orig.shape[0], dtype=torch.bool, device=x_orig.device) y_adv = torch.empty_like(y_orig) for batch_idx in range(n_batches): start_idx = batch_idx * bs end_idx = min( (batch_idx + 1) * bs, x_orig.shape[0]) x = x_orig[start_idx:end_idx, :].clone().to(self.device) y = y_orig[start_idx:end_idx].clone().to(self.device) output = self.get_logits(x).max(dim=1)[1] y_adv[start_idx: end_idx] = output correct_batch = y.eq(output) robust_flags[start_idx:end_idx] = correct_batch.detach().to(robust_flags.device) state.robust_flags = robust_flags robust_accuracy = torch.sum(robust_flags).item() / x_orig.shape[0] robust_accuracy_dict = {'clean': robust_accuracy} state.clean_accuracy = robust_accuracy if self.verbose: self.logger.log('initial accuracy: {:.2%}'.format(robust_accuracy)) else: robust_flags = state.robust_flags.to(x_orig.device) robust_accuracy = torch.sum(robust_flags).item() / x_orig.shape[0] robust_accuracy_dict = {'clean': state.clean_accuracy} if self.verbose: self.logger.log('initial clean accuracy: {:.2%}'.format(state.clean_accuracy)) self.logger.log('robust accuracy at the time of restoring the state: {:.2%}'.format(robust_accuracy)) x_adv = x_orig.clone().detach() startt = time.time() for attack in attacks_to_run: # item() is super important as pytorch int division uses floor rounding num_robust = torch.sum(robust_flags).item() if num_robust == 0: break n_batches = int(np.ceil(num_robust / bs)) robust_lin_idcs = torch.nonzero(robust_flags, as_tuple=False) if num_robust > 1: robust_lin_idcs.squeeze_() for batch_idx in range(n_batches): start_idx = batch_idx * bs end_idx = min((batch_idx + 1) * bs, num_robust) batch_datapoint_idcs = robust_lin_idcs[start_idx:end_idx] if len(batch_datapoint_idcs.shape) > 1: batch_datapoint_idcs.squeeze_(-1) x = x_orig[batch_datapoint_idcs, :].clone().to(self.device) y = y_orig[batch_datapoint_idcs].clone().to(self.device) # make sure that x is a 4d tensor even if there is only a single datapoint left if len(x.shape) == 3: x.unsqueeze_(dim=0) # run attack if attack == 'apgd-ce': # apgd on cross-entropy loss self.apgd.loss = 'ce' self.apgd.seed 
= self.get_seed() adv_curr = self.apgd.perturb(x, y) #cheap=True elif attack == 'apgd-dlr': # apgd on dlr loss self.apgd.loss = 'dlr' self.apgd.seed = self.get_seed() adv_curr = self.apgd.perturb(x, y) #cheap=True elif attack == 'fab': # fab self.fab.targeted = False self.fab.seed = self.get_seed() adv_curr = self.fab.perturb(x, y) elif attack == 'square': # square self.square.seed = self.get_seed() adv_curr = self.square.perturb(x, y) elif attack == 'apgd-t': # targeted apgd self.apgd_targeted.seed = self.get_seed() adv_curr = self.apgd_targeted.perturb(x, y) #cheap=True elif attack == 'fab-t': # fab targeted self.fab.targeted = True self.fab.n_restarts = 1 self.fab.seed = self.get_seed() adv_curr = self.fab.perturb(x, y) else: raise ValueError('Attack not supported') output = self.get_logits(adv_curr).max(dim=1)[1] false_batch = ~y.eq(output).to(robust_flags.device) non_robust_lin_idcs = batch_datapoint_idcs[false_batch] robust_flags[non_robust_lin_idcs] = False state.robust_flags = robust_flags x_adv[non_robust_lin_idcs] = adv_curr[false_batch].detach().to(x_adv.device) y_adv[non_robust_lin_idcs] = output[false_batch].detach().to(x_adv.device) if self.verbose: num_non_robust_batch = torch.sum(false_batch) self.logger.log('{} - {}/{} - {} out of {} successfully perturbed'.format( attack, batch_idx + 1, n_batches, num_non_robust_batch, x.shape[0])) robust_accuracy = torch.sum(robust_flags).item() / x_orig.shape[0] robust_accuracy_dict[attack] = robust_accuracy state.add_run_attack(attack) if self.verbose: self.logger.log('robust accuracy after {}: {:.2%} (total time {:.1f} s)'.format( attack.upper(), robust_accuracy, time.time() - startt)) # check about square checks.check_square_sr(robust_accuracy_dict, logger=self.logger) state.to_disk(force=True) # final check if self.verbose: if self.norm == 'Linf': res = (x_adv - x_orig).abs().reshape(x_orig.shape[0], -1).max(1)[0] elif self.norm == 'L2': res = ((x_adv - x_orig) ** 2).reshape(x_orig.shape[0], -1).sum(-1).sqrt() elif self.norm == 'L1': res = (x_adv - x_orig).abs().reshape(x_orig.shape[0], -1).sum(dim=-1) self.logger.log('max {} perturbation: {:.5f}, nan in tensor: {}, max: {:.5f}, min: {:.5f}'.format( self.norm, res.max(), (x_adv != x_adv).sum(), x_adv.max(), x_adv.min())) self.logger.log('robust accuracy: {:.2%}'.format(robust_accuracy)) if return_labels: return x_adv, y_adv else: return x_adv def clean_accuracy(self, x_orig, y_orig, bs=250): n_batches = math.ceil(x_orig.shape[0] / bs) acc = 0. 
for counter in range(n_batches): x = x_orig[counter * bs:min((counter + 1) * bs, x_orig.shape[0])].clone().to(self.device) y = y_orig[counter * bs:min((counter + 1) * bs, x_orig.shape[0])].clone().to(self.device) output = self.get_logits(x) acc += (output.max(1)[1] == y).float().sum() if self.verbose: print('clean accuracy: {:.2%}'.format(acc / x_orig.shape[0])) return acc.item() / x_orig.shape[0] def run_standard_evaluation_individual(self, x_orig, y_orig, bs=250, return_labels=False): if self.verbose: print('using {} version including {}'.format(self.version, ', '.join(self.attacks_to_run))) l_attacks = self.attacks_to_run adv = {} verbose_indiv = self.verbose self.verbose = False for c in l_attacks: startt = time.time() self.attacks_to_run = [c] x_adv, y_adv = self.run_standard_evaluation(x_orig, y_orig, bs=bs, return_labels=True) if return_labels: adv[c] = (x_adv, y_adv) else: adv[c] = x_adv if verbose_indiv: acc_indiv = self.clean_accuracy(x_adv, y_orig, bs=bs) space = '\t \t' if c == 'fab' else '\t' self.logger.log('robust accuracy by {} {} {:.2%} \t (time attack: {:.1f} s)'.format( c.upper(), space, acc_indiv, time.time() - startt)) return adv def set_version(self, version='standard'): if self.verbose: print('setting parameters for {} version'.format(version)) if version == 'standard': self.attacks_to_run = ['apgd-ce', 'apgd-t', 'fab-t', 'square'] if self.norm in ['Linf', 'L2']: self.apgd.n_restarts = 1 self.apgd_targeted.n_target_classes = 9 elif self.norm in ['L1']: self.apgd.use_largereps = True self.apgd_targeted.use_largereps = True self.apgd.n_restarts = 5 self.apgd_targeted.n_target_classes = 5 self.fab.n_restarts = 1 self.apgd_targeted.n_restarts = 1 self.fab.n_target_classes = 9 #self.apgd_targeted.n_target_classes = 9 self.square.n_queries = 5000 elif version == 'plus': self.attacks_to_run = ['apgd-ce', 'apgd-dlr', 'fab', 'square', 'apgd-t', 'fab-t'] self.apgd.n_restarts = 5 self.fab.n_restarts = 5 self.apgd_targeted.n_restarts = 1 self.fab.n_target_classes = 9 self.apgd_targeted.n_target_classes = 9 self.square.n_queries = 5000 if not self.norm in ['Linf', 'L2']: print('"{}" version is used with {} norm: please check'.format( version, self.norm)) elif version == 'rand': self.attacks_to_run = ['apgd-ce', 'apgd-dlr'] self.apgd.n_restarts = 1 self.apgd.eot_iter = 20
16,294
47.067847
121
py
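The runner in the file above is easiest to read next to a concrete call. The sketch below is a hypothetical usage example, not part of the dump: it assumes the repo's usual `AutoAttack(model, norm=..., eps=..., version=...)` constructor and the `state_path` keyword of `run_standard_evaluation`, both defined earlier in `autoattack.py` outside this excerpt; the model and data are placeholders.

# Hypothetical usage sketch; `my_model` and the test tensors are placeholders.
from pathlib import Path

import torch
from autoattack import AutoAttack

model = my_model.eval().cuda()            # any classifier returning logits
x_test = torch.rand(128, 3, 32, 32)       # stand-in for test images in [0, 1]
y_test = torch.randint(0, 10, (128,))

adversary = AutoAttack(model, norm='Linf', eps=8. / 255., version='standard')

# With a state_path, an interrupted evaluation resumes: attacks already
# recorded in the state file are filtered out (see `state.run_attacks` above),
# and only adversarial examples from the current run are returned.
x_adv, y_adv = adversary.run_standard_evaluation(
    x_test, y_test, bs=64, return_labels=True,
    state_path=Path('aa_state.json'))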
auto-attack
auto-attack-master/autoattack/checks.py
import torch
import warnings
import math
import sys

from autoattack.other_utils import L2_norm


funcs = {'grad': 0,
    'backward': 0,
    #'enable_grad': 0
    '_make_grads': 0,
    }

checks_doc_path = 'flags_doc.md'


def check_randomized(model, x, y, bs=250, n=5, alpha=1e-4, logger=None):
    acc = []
    corrcl = []
    outputs = []
    with torch.no_grad():
        for _ in range(n):
            output = model(x)
            corrcl_curr = (output.max(1)[1] == y).sum().item()
            corrcl.append(corrcl_curr)
            outputs.append(output / (L2_norm(output, keepdim=True) + 1e-10))
    acc = [c != corrcl_curr for c in corrcl]
    max_diff = 0.
    for c in range(n - 1):
        for e in range(c + 1, n):
            diff = L2_norm(outputs[c] - outputs[e])
            max_diff = max(max_diff, diff.max().item())
            #print(diff.max().item(), max_diff)
    if any(acc) or max_diff > alpha:
        msg = 'it seems to be a randomized defense! Please use version="rand".' + \
            f' See {checks_doc_path} for details.'
        if logger is None:
            warnings.warn(Warning(msg))
        else:
            logger.log(f'Warning: {msg}')


def check_range_output(model, x, alpha=1e-5, logger=None):
    with torch.no_grad():
        output = model(x)
    fl = [output.max() < 1. + alpha, output.min() > -alpha,
        ((output.sum(-1) - 1.).abs() < alpha).all()]
    if all(fl):
        msg = 'it seems that the output is a probability distribution,' +\
            ' please be sure that the logits are used!' + \
            f' See {checks_doc_path} for details.'
        if logger is None:
            warnings.warn(Warning(msg))
        else:
            logger.log(f'Warning: {msg}')
    return output.shape[-1]


def check_zero_gradients(grad, logger=None):
    z = grad.view(grad.shape[0], -1).abs().sum(-1)
    #print(grad[0, :10])
    if (z == 0).any():
        msg = f'there are {(z == 0).sum()} points with zero gradient!' + \
            ' This might lead to unreliable evaluation with gradient-based attacks.' + \
            f' See {checks_doc_path} for details.'
        if logger is None:
            warnings.warn(Warning(msg))
        else:
            logger.log(f'Warning: {msg}')


def check_square_sr(acc_dict, alpha=.002, logger=None):
    if 'square' in acc_dict.keys() and len(acc_dict) > 2:
        acc = min([v for k, v in acc_dict.items() if k != 'square'])
        if acc_dict['square'] < acc - alpha:
            msg = 'Square Attack has decreased the robust accuracy by' + \
                f' {acc - acc_dict["square"]:.2%}.' + \
                ' This might indicate that the robustness evaluation using' +\
                ' AutoAttack is unreliable. Consider running Square' +\
                ' Attack with more iterations and restarts or an adaptive attack.' + \
                f' See {checks_doc_path} for details.'
            if logger is None:
                warnings.warn(Warning(msg))
            else:
                logger.log(f'Warning: {msg}')


''' from https://stackoverflow.com/questions/26119521/counting-function-calls-python '''
def tracefunc(frame, event, args):
    if event == 'call' and frame.f_code.co_name in funcs.keys():
        funcs[frame.f_code.co_name] += 1


def check_dynamic(model, x, is_tf_model=False, logger=None):
    if is_tf_model:
        msg = 'the check for dynamic defenses is not currently supported'
    else:
        msg = None
        sys.settrace(tracefunc)
        model(x)
        sys.settrace(None)
        #for k, v in funcs.items():
        #    print(k, v)
        if any([c > 0 for c in funcs.values()]):
            msg = 'it seems to be a dynamic defense! The evaluation' + \
                ' with AutoAttack might be insufficient.' + \
                f' See {checks_doc_path} for details.'
    if not msg is None:
        if logger is None:
            warnings.warn(Warning(msg))
        else:
            logger.log(f'Warning: {msg}')
    #sys.settrace(None)


def check_n_classes(n_cls, attacks_to_run, apgd_targets, fab_targets,
    logger=None):
    msg = None
    if 'apgd-dlr' in attacks_to_run or 'apgd-t' in attacks_to_run:
        if n_cls <= 2:
            msg = f'with only {n_cls} classes it is not possible to use the DLR loss!'
        elif n_cls == 3:
            msg = f'with only {n_cls} classes it is not possible to use the targeted DLR loss!'
        elif 'apgd-t' in attacks_to_run and apgd_targets + 1 > n_cls:
            msg = f'it seems that more target classes ({apgd_targets})' + \
                f' than possible ({n_cls - 1}) are used in {"apgd-t".upper()}!'
    if 'fab-t' in attacks_to_run and fab_targets + 1 > n_cls:
        if msg is None:
            msg = f'it seems that more target classes ({fab_targets})' + \
                f' than possible ({n_cls - 1}) are used in FAB-T!'
        else:
            msg += f' Also, it seems that too many target classes ({fab_targets})' + \
                f' are used in {"fab-t".upper()} ({n_cls - 1} possible)!'
    if not msg is None:
        if logger is None:
            warnings.warn(Warning(msg))
        else:
            logger.log(f'Warning: {msg}')
5,206
35.412587
95
py
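To make the heuristic in `check_randomized` concrete, here is a small self-contained sketch; the `NoisyNet` module is invented for illustration. A model whose logits vary across repeated forward passes on the same input yields a maximum pairwise L2 difference above `alpha`, which triggers the version="rand" warning.

# Toy illustration of the randomized-defense heuristic; NoisyNet is made up.
import torch
import torch.nn as nn

from autoattack import checks
from autoattack.other_utils import Logger

class NoisyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(32, 10)

    def forward(self, x):
        # fresh noise on every call, so repeated passes disagree
        return self.fc(x.flatten(1)) + 0.1 * torch.randn(x.shape[0], 10)

model = NoisyNet().eval()
x = torch.rand(16, 1, 4, 8)
y = torch.randint(0, 10, (16,))
# expected log: 'Warning: it seems to be a randomized defense! ...'
checks.check_randomized(model, x, y, n=5, alpha=1e-4, logger=Logger(None))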
auto-attack
auto-attack-master/autoattack/state.py
import json
from dataclasses import dataclass, field, asdict
from datetime import datetime
from pathlib import Path
from typing import Optional, Set
import warnings

import torch


@dataclass
class EvaluationState:
    _attacks_to_run: Set[str]
    path: Optional[Path] = None
    _run_attacks: Set[str] = field(default_factory=set)
    _robust_flags: Optional[torch.Tensor] = None
    _last_saved: datetime = datetime(1, 1, 1)
    _SAVE_TIMEOUT: int = 60
    _clean_accuracy: float = float("nan")

    def to_disk(self, force: bool = False) -> None:
        seconds_since_last_save = (datetime.now() - self._last_saved).total_seconds()
        if self.path is None or (seconds_since_last_save < self._SAVE_TIMEOUT and not force):
            return
        self._last_saved = datetime.now()
        d = asdict(self)
        if self.robust_flags is not None:
            d["_robust_flags"] = d["_robust_flags"].cpu().tolist()
        d["_run_attacks"] = list(self._run_attacks)
        with self.path.open("w") as f:
            json.dump(d, f, default=str)

    @classmethod
    def from_disk(cls, path: Path) -> "EvaluationState":
        with path.open("r") as f:
            d = json.load(f)
        d["_robust_flags"] = torch.tensor(d["_robust_flags"], dtype=torch.bool)
        d["path"] = Path(d["path"])
        if path != d["path"]:
            warnings.warn(
                UserWarning(
                    "The given path is different from the one found in the state file."
                ))
        d["_last_saved"] = datetime.fromisoformat(d["_last_saved"])
        return cls(**d)

    @property
    def robust_flags(self) -> Optional[torch.Tensor]:
        return self._robust_flags

    @robust_flags.setter
    def robust_flags(self, robust_flags: torch.Tensor) -> None:
        self._robust_flags = robust_flags
        self.to_disk(force=True)

    @property
    def run_attacks(self) -> Set[str]:
        return self._run_attacks

    def add_run_attack(self, attack: str) -> None:
        self._run_attacks.add(attack)
        self.to_disk()

    @property
    def attacks_to_run(self) -> Set[str]:
        return self._attacks_to_run

    @attacks_to_run.setter
    def attacks_to_run(self, _: Set[str]) -> None:
        raise ValueError("attacks_to_run cannot be set outside of the constructor")

    @property
    def clean_accuracy(self) -> float:
        return self._clean_accuracy

    @clean_accuracy.setter
    def clean_accuracy(self, accuracy) -> None:
        self._clean_accuracy = accuracy
        self.to_disk(force=True)

    @property
    def robust_accuracy(self) -> float:
        if self.robust_flags is None:
            raise ValueError("robust_flags is not set yet. Start the attack first.")
        if self.attacks_to_run - self.run_attacks:
            warnings.warn("You are checking `robust_accuracy` before all the attacks"
                          " have been run.")
        return self.robust_flags.float().mean().item()
3,056
32.966667
87
py
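A minimal round-trip sketch for `EvaluationState`, using a throwaway temp file: the setters persist to disk (a plain `to_disk()` call is throttled by `_SAVE_TIMEOUT`, while force saves are not), and `from_disk` restores the flags for a resumed run. The file path and the attack names here are arbitrary.

# Minimal round-trip sketch; path and attack names are arbitrary.
import tempfile
from pathlib import Path

import torch

from autoattack.state import EvaluationState

path = Path(tempfile.mkdtemp()) / 'aa_state.json'
state = EvaluationState({'apgd-ce', 'square'}, path=path)
state.to_disk()                                  # first write, creates the file

state.clean_accuracy = 0.94                      # setter saves with force=True
state.add_run_attack('apgd-ce')                  # throttled save (_SAVE_TIMEOUT)
state.robust_flags = torch.tensor([True, False, True, True])  # force-saves all

restored = EvaluationState.from_disk(path)
assert 'apgd-ce' in restored.run_attacks
assert restored.robust_flags.tolist() == [True, False, True, True]
print('{:.2%}'.format(state.robust_accuracy))    # 75.00%, with a warning that
                                                 # 'square' has not been run yet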
auto-attack
auto-attack-master/autoattack/other_utils.py
import os

import collections.abc as container_abcs

import torch


class Logger():
    def __init__(self, log_path):
        self.log_path = log_path

    def log(self, str_to_log):
        print(str_to_log)
        if not self.log_path is None:
            with open(self.log_path, 'a') as f:
                f.write(str_to_log + '\n')
                f.flush()


def check_imgs(adv, x, norm):
    delta = (adv - x).view(adv.shape[0], -1)
    if norm == 'Linf':
        res = delta.abs().max(dim=1)[0]
    elif norm == 'L2':
        res = (delta ** 2).sum(dim=1).sqrt()
    elif norm == 'L1':
        res = delta.abs().sum(dim=1)

    str_det = 'max {} pert: {:.5f}, nan in imgs: {}, max in imgs: {:.5f}, min in imgs: {:.5f}'.format(
        norm, res.max(), (adv != adv).sum(), adv.max(), adv.min())
    print(str_det)

    return str_det


def L1_norm(x, keepdim=False):
    z = x.abs().view(x.shape[0], -1).sum(-1)
    if keepdim:
        z = z.view(-1, *[1]*(len(x.shape) - 1))
    return z


def L2_norm(x, keepdim=False):
    z = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
    if keepdim:
        z = z.view(-1, *[1]*(len(x.shape) - 1))
    return z


def L0_norm(x):
    return (x != 0.).view(x.shape[0], -1).sum(-1)


def makedir(path):
    if not os.path.exists(path):
        os.makedirs(path)


def zero_gradients(x):
    if isinstance(x, torch.Tensor):
        if x.grad is not None:
            x.grad.detach_()
            x.grad.zero_()
    elif isinstance(x, container_abcs.Iterable):
        for elem in x:
            zero_gradients(elem)
1,577
25.745763
102
py
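A quick numeric check of the norm helpers above: each norm reduces over all but the batch dimension, and `keepdim=True` reshapes the result so it broadcasts against the original tensor.

# Numeric check of the norm helpers on a batch of two 4-D tensors.
import torch

from autoattack.other_utils import L0_norm, L1_norm, L2_norm

x = torch.zeros(2, 3, 4, 4)
x[0, 0, 0, 0] = 3.
x[0, 0, 0, 1] = -4.

print(L1_norm(x))                      # tensor([7., 0.])
print(L2_norm(x))                      # tensor([5., 0.])
print(L0_norm(x))                      # tensor([2, 0])
print(L2_norm(x, keepdim=True).shape)  # torch.Size([2, 1, 1, 1])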
auto-attack
auto-attack-master/autoattack/fab_base.py
# Copyright (c) 2019-present, Francesco Croce # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import time import torch from autoattack.fab_projections import projection_linf, projection_l2,\ projection_l1 DEFAULT_EPS_DICT_BY_NORM = {'Linf': .3, 'L2': 1., 'L1': 5.0} class FABAttack(): """ Fast Adaptive Boundary Attack (Linf, L2, L1) https://arxiv.org/abs/1907.02044 :param norm: Lp-norm to minimize ('Linf', 'L2', 'L1' supported) :param n_restarts: number of random restarts :param n_iter: number of iterations :param eps: epsilon for the random restarts :param alpha_max: alpha_max :param eta: overshooting :param beta: backward step """ def __init__( self, norm='Linf', n_restarts=1, n_iter=100, eps=None, alpha_max=0.1, eta=1.05, beta=0.9, loss_fn=None, verbose=False, seed=0, targeted=False, device=None, n_target_classes=9): """ FAB-attack implementation in pytorch """ self.norm = norm self.n_restarts = n_restarts self.n_iter = n_iter self.eps = eps if eps is not None else DEFAULT_EPS_DICT_BY_NORM[norm] self.alpha_max = alpha_max self.eta = eta self.beta = beta self.targeted = targeted self.verbose = verbose self.seed = seed self.target_class = None self.device = device self.n_target_classes = n_target_classes def check_shape(self, x): return x if len(x.shape) > 0 else x.unsqueeze(0) def _predict_fn(self, x): raise NotImplementedError("Virtual function.") def _get_predicted_label(self, x): raise NotImplementedError("Virtual function.") def get_diff_logits_grads_batch(self, imgs, la): raise NotImplementedError("Virtual function.") def get_diff_logits_grads_batch_targeted(self, imgs, la, la_target): raise NotImplementedError("Virtual function.") def attack_single_run(self, x, y=None, use_rand_start=False, is_targeted=False): """ :param x: clean images :param y: clean labels, if None we use the predicted labels :param is_targeted True if we ise targeted version. 
Targeted class is assigned by `self.target_class` """ if self.device is None: self.device = x.device self.orig_dim = list(x.shape[1:]) self.ndims = len(self.orig_dim) x = x.detach().clone().float().to(self.device) #assert next(self.predict.parameters()).device == x.device y_pred = self._get_predicted_label(x) if y is None: y = y_pred.detach().clone().long().to(self.device) else: y = y.detach().clone().long().to(self.device) pred = y_pred == y corr_classified = pred.float().sum() if self.verbose: print('Clean accuracy: {:.2%}'.format(pred.float().mean())) if pred.sum() == 0: return x pred = self.check_shape(pred.nonzero().squeeze()) if is_targeted: output = self._predict_fn(x) la_target = output.sort(dim=-1)[1][:, -self.target_class] la_target2 = la_target[pred].detach().clone() startt = time.time() # runs the attack only on correctly classified points im2 = x[pred].detach().clone() la2 = y[pred].detach().clone() if len(im2.shape) == self.ndims: im2 = im2.unsqueeze(0) bs = im2.shape[0] u1 = torch.arange(bs) adv = im2.clone() adv_c = x.clone() res2 = 1e10 * torch.ones([bs]).to(self.device) x1 = im2.clone() x0 = im2.clone().reshape([bs, -1]) if use_rand_start: if self.norm == 'Linf': t = 2 * torch.rand(x1.shape).to(self.device) - 1 x1 = im2 + (torch.min(res2, self.eps * torch.ones(res2.shape) .to(self.device) ).reshape([-1, *[1]*self.ndims]) ) * t / (t.reshape([t.shape[0], -1]).abs() .max(dim=1, keepdim=True)[0] .reshape([-1, *[1]*self.ndims])) * .5 elif self.norm == 'L2': t = torch.randn(x1.shape).to(self.device) x1 = im2 + (torch.min(res2, self.eps * torch.ones(res2.shape) .to(self.device) ).reshape([-1, *[1]*self.ndims]) ) * t / ((t ** 2) .view(t.shape[0], -1) .sum(dim=-1) .sqrt() .view(t.shape[0], *[1]*self.ndims)) * .5 elif self.norm == 'L1': t = torch.randn(x1.shape).to(self.device) x1 = im2 + (torch.min(res2, self.eps * torch.ones(res2.shape) .to(self.device) ).reshape([-1, *[1]*self.ndims]) ) * t / (t.abs().view(t.shape[0], -1) .sum(dim=-1) .view(t.shape[0], *[1]*self.ndims)) / 2 x1 = x1.clamp(0.0, 1.0) counter_iter = 0 while counter_iter < self.n_iter: with torch.no_grad(): if is_targeted: df, dg = self.get_diff_logits_grads_batch_targeted(x1, la2, la_target2) else: df, dg = self.get_diff_logits_grads_batch(x1, la2) if self.norm == 'Linf': dist1 = df.abs() / (1e-12 + dg.abs() .reshape(dg.shape[0], dg.shape[1], -1) .sum(dim=-1)) elif self.norm == 'L2': dist1 = df.abs() / (1e-12 + (dg ** 2) .reshape(dg.shape[0], dg.shape[1], -1) .sum(dim=-1).sqrt()) elif self.norm == 'L1': dist1 = df.abs() / (1e-12 + dg.abs().reshape( [df.shape[0], df.shape[1], -1]).max(dim=2)[0]) else: raise ValueError('norm not supported') ind = dist1.min(dim=1)[1] dg2 = dg[u1, ind] b = (- df[u1, ind] + (dg2 * x1).reshape(x1.shape[0], -1) .sum(dim=-1)) w = dg2.reshape([bs, -1]) if self.norm == 'Linf': d3 = projection_linf( torch.cat((x1.reshape([bs, -1]), x0), 0), torch.cat((w, w), 0), torch.cat((b, b), 0)) elif self.norm == 'L2': d3 = projection_l2( torch.cat((x1.reshape([bs, -1]), x0), 0), torch.cat((w, w), 0), torch.cat((b, b), 0)) elif self.norm == 'L1': d3 = projection_l1( torch.cat((x1.reshape([bs, -1]), x0), 0), torch.cat((w, w), 0), torch.cat((b, b), 0)) d1 = torch.reshape(d3[:bs], x1.shape) d2 = torch.reshape(d3[-bs:], x1.shape) if self.norm == 'Linf': a0 = d3.abs().max(dim=1, keepdim=True)[0]\ .view(-1, *[1]*self.ndims) elif self.norm == 'L2': a0 = (d3 ** 2).sum(dim=1, keepdim=True).sqrt()\ .view(-1, *[1]*self.ndims) elif self.norm == 'L1': a0 = d3.abs().sum(dim=1, keepdim=True)\ .view(-1, *[1]*self.ndims) 
a0 = torch.max(a0, 1e-8 * torch.ones( a0.shape).to(self.device)) a1 = a0[:bs] a2 = a0[-bs:] alpha = torch.min(torch.max(a1 / (a1 + a2), torch.zeros(a1.shape) .to(self.device)), self.alpha_max * torch.ones(a1.shape) .to(self.device)) x1 = ((x1 + self.eta * d1) * (1 - alpha) + (im2 + d2 * self.eta) * alpha).clamp(0.0, 1.0) is_adv = self._get_predicted_label(x1) != la2 if is_adv.sum() > 0: ind_adv = is_adv.nonzero().squeeze() ind_adv = self.check_shape(ind_adv) if self.norm == 'Linf': t = (x1[ind_adv] - im2[ind_adv]).reshape( [ind_adv.shape[0], -1]).abs().max(dim=1)[0] elif self.norm == 'L2': t = ((x1[ind_adv] - im2[ind_adv]) ** 2)\ .reshape(ind_adv.shape[0], -1).sum(dim=-1).sqrt() elif self.norm == 'L1': t = (x1[ind_adv] - im2[ind_adv])\ .abs().reshape(ind_adv.shape[0], -1).sum(dim=-1) adv[ind_adv] = x1[ind_adv] * (t < res2[ind_adv]).\ float().reshape([-1, *[1]*self.ndims]) + adv[ind_adv]\ * (t >= res2[ind_adv]).float().reshape( [-1, *[1]*self.ndims]) res2[ind_adv] = t * (t < res2[ind_adv]).float()\ + res2[ind_adv] * (t >= res2[ind_adv]).float() x1[ind_adv] = im2[ind_adv] + ( x1[ind_adv] - im2[ind_adv]) * self.beta counter_iter += 1 ind_succ = res2 < 1e10 if self.verbose: print('success rate: {:.0f}/{:.0f}' .format(ind_succ.float().sum(), corr_classified) + ' (on correctly classified points) in {:.1f} s' .format(time.time() - startt)) ind_succ = self.check_shape(ind_succ.nonzero().squeeze()) adv_c[pred[ind_succ]] = adv[ind_succ].clone() return adv_c def perturb(self, x, y): if self.device is None: self.device = x.device adv = x.clone() with torch.no_grad(): acc = self._predict_fn(x).max(1)[1] == y startt = time.time() torch.random.manual_seed(self.seed) torch.cuda.random.manual_seed(self.seed) if not self.targeted: for counter in range(self.n_restarts): ind_to_fool = acc.nonzero().squeeze() if len(ind_to_fool.shape) == 0: ind_to_fool = ind_to_fool.unsqueeze(0) if ind_to_fool.numel() != 0: x_to_fool, y_to_fool = x[ind_to_fool].clone(), y[ind_to_fool].clone() adv_curr = self.attack_single_run(x_to_fool, y_to_fool, use_rand_start=(counter > 0), is_targeted=False) acc_curr = self._predict_fn(adv_curr).max(1)[1] == y_to_fool if self.norm == 'Linf': res = (x_to_fool - adv_curr).abs().reshape(x_to_fool.shape[0], -1).max(1)[0] elif self.norm == 'L2': res = ((x_to_fool - adv_curr) ** 2).reshape(x_to_fool.shape[0], -1).sum(dim=-1).sqrt() elif self.norm == 'L1': res = (x_to_fool - adv_curr).abs().reshape(x_to_fool.shape[0], -1).sum(-1) acc_curr = torch.max(acc_curr, res > self.eps) ind_curr = (acc_curr == 0).nonzero().squeeze() acc[ind_to_fool[ind_curr]] = 0 adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone() if self.verbose: print('restart {} - robust accuracy: {:.2%} at eps = {:.5f} - cum. 
time: {:.1f} s'.format( counter, acc.float().mean(), self.eps, time.time() - startt)) else: for target_class in range(2, self.n_target_classes + 2): self.target_class = target_class for counter in range(self.n_restarts): ind_to_fool = acc.nonzero().squeeze() if len(ind_to_fool.shape) == 0: ind_to_fool = ind_to_fool.unsqueeze(0) if ind_to_fool.numel() != 0: x_to_fool, y_to_fool = x[ind_to_fool].clone(), y[ind_to_fool].clone() adv_curr = self.attack_single_run(x_to_fool, y_to_fool, use_rand_start=(counter > 0), is_targeted=True) acc_curr = self._predict_fn(adv_curr).max(1)[1] == y_to_fool if self.norm == 'Linf': res = (x_to_fool - adv_curr).abs().reshape(x_to_fool.shape[0], -1).max(1)[0] elif self.norm == 'L2': res = ((x_to_fool - adv_curr) ** 2).reshape(x_to_fool.shape[0], -1).sum(dim=-1).sqrt() elif self.norm == 'L1': res = (x_to_fool - adv_curr).abs().reshape(x_to_fool.shape[0], -1).sum(-1) acc_curr = torch.max(acc_curr, res > self.eps) ind_curr = (acc_curr == 0).nonzero().squeeze() acc[ind_to_fool[ind_curr]] = 0 adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone() if self.verbose: print('restart {} - target_class {} - robust accuracy: {:.2%} at eps = {:.5f} - cum. time: {:.1f} s'.format( counter, self.target_class, acc.float().mean(), self.eps, time.time() - startt)) return adv
14,703
43.557576
140
py
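The core FAB update is the convex combination computed from `a1 = ||d1||` and `a2 = ||d2||` near the end of the iteration loop above: `alpha = min(max(a1 / (a1 + a2), 0), alpha_max)`. The toy sketch below replays that arithmetic with made-up scalar norms and two-pixel "images" to show how `alpha_max` caps the pull back toward the original point.

# Toy replay of FAB's interpolation step; all numbers are made up.
import torch

alpha_max, eta = 0.1, 1.05
a1 = torch.tensor([0.02, 0.50])   # ||d1||: projection distance from the iterate
a2 = torch.tensor([0.40, 0.10])   # ||d2||: projection distance from the origin

alpha = torch.clamp(a1 / (a1 + a2), min=0., max=alpha_max)
print(alpha)  # tensor([0.0476, 0.1000]): follow d1 while it stays cheap, and
              # cap the pull toward the original point at alpha_max

x1 = torch.tensor([0.30, 0.70])   # current iterate
im2 = torch.tensor([0.20, 0.60])  # original point
d1 = torch.tensor([0.01, -0.20])  # projection step from x1
d2 = torch.tensor([0.05, 0.02])   # projection step from im2
x1_new = ((x1 + eta * d1) * (1 - alpha)
          + (im2 + eta * d2) * alpha).clamp(0., 1.)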
auto-attack
auto-attack-master/autoattack/square.py
# Copyright (c) 2020-present, Francesco Croce # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import torch import time import math import torch.nn.functional as F from autoattack.autopgd_base import L1_projection class SquareAttack(): """ Square Attack https://arxiv.org/abs/1912.00049 :param predict: forward pass function :param norm: Lp-norm of the attack ('Linf', 'L2' supported) :param n_restarts: number of random restarts :param n_queries: max number of queries (each restart) :param eps: bound on the norm of perturbations :param seed: random seed for the starting point :param p_init: parameter to control size of squares :param loss: loss function optimized ('margin', 'ce' supported) :param resc_schedule adapt schedule of p to n_queries """ def __init__( self, predict, norm='Linf', n_queries=5000, eps=None, p_init=.8, n_restarts=1, seed=0, verbose=False, targeted=False, loss='margin', resc_schedule=True, device=None): """ Square Attack implementation in PyTorch """ self.predict = predict self.norm = norm self.n_queries = n_queries self.eps = eps self.p_init = p_init self.n_restarts = n_restarts self.seed = seed self.verbose = verbose self.targeted = targeted self.loss = loss self.rescale_schedule = resc_schedule self.device = device self.return_all = False def margin_and_loss(self, x, y): """ :param y: correct labels if untargeted else target labels """ logits = self.predict(x) xent = F.cross_entropy(logits, y, reduction='none') u = torch.arange(x.shape[0]) y_corr = logits[u, y].clone() logits[u, y] = -float('inf') y_others = logits.max(dim=-1)[0] if not self.targeted: if self.loss == 'ce': return y_corr - y_others, -1. 
* xent elif self.loss == 'margin': return y_corr - y_others, y_corr - y_others else: return y_others - y_corr, xent def init_hyperparam(self, x): assert self.norm in ['Linf', 'L2', 'L1'] assert not self.eps is None assert self.loss in ['ce', 'margin'] if self.device is None: self.device = x.device self.orig_dim = list(x.shape[1:]) self.ndims = len(self.orig_dim) if self.seed is None: self.seed = time.time() def random_target_classes(self, y_pred, n_classes): y = torch.zeros_like(y_pred) for counter in range(y_pred.shape[0]): l = list(range(n_classes)) l.remove(y_pred[counter]) t = self.random_int(0, len(l)) y[counter] = l[t] return y.long().to(self.device) def check_shape(self, x): return x if len(x.shape) == (self.ndims + 1) else x.unsqueeze(0) def random_choice(self, shape): t = 2 * torch.rand(shape).to(self.device) - 1 return torch.sign(t) def random_int(self, low=0, high=1, shape=[1]): t = low + (high - low) * torch.rand(shape).to(self.device) return t.long() def normalize(self, x): if self.norm == 'Linf': t = x.abs().view(x.shape[0], -1).max(1)[0] return x / (t.view(-1, *([1] * self.ndims)) + 1e-12) elif self.norm == 'L2': t = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt() return x / (t.view(-1, *([1] * self.ndims)) + 1e-12) elif self.norm == 'L1': t = x.abs().view(x.shape[0], -1).sum(dim=-1) return x / (t.view(-1, *([1] * self.ndims)) + 1e-12) def lp_norm(self, x): if self.norm == 'L2': t = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt() return t.view(-1, *([1] * self.ndims)) elif self.norm == 'L1': t = x.abs().view(x.shape[0], -1).sum(dim=-1) return t.view(-1, *([1] * self.ndims)) def eta_rectangles(self, x, y): delta = torch.zeros([x, y]).to(self.device) x_c, y_c = x // 2 + 1, y // 2 + 1 counter2 = [x_c - 1, y_c - 1] if self.norm == 'L2': for counter in range(0, max(x_c, y_c)): delta[max(counter2[0], 0):min(counter2[0] + (2*counter + 1), x), max(0, counter2[1]):min(counter2[1] + (2*counter + 1), y) ] += 1.0/(torch.Tensor([counter + 1]).view(1, 1).to( self.device) ** 2) counter2[0] -= 1 counter2[1] -= 1 delta /= (delta ** 2).sum(dim=(0, 1), keepdim=True).sqrt() elif self.norm == 'L1': for counter in range(0, max(x_c, y_c)): delta[max(counter2[0], 0):min(counter2[0] + (2*counter + 1), x), max(0, counter2[1]):min(counter2[1] + (2*counter + 1), y) ] += 1.0/(torch.Tensor([counter + 1]).view(1, 1).to( self.device) ** 4) counter2[0] -= 1 counter2[1] -= 1 delta /= delta.abs().sum(dim=(), keepdim=True) return delta def eta(self, s): if self.norm == 'L2': delta = torch.zeros([s, s]).to(self.device) delta[:s // 2] = self.eta_rectangles(s // 2, s) delta[s // 2:] = -1. * self.eta_rectangles(s - s // 2, s) delta /= (delta ** 2).sum(dim=(0, 1), keepdim=True).sqrt() elif self.norm == 'L1': delta = torch.zeros([s, s]).to(self.device) delta[:s // 2] = self.eta_rectangles(s // 2, s) delta[s // 2:] = -1. 
* self.eta_rectangles(s - s // 2, s) #delta = self.eta_rectangles(s, s) delta /= delta.abs().sum(dim=(), keepdim=True) #delta *= (torch.rand([1]) - .5).sign().to(self.device) if torch.rand([1]) > 0.5: delta = delta.permute([1, 0]) return delta def p_selection(self, it): """ schedule to decrease the parameter p """ if self.rescale_schedule: it = int(it / self.n_queries * 10000) if 10 < it <= 50: p = self.p_init / 2 elif 50 < it <= 200: p = self.p_init / 4 elif 200 < it <= 500: p = self.p_init / 8 elif 500 < it <= 1000: p = self.p_init / 16 elif 1000 < it <= 2000: p = self.p_init / 32 elif 2000 < it <= 4000: p = self.p_init / 64 elif 4000 < it <= 6000: p = self.p_init / 128 elif 6000 < it <= 8000: p = self.p_init / 256 elif 8000 < it: p = self.p_init / 512 else: p = self.p_init return p def attack_single_run(self, x, y): with torch.no_grad(): adv = x.clone() c, h, w = x.shape[1:] n_features = c * h * w n_ex_total = x.shape[0] if self.verbose and h != w: print('square attack may not work properly for non-square image.') print('for details please refer to https://github.com/fra31/auto-attack/issues/95') if self.norm == 'Linf': x_best = torch.clamp(x + self.eps * self.random_choice( [x.shape[0], c, 1, w]), 0., 1.) margin_min, loss_min = self.margin_and_loss(x_best, y) n_queries = torch.ones(x.shape[0]).to(self.device) s_init = int(math.sqrt(self.p_init * n_features / c)) if (margin_min < 0.0).all(): return n_queries, x_best for i_iter in range(self.n_queries): idx_to_fool = (margin_min > 0.0).nonzero().squeeze() x_curr = self.check_shape(x[idx_to_fool]) x_best_curr = self.check_shape(x_best[idx_to_fool]) y_curr = y[idx_to_fool] if len(y_curr.shape) == 0: y_curr = y_curr.unsqueeze(0) margin_min_curr = margin_min[idx_to_fool] loss_min_curr = loss_min[idx_to_fool] p = self.p_selection(i_iter) s = max(int(round(math.sqrt(p * n_features / c))), 1) s = min(s, min(h, w)) vh = self.random_int(0, h - s) vw = self.random_int(0, w - s) new_deltas = torch.zeros([c, h, w]).to(self.device) new_deltas[:, vh:vh + s, vw:vw + s ] = 2. * self.eps * self.random_choice([c, 1, 1]) x_new = x_best_curr + new_deltas x_new = torch.min(torch.max(x_new, x_curr - self.eps), x_curr + self.eps) x_new = torch.clamp(x_new, 0., 1.) x_new = self.check_shape(x_new) margin, loss = self.margin_and_loss(x_new, y_curr) # update loss if new loss is better idx_improved = (loss < loss_min_curr).float() loss_min[idx_to_fool] = idx_improved * loss + ( 1. - idx_improved) * loss_min_curr # update margin and x_best if new loss is better # or misclassification idx_miscl = (margin <= 0.).float() idx_improved = torch.max(idx_improved, idx_miscl) margin_min[idx_to_fool] = idx_improved * margin + ( 1. - idx_improved) * margin_min_curr idx_improved = idx_improved.reshape([-1, *[1]*len(x.shape[:-1])]) x_best[idx_to_fool] = idx_improved * x_new + ( 1. - idx_improved) * x_best_curr n_queries[idx_to_fool] += 1. 
ind_succ = (margin_min <= 0.).nonzero().squeeze() if self.verbose and ind_succ.numel() != 0: print('{}'.format(i_iter + 1), '- success rate={}/{} ({:.2%})'.format( ind_succ.numel(), n_ex_total, float(ind_succ.numel()) / n_ex_total), '- avg # queries={:.1f}'.format( n_queries[ind_succ].mean().item()), '- med # queries={:.1f}'.format( n_queries[ind_succ].median().item()), '- loss={:.3f}'.format(loss_min.mean())) if ind_succ.numel() == n_ex_total: break elif self.norm == 'L2': delta_init = torch.zeros_like(x) s = h // 5 sp_init = (h - s * 5) // 2 vh = sp_init + 0 for _ in range(h // s): vw = sp_init + 0 for _ in range(w // s): delta_init[:, :, vh:vh + s, vw:vw + s] += self.eta( s).view(1, 1, s, s) * self.random_choice( [x.shape[0], c, 1, 1]) vw += s vh += s x_best = torch.clamp(x + self.normalize(delta_init ) * self.eps, 0., 1.) margin_min, loss_min = self.margin_and_loss(x_best, y) n_queries = torch.ones(x.shape[0]).to(self.device) s_init = int(math.sqrt(self.p_init * n_features / c)) if (margin_min < 0.0).all(): return n_queries, x_best for i_iter in range(self.n_queries): idx_to_fool = (margin_min > 0.0).nonzero().squeeze() x_curr = self.check_shape(x[idx_to_fool]) x_best_curr = self.check_shape(x_best[idx_to_fool]) y_curr = y[idx_to_fool] if len(y_curr.shape) == 0: y_curr = y_curr.unsqueeze(0) margin_min_curr = margin_min[idx_to_fool] loss_min_curr = loss_min[idx_to_fool] delta_curr = x_best_curr - x_curr p = self.p_selection(i_iter) s = max(int(round(math.sqrt(p * n_features / c))), 3) if s % 2 == 0: s += 1 s = min(s, min(h, w)) vh = self.random_int(0, h - s) vw = self.random_int(0, w - s) new_deltas_mask = torch.zeros_like(x_curr) new_deltas_mask[:, :, vh:vh + s, vw:vw + s] = 1.0 norms_window_1 = (delta_curr[:, :, vh:vh + s, vw:vw + s ] ** 2).sum(dim=(-2, -1), keepdim=True).sqrt() vh2 = self.random_int(0, h - s) vw2 = self.random_int(0, w - s) new_deltas_mask_2 = torch.zeros_like(x_curr) new_deltas_mask_2[:, :, vh2:vh2 + s, vw2:vw2 + s] = 1. norms_image = self.lp_norm(x_best_curr - x_curr) mask_image = torch.max(new_deltas_mask, new_deltas_mask_2) norms_windows = ((delta_curr * mask_image) ** 2).sum(dim=( -2, -1), keepdim=True).sqrt() new_deltas = torch.ones([x_curr.shape[0], c, s, s] ).to(self.device) new_deltas *= (self.eta(s).view(1, 1, s, s) * self.random_choice([x_curr.shape[0], c, 1, 1])) old_deltas = delta_curr[:, :, vh:vh + s, vw:vw + s] / ( 1e-12 + norms_window_1) new_deltas += old_deltas new_deltas = new_deltas / (1e-12 + (new_deltas ** 2).sum( dim=(-2, -1), keepdim=True).sqrt()) * (torch.max( (self.eps * torch.ones_like(new_deltas)) ** 2 - norms_image ** 2, torch.zeros_like(new_deltas)) / c + norms_windows ** 2).sqrt() delta_curr[:, :, vh2:vh2 + s, vw2:vw2 + s] = 0. delta_curr[:, :, vh:vh + s, vw:vw + s] = new_deltas + 0 x_new = torch.clamp(x_curr + self.normalize(delta_curr ) * self.eps, 0. ,1.) x_new = self.check_shape(x_new) norms_image = self.lp_norm(x_new - x_curr) margin, loss = self.margin_and_loss(x_new, y_curr) # update loss if new loss is better idx_improved = (loss < loss_min_curr).float() loss_min[idx_to_fool] = idx_improved * loss + ( 1. - idx_improved) * loss_min_curr # update margin and x_best if new loss is better # or misclassification idx_miscl = (margin <= 0.).float() idx_improved = torch.max(idx_improved, idx_miscl) margin_min[idx_to_fool] = idx_improved * margin + ( 1. - idx_improved) * margin_min_curr idx_improved = idx_improved.reshape([-1, *[1]*len(x.shape[:-1])]) x_best[idx_to_fool] = idx_improved * x_new + ( 1. 
- idx_improved) * x_best_curr n_queries[idx_to_fool] += 1. ind_succ = (margin_min <= 0.).nonzero().squeeze() if self.verbose and ind_succ.numel() != 0: print('{}'.format(i_iter + 1), '- success rate={}/{} ({:.2%})'.format( ind_succ.numel(), n_ex_total, float( ind_succ.numel()) / n_ex_total), '- avg # queries={:.1f}'.format( n_queries[ind_succ].mean().item()), '- med # queries={:.1f}'.format( n_queries[ind_succ].median().item()), '- loss={:.3f}'.format(loss_min.mean())) assert (x_new != x_new).sum() == 0 assert (x_best != x_best).sum() == 0 if ind_succ.numel() == n_ex_total: break elif self.norm == 'L1': delta_init = torch.zeros_like(x) s = h // 5 sp_init = (h - s * 5) // 2 vh = sp_init + 0 for _ in range(h // s): vw = sp_init + 0 for _ in range(w // s): delta_init[:, :, vh:vh + s, vw:vw + s] += self.eta( s).view(1, 1, s, s) * self.random_choice( [x.shape[0], c, 1, 1]) vw += s vh += s #x_best = torch.clamp(x + self.normalize(delta_init # ) * self.eps, 0., 1.) r_best = L1_projection(x, delta_init, self.eps * (1. - 1e-6)) x_best = x + delta_init + r_best margin_min, loss_min = self.margin_and_loss(x_best, y) n_queries = torch.ones(x.shape[0]).to(self.device) s_init = int(math.sqrt(self.p_init * n_features / c)) if (margin_min < 0.0).all(): return n_queries, x_best for i_iter in range(self.n_queries): idx_to_fool = (margin_min > 0.0).nonzero().squeeze() x_curr = self.check_shape(x[idx_to_fool]) x_best_curr = self.check_shape(x_best[idx_to_fool]) y_curr = y[idx_to_fool] if len(y_curr.shape) == 0: y_curr = y_curr.unsqueeze(0) margin_min_curr = margin_min[idx_to_fool] loss_min_curr = loss_min[idx_to_fool] delta_curr = x_best_curr - x_curr p = self.p_selection(i_iter) s = max(int(round(math.sqrt(p * n_features / c))), 3) if s % 2 == 0: s += 1 #pass s = min(s, min(h, w)) vh = self.random_int(0, h - s) vw = self.random_int(0, w - s) new_deltas_mask = torch.zeros_like(x_curr) new_deltas_mask[:, :, vh:vh + s, vw:vw + s] = 1.0 norms_window_1 = delta_curr[:, :, vh:vh + s, vw:vw + s ].abs().sum(dim=(-2, -1), keepdim=True) vh2 = self.random_int(0, h - s) vw2 = self.random_int(0, w - s) new_deltas_mask_2 = torch.zeros_like(x_curr) new_deltas_mask_2[:, :, vh2:vh2 + s, vw2:vw2 + s] = 1. norms_image = self.lp_norm(x_best_curr - x_curr) mask_image = torch.max(new_deltas_mask, new_deltas_mask_2) norms_windows = (delta_curr * mask_image).abs().sum(dim=( -2, -1), keepdim=True) new_deltas = torch.ones([x_curr.shape[0], c, s, s] ).to(self.device) new_deltas *= (self.eta(s).view(1, 1, s, s) * self.random_choice([x_curr.shape[0], c, 1, 1])) old_deltas = delta_curr[:, :, vh:vh + s, vw:vw + s] / ( 1e-12 + norms_window_1) new_deltas += old_deltas new_deltas = new_deltas / (1e-12 + new_deltas.abs().sum( dim=(-2, -1), keepdim=True)) * (torch.max( self.eps * torch.ones_like(norms_image) - norms_image, torch.zeros_like(norms_image)) / c + norms_windows) * c delta_curr[:, :, vh2:vh2 + s, vw2:vw2 + s] = 0. delta_curr[:, :, vh:vh + s, vw:vw + s] = new_deltas + 0 # #norms_image_old = self.lp_norm(delta_curr) r_curr = L1_projection(x_curr, delta_curr, self.eps * (1. - 1e-6)) x_new = x_curr + delta_curr + r_curr x_new = self.check_shape(x_new) norms_image = self.lp_norm(x_new - x_curr) margin, loss = self.margin_and_loss(x_new, y_curr) # update loss if new loss is better idx_improved = (loss < loss_min_curr).float() loss_min[idx_to_fool] = idx_improved * loss + ( 1. 
- idx_improved) * loss_min_curr # update margin and x_best if new loss is better # or misclassification idx_miscl = (margin <= 0.).float() idx_improved = torch.max(idx_improved, idx_miscl) margin_min[idx_to_fool] = idx_improved * margin + ( 1. - idx_improved) * margin_min_curr idx_improved = idx_improved.reshape([-1, *[1]*len(x.shape[:-1])]) x_best[idx_to_fool] = idx_improved * x_new + ( 1. - idx_improved) * x_best_curr n_queries[idx_to_fool] += 1. ind_succ = (margin_min <= 0.).nonzero().squeeze() if self.verbose and ind_succ.numel() != 0: print('{}'.format(i_iter + 1), '- success rate={}/{} ({:.2%})'.format( ind_succ.numel(), n_ex_total, float( ind_succ.numel()) / n_ex_total), '- avg # queries={:.1f}'.format( n_queries[ind_succ].mean().item()), '- med # queries={:.1f}'.format( n_queries[ind_succ].median().item()), '- loss={:.3f}'.format(loss_min.mean()), '- max pert={:.3f}'.format(norms_image.max().item()), #'- old pert={:.3f}'.format(norms_image_old.max().item()) ) assert (x_new != x_new).sum() == 0 assert (x_best != x_best).sum() == 0 if ind_succ.numel() == n_ex_total: break return n_queries, x_best def perturb(self, x, y=None): """ :param x: clean images :param y: untargeted attack -> clean labels, if None we use the predicted labels targeted attack -> target labels, if None random classes, different from the predicted ones, are sampled """ self.init_hyperparam(x) adv = x.clone() #adv_all = x.clone() if y is None: if not self.targeted: with torch.no_grad(): output = self.predict(x) y_pred = output.max(1)[1] y = y_pred.detach().clone().long().to(self.device) else: with torch.no_grad(): output = self.predict(x) n_classes = output.shape[-1] y_pred = output.max(1)[1] y = self.random_target_classes(y_pred, n_classes) else: y = y.detach().clone().long().to(self.device) if not self.targeted: acc = self.predict(x).max(1)[1] == y else: acc = self.predict(x).max(1)[1] != y startt = time.time() torch.random.manual_seed(self.seed) torch.cuda.random.manual_seed(self.seed) for counter in range(self.n_restarts): ind_to_fool = acc.nonzero().squeeze() if len(ind_to_fool.shape) == 0: ind_to_fool = ind_to_fool.unsqueeze(0) if ind_to_fool.numel() != 0: x_to_fool = x[ind_to_fool].clone() y_to_fool = y[ind_to_fool].clone() _, adv_curr = self.attack_single_run(x_to_fool, y_to_fool) output_curr = self.predict(adv_curr) if not self.targeted: acc_curr = output_curr.max(1)[1] == y_to_fool else: acc_curr = output_curr.max(1)[1] != y_to_fool ind_curr = (acc_curr == 0).nonzero().squeeze() acc[ind_to_fool[ind_curr]] = 0 adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone() #adv_all[ind_to_fool] = adv_curr.clone() if self.verbose: print('restart {} - robust accuracy: {:.2%}'.format( counter, acc.float().mean()), '- cum. time: {:.1f} s'.format( time.time() - startt)) if not self.return_all: return adv else: print('returning final points') return adv_all
26,018
41.033926
99
py
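The square-size schedule is the part of the attack that is easiest to sanity-check numerically. The function below mirrors `p_selection` above (same breakpoints, with `resc_schedule` folded in) and prints the resulting square side for CIFAR-sized inputs in the Linf branch; the sampled iteration counts are arbitrary.

# Mirror of SquareAttack.p_selection, traced on a few arbitrary iterations.
import math

def p_selection(it, p_init=0.8, n_queries=5000):
    it = int(it / n_queries * 10000)   # rescale to the reference budget
    if 10 < it <= 50: p = p_init / 2
    elif 50 < it <= 200: p = p_init / 4
    elif 200 < it <= 500: p = p_init / 8
    elif 500 < it <= 1000: p = p_init / 16
    elif 1000 < it <= 2000: p = p_init / 32
    elif 2000 < it <= 4000: p = p_init / 64
    elif 4000 < it <= 6000: p = p_init / 128
    elif 6000 < it <= 8000: p = p_init / 256
    elif 8000 < it: p = p_init / 512
    else: p = p_init
    return p

c, h, w = 3, 32, 32                    # CIFAR-sized input
n_features = c * h * w
for it in [0, 100, 500, 2500, 4999]:
    p = p_selection(it)
    s = max(int(round(math.sqrt(p * n_features / c))), 1)   # Linf branch
    print('iter {:4d}: p={:.4f}, square side s={}'.format(it, p, s))
# prints s = 29, 14, 7, 3, 1: the squares shrink as the query budget is spent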
auto-attack
auto-attack-master/autoattack/autopgd_base.py
# Copyright (c) 2020-present, Francesco Croce
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree
#

import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import random

from autoattack.other_utils import L0_norm, L1_norm, L2_norm
from autoattack.checks import check_zero_gradients


def L1_projection(x2, y2, eps1):
    '''
    x2: center of the L1 ball (bs x input_dim)
    y2: current perturbation (x2 + y2 is the point to be projected)
    eps1: radius of the L1 ball

    output: delta s.th. ||y2 + delta||_1 <= eps1
    and 0 <= x2 + y2 + delta <= 1
    '''

    x = x2.clone().float().view(x2.shape[0], -1)
    y = y2.clone().float().view(y2.shape[0], -1)
    sigma = y.clone().sign()
    u = torch.min(1 - x - y, x + y)
    #u = torch.min(u, epsinf - torch.clone(y).abs())
    u = torch.min(torch.zeros_like(y), u)
    l = -torch.clone(y).abs()
    d = u.clone()

    bs, indbs = torch.sort(-torch.cat((u, l), 1), dim=1)
    bs2 = torch.cat((bs[:, 1:], torch.zeros(bs.shape[0], 1).to(bs.device)), 1)

    inu = 2*(indbs < u.shape[1]).float() - 1
    size1 = inu.cumsum(dim=1)

    s1 = -u.sum(dim=1)

    c = eps1 - y.clone().abs().sum(dim=1)
    c5 = s1 + c < 0
    c2 = c5.nonzero().squeeze(1)

    s = s1.unsqueeze(-1) + torch.cumsum((bs2 - bs) * size1, dim=1)

    if c2.nelement() != 0:
        lb = torch.zeros_like(c2).float()
        ub = torch.ones_like(lb) * (bs.shape[1] - 1)
        #print(c2.shape, lb.shape)

        nitermax = torch.ceil(torch.log2(torch.tensor(bs.shape[1]).float()))
        counter2 = torch.zeros_like(lb).long()
        counter = 0

        while counter < nitermax:
            counter4 = torch.floor((lb + ub) / 2.)
            counter2 = counter4.type(torch.LongTensor)

            c8 = s[c2, counter2] + c[c2] < 0
            ind3 = c8.nonzero().squeeze(1)
            ind32 = (~c8).nonzero().squeeze(1)
            #print(ind3.shape)
            if ind3.nelement() != 0:
                lb[ind3] = counter4[ind3]
            if ind32.nelement() != 0:
                ub[ind32] = counter4[ind32]

            #print(lb, ub)
            counter += 1

        lb2 = lb.long()
        alpha = (-s[c2, lb2] - c[c2]) / size1[c2, lb2 + 1] + bs2[c2, lb2]
        d[c2] = -torch.min(torch.max(-u[c2], alpha.unsqueeze(-1)), -l[c2])

    return (sigma * d).view(x2.shape)


class APGDAttack():
    """
    AutoPGD
    https://arxiv.org/abs/2003.01690

    :param predict: forward pass function
    :param norm: Lp-norm of the attack ('Linf', 'L2', 'L1' supported)
    :param n_restarts: number of random restarts
    :param n_iter: number of iterations
    :param eps: bound on the norm of perturbations
    :param seed: random seed for the starting point
    :param loss: loss to optimize ('ce', 'dlr' supported)
    :param eot_iter: iterations for Expectation over Transformation
    :param rho: parameter for decreasing the step size
    """

    def __init__(
            self,
            predict,
            n_iter=100,
            norm='Linf',
            n_restarts=1,
            eps=None,
            seed=0,
            loss='ce',
            eot_iter=1,
            rho=.75,
            topk=None,
            verbose=False,
            device=None,
            use_largereps=False,
            is_tf_model=False,
            logger=None):
        """
        AutoPGD implementation in PyTorch
        """

        self.model = predict
        self.n_iter = n_iter
        self.eps = eps
        self.norm = norm
        self.n_restarts = n_restarts
        self.seed = seed
        self.loss = loss
        self.eot_iter = eot_iter
        self.thr_decr = rho
        self.topk = topk
        self.verbose = verbose
        self.device = device
        self.use_rs = True
        #self.init_point = None
        self.use_largereps = use_largereps
        #self.larger_epss = None
        #self.iters = None
        self.n_iter_orig = n_iter + 0
        self.eps_orig = eps + 0.
self.is_tf_model = is_tf_model self.y_target = None self.logger = logger assert self.norm in ['Linf', 'L2', 'L1'] assert not self.eps is None ### set parameters for checkpoints self.n_iter_2 = max(int(0.22 * self.n_iter), 1) self.n_iter_min = max(int(0.06 * self.n_iter), 1) self.size_decr = max(int(0.03 * self.n_iter), 1) def init_hyperparam(self, x): if self.device is None: self.device = x.device self.orig_dim = list(x.shape[1:]) self.ndims = len(self.orig_dim) if self.seed is None: self.seed = time.time() def check_oscillation(self, x, j, k, y5, k3=0.75): t = torch.zeros(x.shape[1]).to(self.device) for counter5 in range(k): t += (x[j - counter5] > x[j - counter5 - 1]).float() return (t <= k * k3 * torch.ones_like(t)).float() def check_shape(self, x): return x if len(x.shape) > 0 else x.unsqueeze(0) def normalize(self, x): if self.norm == 'Linf': t = x.abs().view(x.shape[0], -1).max(1)[0] elif self.norm == 'L2': t = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt() elif self.norm == 'L1': try: t = x.abs().view(x.shape[0], -1).sum(dim=-1) except: t = x.abs().reshape([x.shape[0], -1]).sum(dim=-1) return x / (t.view(-1, *([1] * self.ndims)) + 1e-12) def dlr_loss(self, x, y): x_sorted, ind_sorted = x.sort(dim=1) ind = (ind_sorted[:, -1] == y).float() u = torch.arange(x.shape[0]) return -(x[u, y] - x_sorted[:, -2] * ind - x_sorted[:, -1] * ( 1. - ind)) / (x_sorted[:, -1] - x_sorted[:, -3] + 1e-12) # def attack_single_run(self, x, y, x_init=None): if len(x.shape) < self.ndims: x = x.unsqueeze(0) y = y.unsqueeze(0) if self.norm == 'Linf': t = 2 * torch.rand(x.shape).to(self.device).detach() - 1 x_adv = x + self.eps * torch.ones_like(x ).detach() * self.normalize(t) elif self.norm == 'L2': t = torch.randn(x.shape).to(self.device).detach() x_adv = x + self.eps * torch.ones_like(x ).detach() * self.normalize(t) elif self.norm == 'L1': t = torch.randn(x.shape).to(self.device).detach() delta = L1_projection(x, t, self.eps) x_adv = x + t + delta if not x_init is None: x_adv = x_init.clone() if self.norm == 'L1' and self.verbose: print('[custom init] L1 perturbation {:.5f}'.format( (x_adv - x).abs().view(x.shape[0], -1).sum(1).max())) x_adv = x_adv.clamp(0., 1.) x_best = x_adv.clone() x_best_adv = x_adv.clone() loss_steps = torch.zeros([self.n_iter, x.shape[0]] ).to(self.device) loss_best_steps = torch.zeros([self.n_iter + 1, x.shape[0]] ).to(self.device) acc_steps = torch.zeros_like(loss_best_steps) if not self.is_tf_model: if self.loss == 'ce': criterion_indiv = nn.CrossEntropyLoss(reduction='none') elif self.loss == 'ce-targeted-cfts': criterion_indiv = lambda x, y: -1. 
* F.cross_entropy(x, y, reduction='none') elif self.loss == 'dlr': criterion_indiv = self.dlr_loss elif self.loss == 'dlr-targeted': criterion_indiv = self.dlr_loss_targeted elif self.loss == 'ce-targeted': criterion_indiv = self.ce_loss_targeted else: raise ValueError('unknowkn loss') else: if self.loss == 'ce': criterion_indiv = self.model.get_logits_loss_grad_xent elif self.loss == 'dlr': criterion_indiv = self.model.get_logits_loss_grad_dlr elif self.loss == 'dlr-targeted': criterion_indiv = self.model.get_logits_loss_grad_target else: raise ValueError('unknowkn loss') x_adv.requires_grad_() grad = torch.zeros_like(x) for _ in range(self.eot_iter): if not self.is_tf_model: with torch.enable_grad(): logits = self.model(x_adv) loss_indiv = criterion_indiv(logits, y) loss = loss_indiv.sum() grad += torch.autograd.grad(loss, [x_adv])[0].detach() else: if self.y_target is None: logits, loss_indiv, grad_curr = criterion_indiv(x_adv, y) else: logits, loss_indiv, grad_curr = criterion_indiv(x_adv, y, self.y_target) grad += grad_curr grad /= float(self.eot_iter) grad_best = grad.clone() if self.loss in ['dlr', 'dlr-targeted']: # check if there are zero gradients check_zero_gradients(grad, logger=self.logger) acc = logits.detach().max(1)[1] == y acc_steps[0] = acc + 0 loss_best = loss_indiv.detach().clone() alpha = 2. if self.norm in ['Linf', 'L2'] else 1. if self.norm in ['L1'] else 2e-2 step_size = alpha * self.eps * torch.ones([x.shape[0], *( [1] * self.ndims)]).to(self.device).detach() x_adv_old = x_adv.clone() counter = 0 k = self.n_iter_2 + 0 n_fts = math.prod(self.orig_dim) if self.norm == 'L1': k = max(int(.04 * self.n_iter), 1) if x_init is None: topk = .2 * torch.ones([x.shape[0]], device=self.device) sp_old = n_fts * torch.ones_like(topk) else: topk = L0_norm(x_adv - x) / n_fts / 1.5 sp_old = L0_norm(x_adv - x) #print(topk[0], sp_old[0]) adasp_redstep = 1.5 adasp_minstep = 10. #print(step_size[0].item()) counter3 = 0 loss_best_last_check = loss_best.clone() reduced_last_check = torch.ones_like(loss_best) n_reduced = 0 u = torch.arange(x.shape[0], device=self.device) for i in range(self.n_iter): ### gradient step with torch.no_grad(): x_adv = x_adv.detach() grad2 = x_adv - x_adv_old x_adv_old = x_adv.clone() a = 0.75 if i > 0 else 1.0 if self.norm == 'Linf': x_adv_1 = x_adv + step_size * torch.sign(grad) x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, x - self.eps), x + self.eps), 0.0, 1.0) x_adv_1 = torch.clamp(torch.min(torch.max( x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a), x - self.eps), x + self.eps), 0.0, 1.0) elif self.norm == 'L2': x_adv_1 = x_adv + step_size * self.normalize(grad) x_adv_1 = torch.clamp(x + self.normalize(x_adv_1 - x ) * torch.min(self.eps * torch.ones_like(x).detach(), L2_norm(x_adv_1 - x, keepdim=True)), 0.0, 1.0) x_adv_1 = x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a) x_adv_1 = torch.clamp(x + self.normalize(x_adv_1 - x ) * torch.min(self.eps * torch.ones_like(x).detach(), L2_norm(x_adv_1 - x, keepdim=True)), 0.0, 1.0) elif self.norm == 'L1': grad_topk = grad.abs().view(x.shape[0], -1).sort(-1)[0] topk_curr = torch.clamp((1. - topk) * n_fts, min=0, max=n_fts - 1).long() grad_topk = grad_topk[u, topk_curr].view(-1, *[1]*(len(x.shape) - 1)) sparsegrad = grad * (grad.abs() >= grad_topk).float() x_adv_1 = x_adv + step_size * sparsegrad.sign() / ( L1_norm(sparsegrad.sign(), keepdim=True) + 1e-10) delta_u = x_adv_1 - x delta_p = L1_projection(x, delta_u, self.eps) x_adv_1 = x + delta_u + delta_p x_adv = x_adv_1 + 0. 
### get gradient x_adv.requires_grad_() grad = torch.zeros_like(x) for _ in range(self.eot_iter): if not self.is_tf_model: with torch.enable_grad(): logits = self.model(x_adv) loss_indiv = criterion_indiv(logits, y) loss = loss_indiv.sum() grad += torch.autograd.grad(loss, [x_adv])[0].detach() else: if self.y_target is None: logits, loss_indiv, grad_curr = criterion_indiv(x_adv, y) else: logits, loss_indiv, grad_curr = criterion_indiv(x_adv, y, self.y_target) grad += grad_curr grad /= float(self.eot_iter) pred = logits.detach().max(1)[1] == y acc = torch.min(acc, pred) acc_steps[i + 1] = acc + 0 ind_pred = (pred == 0).nonzero().squeeze() x_best_adv[ind_pred] = x_adv[ind_pred] + 0. if self.verbose: str_stats = ' - step size: {:.5f} - topk: {:.2f}'.format( step_size.mean(), topk.mean() * n_fts) if self.norm in ['L1'] else '' print('[m] iteration: {} - best loss: {:.6f} - robust accuracy: {:.2%}{}'.format( i, loss_best.sum(), acc.float().mean(), str_stats)) #print('pert {}'.format((x - x_best_adv).abs().view(x.shape[0], -1).sum(-1).max())) ### check step size with torch.no_grad(): y1 = loss_indiv.detach().clone() loss_steps[i] = y1 + 0 ind = (y1 > loss_best).nonzero().squeeze() x_best[ind] = x_adv[ind].clone() grad_best[ind] = grad[ind].clone() loss_best[ind] = y1[ind] + 0 loss_best_steps[i + 1] = loss_best + 0 counter3 += 1 if counter3 == k: if self.norm in ['Linf', 'L2']: fl_oscillation = self.check_oscillation(loss_steps, i, k, loss_best, k3=self.thr_decr) fl_reduce_no_impr = (1. - reduced_last_check) * ( loss_best_last_check >= loss_best).float() fl_oscillation = torch.max(fl_oscillation, fl_reduce_no_impr) reduced_last_check = fl_oscillation.clone() loss_best_last_check = loss_best.clone() if fl_oscillation.sum() > 0: ind_fl_osc = (fl_oscillation > 0).nonzero().squeeze() step_size[ind_fl_osc] /= 2.0 n_reduced = fl_oscillation.sum() x_adv[ind_fl_osc] = x_best[ind_fl_osc].clone() grad[ind_fl_osc] = grad_best[ind_fl_osc].clone() k = max(k - self.size_decr, self.n_iter_min) elif self.norm == 'L1': sp_curr = L0_norm(x_best - x) fl_redtopk = (sp_curr / sp_old) < .95 topk = sp_curr / n_fts / 1.5 step_size[fl_redtopk] = alpha * self.eps step_size[~fl_redtopk] /= adasp_redstep step_size.clamp_(alpha * self.eps / adasp_minstep, alpha * self.eps) sp_old = sp_curr.clone() x_adv[fl_redtopk] = x_best[fl_redtopk].clone() grad[fl_redtopk] = grad_best[fl_redtopk].clone() counter3 = 0 #k = max(k - self.size_decr, self.n_iter_min) # return (x_best, acc, loss_best, x_best_adv) def perturb(self, x, y=None, best_loss=False, x_init=None): """ :param x: clean images :param y: clean labels, if None we use the predicted labels :param best_loss: if True the points attaining highest loss are returned, otherwise adversarial examples """ assert self.loss in ['ce', 'dlr'] #'ce-targeted-cfts' if not y is None and len(y.shape) == 0: x.unsqueeze_(0) y.unsqueeze_(0) self.init_hyperparam(x) x = x.detach().clone().float().to(self.device) if not self.is_tf_model: y_pred = self.model(x).max(1)[1] else: y_pred = self.model.predict(x).max(1)[1] if y is None: #y_pred = self.predict(x).max(1)[1] y = y_pred.detach().clone().long().to(self.device) else: y = y.detach().clone().long().to(self.device) adv = x.clone() if self.loss != 'ce-targeted': acc = y_pred == y else: acc = y_pred != y loss = -1e10 * torch.ones_like(acc).float() if self.verbose: print('-------------------------- ', 'running {}-attack with epsilon {:.5f}'.format( self.norm, self.eps), '--------------------------') print('initial accuracy: 
{:.2%}'.format(acc.float().mean())) if self.use_largereps: epss = [3. * self.eps_orig, 2. * self.eps_orig, 1. * self.eps_orig] iters = [.3 * self.n_iter_orig, .3 * self.n_iter_orig, .4 * self.n_iter_orig] iters = [math.ceil(c) for c in iters] iters[-1] = self.n_iter_orig - sum(iters[:-1]) # make sure to use the given iterations if self.verbose: print('using schedule [{}x{}]'.format('+'.join([str(c ) for c in epss]), '+'.join([str(c) for c in iters]))) startt = time.time() if not best_loss: torch.random.manual_seed(self.seed) torch.cuda.random.manual_seed(self.seed) for counter in range(self.n_restarts): ind_to_fool = acc.nonzero().squeeze() if len(ind_to_fool.shape) == 0: ind_to_fool = ind_to_fool.unsqueeze(0) if ind_to_fool.numel() != 0: x_to_fool = x[ind_to_fool].clone() y_to_fool = y[ind_to_fool].clone() if not self.use_largereps: res_curr = self.attack_single_run(x_to_fool, y_to_fool) else: res_curr = self.decr_eps_pgd(x_to_fool, y_to_fool, epss, iters) best_curr, acc_curr, loss_curr, adv_curr = res_curr ind_curr = (acc_curr == 0).nonzero().squeeze() acc[ind_to_fool[ind_curr]] = 0 adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone() if self.verbose: print('restart {} - robust accuracy: {:.2%}'.format( counter, acc.float().mean()), '- cum. time: {:.1f} s'.format( time.time() - startt)) return adv else: adv_best = x.detach().clone() loss_best = torch.ones([x.shape[0]]).to( self.device) * (-float('inf')) for counter in range(self.n_restarts): best_curr, _, loss_curr, _ = self.attack_single_run(x, y) ind_curr = (loss_curr > loss_best).nonzero().squeeze() adv_best[ind_curr] = best_curr[ind_curr] + 0. loss_best[ind_curr] = loss_curr[ind_curr] + 0. if self.verbose: print('restart {} - loss: {:.5f}'.format( counter, loss_best.sum())) return adv_best def decr_eps_pgd(self, x, y, epss, iters, use_rs=True): assert len(epss) == len(iters) assert self.norm in ['L1'] self.use_rs = False if not use_rs: x_init = None else: x_init = x + torch.randn_like(x) x_init += L1_projection(x, x_init - x, 1. * float(epss[0])) eps_target = float(epss[-1]) if self.verbose: print('total iter: {}'.format(sum(iters))) for eps, niter in zip(epss, iters): if self.verbose: print('using eps: {:.2f}'.format(eps)) self.n_iter = niter + 0 self.eps = eps + 0. # if not x_init is None: x_init += L1_projection(x, x_init - x, 1. * eps) x_init, acc, loss, x_adv = self.attack_single_run(x, y, x_init=x_init) return (x_init, acc, loss, x_adv) class APGDAttack_targeted(APGDAttack): def __init__( self, predict, n_iter=100, norm='Linf', n_restarts=1, eps=None, seed=0, eot_iter=1, rho=.75, topk=None, n_target_classes=9, verbose=False, device=None, use_largereps=False, is_tf_model=False, logger=None): """ AutoPGD on the targeted DLR loss """ super(APGDAttack_targeted, self).__init__(predict, n_iter=n_iter, norm=norm, n_restarts=n_restarts, eps=eps, seed=seed, loss='dlr-targeted', eot_iter=eot_iter, rho=rho, topk=topk, verbose=verbose, device=device, use_largereps=use_largereps, is_tf_model=is_tf_model, logger=logger) self.y_target = None self.n_target_classes = n_target_classes def dlr_loss_targeted(self, x, y): x_sorted, ind_sorted = x.sort(dim=1) u = torch.arange(x.shape[0]) return -(x[u, y] - x[u, self.y_target]) / (x_sorted[:, -1] - .5 * ( x_sorted[:, -3] + x_sorted[:, -4]) + 1e-12) def ce_loss_targeted(self, x, y): return -1. 
* F.cross_entropy(x, self.y_target, reduction='none') def perturb(self, x, y=None, x_init=None): """ :param x: clean images :param y: clean labels, if None we use the predicted labels """ assert self.loss in ['dlr-targeted'] #'ce-targeted' if not y is None and len(y.shape) == 0: x.unsqueeze_(0) y.unsqueeze_(0) self.init_hyperparam(x) x = x.detach().clone().float().to(self.device) if not self.is_tf_model: y_pred = self.model(x).max(1)[1] else: y_pred = self.model.predict(x).max(1)[1] if y is None: #y_pred = self._get_predicted_label(x) y = y_pred.detach().clone().long().to(self.device) else: y = y.detach().clone().long().to(self.device) adv = x.clone() acc = y_pred == y if self.verbose: print('-------------------------- ', 'running {}-attack with epsilon {:.5f}'.format( self.norm, self.eps), '--------------------------') print('initial accuracy: {:.2%}'.format(acc.float().mean())) startt = time.time() torch.random.manual_seed(self.seed) torch.cuda.random.manual_seed(self.seed) # if self.use_largereps: epss = [3. * self.eps_orig, 2. * self.eps_orig, 1. * self.eps_orig] iters = [.3 * self.n_iter_orig, .3 * self.n_iter_orig, .4 * self.n_iter_orig] iters = [math.ceil(c) for c in iters] iters[-1] = self.n_iter_orig - sum(iters[:-1]) if self.verbose: print('using schedule [{}x{}]'.format('+'.join([str(c ) for c in epss]), '+'.join([str(c) for c in iters]))) for target_class in range(2, self.n_target_classes + 2): for counter in range(self.n_restarts): ind_to_fool = acc.nonzero().squeeze() if len(ind_to_fool.shape) == 0: ind_to_fool = ind_to_fool.unsqueeze(0) if ind_to_fool.numel() != 0: x_to_fool = x[ind_to_fool].clone() y_to_fool = y[ind_to_fool].clone() if not self.is_tf_model: output = self.model(x_to_fool) else: output = self.model.predict(x_to_fool) self.y_target = output.sort(dim=1)[1][:, -target_class] if not self.use_largereps: res_curr = self.attack_single_run(x_to_fool, y_to_fool) else: res_curr = self.decr_eps_pgd(x_to_fool, y_to_fool, epss, iters) best_curr, acc_curr, loss_curr, adv_curr = res_curr ind_curr = (acc_curr == 0).nonzero().squeeze() acc[ind_to_fool[ind_curr]] = 0 adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone() if self.verbose: print('target class {}'.format(target_class), '- restart {} - robust accuracy: {:.2%}'.format( counter, acc.float().mean()), '- cum. time: {:.1f} s'.format( time.time() - startt)) return adv
26,198
37.079942
99
py
auto-attack
auto-attack-master/autoattack/utils_tf2.py
import tensorflow as tf
import numpy as np
import torch


class ModelAdapter():
    def __init__(self, model, num_classes=10):
        """
        Please note that model should be a tf.keras model without the
        'softmax' activation function
        """
        self.num_classes = num_classes
        self.tf_model = model
        self.data_format = self.__check_channel_ordering()

    def __tf_to_pt(self, tf_tensor):
        """ Private function
        Convert a TF tensor to PyTorch format

        Args:
            tf_tensor: (tf_tensor) TF tensor

        Returns:
            pt_tensor: (pt_tensor) PyTorch tensor
        """
        cpu_tensor = tf_tensor.numpy()
        pt_tensor = torch.from_numpy(cpu_tensor).cuda()
        return pt_tensor

    def set_data_format(self, data_format):
        """ Set data_format manually

        Args:
            data_format: A string, whose value should be either 'channels_last' or 'channels_first'
        """
        # Note: the original check used `or`, which was always true, so invalid
        # values never raised; a membership test gives the intended behavior.
        if data_format not in ('channels_last', 'channels_first'):
            raise ValueError("data_format should be either 'channels_last' or 'channels_first'")
        self.data_format = data_format

    def __check_channel_ordering(self):
        """ Private function
        Determine the TF model's channel ordering based on the model's information.
        The default ordering is 'channels_last' in TF, whereas 'channels_first' is used in PyTorch.

        Returns:
            data_format: A string, whose value should be either 'channels_last' or 'channels_first'
        """
        data_format = None

        # Get the ordering of the dimensions in data from the TF model
        for L in self.tf_model.layers:
            if isinstance(L, tf.keras.layers.Conv2D):
                print("[INFO] set data_format = '{:s}'".format(L.data_format))
                data_format = L.data_format
                break

        # Guess the ordering of the dimensions in data from the input dimensions, which should be a 4-D tensor
        if data_format is None:
            print("[WARNING] Can not find Conv2D layer")
            input_shape = self.tf_model.input_shape

            # Assume that the input is a *color image* whose dimensions should be [batch_size, img_w, img_h, 3]
            if input_shape[3] == 3:
                print("[INFO] Because detecting input_shape[3] == 3, set data_format = 'channels_last'")
                data_format = 'channels_last'
            # Assume that the input is a *gray image* whose dimensions should be [batch_size, img_w, img_h, 1]
            elif input_shape[3] == 1:
                print("[INFO] Because detecting input_shape[3] == 1, set data_format = 'channels_last'")
                data_format = 'channels_last'
            # Assume that the input is a *color image* whose dimensions should be [batch_size, 3, img_w, img_h]
            elif input_shape[1] == 3:
                print("[INFO] Because detecting input_shape[1] == 3, set data_format = 'channels_first'")
                data_format = 'channels_first'
            # Assume that the input is a *gray image* whose dimensions should be [batch_size, 1, img_w, img_h]
            elif input_shape[1] == 1:
                print("[INFO] Because detecting input_shape[1] == 1, set data_format = 'channels_first'")
                data_format = 'channels_first'
            else:
                print("[ERROR] Unknown case")

        return data_format

    # Common functions which may be called inside tf.function #

    def __get_logits(self, x_input):
        """ Private function
        Get the model's pre-softmax output in inference mode

        Args:
            x_input: (tf_tensor) Input data

        Returns:
            logits: (tf_tensor) Logits
        """
        return self.tf_model(x_input, training=False)

    def __get_xent(self, logits, y_input):
        """ Private function
        Get cross-entropy loss

        Args:
            logits: (tf_tensor) Logits.
            y_input: (tf_tensor) Label.

        Returns:
            xent: (tf_tensor) Cross entropy
        """
        return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_input)

    def __get_dlr(self, logit, y_input):
        """ Private function
        Get DLR loss

        Args:
            logit: (tf_tensor) Logits
            y_input: (tf_tensor) Input label

        Returns:
            loss: (tf_tensor) DLR loss
        """
        # sorted logits
        logit_sort = tf.sort(logit, axis=1)

        # one-hot y
        y_onehot = tf.one_hot(y_input, self.num_classes, dtype=tf.float32)
        logit_y = tf.reduce_sum(y_onehot * logit, axis=1)

        # z_i
        logit_pred = tf.reduce_max(logit, axis=1)
        cond = (logit_pred == logit_y)
        z_i = tf.where(cond, logit_sort[:, -2], logit_sort[:, -1])

        # loss
        z_y = logit_y
        z_p1 = logit_sort[:, -1]
        z_p3 = logit_sort[:, -3]
        loss = - (z_y - z_i) / (z_p1 - z_p3 + 1e-12)

        return loss

    def __get_dlr_target(self, logits, y_input, y_target):
        """ Private function
        Get the targeted version of the DLR loss

        Args:
            logits: (tf_tensor) Logits
            y_input: (tf_tensor) Input label
            y_target: (tf_tensor) Input targeted label

        Returns:
            loss: (tf_tensor) Targeted DLR loss
        """
        x = logits
        x_sort = tf.sort(x, axis=1)
        y_onehot = tf.one_hot(y_input, self.num_classes)
        y_target_onehot = tf.one_hot(y_target, self.num_classes)
        loss = -(tf.reduce_sum(x * y_onehot, axis=1) - tf.reduce_sum(x * y_target_onehot, axis=1)) / (x_sort[:, -1] - .5 * x_sort[:, -3] - .5 * x_sort[:, -4] + 1e-12)
        return loss

    # Functions called by the public API directly #

    @tf.function
    @tf.autograph.experimental.do_not_convert
    def __get_jacobian(self, x_input):
        """ Private function
        Get the Jacobian

        Args:
            x_input: (tf_tensor) Input data

        Returns:
            logits: (tf_tensor) Logits
            jacobian: (tf_tensor) Jacobian
        """
        with tf.GradientTape(watch_accessed_variables=False) as g:
            g.watch(x_input)
            logits = self.__get_logits(x_input)
        jacobian = g.batch_jacobian(logits, x_input)
        return logits, jacobian

    @tf.function
    @tf.autograph.experimental.do_not_convert
    def __get_grad_xent(self, x_input, y_input):
        """ Private function
        Get the gradient of the cross entropy

        Args:
            x_input: (tf_tensor) Input data
            y_input: (tf_tensor) Input label

        Returns:
            logits: (tf_tensor) Logits
            xent: (tf_tensor) Cross entropy
            grad_xent: (tf_tensor) Gradient of cross entropy
        """
        with tf.GradientTape(watch_accessed_variables=False) as g:
            g.watch(x_input)
            logits = self.__get_logits(x_input)
            xent = self.__get_xent(logits, y_input)
        grad_xent = g.gradient(xent, x_input)
        return logits, xent, grad_xent

    @tf.function
    @tf.autograph.experimental.do_not_convert
    def __get_grad_diff_logits_target(self, x, la, la_target):
        """ Private function
        Get the difference of logits and the corresponding gradient

        Args:
            x: (tf_tensor) Input data
            la: (tf_tensor) Input label
            la_target: (tf_tensor) Input targeted label

        Returns:
            difflogits: (tf_tensor) Difference of logits
            grad_diff: (tf_tensor) Gradient of the difference of logits
        """
        la_mask = tf.one_hot(la, self.num_classes)
        la_target_mask = tf.one_hot(la_target, self.num_classes)
        with tf.GradientTape(watch_accessed_variables=False) as g:
            g.watch(x)
            logits = self.__get_logits(x)
            difflogits = tf.reduce_sum((la_target_mask - la_mask) * logits, axis=1)
        grad_diff = g.gradient(difflogits, x)
        return difflogits, grad_diff

    @tf.function
    @tf.autograph.experimental.do_not_convert
    def __get_grad_dlr(self, x_input, y_input):
        """ Private function
        Get the gradient of the DLR loss

        Args:
            x_input: (tf_tensor) Input data
            y_input: (tf_tensor) Input label

        Returns:
            logits: (tf_tensor) Logits
            val_dlr: (tf_tensor) DLR loss
            grad_dlr: (tf_tensor) Gradient of DLR loss
        """
        with tf.GradientTape(watch_accessed_variables=False) as g:
            g.watch(x_input)
            logits = self.__get_logits(x_input)
            val_dlr = self.__get_dlr(logits, y_input)
        grad_dlr = g.gradient(val_dlr, x_input)
        return logits, val_dlr, grad_dlr

    @tf.function
    @tf.autograph.experimental.do_not_convert
    def __get_grad_dlr_target(self, x_input, y_input, y_target):
        """ Private function
        Get the gradient of the targeted DLR loss

        Args:
            x_input: (tf_tensor) Input data
            y_input: (tf_tensor) Input label
            y_target: (tf_tensor) Input targeted label

        Returns:
            logits: (tf_tensor) Logits
            dlr_target: (tf_tensor) Targeted DLR loss
            grad_target: (tf_tensor) Gradient of targeted DLR loss
        """
        with tf.GradientTape(watch_accessed_variables=False) as g:
            g.watch(x_input)
            logits = self.__get_logits(x_input)
            dlr_target = self.__get_dlr_target(logits, y_input, y_target)
        grad_target = g.gradient(dlr_target, x_input)
        return logits, dlr_target, grad_target

    # Public API #

    def predict(self, x):
        """ Get the model's pre-softmax output in inference mode

        Args:
            x: (pytorch_tensor) Input data

        Returns:
            y: (pytorch_tensor) Pre-softmax output
        """
        # Convert pt_tensor to tf format
        x2 = tf.convert_to_tensor(x.cpu().numpy(), dtype=tf.float32)
        if self.data_format == 'channels_last':
            x2 = tf.transpose(x2, perm=[0, 2, 3, 1])

        # Get result
        y = self.__get_logits(x2)

        # Convert result to pt format
        y = self.__tf_to_pt(y)

        return y

    def grad_logits(self, x):
        """ Get logits and the gradient of the logits

        Args:
            x: (pytorch_tensor) Input data

        Returns:
            logits: (pytorch_tensor) Logits
            g2: (pytorch_tensor) Jacobian
        """
        # Convert pt_tensor to tf format
        x2 = tf.convert_to_tensor(x.cpu().numpy(), dtype=tf.float32)
        if self.data_format == 'channels_last':
            x2 = tf.transpose(x2, perm=[0, 2, 3, 1])

        # Get result
        logits, g2 = self.__get_jacobian(x2)

        # Convert result to pt format
        if self.data_format == 'channels_last':
            g2 = tf.transpose(g2, perm=[0, 1, 4, 2, 3])
        logits = self.__tf_to_pt(logits)
        g2 = self.__tf_to_pt(g2)

        return logits, g2

    def get_logits_loss_grad_xent(self, x, y):
        """ Get the gradient of the cross entropy

        Args:
            x: (pytorch_tensor) Input data
            y: (pytorch_tensor) Input label

        Returns:
            logits_val: (pytorch_tensor) Logits
            loss_indiv_val: (pytorch_tensor) Cross entropy
            grad_val: (pytorch_tensor) Gradient of cross entropy
        """
        # Convert pt_tensor to tf format
        x2 = tf.convert_to_tensor(x.cpu().numpy(), dtype=tf.float32)
        y2 = tf.convert_to_tensor(y.cpu().numpy(), dtype=tf.int32)
        if self.data_format == 'channels_last':
            x2 = tf.transpose(x2, perm=[0, 2, 3, 1])

        # Get result
        logits_val, loss_indiv_val, grad_val = self.__get_grad_xent(x2, y2)

        # Convert result to pt format
        if self.data_format == 'channels_last':
            grad_val = tf.transpose(grad_val, perm=[0, 3, 1, 2])
        logits_val = self.__tf_to_pt(logits_val)
        loss_indiv_val = self.__tf_to_pt(loss_indiv_val)
        grad_val = self.__tf_to_pt(grad_val)

        return logits_val, loss_indiv_val, grad_val

    def set_target_class(self, y, y_target):
        pass

    def get_grad_diff_logits_target(self, x, y, y_target):
        """ Get the difference of logits and the corresponding gradient

        Args:
            x: (pytorch_tensor) Input data
            y: (pytorch_tensor) Input label
            y_target: (pytorch_tensor) Input targeted label

        Returns:
            difflogits: (pytorch_tensor) Difference of logits
            g2: (pytorch_tensor) Gradient of the difference of logits
        """
        # Convert pt_tensor to tf format
        la = tf.convert_to_tensor(y.cpu().numpy(), dtype=tf.int32)
        la_target = tf.convert_to_tensor(y_target.cpu().numpy(), dtype=tf.int32)
        x2 = tf.convert_to_tensor(x.cpu().numpy(), dtype=tf.float32)
        if self.data_format == 'channels_last':
            x2 = tf.transpose(x2, perm=[0, 2, 3, 1])

        # Get result
        difflogits, g2 = self.__get_grad_diff_logits_target(x2, la, la_target)

        # Convert result to pt format
        if self.data_format == 'channels_last':
            g2 = tf.transpose(g2, perm=[0, 3, 1, 2])
        difflogits = self.__tf_to_pt(difflogits)
        g2 = self.__tf_to_pt(g2)

        return difflogits, g2

    def get_logits_loss_grad_dlr(self, x, y):
        """ Get the gradient of the DLR loss

        Args:
            x: (pytorch_tensor) Input data
            y: (pytorch_tensor) Input label

        Returns:
            logits_val: (pytorch_tensor) Logits
            loss_indiv_val: (pytorch_tensor) DLR loss
            grad_val: (pytorch_tensor) Gradient of DLR loss
        """
        # Convert pt_tensor to tf format
        x2 = tf.convert_to_tensor(x.cpu().numpy(), dtype=tf.float32)
        y2 = tf.convert_to_tensor(y.cpu().numpy(), dtype=tf.int32)
        if self.data_format == 'channels_last':
            x2 = tf.transpose(x2, perm=[0, 2, 3, 1])

        # Get result
        logits_val, loss_indiv_val, grad_val = self.__get_grad_dlr(x2, y2)

        # Convert result to pt format
        if self.data_format == 'channels_last':
            grad_val = tf.transpose(grad_val, perm=[0, 3, 1, 2])
        logits_val = self.__tf_to_pt(logits_val)
        loss_indiv_val = self.__tf_to_pt(loss_indiv_val)
        grad_val = self.__tf_to_pt(grad_val)

        return logits_val, loss_indiv_val, grad_val

    def get_logits_loss_grad_target(self, x, y, y_target):
        """ Get the gradient of the targeted DLR loss

        Args:
            x: (pytorch_tensor) Input data
            y: (pytorch_tensor) Input label
            y_target: (pytorch_tensor) Input targeted label

        Returns:
            logits_val: (pytorch_tensor) Logits
            loss_indiv_val: (pytorch_tensor) Targeted DLR loss
            grad_val: (pytorch_tensor) Gradient of targeted DLR loss
        """
        # Convert pt_tensor to tf format
        x2 = tf.convert_to_tensor(x.cpu().numpy(), dtype=tf.float32)
        y2 = tf.convert_to_tensor(y.cpu().numpy(), dtype=tf.int32)
        y_targ = tf.convert_to_tensor(y_target.cpu().numpy(), dtype=tf.int32)
        if self.data_format == 'channels_last':
            x2 = tf.transpose(x2, perm=[0, 2, 3, 1])

        # Get result
        logits_val, loss_indiv_val, grad_val = self.__get_grad_dlr_target(x2, y2, y_targ)

        # Convert result to pt format
        if self.data_format == 'channels_last':
            grad_val = tf.transpose(grad_val, perm=[0, 3, 1, 2])
        logits_val = self.__tf_to_pt(logits_val)
        loss_indiv_val = self.__tf_to_pt(loss_indiv_val)
        grad_val = self.__tf_to_pt(grad_val)

        return logits_val, loss_indiv_val, grad_val
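# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wiring a toy Keras
# model into ModelAdapter so AutoAttack can query it with PyTorch tensors.
# The layer sizes are illustrative assumptions; a CUDA device is required
# because __tf_to_pt() moves results to GPU via .cuda().
if __name__ == '__main__':
    toy_model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(4, 3, padding='same', input_shape=(28, 28, 1)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(10),  # pre-softmax logits, as the adapter expects
    ])
    adapter = ModelAdapter(toy_model, num_classes=10)
    x = torch.rand(2, 1, 28, 28).cuda()   # NCHW, the format AutoAttack supplies
    y = torch.tensor([3, 7]).cuda()
    logits = adapter.predict(x)
    _, xent, grad = adapter.get_logits_loss_grad_xent(x, y)
    print(logits.shape, xent.shape, grad.shape)  # (2, 10), (2,), (2, 1, 28, 28)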
15,775
30.552
166
py
auto-attack
auto-attack-master/autoattack/__init__.py
from .autoattack import AutoAttack
35
17
34
py
auto-attack
auto-attack-master/autoattack/fab_pt.py
# Copyright (c) 2019-present, Francesco Croce # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import time import torch from autoattack.other_utils import zero_gradients from autoattack.fab_base import FABAttack class FABAttack_PT(FABAttack): """ Fast Adaptive Boundary Attack (Linf, L2, L1) https://arxiv.org/abs/1907.02044 :param predict: forward pass function :param norm: Lp-norm to minimize ('Linf', 'L2', 'L1' supported) :param n_restarts: number of random restarts :param n_iter: number of iterations :param eps: epsilon for the random restarts :param alpha_max: alpha_max :param eta: overshooting :param beta: backward step """ def __init__( self, predict, norm='Linf', n_restarts=1, n_iter=100, eps=None, alpha_max=0.1, eta=1.05, beta=0.9, loss_fn=None, verbose=False, seed=0, targeted=False, device=None, n_target_classes=9): """ FAB-attack implementation in pytorch """ self.predict = predict super().__init__(norm, n_restarts, n_iter, eps, alpha_max, eta, beta, loss_fn, verbose, seed, targeted, device, n_target_classes) def _predict_fn(self, x): return self.predict(x) def _get_predicted_label(self, x): with torch.no_grad(): outputs = self._predict_fn(x) _, y = torch.max(outputs, dim=1) return y def get_diff_logits_grads_batch(self, imgs, la): im = imgs.clone().requires_grad_() with torch.enable_grad(): y = self.predict(im) g2 = torch.zeros([y.shape[-1], *imgs.size()]).to(self.device) grad_mask = torch.zeros_like(y) for counter in range(y.shape[-1]): zero_gradients(im) grad_mask[:, counter] = 1.0 y.backward(grad_mask, retain_graph=True) grad_mask[:, counter] = 0.0 g2[counter] = im.grad.data g2 = torch.transpose(g2, 0, 1).detach() #y2 = self.predict(imgs).detach() y2 = y.detach() df = y2 - y2[torch.arange(imgs.shape[0]), la].unsqueeze(1) dg = g2 - g2[torch.arange(imgs.shape[0]), la].unsqueeze(1) df[torch.arange(imgs.shape[0]), la] = 1e10 return df, dg def get_diff_logits_grads_batch_targeted(self, imgs, la, la_target): u = torch.arange(imgs.shape[0]) im = imgs.clone().requires_grad_() with torch.enable_grad(): y = self.predict(im) diffy = -(y[u, la] - y[u, la_target]) sumdiffy = diffy.sum() zero_gradients(im) sumdiffy.backward() graddiffy = im.grad.data df = diffy.detach().unsqueeze(1) dg = graddiffy.unsqueeze(1) return df, dg
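# ---------------------------------------------------------------------------
# Hedged, standalone sketch (not part of the original file) of the one-hot
# mask trick used in get_diff_logits_grads_batch above: backpropagating a
# one-hot mask through the logits once per class recovers the Jacobian one
# class-slice at a time. The tiny linear model is an illustrative assumption.
if __name__ == '__main__':
    torch.manual_seed(0)
    net = torch.nn.Linear(5, 3)
    x = torch.rand(2, 5, requires_grad=True)
    y = net(x)
    jac = torch.zeros(3, 2, 5)
    grad_mask = torch.zeros_like(y)
    for c in range(y.shape[-1]):
        if x.grad is not None:
            x.grad.zero_()                 # same role as zero_gradients(im)
        grad_mask[:, c] = 1.0
        y.backward(grad_mask, retain_graph=True)
        grad_mask[:, c] = 0.0
        jac[c] = x.grad
    # For a linear layer, every sample's Jacobian slice equals a weight row.
    print(torch.allclose(jac[0], net.weight[0].detach().expand(2, 5)))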
3,481
29.278261
76
py
auto-attack
auto-attack-master/autoattack/fab_projections.py
import math import torch from torch.nn import functional as F def projection_linf(points_to_project, w_hyperplane, b_hyperplane): device = points_to_project.device t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane.clone() sign = 2 * ((w * t).sum(1) - b >= 0) - 1 w.mul_(sign.unsqueeze(1)) b.mul_(sign) a = (w < 0).float() d = (a - t) * (w != 0).float() p = a - t * (2 * a - 1) indp = torch.argsort(p, dim=1) b = b - (w * t).sum(1) b0 = (w * d).sum(1) indp2 = indp.flip((1,)) ws = w.gather(1, indp2) bs2 = - ws * d.gather(1, indp2) s = torch.cumsum(ws.abs(), dim=1) sb = torch.cumsum(bs2, dim=1) + b0.unsqueeze(1) b2 = sb[:, -1] - s[:, -1] * p.gather(1, indp[:, 0:1]).squeeze(1) c_l = b - b2 > 0 c2 = (b - b0 > 0) & (~c_l) lb = torch.zeros(c2.sum(), device=device) ub = torch.full_like(lb, w.shape[1] - 1) nitermax = math.ceil(math.log2(w.shape[1])) indp_, sb_, s_, p_, b_ = indp[c2], sb[c2], s[c2], p[c2], b[c2] for counter in range(nitermax): counter4 = torch.floor((lb + ub) / 2) counter2 = counter4.long().unsqueeze(1) indcurr = indp_.gather(1, indp_.size(1) - 1 - counter2) b2 = (sb_.gather(1, counter2) - s_.gather(1, counter2) * p_.gather(1, indcurr)).squeeze(1) c = b_ - b2 > 0 lb = torch.where(c, counter4, lb) ub = torch.where(c, ub, counter4) lb = lb.long() if c_l.any(): lmbd_opt = torch.clamp_min((b[c_l] - sb[c_l, -1]) / (-s[c_l, -1]), min=0).unsqueeze(-1) d[c_l] = (2 * a[c_l] - 1) * lmbd_opt lmbd_opt = torch.clamp_min((b[c2] - sb[c2, lb]) / (-s[c2, lb]), min=0).unsqueeze(-1) d[c2] = torch.min(lmbd_opt, d[c2]) * a[c2] + torch.max(-lmbd_opt, d[c2]) * (1 - a[c2]) return d * (w != 0).float() def projection_l2(points_to_project, w_hyperplane, b_hyperplane): device = points_to_project.device t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane c = (w * t).sum(1) - b ind2 = 2 * (c >= 0) - 1 w.mul_(ind2.unsqueeze(1)) c.mul_(ind2) r = torch.max(t / w, (t - 1) / w).clamp(min=-1e12, max=1e12) r.masked_fill_(w.abs() < 1e-8, 1e12) r[r == -1e12] *= -1 rs, indr = torch.sort(r, dim=1) rs2 = F.pad(rs[:, 1:], (0, 1)) rs.masked_fill_(rs == 1e12, 0) rs2.masked_fill_(rs2 == 1e12, 0) w3s = (w ** 2).gather(1, indr) w5 = w3s.sum(dim=1, keepdim=True) ws = w5 - torch.cumsum(w3s, dim=1) d = -(r * w) d.mul_((w.abs() > 1e-8).float()) s = torch.cat((-w5 * rs[:, 0:1], torch.cumsum((-rs2 + rs) * ws, dim=1) - w5 * rs[:, 0:1]), 1) c4 = s[:, 0] + c < 0 c3 = (d * w).sum(dim=1) + c > 0 c2 = ~(c4 | c3) lb = torch.zeros(c2.sum(), device=device) ub = torch.full_like(lb, w.shape[1] - 1) nitermax = math.ceil(math.log2(w.shape[1])) s_, c_ = s[c2], c[c2] for counter in range(nitermax): counter4 = torch.floor((lb + ub) / 2) counter2 = counter4.long().unsqueeze(1) c3 = s_.gather(1, counter2).squeeze(1) + c_ > 0 lb = torch.where(c3, counter4, lb) ub = torch.where(c3, ub, counter4) lb = lb.long() if c4.any(): alpha = c[c4] / w5[c4].squeeze(-1) d[c4] = -alpha.unsqueeze(-1) * w[c4] if c2.any(): alpha = (s[c2, lb] + c[c2]) / ws[c2, lb] + rs[c2, lb] alpha[ws[c2, lb] == 0] = 0 c5 = (alpha.unsqueeze(-1) > r[c2]).float() d[c2] = d[c2] * c5 - alpha.unsqueeze(-1) * w[c2] * (1 - c5) return d * (w.abs() > 1e-8).float() def projection_l1(points_to_project, w_hyperplane, b_hyperplane): device = points_to_project.device t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane c = (w * t).sum(1) - b ind2 = 2 * (c >= 0) - 1 w.mul_(ind2.unsqueeze(1)) c.mul_(ind2) r = (1 / w).abs().clamp_max(1e12) indr = torch.argsort(r, dim=1) indr_rev = torch.argsort(indr) c6 = (w < 0).float() d = (-t + c6) * (w != 0).float() ds = torch.min(-w * 
t, w * (1 - t)).gather(1, indr) ds2 = torch.cat((c.unsqueeze(-1), ds), 1) s = torch.cumsum(ds2, dim=1) c2 = s[:, -1] < 0 lb = torch.zeros(c2.sum(), device=device) ub = torch.full_like(lb, s.shape[1]) nitermax = math.ceil(math.log2(w.shape[1])) s_ = s[c2] for counter in range(nitermax): counter4 = torch.floor((lb + ub) / 2) counter2 = counter4.long().unsqueeze(1) c3 = s_.gather(1, counter2).squeeze(1) > 0 lb = torch.where(c3, counter4, lb) ub = torch.where(c3, ub, counter4) lb2 = lb.long() if c2.any(): indr = indr[c2].gather(1, lb2.unsqueeze(1)).squeeze(1) u = torch.arange(0, w.shape[0], device=device).unsqueeze(1) u2 = torch.arange(0, w.shape[1], device=device, dtype=torch.float).unsqueeze(0) alpha = -s[c2, lb2] / w[c2, indr] c5 = u2 < lb.unsqueeze(-1) u3 = c5[u[:c5.shape[0]], indr_rev[c2]] d[c2] = d[c2] * u3.float() d[c2, indr] = alpha return d * (w.abs() > 1e-8).float()
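# ---------------------------------------------------------------------------
# Hedged numerical sanity probe (not part of the original file): a point
# moved by projection_linf should stay inside the box [0, 1]^d, and when the
# hyperplane w.x = b is reachable inside the box the residual |w.x - b| is
# driven towards 0. Shapes below are illustrative assumptions.
if __name__ == '__main__':
    torch.manual_seed(0)
    t = torch.rand(8, 16)        # points already inside the unit box
    w = torch.randn(8, 16)
    b = torch.randn(8)
    d = projection_linf(t, w, b)
    x = t + d
    inside_box = bool(((x >= -1e-6) & (x <= 1 + 1e-6)).all())
    residual = ((w * x).sum(1) - b).abs().max().item()
    print('inside box:', inside_box, '| max |w.x - b|:', residual)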
5,076
30.147239
98
py
auto-attack
auto-attack-master/autoattack/fab_tf.py
# Copyright (c) 2019-present, Francesco Croce # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import torch from autoattack.fab_base import FABAttack class FABAttack_TF(FABAttack): """ Fast Adaptive Boundary Attack (Linf, L2, L1) https://arxiv.org/abs/1907.02044 :param model: TF_model :param norm: Lp-norm to minimize ('Linf', 'L2', 'L1' supported) :param n_restarts: number of random restarts :param n_iter: number of iterations :param eps: epsilon for the random restarts :param alpha_max: alpha_max :param eta: overshooting :param beta: backward step """ def __init__( self, model, norm='Linf', n_restarts=1, n_iter=100, eps=None, alpha_max=0.1, eta=1.05, beta=0.9, loss_fn=None, verbose=False, seed=0, targeted=False, device=None, n_target_classes=9): """ FAB-attack implementation in TF2 """ self.model = model super().__init__(norm, n_restarts, n_iter, eps, alpha_max, eta, beta, loss_fn, verbose, seed, targeted, device, n_target_classes) def _predict_fn(self, x): return self.model.predict(x) def _get_predicted_label(self, x): with torch.no_grad(): outputs = self._predict_fn(x) _, y = torch.max(outputs, dim=1) return y def get_diff_logits_grads_batch(self, imgs, la): y2, g2 = self.model.grad_logits(imgs) df = y2 - y2[torch.arange(imgs.shape[0]), la].unsqueeze(1) dg = g2 - g2[torch.arange(imgs.shape[0]), la].unsqueeze(1) df[torch.arange(imgs.shape[0]), la] = 1e10 return df, dg def get_diff_logits_grads_batch_targeted(self, imgs, la, la_target): df, dg = self.model.get_grad_diff_logits_target(imgs, la, la_target) df.unsqueeze_(1) dg.unsqueeze_(1) return df, dg
2,599
28.545455
76
py
auto-attack
auto-attack-master/autoattack/examples/eval_tf1.py
#%%
from argparse import ArgumentParser

import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torch.utils.data as data
import torchvision.transforms as transforms
import sys
#sys.path.insert(0,'..')
from autoattack import AutoAttack, utils_tf

#%%
class mnist_loader:
    def __init__(self):
        self.n_class = 10
        self.dim_x = 28
        self.dim_y = 28
        self.dim_z = 1
        self.img_min = 0.0
        self.img_max = 1.0
        self.epsilon = 0.3

    def download(self):
        (trainX, trainY), (testX, testY) = tf.keras.datasets.mnist.load_data()
        trainX = trainX.astype(np.float32)
        testX = testX.astype(np.float32)

        # one-hot
        trainY = tf.keras.utils.to_categorical(trainY, self.n_class)
        testY = tf.keras.utils.to_categorical(testY, self.n_class)

        # get validation sets
        training_size = 55000
        validX = trainX[training_size:, :]
        validY = trainY[training_size:, :]
        trainX = trainX[:training_size, :]
        trainY = trainY[:training_size, :]

        # expand dimension
        trainX = np.expand_dims(trainX, axis=3)
        validX = np.expand_dims(validX, axis=3)
        testX = np.expand_dims(testX, axis=3)

        return trainX, trainY, validX, validY, testX, testY

    def get_raw_data(self):
        return self.download()

    def get_normalized_data(self):
        trainX, trainY, validX, validY, testX, testY = self.get_raw_data()
        trainX = trainX / 255.0 * (self.img_max - self.img_min) + self.img_min
        validX = validX / 255.0 * (self.img_max - self.img_min) + self.img_min
        testX = testX / 255.0 * (self.img_max - self.img_min) + self.img_min
        return trainX, trainY, validX, validY, testX, testY

#%%
def mnist_model():
    # declare layers
    model_layers = [
        tf.keras.layers.Input(shape=(28, 28, 1), name="model/input"),
        tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu", kernel_initializer='he_normal', name="clf/c1"),
        tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu", kernel_initializer='he_normal', name="clf/c2"),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2), name="clf/p1"),
        tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu", kernel_initializer='he_normal', name="clf/c3"),
        tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu", kernel_initializer='he_normal', name="clf/c4"),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2), name="clf/p2"),
        tf.keras.layers.Flatten(name="clf/f1"),
        tf.keras.layers.Dense(256, activation="relu", kernel_initializer='he_normal', name="clf/d1"),
        tf.keras.layers.Dense(10, activation=None, kernel_initializer='he_normal', name="clf/d2"),
        tf.keras.layers.Activation('softmax', name="clf_output")
    ]

    # clf_model
    clf_model = tf.keras.Sequential()
    for ii in model_layers:
        clf_model.add(ii)
    clf_model.compile(loss='categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])
    clf_model.summary()

    return clf_model

#%%
def arg_parser(parser):
    parser.add_argument("--path", dest="path", type=str, default='./',
                        help="path of tf.keras model's weights")
    args, unknown = parser.parse_known_args()
    if unknown:
        msg = " ".join(unknown)
        print('[Warning] Unrecognized arguments: {:s}'.format(msg))
    return args

#%%
if __name__ == '__main__':
    # get arguments
    parser = ArgumentParser()
    args = arg_parser(parser)

    # MODEL PATH
    MODEL_PATH = args.path

    # init tf/keras
    tf.compat.v1.keras.backend.clear_session()
    gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
    tf.compat.v1.keras.backend.set_session(sess)
    tf.compat.v1.keras.backend.set_learning_phase(0)

    # load data
    batch_size = 1000
    epsilon = mnist_loader().epsilon
    _, _, _, _, testX, testY = mnist_loader().get_normalized_data()

    # convert to pytorch format
    testY = np.argmax(testY, axis=1)
    torch_testX = torch.from_numpy(np.transpose(testX, (0, 3, 1, 2))).float().cuda()
    torch_testY = torch.from_numpy(testY).float()

    # load model from saved weights
    print('[INFO] MODEL_PATH: {:s}'.format(MODEL_PATH))
    tf_model = mnist_model()
    tf_model.load_weights(MODEL_PATH)

    # remove the 'softmax' layer and put the rest into the adapter
    atk_model = tf.keras.models.Model(inputs=tf_model.input, outputs=tf_model.get_layer(index=-2).output)
    atk_model.summary()
    # use the tf.compat.v1 placeholder so the script also runs under TF2's
    # v1 compatibility mode (the bare tf.placeholder only exists in TF1)
    y_input = tf.compat.v1.placeholder(tf.int64, shape=[None])
    x_input = atk_model.input
    logits = atk_model.output
    model_adapted = utils_tf.ModelAdapter(logits, x_input, y_input, sess)

    # run attack
    adversary = AutoAttack(model_adapted, norm='Linf', eps=epsilon, version='standard', is_tf_model=True)
    x_adv, y_adv = adversary.run_standard_evaluation(torch_testX, torch_testY, bs=batch_size, return_labels=True)
    np_x_adv = np.moveaxis(x_adv.cpu().numpy(), 1, 3)
    np.save("./output/mnist_adv.npy", np_x_adv)
5,460
35.165563
141
py
auto-attack
auto-attack-master/autoattack/examples/resnet.py
import torch import torch.nn as nn import torch.nn.functional as F class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) out = F.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, in_planes, planes, stride=1): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(self.expansion * planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = F.relu(self.bn2(self.conv2(out))) out = self.bn3(self.conv3(out)) out += self.shortcut(x) out = F.relu(out) return out class ResNet(nn.Module): def __init__(self, block, num_blocks, num_classes=10): super(ResNet, self).__init__() self.in_planes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) self.linear = nn.Linear(512 * block.expansion, num_classes) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1] * (num_blocks - 1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = self.layer4(out) out = F.avg_pool2d(out, 4) out = out.view(out.size(0), -1) out = self.linear(out) return out def ResNet18(): return ResNet(BasicBlock, [2, 2, 2, 2]) def ResNet34(): return ResNet(BasicBlock, [3, 4, 6, 3]) def ResNet50(): return ResNet(Bottleneck, [3, 4, 6, 3]) def ResNet101(): return ResNet(Bottleneck, [3, 4, 23, 3]) def ResNet152(): return ResNet(Bottleneck, [3, 8, 36, 3]) def test(): net = ResNet18() y = net(torch.randn(1, 3, 32, 32)) print(y.size())
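# ---------------------------------------------------------------------------
# Hedged smoke test (not part of the original file), complementing test()
# above: forward-pass output shapes and rough parameter counts for two of
# the factory functions defined in this file.
if __name__ == '__main__':
    for name, builder in [('ResNet18', ResNet18), ('ResNet50', ResNet50)]:
        net = builder()
        n_params = sum(p.numel() for p in net.parameters())
        out = net(torch.randn(2, 3, 32, 32))
        print('{}: output {}, {:.2f}M parameters'.format(
            name, tuple(out.shape), n_params / 1e6))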
3,828
32.008621
104
py
auto-attack
auto-attack-master/autoattack/examples/eval_tf2.py
#%%
from argparse import ArgumentParser

import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torch.utils.data as data
import torchvision.transforms as transforms
import sys
sys.path.insert(0, '..')
from autoattack import AutoAttack, utils_tf2

#%%
class mnist_loader:
    def __init__(self):
        self.n_class = 10
        self.dim_x = 28
        self.dim_y = 28
        self.dim_z = 1
        self.img_min = 0.0
        self.img_max = 1.0
        self.epsilon = 0.3

    def download(self):
        (trainX, trainY), (testX, testY) = tf.keras.datasets.mnist.load_data()
        trainX = trainX.astype(np.float32)
        testX = testX.astype(np.float32)

        # one-hot
        trainY = tf.keras.utils.to_categorical(trainY, self.n_class)
        testY = tf.keras.utils.to_categorical(testY, self.n_class)

        # get validation sets
        training_size = 55000
        validX = trainX[training_size:, :]
        validY = trainY[training_size:, :]
        trainX = trainX[:training_size, :]
        trainY = trainY[:training_size, :]

        # expand dimension
        trainX = np.expand_dims(trainX, axis=3)
        validX = np.expand_dims(validX, axis=3)
        testX = np.expand_dims(testX, axis=3)

        return trainX, trainY, validX, validY, testX, testY

    def get_raw_data(self):
        return self.download()

    def get_normalized_data(self):
        trainX, trainY, validX, validY, testX, testY = self.get_raw_data()
        trainX = trainX / 255.0 * (self.img_max - self.img_min) + self.img_min
        validX = validX / 255.0 * (self.img_max - self.img_min) + self.img_min
        testX = testX / 255.0 * (self.img_max - self.img_min) + self.img_min
        return trainX, trainY, validX, validY, testX, testY

#%%
def mnist_model():
    # declare layers
    model_layers = [
        tf.keras.layers.Input(shape=(28, 28, 1), name="model/input"),
        tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu", kernel_initializer='he_normal', name="clf/c1"),
        tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu", kernel_initializer='he_normal', name="clf/c2"),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2), name="clf/p1"),
        tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu", kernel_initializer='he_normal', name="clf/c3"),
        tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu", kernel_initializer='he_normal', name="clf/c4"),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2), name="clf/p2"),
        tf.keras.layers.Flatten(name="clf/f1"),
        tf.keras.layers.Dense(256, activation="relu", kernel_initializer='he_normal', name="clf/d1"),
        tf.keras.layers.Dense(10, activation=None, kernel_initializer='he_normal', name="clf/d2"),
        tf.keras.layers.Activation('softmax', name="clf_output")
    ]

    # clf_model
    clf_model = tf.keras.Sequential()
    for ii in model_layers:
        clf_model.add(ii)
    clf_model.compile(loss='categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])
    clf_model.summary()

    return clf_model

#%%
def arg_parser(parser):
    parser.add_argument("--path", dest="path", type=str,
                        default='./autoattack/examples/tf_model.weight.h5',
                        help="path of tf.keras model's weights")
    args, unknown = parser.parse_known_args()
    if unknown:
        msg = " ".join(unknown)
        print('[Warning] Unrecognized arguments: {:s}'.format(msg))
    return args

#%%
if __name__ == '__main__':
    # get arguments
    parser = ArgumentParser()
    args = arg_parser(parser)

    # MODEL PATH
    MODEL_PATH = args.path

    # init tf/keras
    gpus = tf.config.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)

    # load data
    batch_size = 1000
    epsilon = mnist_loader().epsilon
    _, _, _, _, testX, testY = mnist_loader().get_normalized_data()

    # convert to pytorch format
    testY = np.argmax(testY, axis=1)
    torch_testX = torch.from_numpy(np.transpose(testX, (0, 3, 1, 2))).float().cuda()
    torch_testY = torch.from_numpy(testY).float()

    # load model from saved weights
    print('[INFO] MODEL_PATH: {:s}'.format(MODEL_PATH))
    tf_model = mnist_model()
    tf_model.load_weights(MODEL_PATH)

    # remove the 'softmax' layer and put the rest into the adapter
    atk_model = tf.keras.models.Model(inputs=tf_model.input, outputs=tf_model.get_layer(index=-2).output)
    atk_model.summary()
    model_adapted = utils_tf2.ModelAdapter(atk_model)

    # run attack
    adversary = AutoAttack(model_adapted, norm='Linf', eps=epsilon, version='standard', is_tf_model=True)
    x_adv, y_adv = adversary.run_standard_evaluation(torch_testX, torch_testY, bs=batch_size, return_labels=True)
    np_x_adv = np.moveaxis(x_adv.cpu().numpy(), 1, 3)
    np.save("./output/mnist_adv.npy", np_x_adv)
5,193
34.575342
151
py
auto-attack
auto-attack-master/autoattack/examples/eval.py
import os
import argparse
from pathlib import Path
import warnings

import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torch.utils.data as data
import torchvision.transforms as transforms

import sys
sys.path.insert(0, '..')

from resnet import *

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='./data')
    parser.add_argument('--norm', type=str, default='Linf')
    parser.add_argument('--epsilon', type=float, default=8./255.)
    parser.add_argument('--model', type=str, default='./model_test.pt')
    parser.add_argument('--n_ex', type=int, default=1000)
    parser.add_argument('--individual', action='store_true')
    parser.add_argument('--save_dir', type=str, default='./results')
    parser.add_argument('--batch_size', type=int, default=500)
    parser.add_argument('--log_path', type=str, default='./log_file.txt')
    parser.add_argument('--version', type=str, default='standard')
    parser.add_argument('--state-path', type=Path, default=None)

    args = parser.parse_args()

    # load model
    model = ResNet18()
    ckpt = torch.load(args.model)
    model.load_state_dict(ckpt)
    model.cuda()
    model.eval()

    # load data
    transform_list = [transforms.ToTensor()]
    transform_chain = transforms.Compose(transform_list)
    item = datasets.CIFAR10(root=args.data_dir, train=False, transform=transform_chain, download=True)
    test_loader = data.DataLoader(item, batch_size=1000, shuffle=False, num_workers=0)

    # create save dir
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # load attack
    from autoattack import AutoAttack
    adversary = AutoAttack(model, norm=args.norm, eps=args.epsilon,
                           log_path=args.log_path, version=args.version)

    l = [x for (x, y) in test_loader]
    x_test = torch.cat(l, 0)
    l = [y for (x, y) in test_loader]
    y_test = torch.cat(l, 0)

    # example of custom version
    if args.version == 'custom':
        adversary.attacks_to_run = ['apgd-ce', 'fab']
        adversary.apgd.n_restarts = 2
        adversary.fab.n_restarts = 2

    # run attack and save images
    with torch.no_grad():
        if not args.individual:
            adv_complete = adversary.run_standard_evaluation(x_test[:args.n_ex], y_test[:args.n_ex],
                bs=args.batch_size, state_path=args.state_path)

            torch.save({'adv_complete': adv_complete}, '{}/{}_{}_1_{}_eps_{:.5f}.pth'.format(
                args.save_dir, 'aa', args.version, adv_complete.shape[0], args.epsilon))

        else:
            # individual version, each attack is run on all test points
            adv_complete = adversary.run_standard_evaluation_individual(x_test[:args.n_ex],
                y_test[:args.n_ex], bs=args.batch_size)

            # the original format string had two extra placeholders
            # ('_plus_{}_cheap_{}') with no matching arguments, which raised
            # an IndexError; they are dropped here
            torch.save(adv_complete, '{}/{}_{}_individual_1_{}_eps_{:.5f}.pth'.format(
                args.save_dir, 'aa', args.version, args.n_ex, args.epsilon))
3,071
36.012048
103
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/SphereDenoisingMNN.py
# 2022/10/20~ # Claudio Battiloro, clabat@seas.upenn.edu/claudio.battiloro@uniroma1.it # Zhiyang Wang, zhiyangw@seas.upenn.edu # Hans Riess # Thanks to: # Fernando Gama, fgama@seas.upenn.edu # Luana Ruiz, rubruiz@seas.upenn.edu # for implementing the "alegnn" library. # This is the code used for implementing the MNN numerical results in the paper: # "Tangent Bundle Neural Networks: from Manifolds to Cellular Sheaves and Back" # C. Battiloro, Z. Wang, H. Riess, A. Ribeiro, P. Di Lorenzo # In particular, this code implements the denoising task described in the paper # over the vector field (-y,x,0) tangent to the unit sphere S2 using MNN # When it runs, it produces the following output: # - It trains the specified models and saves the best and the last model # parameters of each realization on a directory named 'savedModels'. # - It saves a pickle file with the torch random state and the numpy random # state for reproducibility. # - It saves a text file 'hyperparameters.txt' containing the specific # (hyper)parameters that control the run, together with the main (scalar) # results obtained. # - If desired, logs in tensorboardX the training loss and evaluation measure # both of the training set and the validation set. These tensorboardX logs # are saved in a logsTB directory. # - If desired, saves the vector variables of each realization (training and # validation loss and evaluation measure, respectively); this is saved # in pickle format. These variables are saved in a trainVars directory. # - If desired, plots the training and validation loss and evaluation # performance for each of the models, together with the training loss and # validation evaluation performance for all models. #%%################################################################## # # # IMPORTING # # # ##################################################################### #\\\ Standard libraries: import os import numpy as np import numpy.ma as ma import matplotlib matplotlib.rcParams['text.usetex'] = True matplotlib.rcParams['font.family'] = 'serif' matplotlib.rcParams['text.latex.preamble']=[r'\usepackage{amsmath}'] import matplotlib.pyplot as plt import pickle import datetime from copy import deepcopy import torch; torch.set_default_dtype(torch.float64) import torch.nn as nn import torch.optim as optim import pandas as pd #\\\ Own libraries: import alegnnss.utils.graphML as gml import alegnnss.utils.dataTools as dt import alegnnss.modules.architectures as archit import alegnnss.modules.model as model import alegnnss.modules.training as training import alegnnss.modules.evaluation as evaluation from alegnnss.modules.loss import MSE_semisup #\\\ Separate functions: from alegnnss.utils.miscTools import writeVarValues from alegnnss.utils.miscTools import saveSeed #\\\ Activation function: just a scaled Tanh class alphaTanh(nn.Module): def __init__(self, alpha = 2): super(alphaTanh, self).__init__() self.alpha = alpha def forward(self, x): return self.alpha*torch.tanh(x) # Start measuring time startRunTime = datetime.datetime.now() #%%################################################################## # # # SETTING PARAMETERS # # # ##################################################################### thisFilename = 'manifoldnn' # This is the general name of all related files saveDirRoot = 'experiments' # In this case, relative location saveDir = os.path.join(saveDirRoot, thisFilename) # Dir where to save all # the results from each run #\\\ Create .txt to store the values of the setting parameters for easier
reference when running multiple experiments today = datetime.datetime.now().strftime("%Y%m%d%H%M%S") # Append date and time of the run to the directory, to avoid several runs # overwriting each other. saveDir = saveDir + '-' + '-' + today # Create directory if not os.path.exists(saveDir): os.makedirs(saveDir) # Create the file where all the (hyper)parameters and results will be saved. varsFile = os.path.join(saveDir,'hyperparameters.txt') with open(varsFile, 'w+') as file: file.write('%s\n\n' % datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")) #\\\ Save seeds for reproducibility # PyTorch seeds torchState = torch.get_rng_state() torchSeed = torch.initial_seed() # Numpy seeds numpyState = np.random.RandomState().get_state() # Collect all random states randomStates = [] randomStates.append({}) randomStates[0]['module'] = 'numpy' randomStates[0]['state'] = numpyState randomStates.append({}) randomStates[1]['module'] = 'torch' randomStates[1]['state'] = torchState randomStates[1]['seed'] = torchSeed # This list and dictionary follow the format to then be loaded, if needed, # by calling the loadSeed function in Utils.miscTools saveSeed(randomStates, saveDir) useGPU = False # If true, and GPU is available, use it. ######## # DATA # ######## nDataSplits = 5 # Number of data realizations nNoiseSplits = 5 # Number of noise realizations --> Total Num. of experiments = nDataSplits*nNoiseSplits sigma_noise = 1e-1 # Noise variance ############ # TRAINING # ############ #\\\ Individual model training options optimAlg = 'ADAM' # Options: 'SGD', 'ADAM', 'RMSprop' l2pen = 1e-5 learningRate = 0.0005 # In all options beta1 = 0.8 # beta1 if 'ADAM', alpha if 'RMSprop' beta2 = 0.9 # ADAM option only #\\\ Loss function choice lossFunction = MSE_semisup reg_smooth = None #\\\ Overall training options nEpochs = 5000 # Number of epochs batchSize = 1 # Batch size (semisupervised task, so it is set to 1 such that nEpochs = number of training steps) doLearningRateDecay = False # Learning rate decay learningRateDecayRate = 0.9 # Rate learningRateDecayPeriod = int(nEpochs/3) # Number of epochs after which the lr is updated validationInterval = 5 # Validate every this many training steps for nPoints in [200,800]: #\\\ Save values writeVarValues(varsFile, {'nDataSplits': nDataSplits, 'optimAlg': optimAlg, 'learningRate': learningRate, 'beta1': beta1, 'beta2': beta2, 'lossFunction': lossFunction, 'nEpochs': nEpochs, 'batchSize': batchSize, 'doLearningRateDecay': doLearningRateDecay, 'learningRateDecayRate': learningRateDecayRate, 'learningRateDecayPeriod': learningRateDecayPeriod, 'validationInterval': validationInterval}) ################# # ARCHITECTURES # ################# # Manifold Neural Networks are implemented via Graph Neural Networks; # Sheaf Neural Networks can be implemented via a Selection-GNN with the GSO set # as the Cloud Laplacian # We use a one-layer MNN # In this section, we determine the (hyper)parameters of models that we are # going to train. This only sets the parameters. The architectures need to be # created later below. Do not forget to add the name of the architecture # to modelList. # If the model dictionary is called 'model' + name, then it can be # picked up immediately later on, and there's no need to recode anything after # the section 'Setup' (except for setting the number of nodes in the 'N' # variable after it has been coded).
# The name of the keys in the model dictionary have to be the same # as the names of the variables in the architecture call, because they will # be called by unpacking the dictionary. modelList = [] #\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #\\\ MNN via Selection GNN \\\ #\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #\\\ Basic parameters for both MNN modelSelGNN = {} # Model parameters for the Selection GNN (SelGNN) modelSelGNN['name'] = 'MNN' modelSelGNN['device'] = 'cuda:0' if (useGPU and torch.cuda.is_available()) \ else 'cpu' #\\\ ARCHITECTURE # Chosen architecture modelSelGNN['archit'] = archit.SelectionGNN # Layers Parameters modelSelGNN['dimNodeSignals'] = [3, 3] # Features per layer modelSelGNN['nFilterTaps'] = [5] # Number of filter taps per layer modelSelGNN['bias'] = False # Decide whether to include a bias term # Pooling (Set To No Pooling) modelSelGNN['poolingFunction'] = gml.NoPool # Summarizing function modelSelGNN['nSelectedNodes'] = [nPoints] modelSelGNN['poolingSize'] = [0] # poolingSize-hop neighborhood that is affected by the summary # Full MLP readout layer (not needed in our semisupervised setting) modelSelGNN['dimLayersMLP'] = [] # Dimension of the fully connected # layers after the GCN layers, we are doing a binary classification problem. # Cellular Sheaf structure modelSelGNN['GSO'] = None # Sheaf Shift Operator, to be determined later on, based on data modelSelGNN['order'] = None # Not used because there is no pooling modelSelGNN['lossFunction'] = lossFunction # Loss Function #\\\ TRAINER modelSelGNN['trainer'] = training.Trainer #\\\ EVALUATOR modelSelGNN['evaluator'] = evaluation.evaluate #\\\\\\\\\\\\\\\\\\\\\\\ #\\\ MODEL 1: MNN \\\ #\\\\\\\\\\\\\\\\\\\\\\\ modelMNN = deepcopy(modelSelGNN) modelMNN['name'] = 'MNN' # Name of the architecture # Nonlinearity modelMNN['nonlinearity'] = nn.PReLU # Save Values: writeVarValues(varsFile, modelMNN) modelList += [modelMNN['name']] ########### # LOGGING # ########### # Options: doPrint = True # Decide whether to print stuff while running doLogging = False # Log into tensorboard doSaveVars = True # Save (pickle) useful variables doFigs = False # Plot some figures (this only works if doSaveVars is True) # Parameters: printInterval = 16000 # After how many training steps, print the partial results # 0 means to never print partial results while training xAxisMultiplierTrain = 1 # How many training steps in between those shown in # the plot, i.e., one training step every xAxisMultiplierTrain is shown. xAxisMultiplierValid = 1 # How many validation steps in between those shown, # same as above. 
figSize = 5 # Overall size of the figure that contains the plot lineWidth = 2 # Width of the plot lines markerShape = 'o' # Shape of the markers markerSize = 3 # Size of the markers #\\\ Save values: writeVarValues(varsFile, {'doPrint': doPrint, 'doLogging': doLogging, 'doSaveVars': doSaveVars, 'doFigs': doFigs, 'saveDir': saveDir, 'printInterval': printInterval, 'figSize': figSize, 'lineWidth': lineWidth, 'markerShape': markerShape, 'markerSize': markerSize}) #%%################################################################## # # # SETUP # # # ##################################################################### #\\\ Determine processing unit: if useGPU and torch.cuda.is_available(): torch.cuda.empty_cache() #\\\ Notify of processing units if doPrint: print("Selected devices:") for thisModel in modelList: modelDict = eval('model' + thisModel) print("\t%s: %s" % (thisModel, modelDict['device'])) #\\\ Logging options if doLogging: # If logging is on, load the tensorboard visualizer and initialize it from alegnnss.utils.visualTools import Visualizer logsTB = os.path.join(saveDir, 'logsTB') logger = Visualizer(logsTB, name='visualResults') #\\\ Save variables during evaluation. # We will save all the evaluations obtained for each of the trained models. # It basically is a dictionary, containing a list. The key of the # dictionary determines the model, then the first list index determines # which split realization. Then, this will be converted to numpy to compute # mean and standard deviation (across the split dimension). costBest = {} # Cost for the best model (Evaluation cost: RMSE) costLast = {} # Cost for the last model costBestDiff = {} InitCost ={} for thisModel in modelList: # Create an element for each realization, costBest[thisModel] = np.zeros((nDataSplits,nNoiseSplits)) costLast[thisModel] = np.zeros((nDataSplits,nNoiseSplits)) costBestDiff[thisModel] = np.zeros((nDataSplits,nNoiseSplits)) InitCost[thisModel] = np.zeros((nDataSplits,nNoiseSplits)) # Create the variables to save all the realizations. This is, again, a # dictionary, where each key represents a model, and each model is a list # for each data split. # Each data split, in this case, is not a scalar, but a vector of # length the number of training steps (or of validation steps) lossTrain = {} costTrain = {} lossValid = {} costValid = {} # Initialize the splits dimension for thisModel in modelList: lossTrain[thisModel] = np.zeros((nDataSplits,nNoiseSplits,nEpochs)) costTrain[thisModel] = np.zeros((nDataSplits,nNoiseSplits,nEpochs)) lossValid[thisModel] = np.zeros((nDataSplits,nNoiseSplits,np.ceil(nEpochs/validationInterval).astype(int))) costValid[thisModel] = np.zeros((nDataSplits,nNoiseSplits,np.ceil(nEpochs/validationInterval).astype(int))) #################### # TRAINING OPTIONS # #################### # Training phase. It has a lot of options that are input through a # dictionary of arguments. # The value of these options was decided above with the rest of the parameters. # This just creates a dictionary necessary to pass to the train function. 
trainingOptions = {} if doLogging: trainingOptions['logger'] = logger if doSaveVars: trainingOptions['saveDir'] = saveDir if doPrint: trainingOptions['printInterval'] = printInterval if doLearningRateDecay: trainingOptions['learningRateDecayRate'] = learningRateDecayRate trainingOptions['learningRateDecayPeriod'] = learningRateDecayPeriod trainingOptions['validationInterval'] = validationInterval # And in case each model has specific training options, then we create a # separate dictionary per model. trainingOptsPerModel = {} #%%################################################################## # # # DATA LOADING AND SAMPLING # # # ##################################################################### # Data and Laplacians have been previously computed via the MATLAB VDM implementation. for split in range(nDataSplits): for rel in range(nNoiseSplits): # Loads the Cloud Laplacian, its Exp and the specific data realization #SLaplacian = pd.read_csv('/home/claudio/Dropbox/VectorDiffusionMaps-master/data/data_samples_'+str(nPoints)+'_realization_'\ #+str(split)+'/Laplacian.csv',header = None).to_numpy() SLaplacian = pd.read_csv('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Repo/VectorDiffusionMaps-master/data/data_samples_'+str(nPoints)+'_realization_'\ +str(split+1)+'/expLaplacian.csv',header = None).to_numpy() [lambdas,_] = np.linalg.eigh(SLaplacian) SLaplacian = SLaplacian/np.max(np.real(lambdas)) data_np = pd.read_csv('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Repo/VectorDiffusionMaps-master/data/data_samples_'+str(nPoints)+'_realization_'\ +str(split+1)+'/Data.csv',header = None).to_numpy() train_np = pd.read_csv('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Repo/VectorDiffusionMaps-master/data/data_samples_'+str(nPoints)+'_realization_'\ +str(split+1)+'/Data_sd_'+str(sigma_noise)+'_nrel_'+str(rel+1)+'.csv',header = None).to_numpy() train_np=np.expand_dims(train_np,0) # Data object instantiation # We have one data matrix: the training data are noisy, while validation/test # performance is computed by evaluating the denoising error data = dt._dataForSemisupervised() data.dataType = torch.float64 data.nTrain = 1 data.nValid = 1 data.nTest = 1 data.samples = {} data.samples['train'] = {} data.samples['train']['signals'] = torch.from_numpy(train_np) data.samples['train']['targets'] = torch.from_numpy(train_np) data.samples['valid'] = {} data.samples['valid']['signals'] = torch.from_numpy(train_np) data.samples['valid']['targets'] = torch.from_numpy(data_np) data.samples['test'] = {} data.samples['test']['signals'] = torch.from_numpy(train_np) data.samples['test']['targets'] = torch.from_numpy(data_np) data.nPoints = nPoints #%%################################################################## # # # MODELS INITIALIZATION # # # ##################################################################### # This is the dictionary where we store the models (in a model.Model # class, that is then passed to training). modelsMNN = {} # If a new model is to be created, it should be added here.
if doPrint: print("Model initialization...", flush = True) for thisModel in modelList: # Get the corresponding parameter dictionary modelDict = deepcopy(eval('model' + thisModel)) modelDict['GSO'] = SLaplacian # and training options trainingOptsPerModel[thisModel] = deepcopy(trainingOptions) # Now, this dictionary has all the hyperparameters that we need to pass # to the architecture function, but it also has other keys that belong # to the more general model (like 'name' or 'device'), so we need to # extract them and save them in seperate variables for future use. thisName = modelDict.pop('name') callArchit = modelDict.pop('archit') thisDevice = modelDict.pop('device') thisTrainer = modelDict.pop('trainer') thisEvaluator = modelDict.pop('evaluator') thisLossFunction = modelDict.pop('lossFunction')(None, reg_smooth,\ torch.Tensor(SLaplacian).to(thisDevice)) # If more than one data realization is going to be carried out, # we are going to store all of those models separately, so that any of # them can be brought back and studied in detail. if nDataSplits > 1: thisName += '_num_sampl_'+str(nPoints)+'_data_real_'+str(split)+'_noise_real_'+str(rel) if doPrint: print("\tInitializing %s..." % thisName, end = ' ',flush = True) ############## # PARAMETERS # ############## #\\\ Optimizer options # (If different from the default ones, change here.) thisOptimAlg = optimAlg thisLearningRate = learningRate thisBeta1 = beta1 thisBeta2 = beta2 ################ # ARCHITECTURE # ################ thisArchit = callArchit(**modelDict) thisArchit.to(thisDevice) ############# # OPTIMIZER # ############# if thisOptimAlg == 'ADAM': thisOptim = optim.Adam(thisArchit.parameters(), lr = learningRate, betas = (beta1, beta2), weight_decay=l2pen) elif thisOptimAlg == 'SGD': thisOptim = optim.SGD(thisArchit.parameters(), lr = learningRate) elif thisOptimAlg == 'RMSprop': thisOptim = optim.RMSprop(thisArchit.parameters(), lr = learningRate, alpha = beta1) ######### # MODEL # ######### # Create the model modelCreated = model.Model(thisArchit, thisLossFunction, thisOptim, thisTrainer, thisEvaluator, thisDevice, thisName, saveDir) # Store it modelsMNN[thisName] = modelCreated # Write the main hyperparameters writeVarValues(varsFile, {'name': thisName, 'thisOptimizationAlgorithm': thisOptimAlg, 'thisTrainer': thisTrainer, 'thisEvaluator': thisEvaluator, 'thisLearningRate': thisLearningRate, 'thisBeta1': thisBeta1, 'thisBeta2': thisBeta2}) if doPrint: print("OK") if doPrint: print("Model initialization... COMPLETE") #%%################################################################## # # # TRAINING # # # ##################################################################### print("") # We train each model separately for thisModel in modelsMNN.keys(): if doPrint: print("Training model %s..." % thisModel) # Remember that modelsMNN.keys() has the split numbering as well as the # name, while modelList has only the name. 
So we need to map the # specific model for this specific split with the actual model name, # since there are several variables that are indexed by the model name # (for instance, the training options, or the dictionaries saving the # loss values) for m in modelList: if m in thisModel: modelName = m # Identify the specific split number at training time if nDataSplits > 1: trainingOptsPerModel[modelName]['graphNo'] = split # Train the model thisTrainVars = modelsMNN[thisModel].train(data, nEpochs, batchSize, **trainingOptsPerModel[modelName]) # Find which model to save the results (when having multiple # realizations) lossTrain[modelName][split,rel,:] = thisTrainVars['lossTrain'] costTrain[modelName][split,rel,:] = thisTrainVars['costTrain'] lossValid[modelName][split,rel,:] = thisTrainVars['lossValid'] costValid[modelName][split,rel,:] = thisTrainVars['costValid'] # Store Best RMSE costBest[modelName][split,rel]= min(costValid[modelName][split,rel,:]) # And we also need to save 'nBatches' but is the same for all models, so if doFigs: nBatches = thisTrainVars['nBatches'] #Computes and Saves Best Performances as a DataFrame meancostBest = {} for thisModel in modelList: mask = np.logical_or(costBest[thisModel] == costBest[thisModel].max(keepdims = 1), costBest[thisModel] == costBest[thisModel].min(keepdims = 1)) tmp = ma.masked_array(costBest[thisModel], mask = mask) meancostBest[thisModel] = {"Mean":np.mean(tmp),"Std":np.std(tmp)} pd.DataFrame(meancostBest).to_csv(saveDir+'/best_summary_'+str(nPoints)+'.csv') #%%################################################################## # # # PLOT # # # ##################################################################### # Finally, we might want to plot several quantities of interest if doFigs and doSaveVars: ################### # DATA PROCESSING # ################### #\\\ FIGURES DIRECTORY: saveDirFigs = os.path.join(saveDir,'figs') # If it doesn't exist, create it. if not os.path.exists(saveDirFigs): os.makedirs(saveDirFigs) #\\\ COMPUTE STATISTICS: # The first thing to do is to transform those into a matrix with all the # realizations, so create the variables to save that. meanLossTrain = {} meanCostTrain = {} meanLossValid = {} meanCostValid = {} stdDevLossTrain = {} stdDevCostTrain = {} stdDevLossValid = {} stdDevCostValid = {} # Initialize the variables for thisModel in modelList: # Transform into np.array lossTrain[thisModel] = np.array(lossTrain[thisModel]) costTrain[thisModel] = np.array(costTrain[thisModel]) lossValid[thisModel] = np.array(lossValid[thisModel]) costValid[thisModel] = np.array(costValid[thisModel]) # Each of one of these variables should be of shape # nDataSplits x nNoiseSplits x nEpochs # And compute the statistics meanLossTrain[thisModel] = np.mean(np.mean(lossTrain[thisModel], axis = 1), axis = 0) meanCostTrain[thisModel] = np.mean(np.mean(costTrain[thisModel], axis = 1), axis = 0) meanLossValid[thisModel] = np.mean(np.mean(lossValid[thisModel], axis = 1), axis = 0) meanCostValid[thisModel] = np.mean(np.mean(costValid[thisModel], axis = 1), axis = 0) stdDevLossTrain[thisModel] = np.std(np.mean(lossTrain[thisModel], axis = 1), axis = 0) stdDevCostTrain[thisModel] = np.std(np.mean(costTrain[thisModel], axis = 1), axis = 0) stdDevLossValid[thisModel] = np.std(np.mean(lossValid[thisModel], axis = 1), axis = 0) stdDevCostValid[thisModel] = np.std(np.mean(costValid[thisModel], axis = 1), axis = 0) #################### # SAVE FIGURE DATA # #################### # And finally, we can plot. 
    # But before, let's save the variables mean and stdDev, so that if we
    # don't like the plot, we can re-open them and re-plot them at will.
    # Pickle, first:
    varsPickle = {}
    varsPickle['nEpochs'] = nEpochs
    varsPickle['nBatches'] = nBatches
    varsPickle['meanLossTrain'] = meanLossTrain
    varsPickle['stdDevLossTrain'] = stdDevLossTrain
    varsPickle['meanCostTrain'] = meanCostTrain
    varsPickle['stdDevCostTrain'] = stdDevCostTrain
    varsPickle['meanLossValid'] = meanLossValid
    varsPickle['stdDevLossValid'] = stdDevLossValid
    varsPickle['meanCostValid'] = meanCostValid
    varsPickle['stdDevCostValid'] = stdDevCostValid
    with open(os.path.join(saveDirFigs,'figVars.pkl'), 'wb') as figVarsFile:
        pickle.dump(varsPickle, figVarsFile)

    ########
    # PLOT #
    ########

    # Compute the x-axis
    xTrain = np.arange(0, nEpochs * nBatches, xAxisMultiplierTrain)
    xValid = np.arange(0, nEpochs * nBatches, \
                       validationInterval*xAxisMultiplierValid)
    xTest = [pow(10, r) for r in np.linspace(-3, 0, num = 5)]

    # If we do not want to plot all the elements (to avoid overcrowded
    # plots) we need to recompute the x axis and take those elements
    # corresponding to the training steps we want to plot
    if xAxisMultiplierTrain > 1:
        # Actual selected samples
        selectSamplesTrain = xTrain
        # Go and fetch them
        for thisModel in modelList:
            meanLossTrain[thisModel] = meanLossTrain[thisModel]\
                                                    [selectSamplesTrain]
            stdDevLossTrain[thisModel] = stdDevLossTrain[thisModel]\
                                                        [selectSamplesTrain]
            meanCostTrain[thisModel] = meanCostTrain[thisModel]\
                                                    [selectSamplesTrain]
            stdDevCostTrain[thisModel] = stdDevCostTrain[thisModel]\
                                                        [selectSamplesTrain]
    # And same for the validation, if necessary.
    if xAxisMultiplierValid > 1:
        selectSamplesValid = np.arange(0, len(meanLossValid[thisModel]), \
                                       xAxisMultiplierValid)
        for thisModel in modelList:
            meanLossValid[thisModel] = meanLossValid[thisModel]\
                                                    [selectSamplesValid]
            stdDevLossValid[thisModel] = stdDevLossValid[thisModel]\
                                                        [selectSamplesValid]
            meanCostValid[thisModel] = meanCostValid[thisModel]\
                                                    [selectSamplesValid]
            stdDevCostValid[thisModel] = stdDevCostValid[thisModel]\
                                                        [selectSamplesValid]

    #\\\ LOSS (Training and validation) for EACH MODEL
    for key in meanLossTrain.keys():
        lossFig = plt.figure(figsize=(1.61*figSize, 1*figSize))
        plt.errorbar(xTrain, meanLossTrain[key], yerr = stdDevLossTrain[key],
                     color = '#01256E', linewidth = lineWidth,
                     marker = markerShape, markersize = markerSize)
        plt.errorbar(xValid, meanLossValid[key], yerr = stdDevLossValid[key],
                     color = '#95001A', linewidth = lineWidth,
                     marker = markerShape, markersize = markerSize)
        plt.ylabel(r'Loss')
        plt.xlabel(r'Training steps')
        plt.legend([r'Training', r'Validation'])
        plt.title(r'%s' % key)
        lossFig.savefig(os.path.join(saveDirFigs,'loss%s.pdf' % key),
                        bbox_inches = 'tight')

    plot_lag = 1
    # LOSS (training) for ALL MODELS
    allLossTrain = plt.figure(figsize=(1.61*figSize, 1*figSize))
    for key in meanLossTrain.keys():
        plt.errorbar(xTrain[plot_lag:], meanLossTrain[key][plot_lag:],
                     yerr = stdDevLossTrain[key][plot_lag:],
                     linewidth = lineWidth,
                     marker = markerShape, markersize = markerSize)
    plt.ylabel(r'Loss')
    plt.xlabel(r'Training steps')
    plt.legend(list(meanLossTrain.keys()))
    allLossTrain.savefig(os.path.join(saveDirFigs,'allLossTrain.pdf'),
                         bbox_inches = 'tight')

    #\\\ RMSE (Training and validation) for EACH MODEL
    for key in meanCostTrain.keys():
        costFig = plt.figure(figsize=(1.61*figSize, 1*figSize))
        plt.errorbar(xTrain[plot_lag:], meanCostTrain[key][plot_lag:],
                     yerr = stdDevCostTrain[key][plot_lag:],
                     color = '#01256E', linewidth = lineWidth,
                     marker = markerShape, markersize =
markerSize) plt.errorbar(xValid[plot_lag:], meanCostValid[key][plot_lag:], yerr = stdDevCostValid[key][plot_lag:], color = '#95001A', linewidth = lineWidth, marker = markerShape, markersize = markerSize) plt.ylabel(r'RMSE') plt.xlabel(r'Training steps') plt.legend([r'Training', r'Validation']) plt.title(r'%s' % key) costFig.savefig(os.path.join(saveDirFigs,'cost%s.pdf' % key), bbox_inches = 'tight') # RMSE (validation) for ALL MODELS allCostValidFig = plt.figure(figsize=(1.61*figSize, 1*figSize)) for key in meanCostValid.keys(): plt.errorbar(xValid[plot_lag:], meanCostValid[key][plot_lag:], yerr = stdDevCostValid[key][plot_lag:], linewidth = lineWidth, marker = markerShape, markersize = markerSize) plt.ylabel(r'RMSE') plt.xlabel(r'Training steps') plt.legend(list(meanCostValid.keys())) allCostValidFig.savefig(os.path.join(saveDirFigs,'allCostValid.pdf'), bbox_inches = 'tight') # Finish measuring time endRunTime = datetime.datetime.now() totalRunTime = abs(endRunTime - startRunTime) totalRunTimeH = int(divmod(totalRunTime.total_seconds(), 3600)[0]) totalRunTimeM, totalRunTimeS = \ divmod(totalRunTime.total_seconds() - totalRunTimeH * 3600., 60) totalRunTimeM = int(totalRunTimeM) if doPrint: print(" ") print("Simulation started: %s" %startRunTime.strftime("%Y/%m/%d %H:%M:%S")) print("Simulation ended: %s" % endRunTime.strftime("%Y/%m/%d %H:%M:%S")) print("Total time: %dh %dm %.2fs" % (totalRunTimeH, totalRunTimeM, totalRunTimeS)) # And save this info into the .txt file as well with open(varsFile, 'a+') as file: file.write("Simulation started: %s\n" % startRunTime.strftime("%Y/%m/%d %H:%M:%S")) file.write("Simulation ended: %s\n" % endRunTime.strftime("%Y/%m/%d %H:%M:%S")) file.write("Total time: %dh %dm %.2fs" % (totalRunTimeH, totalRunTimeM, totalRunTimeS))
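# A minimal, self-contained sketch (illustrative values only, not produced
# by the run above) of the trimmed summary computed for 'meancostBest': the
# single best and single worst realizations are masked out before taking
# the mean and standard deviation.
_demo_cost = np.array([[0.10, 0.12], [0.50, 0.11]])
_demo_mask = np.logical_or(_demo_cost == _demo_cost.max(keepdims = 1),
                           _demo_cost == _demo_cost.min(keepdims = 1))
_demo_tmp = ma.masked_array(_demo_cost, mask = _demo_mask)
print("Trimmed mean: %.4f, std: %.4f" % (np.mean(_demo_tmp), np.std(_demo_tmp)))
# Expected output: Trimmed mean: 0.1150, std: 0.0050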
36,535
45.661558
177
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/SphereDenoisingDDTNN.py
# 2022/10/20~
# Claudio Battiloro, clabat@seas.upenn.edu/claudio.battiloro@uniroma1.it
# Zhiyang Wang, zhiyangw@seas.upenn.edu
# Hans Riess
# Thanks to:
# Fernando Gama, fgama@seas.upenn.edu
# Luana Ruiz, rubruiz@seas.upenn.edu
# for implementing the "alegnn" library.

# This is the code used for implementing the numerical results in the paper:
# "Tangent Bundle Neural Networks: from Manifolds to Cellular Sheaves and Back"
# C. Battiloro, Z. Wang, H. Riess, A. Ribeiro, P. Di Lorenzo

# In particular, this code implements the denoising task described in the
# paper over the vector field (-y,x,0) tangent to the unit sphere S2,
# using DD-TNN.
# Obs: This code could also be used for a denoising+reconstruction task;
# indeed, a random sampler is implemented. For the denoising-only task, it
# is sufficient to set the sample percentage to 1.

# When it runs, it produces the following output:
# - It trains the specified models and saves the best and the last model
#   parameters of each realization on a directory named 'savedModels'.
# - It saves a pickle file with the torch random state and the numpy random
#   state for reproducibility.
# - It saves a text file 'hyperparameters.txt' containing the specific
#   (hyper)parameters that control the run, together with the main (scalar)
#   results obtained.
# - If desired, logs in tensorboardX the training loss and evaluation
#   measure both of the training set and the validation set. These
#   tensorboardX logs are saved in a logsTB directory.
# - If desired, saves the vector variables of each realization (training
#   and validation loss and evaluation measure, respectively); this is
#   saved in pickle format. These variables are saved in a trainVars
#   directory.
# - If desired, plots the training and validation loss and evaluation
#   performance for each of the models, together with the training loss and
#   validation evaluation performance for all models.
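# A quick illustrative check (not part of the original pipeline; values are
# made up) that the target field is indeed tangent: for p = (x, y, z) on S2
# and v = (-y, x, 0), <p, v> = -xy + yx = 0. For instance:
#   p = np.array([0.6, 0.8, 0.0]); v = np.array([-0.8, 0.6, 0.0])
#   np.dot(p, v)   # -> 0.0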
#%%##################################################################
#                                                                   #
#                          IMPORTING                                #
#                                                                   #
#####################################################################

#\\\ Standard libraries:
import os
import numpy as np
import numpy.ma as ma
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['text.latex.preamble']=[r'\usepackage{amsmath}']
import matplotlib.pyplot as plt
import pickle
import datetime
from copy import deepcopy
import torch; torch.set_default_dtype(torch.float64)
import torch.nn as nn
import torch.optim as optim
import pandas as pd

#\\\ Own libraries:
import alegnnss.utils.graphML as gml
import alegnnss.utils.dataTools as dt
import alegnnss.modules.architectures as archit
import alegnnss.modules.model as model
import alegnnss.modules.training as training
import alegnnss.modules.evaluation as evaluation
from alegnnss.modules.loss import MSE_semisup

#\\\ Separate functions:
from alegnnss.utils.miscTools import writeVarValues
from alegnnss.utils.miscTools import saveSeed

#\\\ Activation function: just a scaled Tanh
class alphaTanh(nn.Module):
    def __init__(self, alpha = 2):
        super(alphaTanh, self).__init__()
        self.alpha = alpha
    def forward(self, x):
        return self.alpha*torch.tanh(x)

# Start measuring time
startRunTime = datetime.datetime.now()

#%%##################################################################
#                                                                   #
#                    SETTING PARAMETERS                             #
#                                                                   #
#####################################################################

thisFilename = 'tangentbundlenn' # This is the general name of all related files

saveDirRoot = 'experiments' # In this case, relative location
saveDir = os.path.join(saveDirRoot, thisFilename) # Dir where to save all
    # the results from each run

#\\\ Create .txt to store the values of the setting parameters for easier
# reference when running multiple experiments
today = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
# Append date and time of the run to the directory, to avoid several runs
# overwriting each other.
saveDir = saveDir + '-' + '-' + today
# Create directory
if not os.path.exists(saveDir):
    os.makedirs(saveDir)
# Create the file where all the (hyper)parameters and results will be saved.
varsFile = os.path.join(saveDir,'hyperparameters.txt')
with open(varsFile, 'w+') as file:
    file.write('%s\n\n' % datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S"))

#\\\ Save seeds for reproducibility
# PyTorch seeds
torchState = torch.get_rng_state()
torchSeed = torch.initial_seed()
# Numpy seeds
numpyState = np.random.RandomState().get_state()
# Collect all random states
randomStates = []
randomStates.append({})
randomStates[0]['module'] = 'numpy'
randomStates[0]['state'] = numpyState
randomStates.append({})
randomStates[1]['module'] = 'torch'
randomStates[1]['state'] = torchState
randomStates[1]['seed'] = torchSeed
# This list and dictionary follow the format to then be loaded, if needed,
# by calling the loadSeed function in Utils.miscTools
saveSeed(randomStates, saveDir)

useGPU = True # If true, and GPU is available, use it.

########
# DATA #
########

nDataSplits = 5 # Number of data realizations
nNoiseSplits = 5 # Number of noise realizations
                 # --> total number of experiments = nDataSplits*nNoiseSplits
dhat = 2 # Estimated underlying manifold dimension
sigma_noise = 1e-1 #5e-2 # Noise variance
sample_percentage = 1 # The sampling mask is generated with
                      # P(retain_sample) = sample_percentage

############
# TRAINING #
############

#\\\ Individual model training options
optimAlg = 'ADAM' # Options: 'SGD', 'ADAM', 'RMSprop'
l2pen = 1e-5
learningRate = 0.0005 # In all options
beta1 = 0.8 # beta1 if 'ADAM', alpha if 'RMSprop'
beta2 = 0.9 # ADAM option only

#\\\ Loss function choice
lossFunction = MSE_semisup
reg_smooth = None

#\\\ Overall training options
nEpochs = 5000 # Number of epochs
batchSize = 1 # Batch size (semisupervised task, so it is set to 1 such that
              # nEpochs = number of training steps)
doLearningRateDecay = False # Learning rate decay
learningRateDecayRate = 0.9 # Rate
learningRateDecayPeriod = int(nEpochs/3) # Number of epochs after which the
                                         # learning rate is updated
validationInterval = 5 # How many training steps to do the validation

#\\\ Save values
writeVarValues(varsFile,
               {'nDataSplits': nDataSplits,
                'optimAlg': optimAlg,
                'learningRate': learningRate,
                'beta1': beta1,
                'beta2': beta2,
                'lossFunction': lossFunction,
                'nEpochs': nEpochs,
                'batchSize': batchSize,
                'doLearningRateDecay': doLearningRateDecay,
                'learningRateDecayRate': learningRateDecayRate,
                'learningRateDecayPeriod': learningRateDecayPeriod,
                'validationInterval': validationInterval})

for nPoints in [200,800]: # Data are available for 200, 800 and 1200 samples;
                          # others can be generated from the MATLAB script

#################
# ARCHITECTURES #
#################

# Tangent Bundle Neural Networks are implemented via Sheaf Neural Networks;
# Sheaf Neural Networks can be implemented via a Selection-GNN with the GSO
# set as the Sheaf Laplacian and vector data stacked on the columns of the
# input matrix (as explained in the paper).
# We exploit a two-layer DD-TNN and Discretized Space/Time Filters (denoted
# as DD-TF). The main difference is that the DD-TNN includes a
# non-linearity function.

# In this section, we determine the (hyper)parameters of models that we are
# going to train. This only sets the parameters. The architectures need to
# be created later below. Do not forget to add the name of the architecture
# to modelList.

# If the model dictionary is called 'model' + name, then it can be
# picked up immediately later on, and there's no need to recode anything
# after the section 'Setup' (except for setting the number of nodes in the
# 'N' variable after it has been coded).

# The names of the keys in the model dictionary have to be the same
# as the names of the variables in the architecture call, because they will
# be called by unpacking the dictionary.
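# As a minimal illustration of this convention (hypothetical values), if
#   modelDict = {'dimNodeSignals': [1, 1], 'nFilterTaps': [5], 'bias': False}
# then calling callArchit(**modelDict) is the same as calling
#   callArchit(dimNodeSignals = [1, 1], nFilterTaps = [5], bias = False),
# which is why every key must match an argument of the architecture call.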
modelList = []

#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
#\\\ DD-TNN via Selection GNN \\\
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\

#\\\ Basic parameters for both DD-TNN and DD-TF
modelSelGNN = {} # Model parameters for the Selection GNN (SelGNN)
modelSelGNN['name'] = 'TNN'
modelSelGNN['device'] = 'cuda:0' if (useGPU and torch.cuda.is_available()) \
                                 else 'cpu'

#\\\ ARCHITECTURE

# Chosen architecture
modelSelGNN['archit'] = archit.SelectionGNN
# Layers Parameters
modelSelGNN['dimNodeSignals'] = [1,1] # Features per layer
modelSelGNN['nFilterTaps'] = [5] # Number of filter taps per layer
modelSelGNN['bias'] = False # Decide whether to include a bias term
# Pooling (set to no pooling)
modelSelGNN['poolingFunction'] = gml.NoPool # Summarizing function
modelSelGNN['nSelectedNodes'] = [dhat*nPoints]
modelSelGNN['poolingSize'] = [0] # poolingSize-hop neighborhood that is
                                 # affected by the summary
# Full MLP readout layer (not needed in our semisupervised setting)
modelSelGNN['dimLayersMLP'] = [] # Dimension of the fully connected layers
                                 # after the GCN layers (left empty: no
                                 # readout is needed in this semisupervised
                                 # setting)
# Cellular Sheaf structure
modelSelGNN['GSO'] = None # Sheaf Shift Operator, to be determined later on,
                          # based on data
modelSelGNN['order'] = None # Not used because there is no pooling
modelSelGNN['lossFunction'] = lossFunction # Loss Function

#\\\ TRAINER
modelSelGNN['trainer'] = training.Trainer

#\\\ EVALUATOR
modelSelGNN['evaluator'] = evaluation.evaluate

#\\\\\\\\\\\\\\\\\\\\\\\
#\\\ MODEL 1: DD-TNN \\\
#\\\\\\\\\\\\\\\\\\\\\\\

modelDDTNN = deepcopy(modelSelGNN)

modelDDTNN['name'] = 'DDTNN' # Name of the architecture

# Nonlinearity
modelDDTNN['nonlinearity'] = nn.PReLU

# Save Values:
writeVarValues(varsFile, modelDDTNN)
modelList += [modelDDTNN['name']]

#\\\\\\\\\\\\\\\\\\\\\\
#\\\ MODEL 2: DD-TF \\\
#\\\\\\\\\\\\\\\\\\\\\\

#modelDDTF = deepcopy(modelSelGNN)

#modelDDTF['name'] = 'DDTF' # Name of the architecture

# Nonlinearity
#modelDDTF['nonlinearity'] = gml.NoActivation

# Save Values:
#writeVarValues(varsFile, modelDDTF)
#modelList += [modelDDTF['name']]

###########
# LOGGING #
###########

# Options:
doPrint = True # Decide whether to print stuff while running
doLogging = False # Log into tensorboard
doSaveVars = True # Save (pickle) useful variables
doFigs = False # Plot some figures (this only works if doSaveVars is True)
# Parameters:
printInterval = 16000 # After how many training steps, print the partial
    # results; 0 means to never print partial results while training
xAxisMultiplierTrain = 1 # How many training steps in between those shown in
    # the plot, i.e., one training step every xAxisMultiplierTrain is shown.
xAxisMultiplierValid = 1 # How many validation steps in between those shown,
    # same as above.
figSize = 5 # Overall size of the figure that contains the plot lineWidth = 2 # Width of the plot lines markerShape = 'o' # Shape of the markers markerSize = 3 # Size of the markers #\\\ Save values: writeVarValues(varsFile, {'doPrint': doPrint, 'doLogging': doLogging, 'doSaveVars': doSaveVars, 'doFigs': doFigs, 'saveDir': saveDir, 'printInterval': printInterval, 'figSize': figSize, 'lineWidth': lineWidth, 'markerShape': markerShape, 'markerSize': markerSize}) #%%################################################################## # # # SETUP # # # ##################################################################### #\\\ Determine processing unit: if useGPU and torch.cuda.is_available(): torch.cuda.empty_cache() #\\\ Notify of processing units if doPrint: print("Selected devices:") for thisModel in modelList: modelDict = eval('model' + thisModel) print("\t%s: %s" % (thisModel, modelDict['device'])) #\\\ Logging options if doLogging: # If logging is on, load the tensorboard visualizer and initialize it from alegnnss.utils.visualTools import Visualizer logsTB = os.path.join(saveDir, 'logsTB') logger = Visualizer(logsTB, name='visualResults') #\\\ Save variables during evaluation. # We will save all the evaluations obtained for each of the trained models. # It basically is a dictionary, containing a list. The key of the # dictionary determines the model, then the first list index determines # which split realization. Then, this will be converted to numpy to compute # mean and standard deviation (across the split dimension). costBest = {} # Cost for the best model (Evaluation cost: RMSE) costLast = {} # Cost for the last model costBestDiff = {} InitCost ={} for thisModel in modelList: # Create an element for each realization, costBest[thisModel] = np.zeros((nDataSplits,nNoiseSplits)) costLast[thisModel] = np.zeros((nDataSplits,nNoiseSplits)) costBestDiff[thisModel] = np.zeros((nDataSplits,nNoiseSplits)) InitCost[thisModel] = np.zeros((nDataSplits,nNoiseSplits)) # Create the variables to save all the realizations. This is, again, a # dictionary, where each key represents a model, and each model is a list # for each data split. # Each data split, in this case, is not a scalar, but a vector of # length the number of training steps (or of validation steps) lossTrain = {} costTrain = {} lossValid = {} costValid = {} # Initialize the splits dimension for thisModel in modelList: lossTrain[thisModel] = np.zeros((nDataSplits,nNoiseSplits,nEpochs)) costTrain[thisModel] = np.zeros((nDataSplits,nNoiseSplits,nEpochs)) lossValid[thisModel] = np.zeros((nDataSplits,nNoiseSplits,np.ceil(nEpochs/validationInterval).astype(int))) costValid[thisModel] = np.zeros((nDataSplits,nNoiseSplits,np.ceil(nEpochs/validationInterval).astype(int))) #################### # TRAINING OPTIONS # #################### # Training phase. It has a lot of options that are input through a # dictionary of arguments. # The value of these options was decided above with the rest of the parameters. # This just creates a dictionary necessary to pass to the train function. 
trainingOptions = {}

if doLogging:
    trainingOptions['logger'] = logger
if doSaveVars:
    trainingOptions['saveDir'] = saveDir
if doPrint:
    trainingOptions['printInterval'] = printInterval
if doLearningRateDecay:
    trainingOptions['learningRateDecayRate'] = learningRateDecayRate
    trainingOptions['learningRateDecayPeriod'] = learningRateDecayPeriod
trainingOptions['validationInterval'] = validationInterval

# And in case each model has specific training options, then we create a
# separate dictionary per model.
trainingOptsPerModel = {}

#%%##################################################################
#                                                                   #
#                    DATA LOADING AND SAMPLING                      #
#                                                                   #
#####################################################################

# Data and Laplacians have been previously computed via the MATLAB VDM
# implementation.
for split in range(nDataSplits):
    for rel in range(nNoiseSplits):
        # Load the Sheaf Laplacian (Delta_n), the Laplacian, their
        # exponentials, and the specific data realization
        #SLaplacian = pd.read_csv('/home/claudio/Dropbox/VectorDiffusionMaps-master/data/data_samples_'+str(nPoints)+'_realization_'\
        #+str(split)+'/SLaplacian.csv',header = None).to_numpy()
        SLaplacian = pd.read_csv('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Repo/VectorDiffusionMaps-master/data/data_samples_'+str(nPoints)+'_realization_'\
        +str(split+1)+'/expSLaplacian.csv',header = None).to_numpy()
        [lambdas,_] = np.linalg.eigh(SLaplacian)
        SLaplacian = SLaplacian/np.max(np.real(lambdas))
        data_np = pd.read_csv('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Repo/VectorDiffusionMaps-master/data/data_samples_'+str(nPoints)+'_realization_'\
        +str(split+1)+'/projData.csv',header = None).to_numpy()
        sampling_mask = np.kron(np.ones((1,dhat)),np.expand_dims(np.random.binomial\
        (1, sample_percentage, size=nPoints),1)).flatten() # Sampling Mask
        # Define the training set as the masked signals and add noise
        train_indices = (np.arange(1, nPoints*dhat+1)*sampling_mask-1).astype(int)
        test_indices = np.arange(0,nPoints*dhat)[train_indices == -1]#np.delete(np.arange(0, nPoints),train_indices)
        train_np = pd.read_csv('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Repo/VectorDiffusionMaps-master/data/data_samples_'+str(nPoints)+'_realization_'\
        +str(split+1)+'/projData_sd_'+str(sigma_noise)+'_nrel_'+str(rel+1)+'.csv',header = None).to_numpy()
        train_indices_torch = torch.from_numpy(np.expand_dims(train_indices[train_indices != -1],1).T).to('cuda:0' if (useGPU and torch.cuda.is_available()) \
        else 'cpu')
        test_indices_torch = torch.from_numpy(np.expand_dims(test_indices,1).T).to('cuda:0' if (useGPU and torch.cuda.is_available()) \
        else 'cpu')
        train_np[test_indices] = 0 #np.mean(train_np[train_indices_torch])
        # Data object instantiation
        # Clearly we have one data vector; train data are noisy, and the
        # validation/test performance is computed by evaluating the
        # denoising error
        data = dt._dataForSemisupervised()
        data.dataType = torch.float64
        data.nTrain = 1
        data.nValid = 1
        data.nTest = 1
        data.test_indices = test_indices
        data.test_indices_torch = test_indices_torch
        data.evaluate_only_test = 0 # If data are also sampled, this
            # attribute allows evaluating the model only on unsampled points
        data.samples = {}
        data.samples['train'] = {}
        data.samples['train']['signals'] = torch.from_numpy(train_np.T)
        data.samples['train']['targets'] = torch.from_numpy(train_np.T)
        data.samples['valid'] = {}
        data.samples['valid']['signals'] = torch.from_numpy(train_np.T)
        data.samples['valid']['targets'] = torch.from_numpy(data_np.T)
        data.samples['test'] = {}
        data.samples['test']['signals'] = torch.from_numpy(train_np.T)
        data.samples['test']['targets'] = torch.from_numpy(data_np.T)
        data.nPoints = nPoints
        data.expandDims()

#%%##################################################################
#                                                                   #
#                    MODELS INITIALIZATION                          #
#                                                                   #
#####################################################################

# This is the dictionary where we store the models (in a model.Model
# class, that is then passed to training).
modelsTNN = {}

# If a new model is to be created, it should be called for here.

if doPrint:
    print("Model initialization...", flush = True)

for thisModel in modelList:

    # Get the corresponding parameter dictionary
    modelDict = deepcopy(eval('model' + thisModel))
    modelDict['GSO'] = SLaplacian
    # and training options
    trainingOptsPerModel[thisModel] = deepcopy(trainingOptions)

    # Now, this dictionary has all the hyperparameters that we need to pass
    # to the architecture function, but it also has other keys that belong
    # to the more general model (like 'name' or 'device'), so we need to
    # extract them and save them in separate variables for future use.
    thisName = modelDict.pop('name')
    callArchit = modelDict.pop('archit')
    thisDevice = modelDict.pop('device')
    thisTrainer = modelDict.pop('trainer')
    thisEvaluator = modelDict.pop('evaluator')
    thisLossFunction = modelDict.pop('lossFunction')(train_indices_torch, reg_smooth,\
                            torch.Tensor(SLaplacian).to(thisDevice))

    # If more than one data realization is going to be carried out,
    # we are going to store all of those models separately, so that any of
    # them can be brought back and studied in detail.
    if nDataSplits > 1:
        thisName += '_num_sampl_'+str(nPoints)+'_data_real_'+str(split)+'_noise_real_'+str(rel)

    if doPrint:
        print("\tInitializing %s..." % thisName, end = ' ', flush = True)

    ##############
    # PARAMETERS #
    ##############

    #\\\ Optimizer options
    #   (If different from the default ones, change here.)
    thisOptimAlg = optimAlg
    thisLearningRate = learningRate
    thisBeta1 = beta1
    thisBeta2 = beta2

    ################
    # ARCHITECTURE #
    ################

    thisArchit = callArchit(**modelDict)
    thisArchit.to(thisDevice)
    # (Tensor.to() is not in-place, so reassign the moved tensor)
    train_indices_torch = train_indices_torch.to(thisDevice)

    #############
    # OPTIMIZER #
    #############

    if thisOptimAlg == 'ADAM':
        thisOptim = optim.Adam(thisArchit.parameters(),
                               lr = learningRate,
                               betas = (beta1, beta2),
                               weight_decay = l2pen)
    elif thisOptimAlg == 'SGD':
        thisOptim = optim.SGD(thisArchit.parameters(),
                              lr = learningRate)
    elif thisOptimAlg == 'RMSprop':
        thisOptim = optim.RMSprop(thisArchit.parameters(),
                                  lr = learningRate, alpha = beta1)

    #########
    # MODEL #
    #########

    # Create the model
    modelCreated = model.Model(thisArchit,
                               thisLossFunction,
                               thisOptim,
                               thisTrainer,
                               thisEvaluator,
                               thisDevice,
                               thisName,
                               saveDir)

    # Store it
    modelsTNN[thisName] = modelCreated

    # Write the main hyperparameters
    writeVarValues(varsFile,
                   {'name': thisName,
                    'thisOptimizationAlgorithm': thisOptimAlg,
                    'thisTrainer': thisTrainer,
                    'thisEvaluator': thisEvaluator,
                    'thisLearningRate': thisLearningRate,
                    'thisBeta1': thisBeta1,
                    'thisBeta2': thisBeta2})

    if doPrint:
        print("OK")

if doPrint:
    print("Model initialization... COMPLETE")

#%%##################################################################
#                                                                   #
#                           TRAINING                                #
#                                                                   #
#####################################################################

print("")

# We train each model separately
for thisModel in modelsTNN.keys():

    if doPrint:
        print("Training model %s..." % thisModel)

    # Remember that modelsTNN.keys() has the split numbering as well as the
    # name, while modelList has only the name.
    # So we need to map the specific model for this specific split with the
    # actual model name, since there are several variables that are indexed
    # by the model name (for instance, the training options, or the
    # dictionaries saving the loss values)
    for m in modelList:
        if m in thisModel:
            modelName = m

    # Identify the specific split number at training time
    if nDataSplits > 1:
        trainingOptsPerModel[modelName]['graphNo'] = split

    # Train the model
    thisTrainVars = modelsTNN[thisModel].train(data, nEpochs, batchSize,
                                               **trainingOptsPerModel[modelName])

    # Save the results under the corresponding model name (needed when
    # running multiple realizations)
    lossTrain[modelName][split,rel,:] = thisTrainVars['lossTrain']
    costTrain[modelName][split,rel,:] = thisTrainVars['costTrain']
    lossValid[modelName][split,rel,:] = thisTrainVars['lossValid']
    costValid[modelName][split,rel,:] = thisTrainVars['costValid']
    # Store best RMSE
    costBest[modelName][split,rel] = min(costValid[modelName][split,rel,:])

    # And we also need to save 'nBatches', but it is the same for all
    # models, so
    if doFigs:
        nBatches = thisTrainVars['nBatches']

# Compute and save the best performances as a DataFrame
meancostBest = {}
for thisModel in modelList:
    mask = np.logical_or(costBest[thisModel] == costBest[thisModel].max(keepdims = 1),
                         costBest[thisModel] == costBest[thisModel].min(keepdims = 1))
    tmp = ma.masked_array(costBest[thisModel], mask = mask)
    meancostBest[thisModel] = {"Mean": np.mean(tmp), "Std": np.std(tmp)}
pd.DataFrame(meancostBest).to_csv(saveDir+'/best_summary_'+str(nPoints)+'.csv')

#%%##################################################################
#                                                                   #
#                           PLOT                                    #
#                                                                   #
#####################################################################

# Finally, we might want to plot several quantities of interest
if doFigs and doSaveVars:

    ###################
    # DATA PROCESSING #
    ###################

    #\\\ FIGURES DIRECTORY:
    saveDirFigs = os.path.join(saveDir,'figs')
    # If it doesn't exist, create it.
    if not os.path.exists(saveDirFigs):
        os.makedirs(saveDirFigs)

    #\\\ COMPUTE STATISTICS:
    # The first thing to do is to transform those into a matrix with all the
    # realizations, so create the variables to save that.
    meanLossTrain = {}
    meanCostTrain = {}
    meanLossValid = {}
    meanCostValid = {}
    stdDevLossTrain = {}
    stdDevCostTrain = {}
    stdDevLossValid = {}
    stdDevCostValid = {}
    # Initialize the variables
    for thisModel in modelList:
        # Transform into np.array
        lossTrain[thisModel] = np.array(lossTrain[thisModel])
        costTrain[thisModel] = np.array(costTrain[thisModel])
        lossValid[thisModel] = np.array(lossValid[thisModel])
        costValid[thisModel] = np.array(costValid[thisModel])
        # Each one of these variables should be of shape
        #   nDataSplits x nNoiseSplits x nEpochs
        # And compute the statistics
        meanLossTrain[thisModel] = np.mean(np.mean(lossTrain[thisModel], axis = 1), axis = 0)
        meanCostTrain[thisModel] = np.mean(np.mean(costTrain[thisModel], axis = 1), axis = 0)
        meanLossValid[thisModel] = np.mean(np.mean(lossValid[thisModel], axis = 1), axis = 0)
        meanCostValid[thisModel] = np.mean(np.mean(costValid[thisModel], axis = 1), axis = 0)
        stdDevLossTrain[thisModel] = np.std(np.mean(lossTrain[thisModel], axis = 1), axis = 0)
        stdDevCostTrain[thisModel] = np.std(np.mean(costTrain[thisModel], axis = 1), axis = 0)
        stdDevLossValid[thisModel] = np.std(np.mean(lossValid[thisModel], axis = 1), axis = 0)
        stdDevCostValid[thisModel] = np.std(np.mean(costValid[thisModel], axis = 1), axis = 0)

    ####################
    # SAVE FIGURE DATA #
    ####################

    # And finally, we can plot.
    # But before, let's save the variables mean and stdDev, so that if we
    # don't like the plot, we can re-open them and re-plot them at will.
    # Pickle, first:
    varsPickle = {}
    varsPickle['nEpochs'] = nEpochs
    varsPickle['nBatches'] = nBatches
    varsPickle['meanLossTrain'] = meanLossTrain
    varsPickle['stdDevLossTrain'] = stdDevLossTrain
    varsPickle['meanCostTrain'] = meanCostTrain
    varsPickle['stdDevCostTrain'] = stdDevCostTrain
    varsPickle['meanLossValid'] = meanLossValid
    varsPickle['stdDevLossValid'] = stdDevLossValid
    varsPickle['meanCostValid'] = meanCostValid
    varsPickle['stdDevCostValid'] = stdDevCostValid
    with open(os.path.join(saveDirFigs,'figVars.pkl'), 'wb') as figVarsFile:
        pickle.dump(varsPickle, figVarsFile)

    ########
    # PLOT #
    ########

    # Compute the x-axis
    xTrain = np.arange(0, nEpochs * nBatches, xAxisMultiplierTrain)
    xValid = np.arange(0, nEpochs * nBatches, \
                       validationInterval*xAxisMultiplierValid)
    xTest = [pow(10, r) for r in np.linspace(-3, 0, num = 5)]

    # If we do not want to plot all the elements (to avoid overcrowded
    # plots) we need to recompute the x axis and take those elements
    # corresponding to the training steps we want to plot
    if xAxisMultiplierTrain > 1:
        # Actual selected samples
        selectSamplesTrain = xTrain
        # Go and fetch them
        for thisModel in modelList:
            meanLossTrain[thisModel] = meanLossTrain[thisModel]\
                                                    [selectSamplesTrain]
            stdDevLossTrain[thisModel] = stdDevLossTrain[thisModel]\
                                                        [selectSamplesTrain]
            meanCostTrain[thisModel] = meanCostTrain[thisModel]\
                                                    [selectSamplesTrain]
            stdDevCostTrain[thisModel] = stdDevCostTrain[thisModel]\
                                                        [selectSamplesTrain]
    # And same for the validation, if necessary.
    if xAxisMultiplierValid > 1:
        selectSamplesValid = np.arange(0, len(meanLossValid[thisModel]), \
                                       xAxisMultiplierValid)
        for thisModel in modelList:
            meanLossValid[thisModel] = meanLossValid[thisModel]\
                                                    [selectSamplesValid]
            stdDevLossValid[thisModel] = stdDevLossValid[thisModel]\
                                                        [selectSamplesValid]
            meanCostValid[thisModel] = meanCostValid[thisModel]\
                                                    [selectSamplesValid]
            stdDevCostValid[thisModel] = stdDevCostValid[thisModel]\
                                                        [selectSamplesValid]

    #\\\ LOSS (Training and validation) for EACH MODEL
    for key in meanLossTrain.keys():
        lossFig = plt.figure(figsize=(1.61*figSize, 1*figSize))
        plt.errorbar(xTrain, meanLossTrain[key], yerr = stdDevLossTrain[key],
                     color = '#01256E', linewidth = lineWidth,
                     marker = markerShape, markersize = markerSize)
        plt.errorbar(xValid, meanLossValid[key], yerr = stdDevLossValid[key],
                     color = '#95001A', linewidth = lineWidth,
                     marker = markerShape, markersize = markerSize)
        plt.ylabel(r'Loss')
        plt.xlabel(r'Training steps')
        plt.legend([r'Training', r'Validation'])
        plt.title(r'%s' % key)
        lossFig.savefig(os.path.join(saveDirFigs,'loss%s.pdf' % key),
                        bbox_inches = 'tight')

    plot_lag = 1
    # LOSS (training) for ALL MODELS
    allLossTrain = plt.figure(figsize=(1.61*figSize, 1*figSize))
    for key in meanLossTrain.keys():
        plt.errorbar(xTrain[plot_lag:], meanLossTrain[key][plot_lag:],
                     yerr = stdDevLossTrain[key][plot_lag:],
                     linewidth = lineWidth,
                     marker = markerShape, markersize = markerSize)
    plt.ylabel(r'Loss')
    plt.xlabel(r'Training steps')
    plt.legend(list(meanLossTrain.keys()))
    allLossTrain.savefig(os.path.join(saveDirFigs,'allLossTrain.pdf'),
                         bbox_inches = 'tight')

    #\\\ RMSE (Training and validation) for EACH MODEL
    for key in meanCostTrain.keys():
        costFig = plt.figure(figsize=(1.61*figSize, 1*figSize))
        plt.errorbar(xTrain[plot_lag:], meanCostTrain[key][plot_lag:],
                     yerr = stdDevCostTrain[key][plot_lag:],
                     color = '#01256E', linewidth = lineWidth,
                     marker = markerShape, markersize =
markerSize) plt.errorbar(xValid[plot_lag:], meanCostValid[key][plot_lag:], yerr = stdDevCostValid[key][plot_lag:], color = '#95001A', linewidth = lineWidth, marker = markerShape, markersize = markerSize) plt.ylabel(r'RMSE') plt.xlabel(r'Training steps') plt.legend([r'Training', r'Validation']) plt.title(r'%s' % key) costFig.savefig(os.path.join(saveDirFigs,'cost%s.pdf' % key), bbox_inches = 'tight') # RMSE (validation) for ALL MODELS allCostValidFig = plt.figure(figsize=(1.61*figSize, 1*figSize)) for key in meanCostValid.keys(): plt.errorbar(xValid[plot_lag:], meanCostValid[key][plot_lag:], yerr = stdDevCostValid[key][plot_lag:], linewidth = lineWidth, marker = markerShape, markersize = markerSize) plt.ylabel(r'RMSE') plt.xlabel(r'Training steps') plt.legend(list(meanCostValid.keys())) allCostValidFig.savefig(os.path.join(saveDirFigs,'allCostValid.pdf'), bbox_inches = 'tight') # Finish measuring time endRunTime = datetime.datetime.now() totalRunTime = abs(endRunTime - startRunTime) totalRunTimeH = int(divmod(totalRunTime.total_seconds(), 3600)[0]) totalRunTimeM, totalRunTimeS = \ divmod(totalRunTime.total_seconds() - totalRunTimeH * 3600., 60) totalRunTimeM = int(totalRunTimeM) if doPrint: print(" ") print("Simulation started: %s" %startRunTime.strftime("%Y/%m/%d %H:%M:%S")) print("Simulation ended: %s" % endRunTime.strftime("%Y/%m/%d %H:%M:%S")) print("Total time: %dh %dm %.2fs" % (totalRunTimeH, totalRunTimeM, totalRunTimeS)) # And save this info into the .txt file as well with open(varsFile, 'a+') as file: file.write("Simulation started: %s\n" % startRunTime.strftime("%Y/%m/%d %H:%M:%S")) file.write("Simulation ended: %s\n" % endRunTime.strftime("%Y/%m/%d %H:%M:%S")) file.write("Total time: %dh %dm %.2fs" % (totalRunTimeH, totalRunTimeM, totalRunTimeS))
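# A small, self-contained sketch (hypothetical sizes) of the sampling-index
# trick used in the data loading above: multiplying the 1-based index range
# by a 0/1 mask and subtracting 1 maps retained entries to their 0-based
# index and dropped entries to -1.
_n, _dhat = 4, 2
_demo_mask = np.array([1, 0, 1, 1, 0, 1, 1, 0])
_demo_train_idx = (np.arange(1, _n*_dhat+1)*_demo_mask - 1).astype(int)
print(_demo_train_idx)                          # [ 0 -1  2  3 -1  5  6 -1]
print(_demo_train_idx[_demo_train_idx != -1])   # retained (training) indices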
38,987
46.430657
177
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/__init__.py
0
0
0
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/modules/architecturesTime.py
# 2019/12/31~ # Fernando Gama, fgama@seas.upenn.edu # Luana Ruiz, rubruiz@seas.upenn.edu # Kate Tolstaya, eig@seas.upenn.edu """ architecturesTime.py Architectures module Definition of GNN architectures. The basic idea of these architectures is that the data comes in the form {(S_t, x_t)} where the shift operator as well as the signal change with time, and where each training point consists of a trajectory. Unlike architectures.py where the shift operator S is fixed (although it can be changed after the architectures has been initialized) and the training set consist of a set of {x_b} with b=1,...,B for a total of B samples, here the training set is assumed to be a trajectory, and to include a different shift operator for each sample {(S_t, x_t)_{t=1}^{T}}_{b=1,...,B}. Also, all implementations consider a unit delay exchange (i.e. the S_t and x_t values get delayed by one unit of time for each neighboring exchange). LocalGNN_DB: implements the selection GNN architecture by means of local operations only GraphRecurrentNN_DB: implements the GRNN architecture AggregationGNN_DB: implements the aggregation GNN architecture """ import numpy as np import torch import torch.nn as nn import alegnnss.utils.graphML as gml zeroTolerance = 1e-9 # Absolute values below this number are considered zero. class LocalGNN_DB(nn.Module): """ LocalGNN_DB: implement the local GNN architecture where all operations are implemented locally, i.e. by means of neighboring exchanges only. More specifically, it has graph convolutional layers, but the readout layer, instead of being an MLP for the entire graph signal, it is a linear combination of the features at each node. It considers signals that change in time with batch GSOs. Initialization: LocalGNN_DB(dimNodeSignals, nFilterTaps, bias, # Graph Filtering nonlinearity, # Nonlinearity dimReadout, # Local readout layer dimEdgeFeatures) # Structure Input: /** Graph convolutional layers **/ dimNodeSignals (list of int): dimension of the signals at each layer (i.e. number of features at each node, or size of the vector supported at each node) nFilterTaps (list of int): number of filter taps on each layer (i.e. nFilterTaps-1 is the extent of neighborhoods that are reached, for example K=2 is info from the 1-hop neighbors) bias (bool): include bias after graph filter on every layer >> Obs.: dimNodeSignals[0] is the number of features (the dimension of the node signals) of the data, where dimNodeSignals[l] is the dimension obtained at the output of layer l, l=1,...,L. Therefore, for L layers, len(dimNodeSignals) = L+1. Slightly different, nFilterTaps[l] is the number of filter taps for the filters implemented at layer l+1, thus len(nFilterTaps) = L. /** Activation function **/ nonlinearity (torch.nn): module from torch.nn non-linear activations /** Readout layers **/ dimReadout (list of int): number of output hidden units of a sequence of fully connected layers applied locally at each node (i.e. no exchange of information involved). 
/** Graph structure **/ dimEdgeFeatures (int): number of edge features Output: nn.Module with a Local GNN architecture with the above specified characteristics that considers time-varying batch GSO and delayed signals Forward call: LocalGNN_DB(x, S) Input: x (torch.tensor): input data of shape batchSize x timeSamples x dimFeatures x numberNodes GSO (torch.tensor): graph shift operator; shape batchSize x timeSamples (x dimEdgeFeatures) x numberNodes x numberNodes Output: y (torch.tensor): output data after being processed by the GNN; batchSize x timeSamples x dimReadout[-1] x numberNodes Other methods: y, yGNN = .splitForward(x, S): gives the output of the entire GNN y, which has shape batchSize x timeSamples x dimReadout[-1] x numberNodes, as well as the output of all the GNN layers (i.e. before the readout layers), yGNN of shape batchSize x timeSamples x dimFeatures[-1] x numberNodes. This can be used to isolate the effect of the graph convolutions from the effect of the readout layer. y = .singleNodeForward(x, S, nodes): outputs the value of the last layer at a single node. x is the usual input of shape batchSize x timeSamples x dimFeatures x numberNodes. nodes is either a single node (int) or a collection of nodes (list or numpy.array) of length batchSize, where for each element in the batch, we get the output at the single specified node. The output y is of shape batchSize x timeSamples x dimReadout[-1]. """ def __init__(self, # Graph filtering dimNodeSignals, nFilterTaps, bias, # Nonlinearity nonlinearity, # MLP in the end dimReadout, # Structure dimEdgeFeatures): # Initialize parent: super().__init__() # dimNodeSignals should be a list and of size 1 more than nFilter taps. assert len(dimNodeSignals) == len(nFilterTaps) + 1 # Store the values (using the notation in the paper): self.L = len(nFilterTaps) # Number of graph filtering layers self.F = dimNodeSignals # Features self.K = nFilterTaps # Filter taps self.E = dimEdgeFeatures # Number of edge features self.bias = bias # Boolean # Store the rest of the variables self.sigma = nonlinearity self.dimReadout = dimReadout # And now, we're finally ready to create the architecture: #\\\ Graph filtering layers \\\ # OBS.: We could join this for with the one before, but we keep separate # for clarity of code. gfl = [] # Graph Filtering Layers for l in range(self.L): #\\ Graph filtering stage: gfl.append(gml.GraphFilter_DB(self.F[l], self.F[l+1], self.K[l], self.E, self.bias)) #\\ Nonlinearity gfl.append(self.sigma()) # And now feed them into the sequential self.GFL = nn.Sequential(*gfl) # Graph Filtering Layers #\\\ MLP (Fully Connected Layers) \\\ fc = [] if len(self.dimReadout) > 0: # Maybe we don't want to readout anything # The first layer has to connect whatever was left of the graph # filtering stage to create the number of features required by # the readout layer fc.append(nn.Linear(self.F[-1], dimReadout[0], bias = self.bias)) # The last linear layer cannot be followed by nonlinearity, because # usually, this nonlinearity depends on the loss function (for # instance, if we have a classification problem, this nonlinearity # is already handled by the cross entropy loss or we add a softmax.) for l in range(len(dimReadout)-1): # Add the nonlinearity because there's another linear layer # coming fc.append(self.sigma()) # And add the linear layer fc.append(nn.Linear(dimReadout[l], dimReadout[l+1], bias = self.bias)) # And we're done self.Readout = nn.Sequential(*fc) # so we finally have the architecture. 
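    # Usage sketch (hypothetical shapes, following the class docstring above):
    #   net = LocalGNN_DB([F0, F1], [K1], True, nn.Tanh, [G], 1)
    #   y = net(x, S)  # x: B x T x F0 x N, S: B x T x N x N -> B x T x G x N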
    def splitForward(self, x, S):

        # Check the dimensions of the input
        #   S: B x T (x E) x N x N
        #   x: B x T x F[0] x N
        assert len(S.shape) == 4 or len(S.shape) == 5
        if len(S.shape) == 4:
            S = S.unsqueeze(2)
        B = S.shape[0]
        T = S.shape[1]
        assert S.shape[2] == self.E
        N = S.shape[3]
        assert S.shape[4] == N

        assert len(x.shape) == 4
        assert x.shape[0] == B
        assert x.shape[1] == T
        assert x.shape[2] == self.F[0]
        assert x.shape[3] == N

        # Add the GSO at each layer
        for l in range(self.L):
            self.GFL[2*l].addGSO(S)
        # Let's call the graph filtering layer
        yGFL = self.GFL(x)
        # Change the order, for the readout
        y = yGFL.permute(0, 1, 3, 2) # B x T x N x F[-1]
        # And, feed it into the Readout layer
        y = self.Readout(y) # B x T x N x dimReadout[-1]
        # Reshape and return
        return y.permute(0, 1, 3, 2), yGFL
        # B x T x dimReadout[-1] x N, B x T x dimFeatures[-1] x N

    def forward(self, x, S):

        # Most of the time, we just need the actual, last output. But, since
        # in this case, we also want to compare with the output of the GNN
        # itself, we need to create this other forward function that takes
        # both outputs (the GNN and the MLP) and returns only the MLP output
        # in the proper forward function.
        output, _ = self.splitForward(x, S)

        return output

    def singleNodeForward(self, x, S, nodes):

        # x is of shape B x T x F[0] x N
        batchSize = x.shape[0]
        N = x.shape[3]

        # nodes is either an int, or a list/np.array of ints of size B
        assert type(nodes) is int \
                or type(nodes) is list \
                or type(nodes) is np.ndarray

        # Let us start by building the selection matrix
        # This selection matrix has to be a matrix of shape
        #   B x 1 x N[-1] x 1
        # so that when multiplying with the output of the forward, we get a
        #   B x T x dimReadout[-1] x 1
        # and we just squeeze the last dimension

        # TODO: The big question here is if multiplying by a matrix is
        # faster than doing torch.index_select

        # Let's always work with numpy arrays to make it easier.
        if type(nodes) is int:
            # Change the node number to accommodate the new order
            nodes = self.order.index(nodes)
            # If it's int, make it a list and an array
            nodes = np.array([nodes], dtype = int)
            # And repeat for the number of batches
            nodes = np.tile(nodes, batchSize)
        if type(nodes) is list:
            newNodes = [self.order.index(n) for n in nodes]
            nodes = np.array(newNodes, dtype = int)
        elif type(nodes) is np.ndarray:
            newNodes = np.array([np.where(np.array(self.order) == n)[0][0] \
                    for n in nodes])
            nodes = newNodes.astype(int)
        # Now, nodes is an int-valued np.ndarray with shape batchSize

        # Build the selection matrix (note the singleton second dimension)
        selectionMatrix = np.zeros([batchSize, 1, N, 1])
        selectionMatrix[np.arange(batchSize), 0, nodes, 0] = 1.
        # And convert it to a tensor
        selectionMatrix = torch.tensor(selectionMatrix,
                                       dtype = x.dtype,
                                       device = x.device)

        # Now compute the output
        y = self.forward(x, S)
        # This output is of size B x T x dimReadout[-1] x N

        # Multiply the output
        y = torch.matmul(y, selectionMatrix) # B x T x dimReadout[-1] x 1

        # Squeeze the last dimension and return
        return y.squeeze(3)

class GraphRecurrentNN_DB(nn.Module):
    """
    GraphRecurrentNN_DB: implements the GRNN architecture on a time-varying
    GSO batch and delayed signals. It is a single-layer GRNN and the hidden
    state is initialized by drawing from a standard Gaussian.
Initialization: GraphRecurrentNN_DB(dimInputSignals, dimOutputSignals, dimHiddenSignals, nFilterTaps, bias, # Filtering nonlinearityHidden, nonlinearityOutput, nonlinearityReadout, # Nonlinearities dimReadout, # Local readout layer dimEdgeFeatures) # Structure Input: /** Graph convolutions **/ dimInputSignals (int): dimension of the input signals dimOutputSignals (int): dimension of the output signals dimHiddenSignals (int): dimension of the hidden state nFilterTaps (list of int): a list with two elements, the first one is the number of filter taps for the filters in the hidden state equation, the second one is the number of filter taps for the filters in the output bias (bool): include bias after graph filter on every layer /** Activation functions **/ nonlinearityHidden (torch.function): the nonlinearity to apply when computing the hidden state; it has to be a torch function, not a nn.Module nonlinearityOutput (torch.function): the nonlinearity to apply when computing the output signal; it has to be a torch function, not a nn.Module. nonlinearityReadout (nn.Module): the nonlinearity to apply at the end of the readout layer (if the readout layer has more than one layer); this one has to be a nn.Module, instead of just a torch function. /** Readout layer **/ dimReadout (list of int): number of output hidden units of a sequence of fully connected layers applied locally at each node (i.e. no exchange of information involved). /** Graph structure **/ dimEdgeFeatures (int): number of edge features Output: nn.Module with a GRNN architecture with the above specified characteristics that considers time-varying batch GSO and delayed signals Forward call: GraphRecurrentNN_DB(x, S) Input: x (torch.tensor): input data of shape batchSize x timeSamples x dimInputSignals x numberNodes GSO (torch.tensor): graph shift operator; shape batchSize x timeSamples (x dimEdgeFeatures) x numberNodes x numberNodes Output: y (torch.tensor): output data after being processed by the GRNN; batchSize x timeSamples x dimReadout[-1] x numberNodes Other methods: y, yGNN = .splitForward(x, S): gives the output of the entire GRNN y, which has shape batchSize x timeSamples x dimReadout[-1] x numberNodes, as well as the output of the GRNN (i.e. before the readout layers), yGNN of shape batchSize x timeSamples x dimInputSignals x numberNodes. This can be used to isolate the effect of the graph convolutions from the effect of the readout layer. y = .singleNodeForward(x, S, nodes): outputs the value of the last layer at a single node. x is the usual input of shape batchSize x timeSamples x dimInputSignals x numberNodes. nodes is either a single node (int) or a collection of nodes (list or numpy.array) of length batchSize, where for each element in the batch, we get the output at the single specified node. The output y is of shape batchSize x timeSamples x dimReadout[-1]. 
""" def __init__(self, # Graph filtering dimInputSignals, dimOutputSignals, dimHiddenSignals, nFilterTaps, bias, # Nonlinearities nonlinearityHidden, nonlinearityOutput, nonlinearityReadout, # nn.Module # Local MLP in the end dimReadout, # Structure dimEdgeFeatures): # Initialize parent: super().__init__() # A list of two int, one for the number of filter taps (the computation # of the hidden state has the same number of filter taps) assert len(nFilterTaps) == 2 # Store the values (using the notation in the paper): self.F = dimInputSignals # Number of input features self.G = dimOutputSignals # Number of output features self.H = dimHiddenSignals # NUmber of hidden features self.K = nFilterTaps # Filter taps self.E = dimEdgeFeatures # Number of edge features self.bias = bias # Boolean # Store the rest of the variables self.sigma = nonlinearityHidden self.rho = nonlinearityOutput self.nonlinearityReadout = nonlinearityReadout self.dimReadout = dimReadout #\\\ Hidden State RNN \\\ # Create the layer that generates the hidden state, and generate z0 self.hiddenState = gml.HiddenState_DB(self.F, self.H, self.K[0], nonlinearity = self.sigma, E = self.E, bias = self.bias) #\\\ Output Graph Filters \\\ self.outputState = gml.GraphFilter_DB(self.H, self.G, self.K[1], E = self.E, bias = self.bias) #\\\ MLP (Fully Connected Layers) \\\ fc = [] if len(self.dimReadout) > 0: # Maybe we don't want to readout anything # The first layer has to connect whatever was left of the graph # filtering stage to create the number of features required by # the readout layer fc.append(nn.Linear(self.G, dimReadout[0], bias = self.bias)) # The last linear layer cannot be followed by nonlinearity, because # usually, this nonlinearity depends on the loss function (for # instance, if we have a classification problem, this nonlinearity # is already handled by the cross entropy loss or we add a softmax.) for l in range(len(dimReadout)-1): # Add the nonlinearity because there's another linear layer # coming fc.append(self.nonlinearityReadout()) # And add the linear layer fc.append(nn.Linear(dimReadout[l], dimReadout[l+1], bias = self.bias)) # And we're done self.Readout = nn.Sequential(*fc) # so we finally have the architecture. def splitForward(self, x, S): # Check the dimensions of the input # S: B x T (x E) x N x N # x: B x T x F[0] x N assert len(S.shape) == 4 or len(S.shape) == 5 if len(S.shape) == 4: S = S.unsqueeze(2) B = S.shape[0] T = S.shape[1] assert S.shape[2] == self.E N = S.shape[3] assert S.shape[4] == N assert len(x.shape) == 4 assert x.shape[0] == B assert x.shape[1] == T assert x.shape[2] == self.F assert x.shape[3] == N # This can be generated here or generated outside of here, not clear yet # what's the most coherent option z0 = torch.randn((B, self.H, N), device = x.device) # Add the GSO for each graph filter self.hiddenState.addGSO(S) self.outputState.addGSO(S) # Compute the trajectory of hidden states z, _ = self.hiddenState(x, z0) # Compute the output trajectory from the hidden states yOut = self.outputState(z) yOut = self.rho(yOut) # Don't forget the nonlinearity! # B x T x G x N # Change the order, for the readout y = yOut.permute(0, 1, 3, 2) # B x T x N x G # And, feed it into the Readout layer y = self.Readout(y) # B x T x N x dimReadout[-1] # Reshape and return return y.permute(0, 1, 3, 2), yOut # B x T x dimReadout[-1] x N, B x T x dimFeatures[-1] x N def forward(self, x, S): # Most of the times, we just need the actual, last output. 
        # Most of the time, we just need the actual, last output. But, since
        # in this case, we also want to compare with the output of the GNN
        # itself, we need to create this other forward function that takes
        # both outputs (the GNN and the MLP) and returns only the MLP output
        # in the proper forward function.
        output, _ = self.splitForward(x, S)

        return output

    def singleNodeForward(self, x, S, nodes):

        # x is of shape B x T x F[0] x N
        batchSize = x.shape[0]
        N = x.shape[3]

        # nodes is either an int, or a list/np.array of ints of size B
        assert type(nodes) is int \
                or type(nodes) is list \
                or type(nodes) is np.ndarray

        # Let us start by building the selection matrix
        # This selection matrix has to be a matrix of shape
        #   B x 1 x N[-1] x 1
        # so that when multiplying with the output of the forward, we get a
        #   B x T x dimReadout[-1] x 1
        # and we just squeeze the last dimension

        # TODO: The big question here is if multiplying by a matrix is
        # faster than doing torch.index_select

        # Let's always work with numpy arrays to make it easier.
        if type(nodes) is int:
            # Change the node number to accommodate the new order
            nodes = self.order.index(nodes)
            # If it's int, make it a list and an array
            nodes = np.array([nodes], dtype = int)
            # And repeat for the number of batches
            nodes = np.tile(nodes, batchSize)
        if type(nodes) is list:
            newNodes = [self.order.index(n) for n in nodes]
            nodes = np.array(newNodes, dtype = int)
        elif type(nodes) is np.ndarray:
            newNodes = np.array([np.where(np.array(self.order) == n)[0][0] \
                    for n in nodes])
            nodes = newNodes.astype(int)
        # Now, nodes is an int-valued np.ndarray with shape batchSize

        # Build the selection matrix (note the singleton second dimension)
        selectionMatrix = np.zeros([batchSize, 1, N, 1])
        selectionMatrix[np.arange(batchSize), 0, nodes, 0] = 1.
        # And convert it to a tensor
        selectionMatrix = torch.tensor(selectionMatrix,
                                       dtype = x.dtype,
                                       device = x.device)

        # Now compute the output
        y = self.forward(x, S)
        # This output is of size B x T x dimReadout[-1] x N

        # Multiply the output
        y = torch.matmul(y, selectionMatrix) # B x T x dimReadout[-1] x 1

        # Squeeze the last dimension and return
        return y.squeeze(3)

class AggregationGNN_DB(nn.Module):
    """
    AggregationGNN_DB: implement the aggregation GNN architecture with
    delayed time structure and batch GSOs

    Initialization:

        Input:
            /** Regular convolutional layers **/
            dimFeatures (list of int): number of features on each layer
            nFilterTaps (list of int): number of filter taps on each layer
            bias (bool): include bias after graph filter on every layer
            >> Obs.: dimFeatures[0] is the number of features (the dimension
                of the node signals) of the data, where dimFeatures[l] is the
                dimension obtained at the output of layer l, l=1,...,L.
                Therefore, for L layers, len(dimFeatures) = L+1. Slightly
                different, nFilterTaps[l] is the number of filter taps for
                the filters implemented at layer l+1, thus
                len(nFilterTaps) = L.

            /** Activation function **/
            nonlinearity (torch.nn): module from torch.nn non-linear
                activations

            /** Pooling **/
            poolingFunction (torch.nn): module from torch.nn pooling layers
            poolingSize (list of int): size of the neighborhood to compute
                the summary from at each layer

            /** Readout layer **/
            dimReadout (list of int): number of output hidden units of a
                sequence of fully connected layers after the filters have
                been applied

            /** Graph structure **/
            dimEdgeFeatures (int): number of edge features
            nExchanges (int): maximum number of neighborhood exchanges

        Output:
            nn.Module with an Aggregation GNN architecture with the above
            specified characteristics.
    Forward call:

        Input:
            x (torch.tensor): input data of shape
                batchSize x timeSamples x dimFeatures x numberNodes
            GSO (torch.tensor): graph shift operator of shape
                batchSize x timeSamples (x dimEdgeFeatures)
                                                x numberNodes x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the
                aggregation GNN; shape:
                batchSize x timeSamples x dimReadout[-1] x nNodes
    """

    def __init__(self,
                 # Graph filtering
                 dimFeatures, nFilterTaps, bias,
                 # Nonlinearity
                 nonlinearity,
                 # Pooling
                 poolingFunction, poolingSize,
                 # MLP in the end
                 dimReadout,
                 # Structure
                 dimEdgeFeatures, nExchanges):
        super().__init__()

        # dimFeatures should be a list of size 1 more than nFilterTaps.
        assert len(dimFeatures) == len(nFilterTaps) + 1
        # poolingSize also has to be a list of the same size
        assert len(poolingSize) == len(nFilterTaps)

        # Check whether the GSO has features or not. After that, always
        # handle it as a matrix of dimension E x N x N.

        # Store the values (using the notation in the paper):
        self.L = len(nFilterTaps) # Number of convolutional layers
        self.F = dimFeatures # Features
        self.K = nFilterTaps # Filter taps
        self.E = dimEdgeFeatures # Dimension of edge features
        self.bias = bias # Boolean
        self.sigma = nonlinearity
        self.rho = poolingFunction
        self.alpha = poolingSize # This acts as both the kernel_size and the
            # stride, so there is no overlap on the elements over which we
            # take the maximum (this is how it works as default)
        self.dimReadout = dimReadout
        self.nExchanges = nExchanges # Number of exchanges

        # Let's also record the number of nodes on each layer (L+1, actually)
        self.N = [self.nExchanges+1] # If we have one exchange, then we have
            # two entries in the collected vector (the zeroth exchange and
            # the first exchange)
        for l in range(self.L):
            # In pyTorch, the convolution is a valid correlation, instead of
            # a full one, which means that the output is smaller than the
            # input; precisely, the output is smaller by K-1 (see the
            # nn.Conv1d documentation)
            outConvN = self.N[l] - (self.K[l] - 1) # Size of the conv output
            # The next equation to compute the number of nodes is obtained
            # from the maxPool1d help in the pytorch documentation
            self.N += [int(
                        (outConvN - (self.alpha[l]-1) - 1)/self.alpha[l] + 1
                          )]
            # int() on a float always applies floor()

        # And now, we're finally ready to create the architecture:
        #\\\ Graph filtering layers \\\
        # OBS.: We could join this for with the one before, but we keep them
        # separate for clarity of code.
        convl = [] # Convolutional Layers
        for l in range(self.L):
            #\\ Graph filtering stage:
            convl.append(nn.Conv1d(self.F[l]*self.E,
                                   self.F[l+1]*self.E,
                                   self.K[l],
                                   bias = self.bias))
            #\\ Nonlinearity
            convl.append(self.sigma())
            #\\ Pooling
            convl.append(self.rho(self.alpha[l]))
        # And now feed them into the sequential
        self.ConvLayers = nn.Sequential(*convl) # Convolutional layers

        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimReadout) > 0: # Maybe we don't want any MLP
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            dimInputReadout = self.N[-1] * self.F[-1] * self.E
            # (i.e., we have N[-1] nodes left, each one described by F[-1]
            # features which means this will be flattened into a vector of
            # size N[-1]*F[-1])
            fc.append(nn.Linear(dimInputReadout, dimReadout[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by nonlinearity,
            # because usually, this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss or
            # we add a softmax.)
for l in range(len(dimReadout)-1): # Add the nonlinearity because there's another linear layer # coming fc.append(self.sigma()) # And add the linear layer fc.append(nn.Linear(dimReadout[l], dimReadout[l+1], bias = self.bias)) # And we're done within each node self.Readout = nn.Sequential(*fc) def forward(self, x, S): # Check the dimensions of the input first # S: B x T (x E) x N x N # x: B x T x F[0] x N assert len(S.shape) == 4 or len(S.shape) == 5 if len(S.shape) == 4: # Then S is B x T x N x N S = S.unsqueeze(2) # And we want it B x T x 1 x N x N B = S.shape[0] T = S.shape[1] assert S.shape[2] == self.E N = S.shape[3] assert S.shape[4] == N # Check the dimensions of x assert len(x.shape) == 4 assert x.shape[0] == B assert x.shape[1] == T assert x.shape[2] == self.F[0] assert x.shape[3] == N # Now we need to do the exchange to build the aggregation vector at # every node # z has to be of shape: B x T x F[0] x (nExchanges+1) x N # to be fed into conv1d it has to be (B*T*N) x F[0] x (nExchanges+1) # This vector is built by multiplying x with S, so we need to adapt x # to have a dimension that can be multiplied by S (we need to add the # E dimension) x = x.reshape([B, T, 1, self.F[0], N]).repeat(1, 1, self.E, 1, 1) # The first element of z is, precisely, this element (no exchanges) z = x.reshape([B, T, 1, self.E, self.F[0], N]) # The new dimension is # the one that accumulates the nExchanges # Now we start with the exchanges (multiplying by S) for k in range(1, self.nExchanges+1): # Across dim = 1 (time) we need to "displace the dimension down", # i.e. where it used to be t = 1 we now need it to be t=0 and so # on. For t=0 we add a "row" of zeros. x, _ = torch.split(x, [T-1, 1], dim = 1) # The second part is the most recent time instant which we do # not need anymore (it's used only once for the first value of K) # Now, we need to add a "row" of zeros at the beginning (for t = 0) zeroRow = torch.zeros(B, 1, self.E, self.F[0], N, dtype=x.dtype,device=x.device) x = torch.cat((zeroRow, x), dim = 1) # And now we multiply with S x = torch.matmul(x, S) # Add the dimension along K xS = x.reshape(B, T, 1, self.E, self.F[0], N) # And concatenate it with z z = torch.cat((z, xS), dim = 2) # Now, we have finally built the vector of delayed aggregations. This # vector has shape B x T x (nExchanges+1) x E x F[0] x N # To get rid of the edge features (dim E) we just sum through that # dimension z = torch.sum(z, dim = 3) # B x T x (nExchanges+1) x F[0] x N # It is, essentially, a matrix of N x (nExchanges+1) for each feature, # for each time instant, for each batch. # NOTE1: This is inconsequential if self.E = 1 (most of the cases) # NOTE2: Alternatively, not to lose information, we could contatenate # dim E after dim F[0] to get E*F[0] features; this increases the # dimensionsonality of the data (which could be fine) but need to be # adapted so that the first input in the conv1d takes self.E*self.F[0] # features instead of just self.F[0] # The operation conv1d takes tensors of shape # batchSize x nFeatures x nEntries # This means that the convolution takes place along nEntries with # a summation along nFeatures, for each of the elements along # batchSize. So we need to put (nExchanges+1) last since it is along # those elements that we want the convolution to be performed, and # we need to put F[0] as nFeatures since there is where we want the # features to be combined. The other three dimensions are different # elements (agents, time, batch) to which the convolution needs to be # applied. 
# Therefore, we want a vector z of shape # (B*T*N) x F[0] x (nExchanges+1) # Let's get started with this reorganization # First, we join B*T*N. Because we always join the last dimensions, # we need to permute first to put B, T, N as the last dimensions. # z: B x T x (nExchanges+1) x F[0] x N z = z.permute(3, 2, 0, 1, 4) # F[0] x (nExchanges+1) x B x T x N z = z.reshape([self.F[0], self.nExchanges+1, B*T*N]) # F[0] x (nExchanges+1) x B*T*N # Second, we put it back at the beginning z = z.permute(2, 0, 1) # B*T*N x F[0] x (nExchanges+1) # Let's call the convolutional layers y = self.ConvLayers(z) # B*T*N x F[-1] x N[-1] # Flatten the output y = y.reshape([B*T*N, self.F[-1] * self.N[-1]]) # And, feed it into the per node readout layers y = self.Readout(y) # (B*T*N) x dimReadout[-1] # And now we have to unpack it back for every node, i.e. to get it # back to shape B x T x N x dimReadout[-1] y = y.permute(1, 0) # dimReadout[-1] x (B*T*N) y = y.reshape(self.dimReadout[-1], B, T, N) # And finally put it back to the usual B x T x F x N y = y.permute(1, 2, 0, 3) return y def to(self, device): # Because only the filter taps and the weights are registered as # parameters, when we do a .to(device) operation it does not move the # GSOs. So we need to move them ourselves. # Call the parent .to() method (to move the registered parameters) super().to(device)
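
# --- Editor's aside (a sketch, not part of the original file) ---------------
# The TODO in singleNodeForward asks whether the dense selection-matrix
# multiply beats torch.index_select. A minimal gather-based alternative,
# assuming y has shape B x T x F x N and `nodes` is already a reordered
# LongTensor with one node index per batch element:

import torch

def select_single_node(y, nodes):
    # y: B x T x F x N; nodes: LongTensor of shape (B,)
    idx = nodes.view(-1, 1, 1, 1).expand(-1, y.shape[1], y.shape[2], 1)
    return torch.gather(y, 3, idx).squeeze(3)  # B x T x F

# Compared with the matmul version, gather avoids materializing the
# B x 1 x N x 1 selection matrix and the extra batched matrix product.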
36,148
45.167305
80
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/modules/loss.py
# 2021/03/04~
# Fernando Gama, fgama@seas.upenn.edu
# Luana Ruiz, rubruiz@seas.upenn.edu
"""
loss.py Loss functions

adaptExtraDimensionLoss: wrapper that handles extra dimensions
F1Score: loss function corresponding to 1 - F1 score
"""

import torch
import torch.nn as nn

# An arbitrary loss function handling penalties needs to have the following
# conditions
# .penaltyList attribute listing the names of the penalties
# .nPenalties attribute is an int with the number of penalties
# Forward function has to output the actual loss, the main loss (with no
# penalties), and a dictionary with the value of each of the penalties.
# This will be standard procedure for all loss functions that have penalties.
# Note: The existence of a penalty will be signaled by an attribute in the
# model

class MSE_semisup(nn.Module):
    """
    MSE_semisup: function that implements a masked MSE loss

    Initialization:

        Input:
            idx: the mask is passed as a vector of indices

    Forward:

        Input:
            estimate (torch.tensor): output of the GNN
            target (torch.tensor): target representation
    """

    def __init__(self, idx = None, mult = None, SS = None):
        super(MSE_semisup, self).__init__()
        self.idx = idx
        self.mult = mult
        self.SS = SS

    def forward(self, output, target):
        if output.shape != target.shape:
            target = target.flatten()
        if self.idx is not None:
            gat1 = torch.gather(output, 1, self.idx)
            gat2 = torch.gather(target, 1, self.idx)
        else:
            gat1 = output
            gat2 = target
        N = max(gat2.shape)
        delta = gat1 - gat2
        loss_ = torch.sum(delta**2)/N
        if self.mult is not None:
            # Penalty: a Laplacian-style quadratic form on the output
            pen = self.mult*torch.trace(
                     torch.matmul(torch.matmul(output, self.SS), output.t()))
        else:
            pen = 0
        return loss_ + pen

class adaptExtraDimensionLoss(nn.modules.loss._Loss):
    """
    adaptExtraDimensionLoss: wrapper that handles extra dimensions

    Some loss functions take vectors as inputs while others take scalars; if
    we input a one-dimensional vector instead of a scalar, although virtually
    the same, the loss function could complain.

    The output of the GNNs is, by default, a vector. And sometimes we want it
    to still be a vector (i.e. crossEntropyLoss where we output a one-hot
    vector) and sometimes we want it to be treated as a scalar
    (i.e. MSELoss). Since we still have a single training function to train
    multiple models, we do not know whether we will have a scalar or a vector.
    So this wrapper adapts the input to the loss function seamlessly.

    Eventually, more loss functions could be added to the code below to
    better handle their dimensions.

    Initialization:

        Input:
            lossFunction (torch.nn loss function): desired loss function
            arguments: arguments required to initialize the loss function
            >> Obs.: The loss function gets initialized as well

    Forward:

        Input:
            estimate (torch.tensor): output of the GNN
            target (torch.tensor): target representation
    """

    # When we want to compare scalars, we will have a B x 1 output of the GNN,
    # since the number of features is always there. However, most of the
    # scalar comparative functions take just a B vector, so we have an extra
    # 1 dim that raises a warning. This container will simply get rid of it.
    # This allows changing the loss from crossEntropy (class based, expecting
    # a B x C input) to MSE or SmoothL1Loss (expecting a B input)

    def __init__(self, lossFunction, *args):
        # The second argument is optional and it is if there are any extra
        # arguments with which we want to initialize the loss
        super().__init__()
        if len(args) > 0:
            self.loss = lossFunction(*args) # Initialize loss function
        else:
            self.loss = lossFunction()

    def forward(self, estimate, target):
        # What we're doing here is checking what kind of loss it is and
        # what kind of reshape we have to do on the estimate
        if 'CrossEntropyLoss' in repr(self.loss):
            # This is supposed to be a one-hot vector batchSize x nClasses
            assert len(estimate.shape) == 2
        elif 'SmoothL1Loss' in repr(self.loss) \
                    or 'MSELoss' in repr(self.loss) \
                    or 'L1Loss' in repr(self.loss):
            # In this case, the estimate has to be a batchSize tensor, so if
            # it has two dimensions, the second dimension has to be 1
            if len(estimate.shape) == 2:
                assert estimate.shape[1] == 1
                estimate = estimate.squeeze(1)
            assert len(estimate.shape) == 1

        return self.loss(estimate, target)

def F1Score(yHat, y):
    # Luana R. Ruiz, rubruiz@seas.upenn.edu, 2021/03/04
    dimensions = len(yHat.shape)
    C = yHat.shape[dimensions-2]
    N = yHat.shape[dimensions-1]
    yHat = yHat.reshape((-1,C,N))
    yHat = torch.nn.functional.log_softmax(yHat, dim=1)
    yHat = torch.exp(yHat)
    yHat = yHat[:,1,:]
    y = y.reshape((-1,N))
    tp = torch.sum(y*yHat,1)
    #tn = torch.sum((1-y)*(1-yHat),1)
    fp = torch.sum((1-y)*yHat,1)
    fn = torch.sum(y*(1-yHat),1)
    p = tp / (tp + fp)
    r = tp / (tp + fn)
    # Where the denominators were zero, p and r are NaN; set them to 0, and
    # to 1 in the entries where tp == 0 as well (no positives to find)
    idx_p = p!=p
    idx_tp = tp==0
    idx_p1 = idx_p*idx_tp
    p[idx_p] = 0
    p[idx_p1] = 1
    idx_r = r!=r
    idx_r1 = idx_r*idx_tp
    r[idx_r] = 0
    r[idx_r1] = 1
    f1 = 2*p*r / (p+r)
    f1[f1!=f1] = 0
    return 1 - torch.mean(f1)
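
# --- Editor's aside (a sketch, not part of the original file) ---------------
# A quick usage sketch (with made-up shapes) of adaptExtraDimensionLoss: it
# lets the B x 1 output of a GNN readout feed a scalar loss such as MSELoss
# without shape mismatches or warnings.

if __name__ == '__main__':
    lossMSE = adaptExtraDimensionLoss(nn.MSELoss)
    estimate = torch.randn(8, 1)  # GNN output keeps the feature dimension
    target = torch.randn(8)       # one scalar target per batch element
    # The wrapper squeezes estimate to shape (8,) before calling MSELoss
    print(lossMSE(estimate, target).item())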
6,033
36.478261
102
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/modules/training.py
# 2020/02/25~
# Fernando Gama, fgama@seas.upenn.edu
# Luana Ruiz, rubruiz@seas.upenn.edu
"""
training.py Training Module

Trainer classes

Trainer: general trainer that just computes a loss over a training set and
    runs an evaluation on a validation set
TrainerSingleNode: trainer class that computes a loss over the training set
    and runs an evaluation on a validation set, but assuming that the
    architectures involved have a single node forward structure and that the
    data involved has a method for identifying the target nodes
TrainerFlocking: trainer class that computes a loss over the training set,
    suited for the problem of flocking (i.e. it involves specific uses of the
    data, like computing trajectories or using DAGger)
"""

import torch
import numpy as np
import os
import pickle
import datetime

from alegnnss.utils.dataTools import invertTensorEW

class Trainer:
    """
    Trainer: general trainer that just computes a loss over a training set
        and runs an evaluation on a validation set

    Initialization:

        model (Modules.model class): model to train
        data (Utils.data class): needs to have a getSamples and an evaluate
            method
        nEpochs (int): number of epochs (passes over the dataset)
        batchSize (int): size of each minibatch

        Optional (keyword) arguments:

        validationInterval (int): interval of training (number of training
            steps) without running a validation stage.

        learningRateDecayRate (float): float that multiplies the latest
            learning rate used.
        learningRateDecayPeriod (int): how many training steps before
            multiplying the learning rate decay rate by the actual learning
            rate.
        > Obs.: Both of these have to be defined for the learningRateDecay
            scheduler to be activated.
        logger (Visualizer): save tensorboard logs.
        saveDir (string): path to the directory where to save relevant
            training variables.
        printInterval (int): how many training steps after which to print
            partial results (0 means do not print)
        graphNo (int): keep track of what graph realization this is
        realizationNo (int): keep track of what data realization this is
        >> Alternatively, these last two keyword arguments can be used to
            keep track of different trainings of the same model

    Training:

        .train(): trains the model and returns trainVars dict with the keys
            'nEpochs': number of epochs (int)
            'nBatches': number of batches (int)
            'validationInterval': number of training steps in between
                validation steps (int)
            'batchSize': batch size of each training step (np.array)
            'batchIndex': indices for the start sample and end sample of each
                batch (np.array)
            'lossTrain': loss function on the training samples for each
                training step (np.array)
            'evalTrain': evaluation function on the training samples for each
                training step (np.array)
            'lossValid': loss function on the validation samples for each
                validation step (np.array)
            'evalValid': evaluation function on the validation samples for
                each validation step (np.array)
    """

    def __init__(self, model, data, nEpochs, batchSize, **kwargs):

        #\\\ Store model

        self.model = model
        self.data = data

        ####################################
        # ARGUMENTS (Store chosen options) #
        ####################################

        # Training Options:
        if 'doLogging' in kwargs.keys():
            doLogging = kwargs['doLogging']
        else:
            doLogging = False

        if 'doSaveVars' in kwargs.keys():
            doSaveVars = kwargs['doSaveVars']
        else:
            doSaveVars = True

        if 'printInterval' in kwargs.keys():
            printInterval = kwargs['printInterval']
            if printInterval > 0:
                doPrint = True
            else:
                doPrint = False
        else:
            doPrint = True
            printInterval = (data.nTrain//batchSize)//5

        if 'learningRateDecayRate' in kwargs.keys() and \
                'learningRateDecayPeriod' in kwargs.keys():
            doLearningRateDecay = True
            learningRateDecayRate = kwargs['learningRateDecayRate']
            learningRateDecayPeriod = kwargs['learningRateDecayPeriod']
        else:
            doLearningRateDecay = False

        if 'validationInterval' in kwargs.keys():
            validationInterval = kwargs['validationInterval']
        else:
            validationInterval = data.nTrain//batchSize

        if 'earlyStoppingLag' in kwargs.keys():
            doEarlyStopping = True
            earlyStoppingLag = kwargs['earlyStoppingLag']
        else:
            doEarlyStopping = False
            earlyStoppingLag = 0

        if 'graphNo' in kwargs.keys():
            graphNo = kwargs['graphNo']
        else:
            graphNo = -1

        if 'realizationNo' in kwargs.keys():
            # If realizationNo is given without graphNo, it is used as the
            # graph realization counter instead
            if 'graphNo' in kwargs.keys():
                realizationNo = kwargs['realizationNo']
            else:
                graphNo = kwargs['realizationNo']
                realizationNo = -1
        else:
            realizationNo = -1

        if doLogging:
            from alegnnss.utils.visualTools import Visualizer
            # The save directory and name are attributes of the model, not of
            # the trainer
            logsTB = os.path.join(self.model.saveDir,
                                  self.model.name + '-logsTB')
            logger = Visualizer(logsTB, name = 'visualResults')
        else:
            logger = None

        # No training case:
        if nEpochs == 0:
            doSaveVars = False
            doLogging = False
            # If there's no training happening, there's nothing to report
            # about training losses and stuff.

        ###########################################
        # DATA INPUT (pick up on data parameters) #
        ###########################################

        nTrain = data.nTrain # size of the training set

        # Number of batches: If the desired number of batches does not split
        # the dataset evenly, we reduce the size of the last batch (the
        # number of samples in the last batch).
        # The variable batchSize is a list of length nBatches (number of
        # batches), where each element of the list is a number indicating
        # the size of the corresponding batch (the same logic is isolated in
        # the sketch right below).
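
# --- Editor's aside (a sketch, not part of the original file) ---------------
# The batch partitioning implemented right below, isolated as a standalone
# helper so the intent is easier to test; int() and np.ceil are used instead
# of the np.int alias, which was removed from recent NumPy releases:

def partition_batches(nTrain, batchSize):
    if nTrain < batchSize:
        sizes = [nTrain]
    elif nTrain % batchSize != 0:
        nBatches = int(np.ceil(nTrain / batchSize))
        sizes = [batchSize] * nBatches
        while sum(sizes) != nTrain:  # trim the ceil() overshoot from the end
            sizes[-1] -= 1
    else:
        sizes = [batchSize] * (nTrain // batchSize)
    index = [0] + np.cumsum(sizes).tolist()
    return sizes, index

# e.g. partition_batches(50, 20) -> ([20, 20, 10], [0, 20, 40, 50])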
        if nTrain < batchSize:
            nBatches = 1
            batchSize = [nTrain]
        elif nTrain % batchSize != 0:
            nBatches = np.ceil(nTrain/batchSize).astype(np.int64)
            batchSize = [batchSize] * nBatches
            # If the sum of all batches so far is not the total number of
            # graphs, start taking away samples from the last batch (remember
            # that we used ceiling, so we are overshooting with the estimated
            # number of batches)
            while sum(batchSize) != nTrain:
                batchSize[-1] -= 1
        # If they fit evenly, then just do so.
        else:
            nBatches = int(nTrain/batchSize)
            batchSize = [batchSize] * nBatches
        # batchIndex is used to determine the first and last element of each
        # batch.
        # If batchSize is, for example [20,20,20] meaning that there are
        # three batches of size 20 each, then cumsum will give [20,40,60]
        # which determines the last index of each batch: up to 20, from 20 to
        # 40, and from 40 to 60. We add the 0 at the beginning so that
        # batchIndex[b]:batchIndex[b+1] gives the right samples for batch b.
        batchIndex = np.cumsum(batchSize).tolist()
        batchIndex = [0] + batchIndex

        ###################
        # SAVE ATTRIBUTES #
        ###################

        self.trainingOptions = {}
        self.trainingOptions['doLogging'] = doLogging
        self.trainingOptions['logger'] = logger
        self.trainingOptions['doSaveVars'] = doSaveVars
        self.trainingOptions['doPrint'] = doPrint
        self.trainingOptions['printInterval'] = printInterval
        self.trainingOptions['doLearningRateDecay'] = doLearningRateDecay
        if doLearningRateDecay:
            self.trainingOptions['learningRateDecayRate'] = \
                                                         learningRateDecayRate
            self.trainingOptions['learningRateDecayPeriod'] = \
                                                       learningRateDecayPeriod
        self.trainingOptions['validationInterval'] = validationInterval
        self.trainingOptions['doEarlyStopping'] = doEarlyStopping
        self.trainingOptions['earlyStoppingLag'] = earlyStoppingLag
        self.trainingOptions['batchIndex'] = batchIndex
        self.trainingOptions['batchSize'] = batchSize
        self.trainingOptions['nEpochs'] = nEpochs
        self.trainingOptions['nBatches'] = nBatches
        self.trainingOptions['graphNo'] = graphNo
        self.trainingOptions['realizationNo'] = realizationNo

    def trainBatch(self, thisBatchIndices):

        # Get the samples
        xTrain, yTrain = self.data.getSamples('train', thisBatchIndices)
        xTrain = xTrain.to(self.model.device)
        yTrain = yTrain.to(self.model.device)

        # Start measuring time
        startTime = datetime.datetime.now()

        # Reset gradients
        self.model.archit.zero_grad()

        # Obtain the output of the GNN
        yHatTrain = self.model.archit(xTrain)

        # Compute loss
        lossValueTrain = self.model.loss(yHatTrain, yTrain)

        # Compute gradients
        lossValueTrain.backward()

        # Optimize
        self.model.optim.step()

        # Finish measuring time
        endTime = datetime.datetime.now()
        timeElapsed = abs(endTime - startTime).total_seconds()

        # Compute the accuracy
        #   Note: Using yHatTrain.data creates a new tensor with the
        #   same value, but detaches it from the gradient, so that no
        #   gradient operation is taken into account here.
        #   (Alternatively, we could use a with torch.no_grad():)
        costTrain = self.data.evaluate(yHatTrain.data, yTrain)

        return lossValueTrain.item(), costTrain.item(), timeElapsed

    def validationStep(self):

        # Validation:
        xValid, yValid = self.data.getSamples('valid')
        xValid = xValid.to(self.model.device)
        yValid = yValid.to(self.model.device)

        # Start measuring time
        startTime = datetime.datetime.now()

        # Under torch.no_grad() so that the computations carried out
        # to obtain the validation accuracy are not taken into
        # account to update the learnable parameters.
        with torch.no_grad():
            # Obtain the output of the GNN
            yHatValid = self.model.archit(xValid)

            # Compute loss
            lossValueValid = self.model.loss(yHatValid, yValid)

        # Finish measuring time
        endTime = datetime.datetime.now()
        timeElapsed = abs(endTime - startTime).total_seconds()

        # Compute accuracy:
        costValid = self.data.evaluate(yHatValid, yValid)

        return lossValueValid.item(), costValid.item(), timeElapsed

    def train(self):

        # Get back the training options
        assert 'trainingOptions' in dir(self)
        assert 'doLogging' in self.trainingOptions.keys()
        doLogging = self.trainingOptions['doLogging']
        assert 'logger' in self.trainingOptions.keys()
        logger = self.trainingOptions['logger']
        assert 'doSaveVars' in self.trainingOptions.keys()
        doSaveVars = self.trainingOptions['doSaveVars']
        assert 'doPrint' in self.trainingOptions.keys()
        doPrint = self.trainingOptions['doPrint']
        assert 'printInterval' in self.trainingOptions.keys()
        printInterval = self.trainingOptions['printInterval']
        assert 'doLearningRateDecay' in self.trainingOptions.keys()
        doLearningRateDecay = self.trainingOptions['doLearningRateDecay']
        if doLearningRateDecay:
            assert 'learningRateDecayRate' in self.trainingOptions.keys()
            learningRateDecayRate = \
                                 self.trainingOptions['learningRateDecayRate']
            assert 'learningRateDecayPeriod' in self.trainingOptions.keys()
            learningRateDecayPeriod = \
                               self.trainingOptions['learningRateDecayPeriod']
        assert 'validationInterval' in self.trainingOptions.keys()
        validationInterval = self.trainingOptions['validationInterval']
        assert 'doEarlyStopping' in self.trainingOptions.keys()
        doEarlyStopping = self.trainingOptions['doEarlyStopping']
        assert 'earlyStoppingLag' in self.trainingOptions.keys()
        earlyStoppingLag = self.trainingOptions['earlyStoppingLag']
        assert 'batchIndex' in self.trainingOptions.keys()
        batchIndex = self.trainingOptions['batchIndex']
        assert 'batchSize' in self.trainingOptions.keys()
        batchSize = self.trainingOptions['batchSize']
        assert 'nEpochs' in self.trainingOptions.keys()
        nEpochs = self.trainingOptions['nEpochs']
        assert 'nBatches' in self.trainingOptions.keys()
        nBatches = self.trainingOptions['nBatches']
        assert 'graphNo' in self.trainingOptions.keys()
        graphNo = self.trainingOptions['graphNo']
        assert 'realizationNo' in self.trainingOptions.keys()
        realizationNo = self.trainingOptions['realizationNo']

        # Learning rate scheduler:
        if doLearningRateDecay:
            learningRateScheduler = torch.optim.lr_scheduler.StepLR(
                  self.model.optim, learningRateDecayPeriod,
                  learningRateDecayRate)

        # Initialize counters (since we give the possibility of early
        # stopping, we had to drop the 'for' and use a 'while' instead):
        epoch = 0 # epoch counter
        lagCount = 0 # lag counter for early stopping

        # Store the training variables
        lossTrain = []
        costTrain = []
        lossValid = []
        costValid = []
        timeTrain = []
        timeValid = []

        while epoch < nEpochs \
                   and (lagCount < earlyStoppingLag or (not doEarlyStopping)):
            # The condition will be zero (stop), whenever one of the items of
            # the 'and' is zero. Therefore, we want this to stop only for
            # epoch counting when we are NOT doing early stopping. This can
            # be achieved if the second element of the 'and' is always 1 (so
            # that the first element, the epoch counting, decides). In order
            # to force the second element to be one whenever there is not
            # early stopping, we have an or, and force it to one. So, when we
            # are not doing early stopping, the variable 'not doEarlyStopping'
            # is 1, and the result of the 'or' is 1 regardless of the
            # lagCount.
            # When we do early stopping, then the variable
            # 'not doEarlyStopping' is 0, and the value 1 for the 'or' gate
            # is determined by the lag count.
            # ALTERNATIVELY, we could just keep 'and lagCount<earlyStoppingLag'
            # and be sure that lagCount can only be increased whenever
            # doEarlyStopping is True. But I somehow figured out that would be
            # harder to maintain (more parts of the code to check if we are
            # accidentally increasing lagCount).

            # Randomize dataset for each epoch
            randomPermutation = np.random.permutation(self.data.nTrain)
            # Convert a numpy.array of numpy.int into a list of actual int.
            idxEpoch = [int(i) for i in randomPermutation]

            # Learning decay
            if doLearningRateDecay:
                learningRateScheduler.step()

                if doPrint:
                    # All the optimizations have the same learning rate, so
                    # just print one of them (read it off the optimizer)
                    # TODO: Actually, they might be different, so I will need
                    # to print all of them.
                    print("Epoch %d, learning rate = %.8f" % (epoch+1,
                          self.model.optim.param_groups[0]['lr']))

            # Initialize counter
            batch = 0 # batch counter
            while batch < nBatches \
                   and (lagCount < earlyStoppingLag or (not doEarlyStopping)):

                # Extract the adequate batch
                thisBatchIndices = idxEpoch[batchIndex[batch]
                                            : batchIndex[batch+1]]

                lossValueTrain, costValueTrain, timeElapsed = \
                                             self.trainBatch(thisBatchIndices)

                # Logging values
                if doLogging:
                    lossTrainTB = lossValueTrain
                    costTrainTB = costValueTrain
                # Save values
                lossTrain += [lossValueTrain]
                costTrain += [costValueTrain]
                timeTrain += [timeElapsed]

                # Print:
                if doPrint:
                    if (epoch * nBatches + batch) % printInterval == 0:
                        print("\t(E: %2d, B: %3d) %6.4f / %7.4f - %6.4fs" % (
                                epoch+1, batch+1,
                                costValueTrain,
                                lossValueTrain,
                                timeElapsed), end = ' ')
                        if graphNo > -1:
                            print("[%d" % graphNo, end = '')
                            if realizationNo > -1:
                                print("/%d" % realizationNo, end = '')
                            print("]", end = '')
                        print("")

                #\\\\\\\
                #\\\ TB LOGGING (for each batch)
                #\\\\\\\

                if doLogging:
                    logger.scalar_summary(mode = 'Training',
                                          epoch = epoch * nBatches + batch,
                                          **{'lossTrain': lossTrainTB,
                                             'costTrain': costTrainTB})

                #\\\\\\\
                #\\\ VALIDATION
                #\\\\\\\

                if (epoch * nBatches + batch) % validationInterval == 0:

                    lossValueValid, costValueValid, timeElapsed = \
                                                         self.validationStep()

                    # Logging values
                    if doLogging:
                        lossValidTB = lossValueValid
                        costValidTB = costValueValid
                    # Save values
                    lossValid += [lossValueValid]
                    costValid += [costValueValid]
                    timeValid += [timeElapsed]

                    # Print:
                    if doPrint:
                        print("\t(E: %2d, B: %3d) %6.4f / %7.4f - %6.4fs" % (
                                epoch+1, batch+1,
                                costValueValid,
                                lossValueValid,
                                timeElapsed), end = ' ')
                        print("[VALIDATION", end = '')
                        if graphNo > -1:
                            print(".%d" % graphNo, end = '')
                            if realizationNo > -1:
                                print("/%d" % realizationNo, end = '')
                        print(" (%s)]" % self.model.name)

                    if doLogging:
                        logger.scalar_summary(mode = 'Validation',
                                          epoch = epoch * nBatches + batch,
                                          **{'lossValid': lossValidTB,
                                             'costValid': costValidTB})

                    # No previous best option, so let's record the first
                    # trial as the best option
                    if epoch == 0 and batch == 0:
                        bestScore = costValueValid
                        bestEpoch, bestBatch = epoch, batch
                        # Save this model as the best (so far)
                        self.model.save(label = 'Best')
                        # Start the counter
                        if doEarlyStopping:
                            initialBest = True
                    else:
                        thisValidScore = costValueValid
                        if thisValidScore < bestScore:
                            bestScore = thisValidScore
                            bestEpoch, bestBatch = epoch, batch
                            if doPrint:
                                print("\t=> New best achieved: %.4f" % \
                                          (bestScore))
                            self.model.save(label = 'Best')
                            # Now that we have found a best that is not the
                            # initial one, we can start counting the lag (if
                            # needed)
                            initialBest = False
                            # If we achieved a new best,
                            # then we need to reset the lag count.
                            if doEarlyStopping:
                                lagCount = 0
                        # If we didn't achieve a new best, increase the lag
                        # count.
                        # Unless it was the initial best, in which case we
                        # haven't found any best yet, so we shouldn't be
                        # doing the early stopping count.
                        elif doEarlyStopping and not initialBest:
                            lagCount += 1

                #\\\\\\\
                #\\\ END OF BATCH:
                #\\\\\\\

                #\\\ Increase batch count:
                batch += 1

            #\\\\\\\
            #\\\ END OF EPOCH:
            #\\\\\\\

            #\\\ Increase epoch count:
            epoch += 1

        #\\\ Save models:
        self.model.save(label = 'Last')

        #################
        # TRAINING OVER #
        #################

        # We convert the lists into np.arrays
        lossTrain = np.array(lossTrain)
        costTrain = np.array(costTrain)
        lossValid = np.array(lossValid)
        costValid = np.array(costValid)

        # And we would like to save all the relevant information from
        # training
        trainVars = {'nEpochs': nEpochs,
                     'nBatches': nBatches,
                     'validationInterval': validationInterval,
                     'batchSize': np.array(batchSize),
                     'batchIndex': np.array(batchIndex),
                     'lossTrain': lossTrain,
                     'costTrain': costTrain,
                     'lossValid': lossValid,
                     'costValid': costValid
                     }

        if doSaveVars:
            saveDirVars = os.path.join(self.model.saveDir, 'trainVars')
            if not os.path.exists(saveDirVars):
                os.makedirs(saveDirVars)
            pathToFile = os.path.join(saveDirVars,
                                      self.model.name + 'trainVars.pkl')
            with open(pathToFile, 'wb') as trainVarsFile:
                pickle.dump(trainVars, trainVarsFile)

        # Now, if we didn't do any training (i.e.
printInterval (int): how many training steps after which to print partial results (0 means do not print) graphNo (int): keep track of what graph realization this is realitizationNo (int): keep track of what data realization this is >> Alternatively, these last two keyword arguments can be used to keep track of different trainings of the same model Training: .train(): trains the model and returns trainVars dict with the keys 'nEpochs': number of epochs (int) 'nBatches': number of batches (int) 'validationInterval': number of training steps in between validation steps (int) 'batchSize': batch size of each training step (np.array) 'batchIndex': indices for the start sample and end sample of each batch (np.array) 'lossTrain': loss function on the training samples for each training step (np.array) 'evalTrain': evaluation function on the training samples for each training step (np.array) 'lossValid': loss function on the validation samples for each validation step (np.array) 'evalValid': evaluation function on the validation samples for each validation step (np.array) """ def __init__(self, model, data, nEpochs, batchSize, **kwargs): assert 'singleNodeForward' in dir(model.archit) assert 'getLabelID' in dir(data) # Initialize supraclass super().__init__(model, data, nEpochs, batchSize, **kwargs) def trainBatch(self, thisBatchIndices): # Get the samples xTrain, yTrain = self.data.getSamples('train', thisBatchIndices) xTrain = xTrain.to(self.model.device) yTrain = yTrain.to(self.model.device) targetIDs = self.data.getLabelID('train', thisBatchIndices) # Start measuring time startTime = datetime.datetime.now() # Reset gradients self.model.archit.zero_grad() # Obtain the output of the GNN yHatTrain = self.model.archit.singleNodeForward(xTrain, targetIDs) # Compute loss lossValueTrain = self.model.loss(yHatTrain, yTrain) # Compute gradients lossValueTrain.backward() # Optimize self.model.optim.step() # Finish measuring time endTime = datetime.datetime.now() timeElapsed = abs(endTime - startTime).total_seconds() # Compute the accuracy # Note: Using yHatTrain.data creates a new tensor with the # same value, but detaches it from the gradient, so that no # gradient operation is taken into account here. # (Alternatively, we could use a with torch.no_grad():) costTrain = self.data.evaluate(yHatTrain.data, yTrain) return lossValueTrain.item(), costTrain.item(), timeElapsed def validationStep(self): # Validation: xValid, yValid = self.data.getSamples('valid') xValid = xValid.to(self.model.device) yValid = yValid.to(self.model.device) targetIDs = self.data.getLabelID('valid') # Start measuring time startTime = datetime.datetime.now() # Under torch.no_grad() so that the computations carried out # to obtain the validation accuracy are not taken into # account to update the learnable parameters. 
with torch.no_grad(): # Obtain the output of the GNN yHatValid = self.model.archit.singleNodeForward(xValid, targetIDs) # Compute loss lossValueValid = self.model.loss(yHatValid, yValid) # Finish measuring time endTime = datetime.datetime.now() timeElapsed = abs(endTime - startTime).total_seconds() # Compute accuracy: costValid = self.data.evaluate(yHatValid, yValid) return lossValueValid.item(), costValid.item(), timeElapsed class TrainerFlocking(Trainer): """ Trainer: trains flocking models, following the appropriate evaluation of the cost, and has options for different DAGger alternatives Initialization: model (Modules.model class): model to train data (Utils.data class): needs to have a getSamples and an evaluate method nEpochs (int): number of epochs (passes over the dataset) batchSize (int): size of each minibatch Optional (keyword) arguments: probExpert (float): initial probability of choosing the expert DAGgerType ('fixedBatch', 'randomEpoch', 'replaceTimeBatch'): 'fixedBatch' (default if 'probExpert' is defined): doubles the batch samples by considering the same initial velocities and positions, a trajectory given by the latest trained architecture, and the corresponding correction given by the optimal acceleration (i.e. for each position and velocity we give what would be the optimal acceleration, even though the next position and velocity won't reflect this decision, but the one taken by the learned policy) 'randomEpoch': forms a new training set for each epoch consisting, with probability probExpert, of samples of the original dataset (optimal trajectories) and with probability 1-probExpert, with trajectories following the latest trained dataset. 'replaceTimeBatch': creates a fixed number of new trajectories following randomly at each time step either the optimal control or the learned control; then, replaces this fixed number of new trajectores into the training set (then these might, or might not get selected by the next batch) validationInterval (int): interval of training (number of training steps) without running a validation stage. learningRateDecayRate (float): float that multiplies the latest learning rate used. learningRateDecayPeriod (int): how many training steps before multiplying the learning rate decay rate by the actual learning rate. > Obs.: Both of these have to be defined for the learningRateDecay scheduler to be activated. logger (Visualizer): save tensorboard logs. saveDir (string): path to the directory where to save relevant training variables. printInterval (int): how many training steps after which to print partial results (0 means do not print) graphNo (int): keep track of what graph realization this is realitizationNo (int): keep track of what data realization this is >> Alternatively, these last two keyword arguments can be used to keep track of different trainings of the same model Training: .train(): trains the model and returns trainVars dict with the keys 'nEpochs': number of epochs (int) 'nBatches': number of batches (int) 'validationInterval': number of training steps in between validation steps (int) 'batchSize': batch size of each training step (np.array) 'batchIndex': indices for the start sample and end sample of each batch (np.array) 'bestBatch': batch index at which the best model was achieved (int) 'bestEpoch': epoch at which the best model was achieved (int) 'bestScore': evaluation measure on the validation sample that achieved the best model (i.e. 
minimum achieved evaluation measure on the validation set) 'lossTrain': loss function on the training samples for each training step (np.array) 'timeTrain': time elapsed at each training step (np.array) 'evalValid': evaluation function on the validation samples for each validation step (np.array) 'timeValid': time elapsed at each validation step (np.array) """ def __init__(self, model, data, nEpochs, batchSize, **kwargs): # Initialize supraclass super().__init__(model, data, nEpochs, batchSize, **kwargs) # Add the specific options if 'probExpert' in kwargs.keys(): doDAGger = True probExpert = kwargs['probExpert'] else: doDAGger = False if 'DAGgerType' in kwargs.keys(): DAGgerType = kwargs['DAGgerType'] else: DAGgerType = 'fixedBatch' self.trainingOptions['doDAGger'] = doDAGger if doDAGger: self.trainingOptions['probExpert'] = probExpert self.trainingOptions['DAGgerType'] = DAGgerType def train(self): # Get back the training options assert 'trainingOptions' in dir(self) assert 'doLogging' in self.trainingOptions.keys() doLogging = self.trainingOptions['doLogging'] assert 'logger' in self.trainingOptions.keys() logger = self.trainingOptions['logger'] assert 'doSaveVars' in self.trainingOptions.keys() doSaveVars = self.trainingOptions['doSaveVars'] assert 'doPrint' in self.trainingOptions.keys() doPrint = self.trainingOptions['doPrint'] assert 'printInterval' in self.trainingOptions.keys() printInterval = self.trainingOptions['printInterval'] assert 'doLearningRateDecay' in self.trainingOptions.keys() doLearningRateDecay = self.trainingOptions['doLearningRateDecay'] if doLearningRateDecay: assert 'learningRateDecayRate' in self.trainingOptions.keys() learningRateDecayRate=self.trainingOptions['learningRateDecayRate'] assert 'learningRateDecayPeriod' in self.trainingOptions.keys() learningRateDecayPeriod=self.trainingOptions['learningRateDecayPeriod'] assert 'validationInterval' in self.trainingOptions.keys() validationInterval = self.trainingOptions['validationInterval'] assert 'doEarlyStopping' in self.trainingOptions.keys() doEarlyStopping = self.trainingOptions['doEarlyStopping'] assert 'earlyStoppingLag' in self.trainingOptions.keys() earlyStoppingLag = self.trainingOptions['earlyStoppingLag'] assert 'batchIndex' in self.trainingOptions.keys() batchIndex = self.trainingOptions['batchIndex'] assert 'batchSize' in self.trainingOptions.keys() batchSize = self.trainingOptions['batchSize'] assert 'nEpochs' in self.trainingOptions.keys() nEpochs = self.trainingOptions['nEpochs'] assert 'nBatches' in self.trainingOptions.keys() nBatches = self.trainingOptions['nBatches'] assert 'graphNo' in self.trainingOptions.keys() graphNo = self.trainingOptions['graphNo'] assert 'realizationNo' in self.trainingOptions.keys() realizationNo = self.trainingOptions['realizationNo'] assert 'doDAGger' in self.trainingOptions.keys() doDAGger = self.trainingOptions['doDAGger'] if doDAGger: assert 'DAGgerType' in self.trainingOptions.keys() DAGgerType = self.trainingOptions['DAGgerType'] # Get the values we need nTrain = self.data.nTrain thisArchit = self.model.archit thisLoss = self.model.loss thisOptim = self.model.optim thisDevice = self.model.device # Learning rate scheduler: if doLearningRateDecay: learningRateScheduler = torch.optim.lr_scheduler.StepLR(self.optim, learningRateDecayPeriod, learningRateDecayRate) # Initialize counters (since we give the possibility of early stopping, # we had to drop the 'for' and use a 'while' instead): epoch = 0 # epoch counter lagCount = 0 # lag counter for early stopping if 
doSaveVars: lossTrain = [] evalValid = [] timeTrain = [] timeValid = [] # Get original dataset xTrainOrig, yTrainOrig = self.data.getSamples('train') StrainOrig = self.data.getData('commGraph', 'train') initVelTrainAll = self.data.getData('initVel', 'train') if doDAGger: initPosTrainAll = self.data.getData('initPos', 'train') # And save it as the original "all samples" xTrainAll = xTrainOrig yTrainAll = yTrainOrig StrainAll = StrainOrig # If it is: # 'randomEpoch' assigns always the original training set at the # beginning of each epoch, so it is reset by using the variable # Orig, instead of the variable all # 'replaceTimeBatch' keeps working only in the All variables, so # every epoch updates the previous dataset, and never goes back # to the original dataset (i.e. there is no Orig involved in # the 'replaceTimeBatch' DAGger) # 'fixedBatch': it takes All = Orig from the beginning and then it # doesn't matter becuase it always acts by creating a new # batch with "corrected" trajectories for the learned policies while epoch < nEpochs \ and (lagCount < earlyStoppingLag or (not doEarlyStopping)): # The condition will be zero (stop), whenever one of the items of # the 'and' is zero. Therefore, we want this to stop only for epoch # counting when we are NOT doing early stopping. This can be # achieved if the second element of the 'and' is always 1 (so that # the first element, the epoch counting, decides). In order to # force the second element to be one whenever there is not early # stopping, we have an or, and force it to one. So, when we are not # doing early stopping, the variable 'not doEarlyStopping' is 1, # and the result of the 'or' is 1 regardless of the lagCount. When # we do early stopping, then the variable 'not doEarlyStopping' is # 0, and the value 1 for the 'or' gate is determined by the lag # count. # ALTERNATIVELY, we could just keep 'and lagCount<earlyStoppingLag' # and be sure that lagCount can only be increased whenever # doEarlyStopping is True. But I somehow figured out that would be # harder to maintain (more parts of the code to check if we are # accidentally increasing lagCount). # Randomize dataset for each epoch randomPermutation = np.random.permutation(nTrain) # Convert a numpy.array of numpy.int into a list of actual int. idxEpoch = [int(i) for i in randomPermutation] # Learning decay if doLearningRateDecay: learningRateScheduler.step() if doPrint: # All the optimization have the same learning rate, so just # print one of them # TODO: Actually, they might be different, so I will need to # print all of them. print("Epoch %d, learning rate = %.8f" % (epoch+1, learningRateScheduler.optim.param_groups[0]['lr'])) #\\\\\\\\\\\\\\\\ #\\\ Start DAGGER: randomEpoch #\\\ if doDAGger and epoch > 0 and DAGgerType == 'randomEpoch': # The 'randomEpoch' option forms a new training set for each # epoch consisting, with probability probExpert, of samples # of the original dataset (optimal trajectories) and with # probability 1-probExpert, with trajectories following the # latest trained dataset. 
xTrainAll, yTrainAll, StrainAll = \ self.randomEpochDAGger(epoch, xTrainOrig, yTrainOrig, StrainOrig, initPosTrainAll, initVelTrainAll) #\\\ #\\\ Finished DAGGER #\\\\\\\\\\\\\\\\\\\ # Initialize counter batch = 0 # batch counter while batch < nBatches \ and (lagCount<earlyStoppingLag or (not doEarlyStopping)): #\\\\\\\\\\\\\\\\ #\\\ Start DAGGER: replaceTimeBatch #\\\ if doDAGger and (batch > 0 or epoch > 0)\ and DAGgerType == 'replaceTimeBatch': # The option 'replaceTimeBatch' creates a fixed number of # new trajectories following randomly at each time step # either the optimal control or the learned control # Then, replaces this fixed number of new trajectores into # the training set (then these might, or might not get # selected by the next batch) xTrainAll, yTrainAll, StrainAll = \ self.replaceTimeBatchDAGger(epoch, xTrainAll, yTrainAll, StrainAll, initPosTrainAll, initVelTrainAll) #\\\ #\\\ Finished DAGGER #\\\\\\\\\\\\\\\\\\\ # Extract the adequate batch thisBatchIndices = idxEpoch[batchIndex[batch] : batchIndex[batch+1]] # Get the samples xTrain = xTrainAll[thisBatchIndices] yTrain = yTrainAll[thisBatchIndices] Strain = StrainAll[thisBatchIndices] initVelTrain = initVelTrainAll[thisBatchIndices] if doDAGger and DAGgerType == 'fixedBatch': initPosTrain = initPosTrainAll[thisBatchIndices] #\\\\\\\\\\\\\\\\ #\\\ Start DAGGER: fixedBatch #\\\ if doDAGger and (batch > 0 or epoch > 0)\ and DAGgerType == 'fixedBatch': # The 'fixedBatch' option, doubles the batch samples # by considering the same initial velocities and # positions, a trajectory given by the latest trained # architecture, and the corresponding correction # given by the optimal acceleration (i.e. for each # position and velocity we give what would be the # optimal acceleration, even though the next position # and velocity won't reflect this decision, but the # one taken by the learned policy) xDAG, yDAG, SDAG = self.fixedBatchDAGger(initPosTrain, initVelTrain) xTrain = np.concatenate((xTrain, xDAG), axis = 0) Strain = np.concatenate((Strain, SDAG), axis = 0) yTrain = np.concatenate((yTrain, yDAG), axis = 0) initVelTrain = np.tile(initVelTrain, (2,1,1)) #\\\ #\\\ Finished DAGGER #\\\\\\\\\\\\\\\\\\\ # Now that we have our dataset, move it to tensor and device # so we can use it xTrain = torch.tensor(xTrain, device = thisDevice) Strain = torch.tensor(Strain, device = thisDevice) yTrain = torch.tensor(yTrain, device = thisDevice) initVelTrain = torch.tensor(initVelTrain, device = thisDevice) # Start measuring time startTime = datetime.datetime.now() # Reset gradients thisArchit.zero_grad() # Obtain the output of the GNN yHatTrain = thisArchit(xTrain, Strain) # Compute loss lossValueTrain = thisLoss(yHatTrain, yTrain) # Compute gradients lossValueTrain.backward() # Optimize thisOptim.step() # Finish measuring time endTime = datetime.datetime.now() timeElapsed = abs(endTime - startTime).total_seconds() # Logging values if doLogging: lossTrainTB = lossValueTrain.item() # Save values if doSaveVars: lossTrain += [lossValueTrain.item()] timeTrain += [timeElapsed] # Print: if doPrint and printInterval > 0: if (epoch * nBatches + batch) % printInterval == 0: print("\t(E: %2d, B: %3d) %7.4f - %6.4fs" % ( epoch+1, batch+1, lossValueTrain.item(), timeElapsed), end = ' ') if graphNo > -1: print("[%d" % graphNo, end = '') if realizationNo > -1: print("/%d" % realizationNo, end = '') print("]", end = '') print("") # Delete variables to free space in CUDA memory del xTrain del Strain del yTrain del initVelTrain del lossValueTrain #\\\\\\\ 
#\\\ TB LOGGING (for each batch) #\\\\\\\ if doLogging: logger.scalar_summary(mode = 'Training', epoch = epoch * nBatches + batch, **{'lossTrain': lossTrainTB}) #\\\\\\\ #\\\ VALIDATION #\\\\\\\ if (epoch * nBatches + batch) % validationInterval == 0: # Start measuring time startTime = datetime.datetime.now() # Create trajectories # Initial data initPosValid = self.data.getData('initPos','valid') initVelValid = self.data.getData('initVel','valid') # Compute trajectories _, velTestValid, _, _, _ = self.data.computeTrajectory( initPosValid, initVelValid, self.data.duration, archit = thisArchit, doPrint = False) # Compute evaluation accValid = self.data.evaluate(vel = velTestValid) # Finish measuring time endTime = datetime.datetime.now() timeElapsed = abs(endTime - startTime).total_seconds() # Logging values if doLogging: evalValidTB = accValid # Save values if doSaveVars: evalValid += [accValid] timeValid += [timeElapsed] # Print: if doPrint: print("\t(E: %2d, B: %3d) %8.4f - %6.4fs" % ( epoch+1, batch+1, accValid, timeElapsed), end = ' ') print("[VALIDATION", end = '') if graphNo > -1: print(".%d" % graphNo, end = '') if realizationNo > -1: print("/%d" % realizationNo, end = '') print(" (%s)]" % self.model.name) if doLogging: logger.scalar_summary(mode = 'Validation', epoch = epoch * nBatches + batch, **{'evalValid': evalValidTB}) # No previous best option, so let's record the first trial # as the best option if epoch == 0 and batch == 0: bestScore = accValid bestEpoch, bestBatch = epoch, batch # Save this model as the best (so far) self.model.save(label = 'Best') # Start the counter if doEarlyStopping: initialBest = True else: thisValidScore = accValid if thisValidScore < bestScore: bestScore = thisValidScore bestEpoch, bestBatch = epoch, batch if doPrint: print("\t=> New best achieved: %.4f" % \ (bestScore)) self.model.save(label = 'Best') # Now that we have found a best that is not the # initial one, we can start counting the lag (if # needed) initialBest = False # If we achieved a new best, then we need to reset # the lag count. if doEarlyStopping: lagCount = 0 # If we didn't achieve a new best, increase the lag # count. # Unless it was the initial best, in which case we # haven't found any best yet, so we shouldn't be doing # the early stopping count. elif doEarlyStopping and not initialBest: lagCount += 1 # Delete variables to free space in CUDA memory del initVelValid del initPosValid #\\\\\\\ #\\\ END OF BATCH: #\\\\\\\ #\\\ Increase batch count: batch += 1 #\\\\\\\ #\\\ END OF EPOCH: #\\\\\\\ #\\\ Increase epoch count: epoch += 1 #\\\ Save models: self.model.save(label = 'Last') ################# # TRAINING OVER # ################# if doSaveVars: # We convert the lists into np.arrays lossTrain = np.array(lossTrain) evalValid = np.array(evalValid) # And we would like to save all the relevant information from # training trainVars = {'nEpochs': nEpochs, 'nBatches': nBatches, 'validationInterval': validationInterval, 'batchSize': np.array(batchSize), 'batchIndex': np.array(batchIndex), 'bestBatch': bestBatch, 'bestEpoch': bestEpoch, 'bestScore': bestScore, 'lossTrain': lossTrain, 'timeTrain': timeTrain, 'evalValid': evalValid, 'timeValid': timeValid } saveDirVars = os.path.join(self.model.saveDir, 'trainVars') if not os.path.exists(saveDirVars): os.makedirs(saveDirVars) pathToFile = os.path.join(saveDirVars,self.model.name + 'trainVars.pkl') with open(pathToFile, 'wb') as trainVarsFile: pickle.dump(trainVars, trainVarsFile) # Now, if we didn't do any training (i.e. 
nEpochs = 0), then the last is # also the best. if nEpochs == 0: self.model.save(label = 'Best') self.model.save(label = 'Last') if doPrint: print("\nWARNING: No training. Best and Last models are the same.\n") # After training is done, reload best model before proceeding to # evaluation: self.model.load(label = 'Best') #\\\ Print out best: if doPrint and nEpochs > 0: print("\t=> Best validation achieved (E: %d, B: %d): %.4f" % ( bestEpoch + 1, bestBatch + 1, bestScore)) return trainVars def randomEpochDAGger(self, epoch, xTrainOrig, yTrainOrig, StrainOrig, initPosTrainAll, initVelTrainAll): # The 'randomEpoch' option forms a new training set for each # epoch consisting, with probability probExpert, of samples # of the original dataset (optimal trajectories) and with # probability 1-probExpert, with trajectories following the # latest trained dataset. assert 'probExpert' in self.trainingOptions.kwargs() probExpert = self.trainingOptions['probExpert'] nTrain = xTrainOrig.shape[0] # Compute the prob expert chooseExpertProb = np.max((probExpert ** epoch, 0.5)) # What we will pass to the actual training epoch are: # xTrain, Strain and yTrain for computation xDAG = np.zeros(xTrainOrig.shape) yDAG = np.zeros(yTrainOrig.shape) SDAG = np.zeros(StrainOrig.shape) # initVelTrain is needed for evaluation, but doesn't change # For each sample, choose whether we keep the optimal # trajectory or we add the learned trajectory for s in range(nTrain): if np.random.binomial(1, chooseExpertProb) == 1: # If we choose the expert, we just get the values of # the optimal trajectory xDAG[s] = xTrainOrig[s] yDAG[s] = yTrainOrig[s] SDAG[s] = StrainOrig[s] else: # If not, we compute a new trajectory based on the # given architecture posDAG, velDAG, _, _, _ = self.data.computeTrajectory( initPosTrainAll[s:s+1], initVelTrainAll[s:s+1], self.data.duration, archit = self.model.archit, doPrint = False) # Now that we have the position and velocity trajectory # that we would get based on the learned controller, # we need to compute what the optimal acceleration # would actually be in each case. # And since this could be a large trajectory, we need # to split it based on how many samples maxTimeSamples = 200 if posDAG.shape[1] > maxTimeSamples: # Create the space yDAGaux = np.zeros((1, # batchSize posDAG.shape[1], # tSamples 2, posDAG.shape[3])) # nAgents for t in range(posDAG.shape[1]): # Compute the expert on the corresponding # trajectory # First, we need the difference in positions ijDiffPos, ijDistSq = \ self.data.computeDifferences(posDAG[:,t,:,:]) # And in velocities ijDiffVel, _ = \ self.data.computeDifferences(velDAG[:,t,:,:]) # Now, the second term (the one that depends # on the positions) only needs to be computed # for nodes thatare within repel distance, so # let's compute a mask to find these nodes. 
repelMask = (ijDistSq < (self.data.repelDist ** 2))\ .astype(ijDiffPos.dtype) # Apply this mask to the position difference # (we need not apply it to the square # differences since these will be multiplied # by the position differences which already # will be zero) # Note that we need to add the dimension of axis # to properly multiply it ijDiffPos = ijDiffPos *\ np.expand_dims(repelMask,1) # Invert the tensor elementwise (avoiding the # zeros) ijDistSqInv = invertTensorEW(ijDistSq) # Add an extra dimension, also across the # axis ijDistSqInv = np.expand_dims(ijDistSqInv, 1) # Compute the optimal solution thisAccel = -np.sum(ijDiffVel, axis = 3) \ + 2 * np.sum(ijDiffPos * \ (ijDistSqInv ** 2 + ijDistSqInv), axis = 3) # And cap it thisAccel[thisAccel > self.data.accelMax] = \ self.data.accelMax thisAccel[thisAccel < -self.data.accelMax] = \ -self.data.accelMax # Store it yDAGaux[:,t,:,:] = thisAccel else: # Compute the expert on the corresponding # trajectory # First, we need the difference in positions ijDiffPos,ijDistSq=self.data.computeDifferences(posDAG) # And in velocities ijDiffVel, _ = self.data.computeDifferences(velDAG) # Now, the second term (the one that depends on # the positions) only needs to be computed for # nodes that are within repel distance, so let's # compute a mask to find these nodes. repelMask = (ijDistSq < (self.data.repelDist ** 2))\ .astype(ijDiffPos.dtype) # Apply this mask to the position difference (we # need not apply it to the square differences, # since these will be multiplied by the position # differences, which already will be zero) # Note that we need to add the dimension of axis # to properly multiply it ijDiffPos = ijDiffPos * np.expand_dims(repelMask,2) # Invert the tensor elementwise (avoiding the # zeros) ijDistSqInv = invertTensorEW(ijDistSq) # Add an extra dimension, also across the axis ijDistSqInv = np.expand_dims(ijDistSqInv, 2) # Compute the optimal solution yDAGaux = -np.sum(ijDiffVel, axis = 4) \ + 2 * np.sum(ijDiffPos * \ (ijDistSqInv**2+ijDistSqInv), axis = 4) # And cap it yDAGaux[yDAGaux > self.data.accelMax] = self.data.accelMax yDAGaux[yDAGaux < -self.data.accelMax] = -self.data.accelMax # Finally, compute the corresponding graph of states # (pos) visited by the policy SDAGaux = self.data.computeCommunicationGraph( posDAG, self.data.commRadius, True, doPrint = False) xDAGaux = self.data.computeStates(posDAG, velDAG, SDAGaux, doPrint = False) # And save them xDAG[s] = xDAGaux[0] yDAG[s] = yDAGaux[0] SDAG[s] = SDAGaux[0] # And now that we have created the DAGger alternatives, we # just need to consider them as the basic training variables return xDAG, yDAG, SDAG def replaceTimeBatchDAGger(self, epoch, xTrainAll, yTrainAll, StrainAll, initPosTrainAll, initVelTrainAll, nReplace = 10): # The option 'replaceTimeBatch' creates a fixed number of # new trajectories following randomly at each time step # either the optimal control or the learned control # Then, replaces this fixed number of new trajectores into # the training set (then these might, or might not get # selected by the next batch) assert 'probExpert' in self.trainingOptions.kwargs() probExpert = self.trainingOptions['probExpert'] nTrain = xTrainAll.shape[0] if nReplace > nTrain: nReplace = nTrain # Select the indices of the samples to replace replaceIndices = np.random.permutation(nTrain)[0:nReplace] # Get the corresponding initial velocities and positions initPosTrainThis = initPosTrainAll[replaceIndices] initVelTrainThis = initVelTrainAll[replaceIndices] # Save the resulting 
trajectories xDAG = np.zeros((nReplace, xTrainAll.shape[1], 6, xTrainAll.shape[3])) yDAG = np.zeros((nReplace, yTrainAll.shape[1], 2, yTrainAll.shape[3])) SDAG = np.zeros((nReplace, StrainAll.shape[1], StrainAll.shape[2], StrainAll.shape[3])) posDAG = np.zeros(yDAG.shape) velDAG = np.zeros(yDAG.shape) # Initialize first elements posDAG[:,0,:,:] = initPosTrainThis velDAG[:,0,:,:] = initVelTrainThis SDAG[:,0,:,:] = StrainAll[replaceIndices,0] xDAG[:,0,:,:] = xTrainAll[replaceIndices,0] # Compute the prob expert chooseExpertProb = np.max((probExpert ** (epoch+1), 0.5)) # Now, for each sample for s in range(nReplace): # For each time instant for t in range(1,xTrainAll.shape[1]): # Decide whether we apply the learned or the # optimal controller if np.random.binomial(1, chooseExpertProb) == 1: # Compute the optimal acceleration ijDiffPos, ijDistSq = \ self.data.computeDifferences(posDAG[s:s+1,t-1,:,:]) ijDiffVel, _ = \ self.data.computeDifferences(velDAG[s:s+1,t-1,:,:]) repelMask = (ijDistSq < (self.data.repelDist ** 2))\ .astype(ijDiffPos.dtype) ijDiffPos = ijDiffPos *\ np.expand_dims(repelMask,1) ijDistSqInv = invertTensorEW(ijDistSq) ijDistSqInv = np.expand_dims(ijDistSqInv, 1) thisAccel = -np.sum(ijDiffVel, axis = 3) \ + 2 * np.sum(ijDiffPos * \ (ijDistSqInv ** 2 + ijDistSqInv), axis = 3) else: # Compute the learned acceleration # Add the sample dimension xThis = np.expand_dims(xDAG[s,0:t,:,:], 0) Sthis = np.expand_dims(SDAG[s,0:t,:,:], 0) # Convert to tensor xThis = torch.tensor(xThis, device=self.model.device) Sthis = torch.tensor(Sthis, device=self.model.device) # Compute the acceleration with torch.no_grad(): thisAccel = self.model.archit(xThis, Sthis) # Get only the last acceleration thisAccel = thisAccel.cpu().numpy()[:,-1,:,:] # Cap the acceleration thisAccel[thisAccel>self.data.accelMax]=self.data.accelMax thisAccel[thisAccel<-self.data.accelMax]=-self.data.accelMax # Save it yDAG[s,t-1,:,:] = thisAccel.squeeze(0) # Update the position and velocity velDAG[s,t,:,:] = \ yDAG[s,t-1,:,:] * self.data.samplingTime\ + velDAG[s,t-1,:,:] posDAG[s,t,:,:] = \ velDAG[s,t-1,:,:] * self.data.samplingTime\ + posDAG[s,t-1,:,:] # Update the state and the graph thisGraph = self.data.computeCommunicationGraph( posDAG[s:s+1,t:t+1,:,:], self.data.commRadius, True, doPrint = False) SDAG[s,t,:,:] = thisGraph.squeeze(1).squeeze(0) thisState = self.data.computeStates( posDAG[s:s+1,t:t+1,:,:], velDAG[s:s+1,t:t+1,:,:], SDAG[s:s+1,t:t+1,:,:], doPrint = False) xDAG[s,t,:,:] = thisState.squeeze(1).squeeze(0) # And now compute the last acceleration step if np.random.binomial(1, chooseExpertProb) == 1: # Compute the optimal acceleration ijDiffPos, ijDistSq = \ self.data.computeDifferences(posDAG[s:s+1,-1,:,:]) ijDiffVel, _ = \ self.data.computeDifferences(velDAG[s:s+1,-1,:,:]) repelMask = (ijDistSq < (self.data.repelDist ** 2))\ .astype(ijDiffPos.dtype) ijDiffPos = ijDiffPos *\ np.expand_dims(repelMask,1) ijDistSqInv = invertTensorEW(ijDistSq) ijDistSqInv = np.expand_dims(ijDistSqInv, 1) thisAccel = -np.sum(ijDiffVel, axis = 3) \ + 2 * np.sum(ijDiffPos * \ (ijDistSqInv ** 2 + ijDistSqInv), axis = 3) else: # Compute the learned acceleration # Add the sample dimension xThis = np.expand_dims(xDAG[s], 0) Sthis = np.expand_dims(SDAG[s], 0) # Convert to tensor xThis = torch.tensor(xThis, device=self.model.device) Sthis = torch.tensor(Sthis, device=self.model.device) # Compute the acceleration with torch.no_grad(): thisAccel = self.model.archit(xThis, Sthis) # Get only the last acceleration thisAccel = 
thisAccel.cpu().numpy()[:,-1,:,:] # Cap the acceleration thisAccel[thisAccel>self.data.accelMax]=self.data.accelMax thisAccel[thisAccel<-self.data.accelMax]=-self.data.accelMax # Save it yDAG[s,-1,:,:] = thisAccel.squeeze(0) # And now that we have done this for all the samples in # the replacement set, just replace them xTrainAll[replaceIndices] = xDAG yTrainAll[replaceIndices] = yDAG StrainAll[replaceIndices] = SDAG return xTrainAll, yTrainAll, StrainAll def fixedBatchDAGger(self, initPosTrain, initVelTrain): # The 'fixedBatch' option doubles the batch samples # by considering the same initial velocities and # positions, a trajectory given by the latest trained # architecture, and the corresponding correction # given by the optimal acceleration (i.e. for each # position and velocity we give what would be the # optimal acceleration, even though the next position # and velocity won't reflect this decision, but the # one taken by the learned policy) # Note that there's no point in doing it randomly here, # since the optimal trajectory is already considered in # the batch anyway. #\\\\\\\\\\\\\\\\ #\\\ Start DAGGER # Always apply DAGger on the trained policy posPol, velPol, _, _, _ = \ self.data.computeTrajectory(initPosTrain, initVelTrain, self.data.duration, archit = self.model.archit, doPrint = False) # Compute the optimal acceleration on the trajectory given # by the trained policy maxTimeSamples = 200 if posPol.shape[1] > maxTimeSamples: # Create the space to store this yDAG = np.zeros(posPol.shape) for t in range(posPol.shape[1]): # Compute the expert on the corresponding trajectory # First, we need the difference in positions ijDiffPos, ijDistSq = \ self.data.computeDifferences(posPol[:,t,:,:]) # And in velocities ijDiffVel, _ = \ self.data.computeDifferences(velPol[:,t,:,:]) # Now, the second term (the one that depends on # the positions) only needs to be computed for # nodes that are within repel distance, so let's # compute a mask to find these nodes. repelMask = (ijDistSq < (self.data.repelDist ** 2))\ .astype(ijDiffPos.dtype) # Apply this mask to the position difference (we # need not apply it to the square differences, # since these will be multiplied by the position # differences which already will be zero) # Note that we need to add the dimension of axis # to properly multiply it ijDiffPos = ijDiffPos * np.expand_dims(repelMask,1) # Invert the tensor elementwise (avoiding the # zeros) ijDistSqInv = invertTensorEW(ijDistSq) # Add an extra dimension, also across the axis ijDistSqInv = np.expand_dims(ijDistSqInv, 1) # Compute the optimal solution thisAccel = -np.sum(ijDiffVel, axis = 3) \ + 2 * np.sum(ijDiffPos * \ (ijDistSqInv ** 2 + ijDistSqInv), axis = 3) # And cap it thisAccel[thisAccel>self.data.accelMax]=self.data.accelMax thisAccel[thisAccel<-self.data.accelMax]=-self.data.accelMax # Store it yDAG[:,t,:,:] = thisAccel else: # Compute the expert on the corresponding trajectory # First, we need the difference in positions ijDiffPos, ijDistSq = self.data.computeDifferences(posPol) # And in velocities ijDiffVel, _ = self.data.computeDifferences(velPol) # Now, the second term (the one that depends on the # positions) only needs to be computed for nodes that # are within repel distance, so let's compute a mask # to find these nodes.
repelMask = (ijDistSq < (self.data.repelDist ** 2))\ .astype(ijDiffPos.dtype) # Apply this mask to the position difference (we need # not apply it to the square differences, since these # will be multiplied by the position differences, # which already will be zero) # Note that we need to add the dimension of axis to # properly multiply it ijDiffPos = ijDiffPos * np.expand_dims(repelMask, 2) # Invert the tensor elementwise (avoiding the zeros) ijDistSqInv = invertTensorEW(ijDistSq) # Add an extra dimension, also across the axis ijDistSqInv = np.expand_dims(ijDistSqInv, 2) # Compute the optimal solution yDAG = -np.sum(ijDiffVel, axis = 4) \ + 2 * np.sum(ijDiffPos * \ (ijDistSqInv ** 2 + ijDistSqInv), axis = 4) # And cap it yDAG[yDAG > self.data.accelMax] = self.data.accelMax yDAG[yDAG < -self.data.accelMax] = -self.data.accelMax # Finally, compute the corresponding graph of states # (pos) visited by the policy graphDAG = self.data.computeCommunicationGraph(posPol, self.data.commRadius, True, doPrint = False) xDAG = self.data.computeStates(posPol, velPol, graphDAG, doPrint = False) # Add it to the existing batch return xDAG, yDAG, graphDAG
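# A minimal, standalone sketch of the expert (optimal) flocking controller
# that the DAGger routines above recompute inline: velocity consensus plus a
# collision-avoidance term that only acts within repelDist, with the result
# capped at +/- accelMax. Illustration only, not part of the repository: the
# function name, the unbatched (N, 2) shapes, and the default parameter
# values are assumptions; the repo versions work on batched arrays through
# self.data.computeDifferences and invertTensorEW.
import numpy as np

def expert_acceleration(pos, vel, repelDist=1.0, accelMax=10.0):
    # pos, vel: (N, 2) positions and velocities of the N agents
    diffPos = pos[:, None, :] - pos[None, :, :]   # (N, N, 2), p_i - p_j
    diffVel = vel[:, None, :] - vel[None, :, :]   # (N, N, 2), v_i - v_j
    distSq = np.sum(diffPos ** 2, axis=2)         # (N, N) squared distances
    # The repulsion term only acts on neighbors closer than repelDist
    repelMask = (distSq < repelDist ** 2).astype(pos.dtype)
    diffPos = diffPos * repelMask[:, :, None]
    # Invert the squared distances elementwise, avoiding the zeros on the
    # diagonal (each agent's distance to itself)
    distSqInv = np.zeros_like(distSq)
    nonzero = distSq > 0
    distSqInv[nonzero] = 1.0 / distSq[nonzero]
    distSqInv = distSqInv[:, :, None]
    # Velocity consensus plus repulsion, mirroring the expert above
    accel = -np.sum(diffVel, axis=1) \
            + 2 * np.sum(diffPos * (distSqInv ** 2 + distSqInv), axis=1)
    # Cap the acceleration, as the trainer does with accelMax
    return np.clip(accel, -accelMax, accelMax)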
76,065
43.903188
85
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/modules/model.py
# 2018/10/02~ # Fernando Gama, fgama@seas.upenn.edu # Luana Ruiz, rubruiz@seas.upenn.edu """ model.py Model Module Utilities useful for working on the model Model: binds together the architecture, the loss function, the optimizer, the trainer, and the evaluator. """ import os import torch class Model: """ Model: binds together the architecture, the loss function, the optimizer, the trainer, and the evaluator. Initialization: architecture (nn.Module) loss (nn.modules.loss._Loss) optimizer (nn.optim) trainer (Modules.training) evaluator (Modules.evaluation) device (string or device) name (string) saveDir (string or path) .train(data, nEpochs, batchSize, **kwargs): train the model for nEpochs epochs, using batches of size batchSize and running over data data class; see the specific selected trainer for extra options .evaluate(data): evaluate the model over data data class; see the specific selected evaluator for extra options .save(label = '', [saveDir=dirPath]): save the model parameters under the name given by label, if the saveDir is different from the one specified in the initialization, it needs to be specified now .load(label = '', [loadFiles=(architLoadFile, optimLoadFile)]): loads the model parameters under the specified name inside the specific saveDir, unless they are provided externally through the keyword 'loadFiles'. .getTrainingOptions(): get a dict with the options used during training; it returns None if it hasn't been trained yet. """ def __init__(self, # Architecture (nn.Module) architecture, # Loss Function (nn.modules.loss._Loss) loss, # Optimization Algorithm (nn.optim) optimizer, # Training Algorithm (Modules.training) trainer, # Evaluating Algorithm (Modules.evaluation) evaluator, # Other device, name, saveDir): #\\\ ARCHITECTURE # Store self.archit = architecture # Move it to device self.archit.to(device) # Count parameters (doesn't work for EdgeVarying) self.nParameters = 0 for param in list(self.archit.parameters()): if len(param.shape)>0: thisNParam = 1 for p in range(len(param.shape)): thisNParam *= param.shape[p] self.nParameters += thisNParam else: pass #\\\ LOSS FUNCTION self.loss = loss #\\\ OPTIMIZATION ALGORITHM self.optim = optimizer #\\\ TRAINING ALGORITHM self.trainer = trainer #\\\ EVALUATING ALGORITHM self.evaluator = evaluator #\\\ OTHER # Device self.device = device # Model name self.name = name # Saving directory self.saveDir = saveDir def train(self, data, nEpochs, batchSize, **kwargs): self.trainer = self.trainer(self, data, nEpochs, batchSize, **kwargs) return self.trainer.train() def evaluate(self, data, **kwargs): return self.evaluator(self, data, **kwargs) def save(self, label = '', **kwargs): if 'saveDir' in kwargs.keys(): saveDir = kwargs['saveDir'] else: saveDir = self.saveDir saveModelDir = os.path.join(saveDir,'savedModels') # Create directory savedModels if it doesn't exist yet: if not os.path.exists(saveModelDir): os.makedirs(saveModelDir) saveFile = os.path.join(saveModelDir, self.name) torch.save(self.archit.state_dict(), saveFile+'Archit'+ label+'.ckpt') torch.save(self.optim.state_dict(), saveFile+'Optim'+label+'.ckpt') def load(self, label = '', **kwargs): if 'loadFiles' in kwargs.keys(): (architLoadFile, optimLoadFile) = kwargs['loadFiles'] else: saveModelDir = os.path.join(self.saveDir,'savedModels') architLoadFile = os.path.join(saveModelDir, self.name + 'Archit' + label +'.ckpt') optimLoadFile = os.path.join(saveModelDir, self.name + 'Optim' + label + '.ckpt') self.archit.load_state_dict(torch.load(architLoadFile))
self.optim.load_state_dict(torch.load(optimLoadFile)) def getTrainingOptions(self): return self.trainer.trainingOptions \ if 'trainingOptions' in dir(self.trainer) \ else None def __repr__(self): reprString = "Name: %s\n" % (self.name) reprString += "Number of learnable parameters: %d\n"%(self.nParameters) reprString += "\n" reprString += "Model architecture:\n" reprString += "----- -------------\n" reprString += "\n" reprString += repr(self.archit) + "\n" reprString += "\n" reprString += "Loss function:\n" reprString += "---- ---------\n" reprString += "\n" reprString += repr(self.loss) + "\n" reprString += "\n" reprString += "Optimizer:\n" reprString += "----------\n" reprString += "\n" reprString += repr(self.optim) + "\n" reprString += "Training algorithm:\n" reprString += "-------- ----------\n" reprString += "\n" reprString += repr(self.trainer) + "\n" reprString += "Evaluation algorithm:\n" reprString += "---------- ----------\n" reprString += "\n" reprString += repr(self.evaluator) + "\n" return reprString
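# A minimal usage sketch of the Model class above -- illustration only, not
# from the repository. The stand-in architecture, trainer, and evaluator
# below are assumptions; in the repo these come from Modules.architectures,
# Modules.training and Modules.evaluation respectively.
import torch
import torch.nn as nn

architecture = nn.Linear(4, 2)  # stand-in for a GNN architecture
lossFunction = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(architecture.parameters(), lr=1e-3)

class DummyTrainer:  # stand-in trainer: Model.train() instantiates it and calls .train()
    def __init__(self, model, data, nEpochs, batchSize, **kwargs):
        self.trainingOptions = kwargs
    def train(self):
        return {}

def dummyEvaluator(model, data, **kwargs):  # stand-in evaluator
    return {}

model = Model(architecture, lossFunction, optimizer,
              DummyTrainer, dummyEvaluator,
              device='cpu', name='demoModel', saveDir='./experiments')
model.save(label='Init')  # writes demoModelArchitInit.ckpt and demoModelOptimInit.ckpt
model.load(label='Init')  # restores both state dicts from savedModels/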
5,959
35.341463
80
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/modules/architectures.py
# 2021/03/04~ # Fernando Gama, fgama@seas.upenn.edu # Luana Ruiz, rubruiz@seas.upenn.edu """ architectures.py Architectures module Definition of GNN architectures. SelectionGNN: implements the selection GNN architecture LocalActivationGNN: implements the selection GNN architecture with a local activation function (instead of pointwise) LocalGNN: implements the selection GNN architecture by means of local operations only SpectralGNN: implements the selection GNN architecture using spectral filters NodeVariantGNN: implements the selection GNN architecture with node-variant graph filters EdgeVariantGNN: implements the selection GNN architecture with edge-variant graph filters LocalEdgeNet: implements the selection GNN architecture with edge-variant graph filters and local operations only ARMAfilterGNN: implements the selection GNN architecture using ARMA graph filters by Jacobi's method LocalARMA: implements the selection GNN architecture using ARMA graph filters by Jacobi's method and local operations only AggregationGNN: implements the aggregation GNN architecture MultiNodeAggregationGNN: implements the multi-node aggregation GNN architecture GraphAttentionNetwork: implements the graph attention network architecture GraphConvolutionAttentionNetwork: implements the graph convolution attention network (GCAT) architecture EdgeVariantAttention: implements the edge variant graph filter, with coefficients learned following a parameterization given by the attention mechanism GraphRecurrentNN: implements a graph recurrent neural network with static GSO GatedGraphRecurrentNN: implements a gated graph recurrent neural network with static GSO. Gates can be time, node or edge gates """ import numpy as np import scipy import torch import torch.nn as nn import alegnnss.utils.graphML as gml import alegnnss.utils.graphTools from alegnnss.utils.dataTools import changeDataType zeroTolerance = 1e-9 # Absolute values below this number are considered zero. class SelectionGNN(nn.Module): """ SelectionGNN: implements the selection GNN architecture Initialization: SelectionGNN(dimNodeSignals, nFilterTaps, bias, # Graph Filtering nonlinearity, # Nonlinearity nSelectedNodes, poolingFunction, poolingSize, # Pooling dimLayersMLP, # MLP in the end GSO, order = None, # Structure coarsening = False) Input: /** Graph convolutional layers **/ dimNodeSignals (list of int): dimension of the signals at each layer (i.e. number of features at each node, or size of the vector supported at each node) nFilterTaps (list of int): number of filter taps on each layer (i.e. nFilterTaps-1 is the extent of neighborhoods that are reached, for example K=2 is info from the 1-hop neighbors) bias (bool): include bias after graph filter on every layer >> Obs.: dimNodeSignals[0] is the number of features (the dimension of the node signals) of the data, where dimNodeSignals[l] is the dimension obtained at the output of layer l, l=1,...,L. Therefore, for L layers, len(dimNodeSignals) = L+1. Slightly different, nFilterTaps[l] is the number of filter taps for the filters implemented at layer l+1, thus len(nFilterTaps) = L.
/** Activation function **/ nonlinearity (torch.nn): module from torch.nn non-linear activations /** Pooling **/ nSelectedNodes (list of int): number of nodes to keep after pooling on each layer >> Obs.: The selected nodes are the first nSelectedNodes[l] starting from the first element in the order specified by the given GSO >> Obs.: If coarsening = True, this variable is ignored since the number of nodes in each layer is given by the graph coarsening algorithm. poolingFunction (nn.Module in Utils.graphML or in torch.nn): summarizing function >> Obs.: If coarsening = True, then the pooling function is one of the regular 1-d pooling functions available in torch.nn (instead of one of the summarizing functions in Utils.graphML). poolingSize (list of int): size of the neighborhood to compute the summary from at each layer >> Obs.: If coarsening = True, then the pooling size is ignored since, due to the binary tree nature of the graph coarsening algorithm, it always has to be 2. /** Readout layers **/ dimLayersMLP (list of int): number of output hidden units of a sequence of fully connected layers after the graph filters have been applied /** Graph structure **/ GSO (np.array): graph shift operator of choice. order (string or None, default = None): determine the criteria to use when reordering the nodes (i.e. for pooling reasons); the string has to be such that there is a function named 'perm' + order in Utils.graphTools that takes as input the GSO and returns a new GSO ordered by the specified criteria and an order array coarsening (bool, default = False): if True uses graph coarsening instead of zero-padding to reduce the number of nodes. >> Obs.: (i) Graph coarsening only works when the number of edge features is 1 -scalar weights-. (ii) The graph coarsening forces a given order of the nodes, and this order has to be used to reordering the GSO as well as the samples during training; as such, this order is internally saved and applied to the incoming samples in the forward call -it is thus advised to use the identity ordering in the model class when using the coarsening method-. Output: nn.Module with a Selection GNN architecture with the above specified characteristics. Forward call: SelectionGNN(x) Input: x (torch.tensor): input data of shape batchSize x dimFeatures x numberNodes Output: y (torch.tensor): output data after being processed by the selection GNN; shape: batchSize x dimLayersMLP[-1] Other methods: .changeGSO(S, nSelectedNodes = [], poolingSize = []): takes as input a new graph shift operator S as a tensor of shape (dimEdgeFeatures x) numberNodes x numberNodes Then, next time the SelectionGNN is run, it will run over the graph with GSO S, instead of running over the original GSO S. This is particularly useful when training on one graph, and testing on another one. The number of selected nodes and the pooling size will not change unless specifically consider those as input. Those lists need to have the same length as the number of layers. There is no need to define both, unless they change. >> Obs.: The number of nodes in the GSOs need not be the same, but unless we want to risk zero-padding beyond the original number of nodes (which just results in disconnected nodes), then we might want to update the nSelectedNodes and poolingSize accordingly, if the size of the new GSO is different. y, yGNN = .splitForward(x): gives the output of the entire GNN y, which is of shape batchSize x dimLayersMLP[-1], as well as the output of all the GNN layers (i.e. 
before the MLP layers), yGNN of shape batchSize x nSelectedNodes[-1] x dimFeatures[-1]. This can be used to isolate the effect of the graph convolutions from the effect of the readout layer. """ def __init__(self, # Graph filtering dimNodeSignals, nFilterTaps, bias, # Nonlinearity nonlinearity, # Pooling nSelectedNodes, poolingFunction, poolingSize, # MLP in the end dimLayersMLP, # Structure GSO, # Ordering order = None, # Coarsening coarsening = False): # Initialize parent: super().__init__() # dimNodeSignals should be a list and of size 1 more than nFilter taps. assert len(dimNodeSignals) == len(nFilterTaps) + 1 # nSelectedNodes should be a list of size nFilterTaps, since the number # of nodes in the first layer is always the size of the graph assert len(nSelectedNodes) == len(nFilterTaps) # poolingSize also has to be a list of the same size assert len(poolingSize) == len(nFilterTaps) # Check whether the GSO has features or not. After that, always handle # it as a matrix of dimension E x N x N. assert len(GSO.shape) == 2 or len(GSO.shape) == 3 if len(GSO.shape) == 2: assert GSO.shape[0] == GSO.shape[1] GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N else: assert GSO.shape[1] == GSO.shape[2] # E x N x N # Store the values (using the notation in the paper): self.L = len(nFilterTaps) # Number of graph filtering layers self.F = dimNodeSignals # Features self.K = nFilterTaps # Filter taps self.E = GSO.shape[0] # Number of edge features if order is not None: # If there's going to be reordering, then the value of the # permutation function will be given by the criteria in # self.reorder. For instance, if self.reorder = 'Degree', then # we end up calling the function Utils.graphTools.permDegree. # We need to be sure that the function 'perm' + self.reorder # is available in the Utils.graphTools module. self.permFunction = eval('alegnnss.utils.graphTools.perm' + order) else: self.permFunction = alegnnss.utils.graphTools.permIdentity # This is overriden if coarsening is selected, since the ordering # function is native to that pooling method. self.coarsening = coarsening # Whether to do coarsening or not # If we have to do coarsening, then note that it can only be done if # we have a single edge feature, otherwise, each edge feature could be # coarsed (and thus, ordered) in a different way, and there is no # sensible way of merging back this different orderings. So, we will # only do coarsening if we have a single edge feature; otherwise, we # will default to selection sampling (therefore, always specify # nSelectedNodes) if self.coarsening and self.E == 1: self.permFunction = alegnnss.utils.graphTools.permCoarsening # Override # permutation function for the one corresponding to coarsening GSO = scipy.sparse.csr_matrix(GSO[0]) GSO, self.order = alegnnss.utils.graphTools.coarsen(GSO, levels=self.L, self_connections=False) # Now, GSO is a list of csr_matrix with self.L+1 coarsened GSOs, # we need to torch.tensor them and put them in a list. # order is just a list of indices to reorder the nodes. self.S = [] self.N = [] # It has to be reset, because now the number of # nodes is determined by the coarsening scheme for S in GSO: S = S.todense().A.reshape([self.E, S.shape[0], S.shape[1]]) # So, S.todense() returns a numpy.matrix object; a numpy # matrix cannot be converted into a tensor (i.e., added # the third dimension), therefore we need to convert it to # a numpy.array. According to the documentation, the # attribute .A in a numpy.matrix returns self as an ndarray # object. 
So that's why the .A is there. self.S.append(torch.tensor(S)) self.N.append(S.shape[1]) # Finally, because the graph coarsening algorithm is a binary tree # pooling, we always need to force a pooling size of 2 self.alpha = [2] * self.L else: # Call the corresponding ordering function. Recall that if no # order was selected, then this is permIdentity, so that nothing # changes. self.S, self.order = self.permFunction(GSO) if 'torch' not in repr(self.S.dtype): self.S = torch.tensor(self.S) self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes self.alpha = poolingSize self.coarsening = False # If it failed because there are more than # one edge feature, then just set this to false, so we do not # need to keep checking whether self.E == 1 or not, just this # one # See that we adding N_{0} = N as the number of nodes input the first # layer: this above is the list containing how many nodes are between # each layer. self.bias = bias # Boolean # Store the rest of the variables self.sigma = nonlinearity self.rho = poolingFunction self.dimLayersMLP = dimLayersMLP # And now, we're finally ready to create the architecture: #\\\ Graph filtering layers \\\ # OBS.: We could join this for with the one before, but we keep separate # for clarity of code. gfl = [] # Graph Filtering Layers for l in range(self.L): #\\ Graph filtering stage: gfl.append(gml.GraphFilter(self.F[l], self.F[l+1], self.K[l], self.E, self.bias)) # There is a 3*l below here, because we have three elements per # layer: graph filter, nonlinearity and pooling, so after each layer # we're actually adding elements to the (sequential) list. if self.coarsening: gfl[3*l].addGSO(self.S[l]) else: gfl[3*l].addGSO(self.S) #\\ Nonlinearity gfl.append(self.sigma()) #\\ Pooling if self.coarsening: gfl.append(self.rho(self.alpha[l])) else: gfl.append(self.rho(self.N[l], self.N[l+1], self.alpha[l])) # Same as before, this is 3*l+2 gfl[3*l+2].addGSO(self.S) # And now feed them into the sequential self.GFL = nn.Sequential(*gfl) # Graph Filtering Layers #\\\ MLP (Fully Connected Layers) \\\ fc = [] if len(self.dimLayersMLP) > 0: # Maybe we don't want to MLP anything # The first layer has to connect whatever was left of the graph # signal, flattened. dimInputMLP = self.N[-1] * self.F[-1] # (i.e., we have N[-1] nodes left, each one described by F[-1] # features which means this will be flattened into a vector of size # N[-1]*F[-1]) fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0], bias = self.bias)) # The last linear layer cannot be followed by nonlinearity, because # usually, this nonlinearity depends on the loss function (for # instance, if we have a classification problem, this nonlinearity # is already handled by the cross entropy loss or we add a softmax.) for l in range(len(dimLayersMLP)-1): # Add the nonlinearity because there's another linear layer # coming fc.append(self.sigma()) # And add the linear layer fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1], bias = self.bias)) # And we're done self.MLP = nn.Sequential(*fc) # so we finally have the architecture. def changeGSO(self, GSO, nSelectedNodes = [], poolingSize = []): # We use this to change the GSO, using the same graph filters. 
# Check that the new GSO has the correct assert len(GSO.shape) == 2 or len(GSO.shape) == 3 if len(GSO.shape) == 2: assert GSO.shape[0] == GSO.shape[1] GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N else: assert GSO.shape[1] == GSO.shape[2] # E x N x N # Get dataType and device of the current GSO, so when we replace it, it # is still located in the same type and the same device. dataType = self.S.dtype if 'device' in dir(self.S): device = self.S.device else: device = None # Now, if we don't have coarsening, then we need to reorder the GSO, # and since this GSO reordering will affect several parts of the non # coarsening algorithm, then we will do it now # Reorder the GSO if not self.coarsening: self.S, self.order = self.permFunction(GSO) # Change data type and device as required self.S = changeDataType(self.S, dataType) if device is not None: self.S = self.S.to(device) # Before making decisions, check if there is a new poolingSize list if len(poolingSize) > 0 and not self.coarsening: # (If it's coarsening, then the pooling size cannot change) # Check it has the right length assert len(poolingSize) == self.L # And update it self.alpha = poolingSize # Now, check if we have a new list of nodes (this only makes sense # if there is no coarsening, because if it is coarsening, the list with # the number of nodes to be considered is ignored.) if len(nSelectedNodes) > 0 and not self.coarsening: # If we do, then we need to change the pooling functions to select # less nodes. This would allow to use graphs of different size. # Note that the pooling function, there is nothing learnable, so # they can easily be re-made, re-initialized. # The first thing we need to check, is that the length of the # number of nodes is equal to the number of layers (this list # indicates the number of nodes selected at the output of each # layer) assert len(nSelectedNodes) == self.L # Then, update the N that we have stored self.N = [GSO.shape[1]] + nSelectedNodes # And get the new pooling functions for l in range(self.L): # For each layer, add the pooling function self.GFL[3*l+2] = self.rho(self.N[l], self.N[l+1], self.alpha[l]) self.GFL[3*l+2].addGSO(self.S) elif len(nSelectedNodes) == 0 and not self.coarsening: # Just update the GSO for l in range(self.L): self.GFL[3*l+2].addGSO(self.S) # If it's coarsening, then we need to compute the new coarsening # scheme if self.coarsening and self.E == 1: device = self.S[0].device GSO = scipy.sparse.csr_matrix(GSO[0]) GSO, self.order = alegnnss.utils.graphTools.coarsen(GSO, levels=self.L, self_connections=False) # Now, GSO is a list of csr_matrix with self.L+1 coarsened GSOs, # we need to torch.tensor them and put them in a list. # order is just a list of indices to reorder the nodes. self.S = [] self.N = [] # It has to be reset, because now the number of # nodes is determined by the coarsening scheme for S in GSO: S = S.todense().A.reshape([self.E, S.shape[0], S.shape[1]]) # So, S.todense() returns a numpy.matrix object; a numpy # matrix cannot be converted into a tensor (i.e., added # the third dimension), therefore we need to convert it to # a numpy.array. According to the documentation, the # attribute .A in a numpy.matrix returns self as an ndarray # object. So that's why the .A is there. self.S.append(torch.tensor(S).to(device)) self.N.append(S.shape[1]) # And we need to update the GSO in all the places. 
# Note that we do not need to change the pooling function, because # it is the standard pooling function that doesn't care about the # number of nodes: it still takes one every two of them. for l in range(self.L): self.GFL[3*l].addGSO(self.S[l]) # Graph convolutional layer else: # And update in the LSIGF that is still missing (recall that the # ordering for the non-coarsening case has already been done) for l in range(self.L): self.GFL[3*l].addGSO(self.S) # Graph convolutional layer def splitForward(self, x): # Reorder the nodes from the data # If we have added dummy nodes (which has to happen when the size # is different and we chose coarsening), then we need to use the # provided permCoarsening function (which acts on data to add dummy # variables) if x.shape[2] != self.N[0] and self.coarsening: thisDevice = x.device # Save the device we were operating on x = x.cpu().numpy() # Convert to numpy x = alegnnss.utils.graphTools.permCoarsening(x, self.order) # Reorder and add dummy values x = torch.tensor(x).to(thisDevice) else: # If not, simply reorder the nodes x = x[:, :, self.order] # Now we compute the forward call assert len(x.shape) == 3 batchSize = x.shape[0] assert x.shape[1] == self.F[0] assert x.shape[2] == self.N[0] # Let's call the graph filtering layer y = self.GFL(x) # Flatten the output yFlat = y.reshape(batchSize, self.F[-1] * self.N[-1]) # And, feed it into the MLP return self.MLP(yFlat), y # If self.MLP is a sequential on an empty list it just does nothing. def forward(self, x): # Most of the time, we just need the actual, last output. But, since in # this case, we also want to compare with the output of the GNN itself, # we need to create this other forward function that takes both outputs # (the GNN and the MLP) and returns only the MLP output in the proper # forward function. output, _ = self.splitForward(x) return output def to(self, device): # Because only the filter taps and the weights are registered as # parameters, when we do a .to(device) operation it does not move the # GSOs. So we need to move them ourselves. # Call the parent .to() method (to move the registered parameters) super().to(device) # Move the GSO if self.coarsening: for l in range(self.L): self.S[l] = self.S[l].to(device) self.GFL[3*l].addGSO(self.S[l]) else: self.S = self.S.to(device) # And all the other variables derived from it. for l in range(self.L): self.GFL[3*l].addGSO(self.S) self.GFL[3*l+2].addGSO(self.S) class LocalActivationGNN(nn.Module): """ LocalActivationGNN: implements the selection GNN architecture with a local activation function (instead of pointwise) Initialization: LocalActivationGNN(dimNodeSignals, nFilterTaps, bias, # Graph Filtering nonlinearity, kHopActivation, # Nonlinearity nSelectedNodes, poolingFunction, poolingSize, # Pool dimLayersMLP, # MLP in the end GSO, order = None) # Structure Input: /** Graph convolutional layers **/ dimNodeSignals (list of int): dimension of the signals at each layer (i.e. number of features at each node, or size of the vector supported at each node) nFilterTaps (list of int): number of filter taps on each layer (i.e. nFilterTaps-1 is the extent of neighborhoods that are reached, for example K=2 is info from the 1-hop neighbors) bias (bool): include bias after graph filter on every layer >> Obs.: dimNodeSignals[0] is the number of features (the dimension of the node signals) of the data, where dimNodeSignals[l] is the dimension obtained at the output of layer l, l=1,...,L. Therefore, for L layers, len(dimNodeSignals) = L+1.
Slightly different, nFilterTaps[l] is the number of filter taps for the filters implemented at layer l+1, thus len(nFilterTaps) = L. /** Activation function **/ nonlinearity (torch.nn): module from Utils.graphML non-linear local activation functions kHopActivation (list of int): number of neighborhood hop to include in the local activation function at each layer /** Pooling **/ nSelectedNodes (list of int): number of nodes to keep after pooling on each layer >> Obs.: The selected nodes are the first nSelectedNodes[l] starting from the first element in the order specified by the given GSO >> Obs.: If coarsening = True, this variable is ignored since the number of nodes in each layer is given by the graph coarsening algorithm. poolingFunction (nn.Module in Utils.graphML or in torch.nn): summarizing function >> Obs.: If coarsening = True, then the pooling function is one of the regular 1-d pooling functions available in torch.nn (instead of one of the summarizing functions in Utils.graphML). poolingSize (list of int): size of the neighborhood to compute the summary from at each layer >> Obs.: If coarsening = True, then the pooling size is ignored since, due to the binary tree nature of the graph coarsening algorithm, it always has to be 2. /** Readout layers **/ dimLayersMLP (list of int): number of output hidden units of a sequence of fully connected layers after the graph filters have been applied /** Graph structure **/ GSO (np.array): graph shift operator of choice. order (string or None, default = None): determine the criteria to use when reordering the nodes (i.e. for pooling reasons); the string has to be such that there is a function named 'perm' + order in Utils.graphTools that takes as input the GSO and returns a new GSO ordered by the specified criteria and an order array Output: nn.Module with a Selection GNN architecture with the above specified characteristics. Forward call: LocalActivationGNN(x) Input: x (torch.tensor): input data of shape batchSize x dimFeatures x numberNodes Output: y (torch.tensor): output data after being processed by the selection GNN; shape: batchSize x dimLayersMLP[-1] Other methods: .changeGSO(S, nSelectedNodes = [], poolingSize = []): takes as input a new graph shift operator S as a tensor of shape (dimEdgeFeatures x) numberNodes x numberNodes Then, next time the SelectionGNN is run, it will run over the graph with GSO S, instead of running over the original GSO S. This is particularly useful when training on one graph, and testing on another one. The number of selected nodes and the pooling size will not change unless specifically consider those as input. Those lists need to have the same length as the number of layers. There is no need to define both, unless they change. >> Obs.: The number of nodes in the GSOs need not be the same, but unless we want to risk zero-padding beyond the original number of nodes (which just results in disconnected nodes), then we might want to update the nSelectedNodes and poolingSize accordingly, if the size of the new GSO is different. y, yGNN = .splitForward(x): gives the output of the entire GNN y, which is of shape batchSize x dimLayersMLP[-1], as well as the output of all the GNN layers (i.e. before the MLP layers), yGNN of shape batchSize x nSelectedNodes[-1] x dimFeatures[-1]. This can be used to isolate the effect of the graph convolutions from the effect of the readout layer. 
""" def __init__(self, # Graph filtering dimNodeSignals, nFilterTaps, bias, # Nonlinearity nonlinearity, kHopActivation, # Pooling nSelectedNodes, poolingFunction, poolingSize, # MLP in the end dimLayersMLP, # Structure GSO, order = None): # Initialize parent: super().__init__() # dimNodeSignals should be a list and of size 1 more than nFilter taps. assert len(dimNodeSignals) == len(nFilterTaps) + 1 # kHopActivation is a list with the same number of elements as # nFilterTaps (number of layers) assert len(kHopActivation) == len(nFilterTaps) # nSelectedNodes should be a list of size nFilterTaps, since the number # of nodes in the first layer is always the size of the graph assert len(nSelectedNodes) == len(nFilterTaps) # poolingSize also has to be a list of the same size assert len(poolingSize) == len(nFilterTaps) # Check whether the GSO has features or not. After that, always handle # it as a matrix of dimension E x N x N. assert len(GSO.shape) == 2 or len(GSO.shape) == 3 if len(GSO.shape) == 2: assert GSO.shape[0] == GSO.shape[1] GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N else: assert GSO.shape[1] == GSO.shape[2] # E x N x N # Store the values (using the notation in the paper): self.L = len(nFilterTaps) # Number of graph filtering layers self.F = dimNodeSignals # Features self.K = nFilterTaps # Filter taps self.kHop = kHopActivation # k-hop neighborhood for local activation self.E = GSO.shape[0] # Number of edge features if order is not None: # If there's going to be reordering, then the value of the # permutation function will be given by the criteria in # self.reorder. For instance, if self.reorder = 'Degree', then # we end up calling the function Utils.graphTools.permDegree. # We need to be sure that the function 'perm' + self.reorder # is available in the Utils.graphTools module. self.permFunction = eval('Utils.graphTools.perm' + order) else: self.permFunction = alegnnss.utils.graphTools.permIdentity # This is overriden if coarsening is selected, since the ordering # function is native to that pooling method. # Call the corresponding ordering function. Recall that if no # order was selected, then this is permIdentity, so that nothing # changes. self.S, self.order = self.permFunction(GSO) if 'torch' not in repr(self.S.dtype): self.S = torch.tensor(self.S) self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes self.alpha = poolingSize # See that we adding N_{0} = N as the number of nodes input the first # layer: this above is the list containing how many nodes are between # each layer. self.bias = bias # Boolean # Store the rest of the variables self.sigma = nonlinearity self.rho = poolingFunction self.dimLayersMLP = dimLayersMLP # And now, we're finally ready to create the architecture: #\\\ Graph filtering layers \\\ # OBS.: We could join this for with the one before, but we keep separate # for clarity of code. gfl = [] # Graph Filtering Layers for l in range(self.L): #\\ Graph filtering stage: gfl.append(gml.GraphFilter(self.F[l], self.F[l+1], self.K[l], self.E, self.bias)) # There is a 3*l below here, because we have three elements per # layer: graph filter, nonlinearity and pooling, so after each layer # we're actually adding elements to the (sequential) list. 
gfl[3*l].addGSO(self.S) #\\ Nonlinearity gfl.append(self.sigma(self.kHop[l])) # Add GSO for this layer gfl[3*l+1].addGSO(self.S) #\\ Pooling gfl.append(self.rho(self.N[l], self.N[l+1], self.alpha[l])) # Same as before, this is 3*l+2 gfl[3*l+2].addGSO(self.S) # And now feed them into the sequential self.GFL = nn.Sequential(*gfl) # Graph Filtering Layers #\\\ MLP (Fully Connected Layers) \\\ fc = [] if len(self.dimLayersMLP) > 0: # Maybe we don't want to MLP anything # The first layer has to connect whatever was left of the graph # signal, flattened. dimInputMLP = self.N[-1] * self.F[-1] # (i.e., we have N[-1] nodes left, each one described by F[-1] # features which means this will be flattened into a vector of size # N[-1]*F[-1]) fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0], bias = self.bias)) # The last linear layer cannot be followed by nonlinearity, because # usually, this nonlinearity depends on the loss function (for # instance, if we have a classification problem, this nonlinearity # is already handled by the cross entropy loss or we add a softmax.) for l in range(len(dimLayersMLP)-1): # Add the nonlinearity because there's another linear layer # coming fc.append(self.sigma()) # And add the linear layer fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1], bias = self.bias)) # And we're done self.MLP = nn.Sequential(*fc) # so we finally have the architecture. def changeGSO(self, GSO, nSelectedNodes = [], poolingSize = []): # We use this to change the GSO, using the same graph filters. # Check that the new GSO has the correct assert len(GSO.shape) == 2 or len(GSO.shape) == 3 if len(GSO.shape) == 2: assert GSO.shape[0] == GSO.shape[1] GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N else: assert GSO.shape[1] == GSO.shape[2] # E x N x N # Get dataType and device of the current GSO, so when we replace it, it # is still located in the same type and the same device. dataType = self.S.dtype if 'device' in dir(self.S): device = self.S.device else: device = None # Reorder the new GSO self.S, self.order = self.permFunction(GSO) # Change data type and device as required self.S = changeDataType(self.S, dataType) if device is not None: self.S = self.S.to(device) # Before making decisions, check if there is a new poolingSize list if len(poolingSize) > 0: # (If it's coarsening, then the pooling size cannot change) # Check it has the right length assert len(poolingSize) == self.L # And update it self.alpha = poolingSize # Now, check if we have a new list of nodes (this only makes sense # if there is no coarsening, because if it is coarsening, the list with # the number of nodes to be considered is ignored.) if len(nSelectedNodes) > 0: # If we do, then we need to change the pooling functions to select # less nodes. This would allow to use graphs of different size. # Note that the pooling function, there is nothing learnable, so # they can easily be re-made, re-initialized. 
# The first thing we need to check is that the length of the # number of nodes is equal to the number of layers (this list # indicates the number of nodes selected at the output of each # layer) assert len(nSelectedNodes) == self.L # Then, update the N that we have stored self.N = [GSO.shape[1]] + nSelectedNodes # And get the new pooling functions for l in range(self.L): # For each layer, add the pooling function self.GFL[3*l+2] = self.rho(self.N[l], self.N[l+1], self.alpha[l]) self.GFL[3*l+2].addGSO(self.S) elif len(nSelectedNodes) == 0: # (no coarsening in this architecture) # Just update the GSO for l in range(self.L): self.GFL[3*l+2].addGSO(self.S) # And update in the LSIGF that is still missing (recall that the # ordering has already been done) for l in range(self.L): self.GFL[3*l].addGSO(self.S) # Graph convolutional layer self.GFL[3*l+1].addGSO(self.S) # Local Activation function def splitForward(self, x): # Now we compute the forward call assert len(x.shape) == 3 batchSize = x.shape[0] assert x.shape[1] == self.F[0] assert x.shape[2] == self.N[0] # Reorder x = x[:, :, self.order] # B x F x N # Let's call the graph filtering layer y = self.GFL(x) # Flatten the output yFlat = y.reshape(batchSize, self.F[-1] * self.N[-1]) # And, feed it into the MLP return self.MLP(yFlat), y # If self.MLP is a sequential on an empty list it just does nothing. def forward(self, x): # Most of the time, we just need the actual, last output. But, since in # this case, we also want to compare with the output of the GNN itself, # we need to create this other forward function that takes both outputs # (the GNN and the MLP) and returns only the MLP output in the proper # forward function. output, _ = self.splitForward(x) return output def to(self, device): # Because only the filter taps and the weights are registered as # parameters, when we do a .to(device) operation it does not move the # GSOs. So we need to move them ourselves. # Call the parent .to() method (to move the registered parameters) super().to(device) # Move the GSO self.S = self.S.to(device) # And all the other variables derived from it. for l in range(self.L): self.GFL[3*l].addGSO(self.S) self.GFL[3*l+1].addGSO(self.S) self.GFL[3*l+2].addGSO(self.S) class LocalGNN(nn.Module): """ LocalGNN: implements the selection GNN architecture where all operations are implemented locally, i.e. by means of neighboring exchanges only. More specifically, it has graph convolutional layers, but the readout layer, instead of being an MLP for the entire graph signal, it is a linear combination of the features at each node. >> Obs.: This precludes the use of clustering as a pooling operation, since clustering is not local (it changes the given graph). Initialization: LocalGNN(dimNodeSignals, nFilterTaps, bias, # Graph Filtering nonlinearity, # Nonlinearity nSelectedNodes, poolingFunction, poolingSize, # Pooling dimReadout, # Local readout layer GSO, order = None # Structure) Input: /** Graph convolutional layers **/ dimNodeSignals (list of int): dimension of the signals at each layer (i.e. number of features at each node, or size of the vector supported at each node) nFilterTaps (list of int): number of filter taps on each layer (i.e.
nFilterTaps-1 is the extent of neighborhoods that are reached, for example K=2 is info from the 1-hop neighbors) bias (bool): include bias after graph filter on every layer >> Obs.: dimNodeSignals[0] is the number of features (the dimension of the node signals) of the data, where dimNodeSignals[l] is the dimension obtained at the output of layer l, l=1,...,L. Therefore, for L layers, len(dimNodeSignals) = L+1. Slightly different, nFilterTaps[l] is the number of filter taps for the filters implemented at layer l+1, thus len(nFilterTaps) = L. /** Activation function **/ nonlinearity (torch.nn): module from torch.nn non-linear activations /** Pooling **/ nSelectedNodes (list of int): number of nodes to keep after pooling on each layer >> Obs.: The selected nodes are the first nSelectedNodes[l] starting from the first element in the order specified by the given GSO poolingFunction (nn.Module in Utils.graphML): summarizing function poolingSize (list of int): size of the neighborhood to compute the summary from at each layer /** Readout layers **/ dimReadout (list of int): number of output hidden units of a sequence of fully connected layers applied locally at each node (i.e. no exchange of information involved). /** Graph structure **/ GSO (np.array): graph shift operator of choice. order (string or None, default = None): determine the criteria to use when reordering the nodes (i.e. for pooling reasons); the string has to be such that there is a function named 'perm' + order in Utils.graphTools that takes as input the GSO and returns a new GSO ordered by the specified criteria and an order array Output: nn.Module with a Local GNN architecture with the above specified characteristics. Forward call: LocalGNN(x) Input: x (torch.tensor): input data of shape batchSize x dimFeatures x numberNodes Output: y (torch.tensor): output data after being processed by the selection GNN; shape: batchSize x dimReadout[-1] x nSelectedNodes[-1] Other methods: .changeGSO(S, nSelectedNodes = [], poolingSize = []): takes as input a new graph shift operator S as a tensor of shape (dimEdgeFeatures x) numberNodes x numberNodes Then, next time the SelectionGNN is run, it will run over the graph with GSO S, instead of running over the original GSO S. This is particularly useful when training on one graph, and testing on another one. The number of selected nodes and the pooling size will not change unless specifically consider those as input. Those lists need to have the same length as the number of layers. There is no need to define both, unless they change. >> Obs.: The number of nodes in the GSOs need not be the same, but unless we want to risk zero-padding beyond the original number of nodes (which just results in disconnected nodes), then we might want to update the nSelectedNodes and poolingSize accordingly, if the size of the new GSO is different. y, yGNN = .splitForward(x): gives the output of the entire GNN y, which is of shape batchSize x dimReadout[-1], as well as the output of all the GNN layers (i.e. before the readout layers), yGNN of shape batchSize x nSelectedNodes[-1] x dimFeatures[-1]. This can be used to isolate the effect of the graph convolutions from the effect of the readout layer. y = .singleNodeForward(x, nodes): outputs the value of the last layer at a single node. x is the usual input of shape batchSize x dimFeatures x numberNodes. 
nodes is either a single node (int) or a collection of nodes (list or numpy.array) of length batchSize, where for each element in the batch, we get the output at the single specified node. The output y is of shape batchSize x dimReadout[-1]. """ def __init__(self, # Graph filtering dimNodeSignals, nFilterTaps, bias, # Nonlinearity nonlinearity, # Pooling nSelectedNodes, poolingFunction, poolingSize, # MLP in the end dimReadout, # Structure GSO, order = None): # Initialize parent: super().__init__() # dimNodeSignals should be a list and of size 1 more than nFilter taps. assert len(dimNodeSignals) == len(nFilterTaps) + 1 # nSelectedNodes should be a list of size nFilterTaps, since the number # of nodes in the first layer is always the size of the graph assert len(nSelectedNodes) == len(nFilterTaps) # poolingSize also has to be a list of the same size assert len(poolingSize) == len(nFilterTaps) # Check whether the GSO has features or not. After that, always handle # it as a matrix of dimension E x N x N. assert len(GSO.shape) == 2 or len(GSO.shape) == 3 if len(GSO.shape) == 2: assert GSO.shape[0] == GSO.shape[1] GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N else: assert GSO.shape[1] == GSO.shape[2] # E x N x N # Store the values (using the notation in the paper): self.L = len(nFilterTaps) # Number of graph filtering layers self.F = dimNodeSignals # Features self.K = nFilterTaps # Filter taps self.E = GSO.shape[0] # Number of edge features if order is not None: # If there's going to be reordering, then the value of the # permutation function will be given by the criteria in # self.reorder. For instance, if self.reorder = 'Degree', then # we end up calling the function alegnnss.utils.graphTools.permDegree. # We need to be sure that the function 'perm' + self.reorder # is available in the alegnnss.utils.graphTools module. self.permFunction = eval('alegnnss.utils.graphTools.perm' + order) else: self.permFunction = alegnnss.utils.graphTools.permIdentity # This is overridden if coarsening is selected, since the ordering # function is native to that pooling method. self.S, self.order = self.permFunction(GSO) if 'torch' not in repr(self.S.dtype): self.S = torch.tensor(self.S) self.alpha = poolingSize self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes # See that we are adding N_{0} = N as the number of nodes input to the first # layer: this above is the list containing how many nodes are between # each layer. self.bias = bias # Boolean # Store the rest of the variables self.sigma = nonlinearity self.rho = poolingFunction self.dimReadout = dimReadout # And now, we're finally ready to create the architecture: #\\\ Graph filtering layers \\\ # OBS.: We could join this for loop with the one before, but we keep separate # for clarity of code. gfl = [] # Graph Filtering Layers for l in range(self.L): #\\ Graph filtering stage: gfl.append(gml.GraphFilter(self.F[l], self.F[l+1], self.K[l], self.E, self.bias)) # There is a 3*l below here, because we have three elements per # layer: graph filter, nonlinearity and pooling, so after each layer # we're actually adding elements to the (sequential) list.
gfl[3*l].addGSO(self.S) #\\ Nonlinearity gfl.append(self.sigma()) #\\ Pooling gfl.append(self.rho(self.N[l], self.N[l+1], self.alpha[l])) # Same as before, this is 3*l+2 gfl[3*l+2].addGSO(self.S) # And now feed them into the sequential self.GFL = nn.Sequential(*gfl) # Graph Filtering Layers #\\\ MLP (Fully Connected Layers) \\\ fc = [] if len(self.dimReadout) > 0: # Maybe we don't want to readout anything # The first layer has to connect whatever was left of the graph # filtering stage to create the number of features required by # the readout layer fc.append(nn.Linear(self.F[-1], dimReadout[0], bias = self.bias)) # The last linear layer cannot be followed by nonlinearity, because # usually, this nonlinearity depends on the loss function (for # instance, if we have a classification problem, this nonlinearity # is already handled by the cross entropy loss or we add a softmax.) for l in range(len(dimReadout)-1): # Add the nonlinearity because there's another linear layer # coming fc.append(self.sigma()) # And add the linear layer fc.append(nn.Linear(dimReadout[l], dimReadout[l+1], bias = self.bias)) # And we're done self.Readout = nn.Sequential(*fc) # so we finally have the architecture. def changeGSO(self, GSO, nSelectedNodes = [], poolingSize = []): # We use this to change the GSO, using the same graph filters. # Check that the new GSO has the correct assert len(GSO.shape) == 2 or len(GSO.shape) == 3 if len(GSO.shape) == 2: assert GSO.shape[0] == GSO.shape[1] GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N else: assert GSO.shape[1] == GSO.shape[2] # E x N x N # Get dataType and device of the current GSO, so when we replace it, it # is still located in the same type and the same device. dataType = self.S.dtype if 'device' in dir(self.S): device = self.S.device else: device = None # Reorder the new GSO self.S, self.order = self.permFunction(GSO) # Change data type and device as required self.S = changeDataType(self.S, dataType) if device is not None: self.S = self.S.to(device) # Before making decisions, check if there is a new poolingSize list if len(poolingSize) > 0: # Check it has the right length assert len(poolingSize) == self.L # And update it self.alpha = poolingSize # Now, check if we have a new list of nodes if len(nSelectedNodes) > 0: # If we do, then we need to change the pooling functions to select # less nodes. This would allow to use graphs of different size. # Note that the pooling function, there is nothing learnable, so # they can easily be re-made, re-initialized. 
# The first thing we need to check is that the length of the # number of nodes is equal to the number of layers (this list # indicates the number of nodes selected at the output of each # layer) assert len(nSelectedNodes) == self.L # Then, update the N that we have stored self.N = [GSO.shape[1]] + nSelectedNodes # And get the new pooling functions for l in range(self.L): # For each layer, add the pooling function self.GFL[3*l+2] = self.rho(self.N[l], self.N[l+1], self.alpha[l]) self.GFL[3*l+2].addGSO(self.S) else: # Just update the GSO for l in range(self.L): self.GFL[3*l+2].addGSO(self.S) # And update in the LSIGF that is still missing for l in range(self.L): self.GFL[3*l].addGSO(self.S) # Graph convolutional layer def splitForward(self, x): # Now we compute the forward call assert len(x.shape) == 3 assert x.shape[1] == self.F[0] assert x.shape[2] == self.N[0] # Reorder x = x[:, :, self.order] # B x F x N # Let's call the graph filtering layer yGFL = self.GFL(x) # Change the order, for the readout y = yGFL.permute(0, 2, 1) # B x N[-1] x F[-1] # And, feed it into the Readout layer y = self.Readout(y) # B x N[-1] x dimReadout[-1] # Reshape and return return y.permute(0, 2, 1), yGFL # B x dimReadout[-1] x N[-1], B x dimFeatures[-1] x N[-1] def forward(self, x): # Most of the time, we just need the actual, last output. But, since in # this case, we also want to compare with the output of the GNN itself, # we need to create this other forward function that takes both outputs # (the GNN and the MLP) and returns only the MLP output in the proper # forward function. output, _ = self.splitForward(x) return output def singleNodeForward(self, x, nodes): # x is of shape B x F[0] x N[-1] batchSize = x.shape[0] # nodes is either an int, or a list/np.array of ints of size B assert type(nodes) is int \ or type(nodes) is list \ or type(nodes) is np.ndarray # Let us start by building the selection matrix # This selection matrix has to be a matrix of shape # B x N[-1] x 1 # so that when multiplying with the output of the forward, we get a # B x dimReadout[-1] x 1 # and we just squeeze the last dimension # TODO: The big question here is if multiplying by a matrix is faster # than doing torch.index_select # Let's always work with numpy arrays to make it easier. if type(nodes) is int: # Change the node number to accommodate the new order nodes = self.order.index(nodes) # If it's int, make it a list and an array nodes = np.array([nodes], dtype=int) # And repeat for the number of batches nodes = np.tile(nodes, batchSize) if type(nodes) is list: newNodes = [self.order.index(n) for n in nodes] nodes = np.array(newNodes, dtype = int) elif type(nodes) is np.ndarray: newNodes = np.array([np.where(np.array(self.order) == n)[0][0] \ for n in nodes]) nodes = newNodes.astype(int) # Now, nodes is an integer np.ndarray with shape batchSize # Build the selection matrix selectionMatrix = np.zeros([batchSize, self.N[-1], 1]) selectionMatrix[np.arange(batchSize), nodes, 0] = 1. # And convert it to a tensor selectionMatrix = torch.tensor(selectionMatrix, dtype = x.dtype, device = x.device) # Now compute the output y = self.forward(x) # This output is of size B x dimReadout[-1] x N[-1] # Multiply the output y = torch.matmul(y, selectionMatrix) # B x dimReadout[-1] x 1 # Squeeze the last dimension and return return y.squeeze(2) def to(self, device): # Because only the filter taps and the weights are registered as # parameters, when we do a .to(device) operation it does not move the # GSOs.
So we need to move them ourselves. # Call the parent .to() method (to move the registered parameters) super().to(device) # Move the GSO self.S = self.S.to(device) # And all the other variables derived from it. for l in range(self.L): self.GFL[3*l].addGSO(self.S) self.GFL[3*l+2].addGSO(self.S) class SpectralGNN(nn.Module): """ SpectralGNN: implement the selection GNN architecture using spectral filters Initialization: SpectralGNN(dimNodeSignals, nCoeff, bias, # Graph Filtering nonlinearity, # Nonlinearity nSelectedNodes, poolingFunction, poolingSize, # Pooling dimLayersMLP, # MLP in the end GSO, order = None) # Structure Input: /** Graph convolutional layers **/ dimNodeSignals (list of int): dimension of the signals at each layer (i.e. number of features at each node, or size of the vector supported at each node) nCoeff (list of int): number of coefficients on each layer; if nCoeff[l] is less than the size of the graph, the remaining coefficients are interpolated by means of a cubic spline. bias (bool): include bias after graph filter on every layer >> Obs.: dimNodeSignals[0] is the number of features (the dimension of the node signals) of the data, where dimNodeSignals[l] is the dimension obtained at the output of layer l, l=1,...,L. Therefore, for L layers, len(dimNodeSignals) = L+1. Slightly different, nCoeff[l] is the number of coefficients for the filters implemented at layer l+1, thus len(nCoeff) = L. /** Activation function **/ nonlinearity (torch.nn): module from torch.nn non-linear activations /** Pooling **/ nSelectedNodes (list of int): number of nodes to keep after pooling on each layer >> Obs.: The selected nodes are the first nSelectedNodes[l] starting from the first element in the order specified by the given GSO poolingFunction (nn.Module in Utils.graphML): summarizing function poolingSize (list of int): size of the neighborhood to compute the summary from at each layer /** Readout layers **/ dimLayersMLP (list of int): number of output hidden units of a sequence of fully connected layers after the graph filters have been applied /** Graph structure **/ GSO (np.array): graph shift operator of choice. order (string or None, default = None): determine the criteria to use when reordering the nodes (i.e. for pooling reasons); the string has to be such that there is a function named 'perm' + order in Utils.graphTools that takes as input the GSO and returns a new GSO ordered by the specified criteria and an order array Output: nn.Module with a Selection GNN architecture with the above specified characteristics, using filters in the spectral domain. Forward call: SpectralGNN(x) Input: x (torch.tensor): input data of shape batchSize x dimFeatures x numberNodes Output: y (torch.tensor): output data after being processed by the selection GNN; shape: batchSize x dimLayersMLP[-1] Other methods: .changeGSO(S, nSelectedNodes = [], poolingSize = []): takes as input a new graph shift operator S as a tensor of shape (dimEdgeFeatures x) numberNodes x numberNodes Then, next time the SelectionGNN is run, it will run over the graph with GSO S, instead of running over the original GSO S. This is particularly useful when training on one graph, and testing on another one. The number of selected nodes and the pooling size will not change unless specifically consider those as input. Those lists need to have the same length as the number of layers. There is no need to define both, unless they change. >> Obs.: This will only work if both the original GSO and the new one have the same number of nodes. 
        y, yGNN = .splitForward(x): gives the output of the entire GNN y,
        which is of shape batchSize x dimLayersMLP[-1], as well as the output
        of all the GNN layers (i.e. before the MLP layers), yGNN of shape
        batchSize x nSelectedNodes[-1] x dimFeatures[-1]. This can be used to
        isolate the effect of the graph convolutions from the effect of the
        readout layer.
    """

    def __init__(self,
                 # Graph filtering
                 dimNodeSignals, nCoeff, bias,
                 # Nonlinearity
                 nonlinearity,
                 # Pooling
                 nSelectedNodes, poolingFunction, poolingSize,
                 # MLP in the end
                 dimLayersMLP,
                 # Structure
                 GSO, order = None):
        # Initialize parent:
        super().__init__()
        # dimNodeSignals should be a list of size 1 more than nCoeff (because
        # of the input number of features).
        assert len(dimNodeSignals) == len(nCoeff) + 1
        # nSelectedNodes should be a list of the same size as nCoeff, since
        # the number of nodes in the first layer is always the size of the
        # graph
        assert len(nSelectedNodes) == len(nCoeff)
        # poolingSize also has to be a list of the same size
        assert len(poolingSize) == len(nCoeff)
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Store the values (using the notation in the paper):
        self.L = len(nCoeff) # Number of graph filtering layers
        self.F = dimNodeSignals # Features
        self.M = nCoeff # Number of coefficients
        self.E = GSO.shape[0] # Number of edge features
        if order is not None:
            # If there's going to be reordering, then the value of the
            # permutation function will be given by the criteria in
            # self.reorder. For instance, if self.reorder = 'Degree', then
            # we end up calling the function
            # alegnnss.utils.graphTools.permDegree.
            # We need to be sure that the function 'perm' + self.reorder
            # is available in the alegnnss.utils.graphTools module.
            self.permFunction = eval('alegnnss.utils.graphTools.perm' + order)
        else:
            self.permFunction = alegnnss.utils.graphTools.permIdentity
            # This is overridden if coarsening is selected, since the ordering
            # function is native to that pooling method.
        self.S, self.order = self.permFunction(GSO)
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes
        # Note that we are adding N_{0} = N as the number of nodes input to
        # the first layer: the list above contains the number of nodes at the
        # input and output of each layer.
        self.bias = bias # Boolean
        self.sigma = nonlinearity
        self.rho = poolingFunction
        self.alpha = poolingSize
        self.dimLayersMLP = dimLayersMLP
        # And now, we're finally ready to create the architecture:
        #\\\ Graph filtering layers \\\
        # OBS.: We could join this for loop with the one before, but we keep
        # them separate for clarity of code.
        sgfl = [] # Graph Filtering Layers
        for l in range(self.L):
            #\\ Graph filtering stage:
            sgfl.append(gml.SpectralGF(self.F[l], self.F[l+1], self.M[l],
                                       self.E, self.bias))
            # There is a 3*l below here, because we have three elements per
            # layer: graph filter, nonlinearity and pooling, so after each
            # layer we're actually adding elements to the (sequential) list.
            sgfl[3*l].addGSO(self.S)
            #\\ Nonlinearity
            sgfl.append(self.sigma())
            #\\ Pooling
            sgfl.append(self.rho(self.N[l], self.N[l+1], self.alpha[l]))
            # Same as before, this is 3*l+2
            sgfl[3*l+2].addGSO(self.S)
        # And now feed them into the sequential
        self.SGFL = nn.Sequential(*sgfl) # Graph Filtering Layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimLayersMLP) > 0: # Maybe we don't want to MLP anything
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            dimInputMLP = self.N[-1] * self.F[-1]
            # (i.e., we have N[-1] nodes left, each one described by F[-1]
            # features, which means this will be flattened into a vector of
            # size N[-1]*F[-1])
            fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by a nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss, or
            # we add a softmax.)
            for l in range(len(dimLayersMLP)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigma())
                # And add the linear layer
                fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1],
                                    bias = self.bias))
        # And we're done
        self.MLP = nn.Sequential(*fc)
        # so we finally have the architecture.

    def changeGSO(self, GSO, nSelectedNodes = [], poolingSize = []):
        # We use this to change the GSO, while keeping the same graph filters.
        # Check that the new GSO has the correct shape
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Get dataType and device of the current GSO, so that, when we
        # replace it, it still has the same type and lives on the same device.
        dataType = self.S.dtype
        if 'device' in dir(self.S):
            device = self.S.device
        else:
            device = None
        # Reorder the new GSO
        self.S, self.order = self.permFunction(GSO)
        # Change data type and device as required
        self.S = changeDataType(self.S, dataType)
        if device is not None:
            self.S = self.S.to(device)
        # Before making decisions, check if there is a new poolingSize list
        if len(poolingSize) > 0:
            # Check it has the right length
            assert len(poolingSize) == self.L
            # And update it
            self.alpha = poolingSize
        # Now, check if we have a new list of nodes
        if len(nSelectedNodes) > 0:
            # If we do, then we need to change the pooling functions to select
            # fewer nodes. This allows us to use graphs of different sizes.
            # Note that there is nothing learnable in the pooling functions,
            # so they can easily be re-made and re-initialized (see the
            # sketch below).
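            # (A hedged usage sketch, with hypothetical names and numbers: a
            # model trained on a 100-node graph could later be re-targeted to
            # another 100-node graph via
            #   GNN.changeGSO(newGSO, nSelectedNodes = [80, 40],
            #                 poolingSize = [2, 2])
            # where newGSO is a 100 x 100 np.array; everything in this aside
            # is illustrative only, not part of the original code.)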
            # The first thing we need to check is that the length of the
            # number of nodes is equal to the number of layers (this list
            # indicates the number of nodes selected at the output of each
            # layer)
            assert len(nSelectedNodes) == self.L
            # Then, update the N that we have stored
            self.N = [GSO.shape[1]] + nSelectedNodes
            # And get the new pooling functions
            for l in range(self.L):
                # For each layer, add the pooling function
                self.SGFL[3*l+2] = self.rho(self.N[l], self.N[l+1],
                                            self.alpha[l])
                self.SGFL[3*l+2].addGSO(self.S)
        else:
            # Just update the GSO
            for l in range(self.L):
                self.SGFL[3*l+2].addGSO(self.S)
        # And update the GSO in the spectral graph filters, which are still
        # missing it
        for l in range(self.L):
            self.SGFL[3*l].addGSO(self.S) # Graph convolutional layer

    def splitForward(self, x):
        # Now we compute the forward call
        assert len(x.shape) == 3
        batchSize = x.shape[0]
        assert x.shape[1] == self.F[0]
        assert x.shape[2] == self.N[0]
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # Let's call the graph filtering layer
        y = self.SGFL(x)
        # Flatten the output
        yFlat = y.reshape(batchSize, self.F[-1] * self.N[-1])
        # And, feed it into the MLP
        return self.MLP(yFlat), y
        # If self.MLP is a sequential on an empty list, it just does nothing.

    def forward(self, x):
        # Most of the time, we just need the actual, last output. But, since
        # in this case we also want to compare with the output of the GNN
        # itself, we need this other splitForward method that returns both
        # outputs (the GNN and the MLP), while the proper forward method
        # returns only the MLP output.
        output, _ = self.splitForward(x)
        return output

    def to(self, device):
        # Because only the filter taps and the weights are registered as
        # parameters, when we do a .to(device) operation it does not move the
        # GSOs. So we need to move them ourselves.
        # Call the parent .to() method (to move the registered parameters)
        super().to(device)
        # Move the GSO
        self.S = self.S.to(device)
        # And all the other variables derived from it.
        for l in range(self.L):
            self.SGFL[3*l].addGSO(self.S)
            self.SGFL[3*l+2].addGSO(self.S)

class NodeVariantGNN(nn.Module):
    """
    NodeVariantGNN: implement the selection GNN architecture using node
        variant graph filters

    Initialization:

        NodeVariantGNN(dimNodeSignals, nShiftTaps, nNodeTaps, bias, # Filtering
                       nonlinearity, # Nonlinearity
                       nSelectedNodes, poolingFunction, poolingSize, # Pooling
                       dimLayersMLP, # MLP in the end
                       GSO, order = None) # Structure

        Input:
            /** Graph filtering layers **/
            dimNodeSignals (list of int): dimension of the signals at each
                layer (i.e. number of features at each node, or size of the
                vector supported at each node)
            nShiftTaps (list of int): number of shift taps on each layer
                (i.e. information is gathered from up to the
                (nShiftTaps-1)-hop neighborhood)
            nNodeTaps (list of int): number of node taps on each layer; if
                nNodeTaps = nNodes, then each node has an independent
                coefficient.
            bias (bool): include bias after graph filter on every layer
            >> Obs.: dimNodeSignals[0] is the number of features (the
                dimension of the node signals) of the data, where
                dimNodeSignals[l] is the dimension obtained at the output of
                layer l, l=1,...,L. Therefore, for L layers,
                len(dimNodeSignals) = L+1. Slightly different, nShiftTaps[l]
                is the number of filter taps for the filters implemented at
                layer l+1, thus len(nShiftTaps) = L.
            >> Obs.: The lengths of nShiftTaps and nNodeTaps have to be the
                same, and every element of one list is associated with the
                corresponding one on the other list to create the appropriate
                NVGF filter at each layer.
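            >> Obs.: As an illustration (hypothetical numbers only): on a
                20-node graph, nShiftTaps = [3, 3] paired with
                nNodeTaps = [20, 20] would give two layers of full
                node-variant filters, while nNodeTaps = [5, 5] would give
                hybrid ones, where only the first 5 nodes (in the given
                ordering) receive their own coefficients.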

            /** Activation function **/
            nonlinearity (torch.nn): module from torch.nn non-linear
                activations

            /** Pooling **/
            nSelectedNodes (list of int): number of nodes to keep after
                pooling on each layer
            >> Obs.: The selected nodes are the first nSelectedNodes[l]
                starting from the first element in the order specified by the
                given GSO
            poolingFunction (nn.Module in Utils.graphML): summarizing function
            poolingSize (list of int): size of the neighborhood to compute the
                summary from at each layer

            /** Readout layer **/
            dimLayersMLP (list of int): number of output hidden units of a
                sequence of fully connected layers after the graph filters
                have been applied

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice.
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes (i.e. for pooling reasons); the
                string has to be such that there is a function named
                'perm' + order in Utils.graphTools that takes as input the GSO
                and returns a new GSO ordered by the specified criteria and
                an order array

        Output:
            nn.Module with a Selection GNN architecture with the above
            specified characteristics, implementing node-variant graph
            filters.

    Forward call:

        NodeVariantGNN(x)

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the
                selection GNN; shape: batchSize x dimLayersMLP[-1]

    Other methods:

        y, yGNN = .splitForward(x): gives the output of the entire GNN y,
        which is of shape batchSize x dimLayersMLP[-1], as well as the output
        of all the GNN layers (i.e. before the MLP layers), yGNN of shape
        batchSize x nSelectedNodes[-1] x dimFeatures[-1]. This can be used to
        isolate the effect of graph filtering from the effect of the readout
        layer.
    """

    def __init__(self,
                 # Graph filtering
                 dimNodeSignals, nShiftTaps, nNodeTaps, bias,
                 # Nonlinearity
                 nonlinearity,
                 # Pooling
                 nSelectedNodes, poolingFunction, poolingSize,
                 # MLP in the end
                 dimLayersMLP,
                 # Structure
                 GSO, order = None):
        # Initialize parent:
        super().__init__()
        # dimNodeSignals should be a list of size 1 more than the number of
        # filter taps (because of the input number of features)
        assert len(dimNodeSignals) == len(nShiftTaps) + 1
        # The length of the shift taps list should be equal to the length of
        # the node taps list
        assert len(nShiftTaps) == len(nNodeTaps)
        # nSelectedNodes should be a list of the same size as nShiftTaps,
        # since the number of nodes in the first layer is always the size of
        # the graph
        assert len(nSelectedNodes) == len(nShiftTaps)
        # poolingSize also has to be a list of the same size
        assert len(poolingSize) == len(nShiftTaps)
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Store the values (using the notation in the paper):
        self.L = len(nShiftTaps) # Number of graph filtering layers
        self.F = dimNodeSignals # Features
        self.K = nShiftTaps # Filter Shift taps
        self.M = nNodeTaps # Filter node taps
        self.E = GSO.shape[0] # Number of edge features
        self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes
        # Note that we are adding N_{0} = N as the number of nodes input to
        # the first layer: the list above contains the number of nodes at the
        # input and output of each layer.
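        # (For example, with hypothetical numbers: a 100-node graph and
        # nSelectedNodes = [50, 25] give self.N = [100, 50, 25].)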
        self.bias = bias # Boolean
        if order is not None:
            # If there's going to be reordering, then the value of the
            # permutation function will be given by the criteria in
            # self.reorder. For instance, if self.reorder = 'Degree', then
            # we end up calling the function
            # alegnnss.utils.graphTools.permDegree.
            # We need to be sure that the function 'perm' + self.reorder
            # is available in the alegnnss.utils.graphTools module.
            self.permFunction = eval('alegnnss.utils.graphTools.perm' + order)
        else:
            self.permFunction = alegnnss.utils.graphTools.permIdentity
            # This is overridden if coarsening is selected, since the ordering
            # function is native to that pooling method.
        self.S, self.order = self.permFunction(GSO)
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        self.sigma = nonlinearity
        self.rho = poolingFunction
        self.alpha = poolingSize
        self.dimLayersMLP = dimLayersMLP
        # And now, we're finally ready to create the architecture:
        #\\\ Graph filtering layers \\\
        # OBS.: We could join this for loop with the one before, but we keep
        # them separate for clarity of code.
        nvgfl = [] # Node Variant GF Layers
        for l in range(self.L):
            #\\ Graph filtering stage:
            nvgfl.append(gml.NodeVariantGF(self.F[l], self.F[l+1],
                                           self.K[l], self.M[l],
                                           self.E, self.bias))
            # There is a 3*l below here, because we have three elements per
            # layer: graph filter, nonlinearity and pooling, so after each
            # layer we're actually adding elements to the (sequential) list.
            nvgfl[3*l].addGSO(self.S)
            #\\ Nonlinearity
            nvgfl.append(self.sigma())
            #\\ Pooling
            nvgfl.append(self.rho(self.N[l], self.N[l+1], self.alpha[l]))
            # Same as before, this is 3*l+2
            nvgfl[3*l+2].addGSO(self.S)
        # And now feed them into the sequential
        self.NVGFL = nn.Sequential(*nvgfl) # Graph Filtering Layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimLayersMLP) > 0: # Maybe we don't want to MLP anything
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            dimInputMLP = self.N[-1] * self.F[-1]
            # (i.e., we have N[-1] nodes left, each one described by F[-1]
            # features, which means this will be flattened into a vector of
            # size N[-1]*F[-1])
            fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by a nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss, or
            # we add a softmax.)
            for l in range(len(dimLayersMLP)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigma())
                # And add the linear layer
                fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1],
                                    bias = self.bias))
        # And we're done
        self.MLP = nn.Sequential(*fc)
        # so we finally have the architecture.

    def splitForward(self, x):
        # Now we compute the forward call
        assert len(x.shape) == 3
        batchSize = x.shape[0]
        assert x.shape[1] == self.F[0]
        assert x.shape[2] == self.N[0]
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # Let's call the graph filtering layer
        y = self.NVGFL(x)
        # Flatten the output
        yFlat = y.reshape(batchSize, self.F[-1] * self.N[-1])
        # And, feed it into the MLP
        return self.MLP(yFlat), y
        # If self.MLP is a sequential on an empty list, it just does nothing.

    def forward(self, x):
        # Most of the time, we just need the actual, last output.
        # But, since in this case we also want to compare with the output of
        # the GNN itself, we need this other splitForward method that returns
        # both outputs (the GNN and the MLP), while the proper forward method
        # returns only the MLP output.
        output, _ = self.splitForward(x)
        return output

    def to(self, device):
        # Because only the filter taps and the weights are registered as
        # parameters, when we do a .to(device) operation it does not move the
        # GSOs. So we need to move them ourselves.
        # Call the parent .to() method (to move the registered parameters)
        super().to(device)
        # Move the GSO
        self.S = self.S.to(device)
        # And all the other variables derived from it.
        for l in range(self.L):
            self.NVGFL[3*l].addGSO(self.S)
            self.NVGFL[3*l+2].addGSO(self.S)

class EdgeVariantGNN(nn.Module):
    """
    EdgeVariantGNN: implement the selection GNN architecture using edge
        variant graph filters (through masking, not placement)

    Initialization:

        EdgeVariantGNN(dimNodeSignals, nShiftTaps, nFilterNodes, bias,
                       nonlinearity, # Nonlinearity
                       nSelectedNodes, poolingFunction, poolingSize,
                       dimLayersMLP, # MLP in the end
                       GSO, order = None) # Structure

        Input:
            /** Graph filtering layers **/
            dimNodeSignals (list of int): dimension of the signals at each
                layer (i.e. number of features at each node, or size of the
                vector supported at each node)
            nShiftTaps (list of int): number of shift taps on each layer
                (i.e. information is gathered from up to the
                (nShiftTaps-1)-hop neighborhood)
            nFilterNodes (list of int): number of nodes selected for the EV
                part of the hybrid EV filtering (recall that the first ones in
                the given permutation of S are the nodes selected; if any
                element in nFilterNodes is equal to the number of nodes, then
                we have a full edge-variant filter, not a hybrid one)
            bias (bool): include bias after graph filter on every layer
            >> Obs.: dimNodeSignals[0] is the number of features (the
                dimension of the node signals) of the data, where
                dimNodeSignals[l] is the dimension obtained at the output of
                layer l, l=1,...,L. Therefore, for L layers,
                len(dimNodeSignals) = L+1. Slightly different, nShiftTaps[l]
                is the number of filter taps for the filters implemented at
                layer l+1, thus len(nShiftTaps) = L.

            /** Activation function **/
            nonlinearity (torch.nn): module from torch.nn non-linear
                activations

            /** Pooling **/
            nSelectedNodes (list of int): number of nodes to keep after
                pooling on each layer
            >> Obs.: The selected nodes are the first nSelectedNodes[l]
                starting from the first element in the order specified by the
                given GSO
            poolingFunction (nn.Module in Utils.graphML): summarizing function
            poolingSize (list of int): size of the neighborhood to compute the
                summary from at each layer

            /** Readout layer **/
            dimLayersMLP (list of int): number of output hidden units of a
                sequence of fully connected layers after the graph filters
                have been applied

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice.
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes (i.e. for pooling reasons); the
                string has to be such that there is a function named
                'perm' + order in Utils.graphTools that takes as input the GSO
                and returns a new GSO ordered by the specified criteria and
                an order array

        Output:
            nn.Module with a Selection GNN architecture with the above
            specified characteristics, implementing edge-variant graph
            filters.
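
    Example (a minimal, hedged sketch: the pooling choice gml.MaxPoolLocal
        and all the numbers below are assumptions, not part of the original
        code):

        GNN = EdgeVariantGNN([1, 8], [3], [10], True, nn.ReLU,
                             [10], gml.MaxPoolLocal, [2], [8, 2], GSO)
        y = GNN(x) # x: batchSize x 1 x 10 for a 10-node GSO; y: batchSize x 2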
    Forward call:

        EdgeVariantGNN(x)

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the
                selection GNN; shape: batchSize x dimLayersMLP[-1]

    Other methods:

        y, yGNN = .splitForward(x): gives the output of the entire GNN y,
        which is of shape batchSize x dimLayersMLP[-1], as well as the output
        of all the GNN layers (i.e. before the MLP layers), yGNN of shape
        batchSize x nSelectedNodes[-1] x dimFeatures[-1]. This can be used to
        isolate the effect of graph filtering from the effect of the readout
        layer.
    """

    def __init__(self,
                 # Graph filtering
                 dimNodeSignals, nShiftTaps, nFilterNodes, bias,
                 # Nonlinearity
                 nonlinearity,
                 # Pooling
                 nSelectedNodes, poolingFunction, poolingSize,
                 # MLP in the end
                 dimLayersMLP,
                 # Structure
                 GSO, order = None):
        # Initialize parent:
        super().__init__()
        # dimNodeSignals should be a list of size 1 more than the number of
        # filter taps (because of the input number of features)
        assert len(dimNodeSignals) == len(nShiftTaps) + 1
        # nFilterNodes is a list of int with the number of nodes to select for
        # the EV part at each layer; it should have the same length as the
        # number of filter taps
        assert len(nFilterNodes) == len(nShiftTaps)
        # nSelectedNodes should be a list of the same size as nShiftTaps,
        # since the number of nodes in the first layer is always the size of
        # the graph
        assert len(nSelectedNodes) == len(nShiftTaps)
        # poolingSize also has to be a list of the same size
        assert len(poolingSize) == len(nShiftTaps)
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Store the values (using the notation in the paper):
        self.L = len(nShiftTaps) # Number of graph filtering layers
        self.F = dimNodeSignals # Features
        self.K = nShiftTaps # Filter Shift taps
        self.M = nFilterNodes
        self.E = GSO.shape[0] # Number of edge features
        self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes
        # Note that we are adding N_{0} = N as the number of nodes input to
        # the first layer: the list above contains the number of nodes at the
        # input and output of each layer.
        self.bias = bias # Boolean
        if order is not None:
            # If there's going to be reordering, then the value of the
            # permutation function will be given by the criteria in
            # self.reorder. For instance, if self.reorder = 'Degree', then
            # we end up calling the function
            # alegnnss.utils.graphTools.permDegree.
            # We need to be sure that the function 'perm' + self.reorder
            # is available in the alegnnss.utils.graphTools module.
            self.permFunction = eval('alegnnss.utils.graphTools.perm' + order)
        else:
            self.permFunction = alegnnss.utils.graphTools.permIdentity
            # This is overridden if coarsening is selected, since the ordering
            # function is native to that pooling method.
        self.S, self.order = self.permFunction(GSO)
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        self.sigma = nonlinearity
        self.rho = poolingFunction
        self.alpha = poolingSize
        self.dimLayersMLP = dimLayersMLP
        # And now, we're finally ready to create the architecture:
        #\\\ Graph filtering layers \\\
        # OBS.: We could join this for loop with the one before, but we keep
        # them separate for clarity of code.
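        # (Layout reminder, used throughout this file: after the loop below,
        # layer l occupies positions 3*l (graph filter), 3*l+1 (nonlinearity)
        # and 3*l+2 (pooling) of the sequential list; e.g., for L = 2 the
        # list holds 6 modules.)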
        evgfl = [] # Edge Variant GF Layers
        for l in range(self.L):
            #\\ Graph filtering stage:
            evgfl.append(gml.EdgeVariantGF(self.F[l], self.F[l+1],
                                           self.K[l], self.M[l], self.N[0],
                                           self.E, self.bias))
            # There is a 3*l below here, because we have three elements per
            # layer: graph filter, nonlinearity and pooling, so after each
            # layer we're actually adding elements to the (sequential) list.
            evgfl[3*l].addGSO(self.S)
            #\\ Nonlinearity
            evgfl.append(self.sigma())
            #\\ Pooling
            evgfl.append(self.rho(self.N[l], self.N[l+1], self.alpha[l]))
            # Same as before, this is 3*l+2
            evgfl[3*l+2].addGSO(self.S)
        # And now feed them into the sequential
        self.EVGFL = nn.Sequential(*evgfl) # Graph Filtering Layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimLayersMLP) > 0: # Maybe we don't want to MLP anything
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            dimInputMLP = self.N[-1] * self.F[-1]
            # (i.e., we have N[-1] nodes left, each one described by F[-1]
            # features, which means this will be flattened into a vector of
            # size N[-1]*F[-1])
            fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by a nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss, or
            # we add a softmax.)
            for l in range(len(dimLayersMLP)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigma())
                # And add the linear layer
                fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1],
                                    bias = self.bias))
        # And we're done
        self.MLP = nn.Sequential(*fc)
        # so we finally have the architecture.

    def splitForward(self, x):
        # Now we compute the forward call
        assert len(x.shape) == 3
        batchSize = x.shape[0]
        assert x.shape[1] == self.F[0]
        assert x.shape[2] == self.N[0]
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # Let's call the graph filtering layer
        y = self.EVGFL(x)
        # Flatten the output
        yFlat = y.reshape(batchSize, self.F[-1] * self.N[-1])
        # And, feed it into the MLP
        return self.MLP(yFlat), y
        # If self.MLP is a sequential on an empty list, it just does nothing.

    def forward(self, x):
        # Most of the time, we just need the actual, last output. But, since
        # in this case we also want to compare with the output of the GNN
        # itself, we need this other splitForward method that returns both
        # outputs (the GNN and the MLP), while the proper forward method
        # returns only the MLP output.
        output, _ = self.splitForward(x)
        return output

    def to(self, device):
        # Because only the filter taps and the weights are registered as
        # parameters, when we do a .to(device) operation it does not move the
        # GSOs. So we need to move them ourselves.
        # Call the parent .to() method (to move the registered parameters)
        super().to(device)
        # Move the GSO
        self.S = self.S.to(device)
        # And all the other variables derived from it.
        for l in range(self.L):
            self.EVGFL[3*l].addGSO(self.S)
            self.EVGFL[3*l+2].addGSO(self.S)

class LocalEdgeNet(nn.Module):
    """
    LocalEdgeNet: implements the selection GNN architecture with edge-variant
        graph filters and local operations only

    Initialization:

        LocalEdgeNet(dimNodeSignals, nShiftTaps, nFilterNodes, bias,
                     nonlinearity, # Nonlinearity
                     nSelectedNodes, poolingFunction, poolingSize,
                     dimReadout, # Local readout layer
                     GSO, order = None) # Structure

        Input:
            /** Graph filtering layers **/
            dimNodeSignals (list of int): dimension of the signals at each
                layer (i.e.
                number of features at each node, or size of the
                vector supported at each node)
            nShiftTaps (list of int): number of shift taps on each layer
                (i.e. information is gathered from up to the
                (nShiftTaps-1)-hop neighborhood)
            nFilterNodes (list of int): number of nodes selected for the EV
                part of the hybrid EV filtering (recall that the first ones in
                the given permutation of S are the nodes selected; if any
                element in nFilterNodes is equal to the number of nodes, then
                we have a full edge-variant filter, not a hybrid one)
            bias (bool): include bias after graph filter on every layer
            >> Obs.: dimNodeSignals[0] is the number of features (the
                dimension of the node signals) of the data, where
                dimNodeSignals[l] is the dimension obtained at the output of
                layer l, l=1,...,L. Therefore, for L layers,
                len(dimNodeSignals) = L+1. Slightly different, nShiftTaps[l]
                is the number of filter taps for the filters implemented at
                layer l+1, thus len(nShiftTaps) = L.

            /** Activation function **/
            nonlinearity (torch.nn): module from torch.nn non-linear
                activations

            /** Pooling **/
            nSelectedNodes (list of int): number of nodes to keep after
                pooling on each layer
            >> Obs.: The selected nodes are the first nSelectedNodes[l]
                starting from the first element in the order specified by the
                given GSO
            poolingFunction (nn.Module in Utils.graphML): summarizing function
            poolingSize (list of int): size of the neighborhood to compute the
                summary from at each layer

            /** Readout layers **/
            dimReadout (list of int): number of output hidden units of a
                sequence of fully connected layers applied locally at each
                node (i.e. no exchange of information involved).

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice.
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes (i.e. for pooling reasons); the
                string has to be such that there is a function named
                'perm' + order in Utils.graphTools that takes as input the GSO
                and returns a new GSO ordered by the specified criteria and
                an order array

        Output:
            nn.Module with a Selection GNN architecture with the above
            specified characteristics, implementing edge-variant graph
            filters.

    Forward call:

        LocalEdgeNet(x)

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the
                selection GNN;
                shape: batchSize x dimReadout[-1] x nSelectedNodes[-1]

    Other methods:

        y, yGNN = .splitForward(x): gives the output of the entire GNN y,
        which is of shape batchSize x dimReadout[-1] x nSelectedNodes[-1], as
        well as the output of all the GNN layers (i.e. before the readout
        layers), yGNN of shape
        batchSize x nSelectedNodes[-1] x dimFeatures[-1]. This can be used to
        isolate the effect of graph filtering from the effect of the readout
        layer.

        y = .singleNodeForward(x, nodes): outputs the value of the last layer
        at a single node. x is the usual input of shape
        batchSize x dimFeatures x numberNodes. nodes is either a single node
        (int) or a collection of nodes (list or numpy.array) of length
        batchSize, where for each element in the batch, we get the output at
        the single specified node. The output y is of shape
        batchSize x dimReadout[-1].
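
    Example (a hedged, minimal sketch: the pooling choice gml.MaxPoolLocal
        and the numbers below are assumptions, not part of the original code):

        net = LocalEdgeNet([1, 16], [3], [10], True, nn.ReLU,
                           [10], gml.MaxPoolLocal, [1], [8, 4], GSO)
        y = net(x)                          # batchSize x 4 x 10
        yNode = net.singleNodeForward(x, 0) # batchSize x 4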
""" def __init__(self, # Graph filtering dimNodeSignals, nShiftTaps, nFilterNodes, bias, # Nonlinearity nonlinearity, # Pooling nSelectedNodes, poolingFunction, poolingSize, # MLP in the end dimReadout, # Structure GSO, order = None): # Initialize parent: super().__init__() # dimNodeSignals should be a list and of size 1 more than the number of # filter taps (because of the input number of features) assert len(dimNodeSignals) == len(nShiftTaps) + 1 # Filter nodes is a list of int with the number of nodes to select for # the EV part at each layer; it should have the same length as the # number of filter taps assert len(nFilterNodes) == len(nShiftTaps) # nSelectedNodes should be a list of size nShiftTaps, since the number # of nodes in the first layer is always the size of the graph assert len(nSelectedNodes) == len(nShiftTaps) # poolingSize also has to be a list of the same size assert len(poolingSize) == len(nShiftTaps) # Check whether the GSO has features or not. After that, always handle # it as a matrix of dimension E x N x N. assert len(GSO.shape) == 2 or len(GSO.shape) == 3 if len(GSO.shape) == 2: assert GSO.shape[0] == GSO.shape[1] GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N else: assert GSO.shape[1] == GSO.shape[2] # E x N x N # Store the values (using the notation in the paper): self.L = len(nShiftTaps) # Number of graph filtering layers self.F = dimNodeSignals # Features self.K = nShiftTaps # Filter Shift taps self.M = nFilterNodes self.E = GSO.shape[0] # Number of edge features self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes # See that we adding N_{0} = N as the number of nodes input the first # layer: this above is the list containing how many nodes are between # each layer. self.bias = bias # Boolean if order is not None: # If there's going to be reordering, then the value of the # permutation function will be given by the criteria in # self.reorder. For instance, if self.reorder = 'Degree', then # we end up calling the function Utils.graphTools.permDegree. # We need to be sure that the function 'perm' + self.reorder # is available in the Utils.graphTools module. self.permFunction = eval('Utils.graphTools.perm' + order) else: self.permFunction = alegnnss.utils.graphTools.permIdentity # This is overriden if coarsening is selected, since the ordering # function is native to that pooling method. self.S, self.order = self.permFunction(GSO) if 'torch' not in repr(self.S.dtype): self.S = torch.tensor(self.S) self.sigma = nonlinearity self.rho = poolingFunction self.alpha = poolingSize self.dimReadout = dimReadout # And now, we're finally ready to create the architecture: #\\\ Graph filtering layers \\\ # OBS.: We could join this for with the one before, but we keep separate # for clarity of code. evgfl = [] # Node Variant GF Layers for l in range(self.L): #\\ Graph filtering stage: evgfl.append(gml.EdgeVariantGF(self.F[l], self.F[l+1], self.K[l], self.M[l], self.N[0], self.E, self.bias)) # There is a 3*l below here, because we have three elements per # layer: graph filter, nonlinearity and pooling, so after each layer # we're actually adding elements to the (sequential) list. 
            evgfl[3*l].addGSO(self.S)
            #\\ Nonlinearity
            evgfl.append(self.sigma())
            #\\ Pooling
            evgfl.append(self.rho(self.N[l], self.N[l+1], self.alpha[l]))
            # Same as before, this is 3*l+2
            evgfl[3*l+2].addGSO(self.S)
        # And now feed them into the sequential
        self.EVGFL = nn.Sequential(*evgfl) # Graph Filtering Layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimReadout) > 0: # Maybe we don't want to readout anything
            # The first layer has to connect whatever was left of the graph
            # filtering stage to create the number of features required by
            # the readout layer
            fc.append(nn.Linear(self.F[-1], dimReadout[0], bias = self.bias))
            # The last linear layer cannot be followed by a nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss, or
            # we add a softmax.)
            for l in range(len(dimReadout)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigma())
                # And add the linear layer
                fc.append(nn.Linear(dimReadout[l], dimReadout[l+1],
                                    bias = self.bias))
        # And we're done
        self.Readout = nn.Sequential(*fc)
        # so we finally have the architecture.

    def splitForward(self, x):
        # Now we compute the forward call
        assert len(x.shape) == 3
        assert x.shape[1] == self.F[0]
        assert x.shape[2] == self.N[0]
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # Let's call the graph filtering layer
        yEVGFL = self.EVGFL(x)
        # Change the order, for the readout
        y = yEVGFL.permute(0, 2, 1) # B x N[-1] x F[-1]
        # And, feed it into the Readout layer
        y = self.Readout(y) # B x N[-1] x dimReadout[-1]
        # Reshape and return
        return y.permute(0, 2, 1), yEVGFL
        # B x dimReadout[-1] x N[-1], B x dimFeatures[-1] x N[-1]

    def forward(self, x):
        # Most of the time, we just need the actual, last output. But, since
        # in this case we also want to compare with the output of the GNN
        # itself, we need this other splitForward method that returns both
        # outputs (the GNN and the readout), while the proper forward method
        # returns only the readout output.
        output, _ = self.splitForward(x)
        return output

    def singleNodeForward(self, x, nodes):
        # x is of shape B x F[0] x N[-1]
        batchSize = x.shape[0]
        # nodes is either an int, or a list/np.array of ints of size B
        assert type(nodes) is int \
                or type(nodes) is list \
                or type(nodes) is np.ndarray
        # Let us start by building the selection matrix
        # This selection matrix has to be a matrix of shape
        #   B x N[-1] x 1
        # so that when multiplying with the output of the forward, we get a
        #   B x dimReadout[-1] x 1
        # and we just squeeze the last dimension
        # TODO: The big question here is if multiplying by a matrix is faster
        # than doing torch.index_select
        # Let's always work with numpy arrays to make it easier.
        if type(nodes) is int:
            # Change the node number to accommodate the new order
            nodes = self.order.index(nodes)
            # If it's an int, make it a list and an array
            nodes = np.array([nodes], dtype=int)
            # And repeat for the number of batches
            nodes = np.tile(nodes, batchSize)
        elif type(nodes) is list:
            newNodes = [self.order.index(n) for n in nodes]
            nodes = np.array(newNodes, dtype = int)
        elif type(nodes) is np.ndarray:
            newNodes = np.array([np.where(np.array(self.order) == n)[0][0] \
                    for n in nodes])
            nodes = newNodes.astype(int)
        # Now, nodes is an integer np.ndarray of shape batchSize
        # Build the selection matrix
        selectionMatrix = np.zeros([batchSize, self.N[-1], 1])
        selectionMatrix[np.arange(batchSize), nodes, 0] = 1.
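        # (Illustration with hypothetical values: for batchSize = 2,
        # N[-1] = 3 and nodes = [2, 0], selectionMatrix is
        #   [[[0.], [0.], [1.]],
        #    [[1.], [0.], [0.]]]
        # i.e., one one-hot column vector per batch element.)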
        # And convert it to a tensor
        selectionMatrix = torch.tensor(selectionMatrix,
                                       dtype = x.dtype,
                                       device = x.device)
        # Now compute the output
        y = self.forward(x)
        # This output is of size B x dimReadout[-1] x N[-1]
        # Multiply the output
        y = torch.matmul(y, selectionMatrix) # B x dimReadout[-1] x 1
        # Squeeze the last dimension and return
        return y.squeeze(2)

    def to(self, device):
        # Because only the filter taps and the weights are registered as
        # parameters, when we do a .to(device) operation it does not move the
        # GSOs. So we need to move them ourselves.
        # Call the parent .to() method (to move the registered parameters)
        super().to(device)
        # Move the GSO
        self.S = self.S.to(device)
        # And all the other variables derived from it.
        for l in range(self.L):
            self.EVGFL[3*l].addGSO(self.S)
            self.EVGFL[3*l+2].addGSO(self.S)

class ARMAfilterGNN(nn.Module):
    """
    ARMAfilterGNN: implements the GNN architecture using ARMA graph filters
        by Jacobi's method.

    Initialization:

        ARMAfilterGNN(dimNodeSignals, nDenominatorTaps, nResidueTaps,
                      bias, # Graph Filtering
                      nonlinearity, # Nonlinearity
                      nSelectedNodes, poolingFunction, poolingSize, # Pooling
                      dimLayersMLP, # MLP in the end
                      GSO, order = None, tMax = 5) # Structure

        Input:
            /** Graph filtering layers **/
            dimNodeSignals (list of int): dimension of the signals at each
                layer (i.e. number of features at each node, or size of the
                vector supported at each node)
            nDenominatorTaps (list of int): number of filter taps in the
                denominator polynomial at each layer
            nResidueTaps (list of int): number of filter taps in the residue
                polynomial at each layer
            bias (bool): include bias after graph filter on every layer
            >> Obs.: dimNodeSignals[0] is the number of features (the
                dimension of the node signals) of the data, where
                dimNodeSignals[l] is the dimension obtained at the output of
                layer l, l=1,...,L. Therefore, for L layers,
                len(dimNodeSignals) = L+1. Slightly different,
                nResidueTaps[l] is the number of filter taps for the filters
                implemented at layer l+1, thus len(nResidueTaps) = L. Same
                holds for nDenominatorTaps.

            /** Activation function **/
            nonlinearity (torch.nn): module from torch.nn non-linear
                activations

            /** Pooling **/
            nSelectedNodes (list of int): number of nodes to keep after
                pooling on each layer
            >> Obs.: The selected nodes are the first nSelectedNodes[l]
                starting from the first element in the order specified by the
                given GSO
            poolingFunction (nn.Module in Utils.graphML): summarizing function
            poolingSize (list of int): size of the neighborhood to compute the
                summary from at each layer

            /** Readout layer **/
            dimLayersMLP (list of int): number of output hidden units of a
                sequence of fully connected layers after the graph filters
                have been applied

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice.
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes (i.e. for pooling reasons); the
                string has to be such that there is a function named
                'perm' + order in Utils.graphTools that takes as input the GSO
                and returns a new GSO ordered by the specified criteria and
                an order array

            /** Method specifics **/
            tMax (int): how many iterations in the Jacobi method (default: 5)

        Output:
            nn.Module with a Selection GNN architecture with the above
            specified characteristics, implementing ARMA filters.
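
    Example (a minimal, hedged sketch: gml.MaxPoolLocal and all the numbers
        below are assumptions, not part of the original code):

        GNN = ARMAfilterGNN([1, 8], [2], [3], True, nn.ReLU,
                            [10], gml.MaxPoolLocal, [2], [8, 2],
                            GSO, tMax = 5)
        y = GNN(x) # x: batchSize x 1 x 10 for a 10-node GSO; y: batchSize x 2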
    Forward call:

        ARMAfilterGNN(x)

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the GNN
                with ARMA filters; shape: batchSize x dimLayersMLP[-1]

    Other methods:

        .changeGSO(S, nSelectedNodes = [], poolingSize = []): takes as input a
        new graph shift operator S as a tensor of shape
            (dimEdgeFeatures x) numberNodes x numberNodes
        Then, next time the ARMAfilterGNN is run, it will run over the graph
        with GSO S, instead of running over the original GSO S. This is
        particularly useful when training on one graph, and testing on
        another one. The number of selected nodes and the pooling size will
        not change unless they are specifically given as input. Those lists
        need to have the same length as the number of layers. There is no
        need to define both, unless they change.
        >> Obs.: The number of nodes in the GSOs need not be the same, but
            unless we want to risk zero-padding beyond the original number of
            nodes (which just results in disconnected nodes), we might want
            to update the nSelectedNodes and poolingSize accordingly, if the
            size of the new GSO is different.

        y, yGNN = .splitForward(x): gives the output of the entire GNN y,
        which is of shape batchSize x dimLayersMLP[-1], as well as the output
        of all the GNN layers (i.e. before the MLP layers), yGNN of shape
        batchSize x nSelectedNodes[-1] x dimFeatures[-1]. This can be used to
        isolate the effect of graph filtering from the effect of the readout
        layer.
    """

    def __init__(self,
                 # Graph filtering
                 dimNodeSignals, nDenominatorTaps, nResidueTaps, bias,
                 # Nonlinearity
                 nonlinearity,
                 # Pooling
                 nSelectedNodes, poolingFunction, poolingSize,
                 # MLP in the end
                 dimLayersMLP,
                 # Structure
                 GSO, order = None, tMax = 5):
        # Initialize parent:
        super().__init__()
        # dimNodeSignals should be a list of size 1 more than the number of
        # filter taps (because of the input number of features).
        assert len(dimNodeSignals) == len(nDenominatorTaps) + 1
        assert len(dimNodeSignals) == len(nResidueTaps) + 1
        # nSelectedNodes should be a list of the same size as nResidueTaps,
        # since the number of nodes in the first layer is always the size of
        # the graph
        assert len(nSelectedNodes) == len(nResidueTaps)
        # poolingSize also has to be a list of the same size
        assert len(poolingSize) == len(nResidueTaps)
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Store the values (using the notation in the paper):
        self.L = len(nResidueTaps) # Number of graph filtering layers
        self.F = dimNodeSignals # Features
        self.P = nDenominatorTaps # Denominator taps (order - 1)
        self.K = nResidueTaps # Residue taps (order - 1)
        self.E = GSO.shape[0] # Number of edge features
        self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes
        # Note that we are adding N_{0} = N as the number of nodes input to
        # the first layer: the list above contains the number of nodes at the
        # input and output of each layer.
        self.bias = bias # Boolean
        # Store the rest of the variables
        if order is not None:
            # If there's going to be reordering, then the value of the
            # permutation function will be given by the criteria in
            # self.reorder. For instance, if self.reorder = 'Degree', then
            # we end up calling the function
            # alegnnss.utils.graphTools.permDegree.
            # We need to be sure that the function 'perm' + self.reorder
            # is available in the alegnnss.utils.graphTools module.
            self.permFunction = eval('alegnnss.utils.graphTools.perm' + order)
        else:
            self.permFunction = alegnnss.utils.graphTools.permIdentity
            # This is overridden if coarsening is selected, since the ordering
            # function is native to that pooling method.
        self.S, self.order = self.permFunction(GSO)
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        self.sigma = nonlinearity
        self.rho = poolingFunction
        self.alpha = poolingSize
        self.dimLayersMLP = dimLayersMLP
        self.tMax = tMax
        # And now, we're finally ready to create the architecture:
        #\\\ Graph filtering layers \\\
        # OBS.: We could join this for loop with the one before, but we keep
        # them separate for clarity of code.
        gfl = [] # Graph Filtering Layers
        for l in range(self.L):
            #\\ Graph filtering stage:
            gfl.append(gml.GraphFilterARMA(self.F[l], self.F[l+1],
                                           self.P[l], self.K[l],
                                           self.E, self.bias, self.tMax))
            # There is a 3*l below here, because we have three elements per
            # layer: graph filter, nonlinearity and pooling, so after each
            # layer we're actually adding elements to the (sequential) list.
            gfl[3*l].addGSO(self.S)
            #\\ Nonlinearity
            gfl.append(self.sigma())
            #\\ Pooling
            gfl.append(self.rho(self.N[l], self.N[l+1], self.alpha[l]))
            # Same as before, this is 3*l+2
            gfl[3*l+2].addGSO(self.S)
        # And now feed them into the sequential
        self.jARMA = nn.Sequential(*gfl) # Graph Filtering Layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimLayersMLP) > 0: # Maybe we don't want to MLP anything
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            dimInputMLP = self.N[-1] * self.F[-1]
            # (i.e., we have N[-1] nodes left, each one described by F[-1]
            # features, which means this will be flattened into a vector of
            # size N[-1]*F[-1])
            fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by a nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss, or
            # we add a softmax.)
            for l in range(len(dimLayersMLP)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigma())
                # And add the linear layer
                fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1],
                                    bias = self.bias))
        # And we're done
        self.MLP = nn.Sequential(*fc)
        # so we finally have the architecture.

    def changeGSO(self, GSO, nSelectedNodes = [], poolingSize = []):
        # We use this to change the GSO, while keeping the same graph filters.
        # Check that the new GSO has the correct shape
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Get dataType and device of the current GSO, so that, when we
        # replace it, it still has the same type and lives on the same device.
        dataType = self.S.dtype
        if 'device' in dir(self.S):
            device = self.S.device
        else:
            device = None
        # Reorder the new GSO
        self.S, self.order = self.permFunction(GSO)
        # Change data type and device as required
        self.S = changeDataType(self.S, dataType)
        if device is not None:
            self.S = self.S.to(device)
        # Before making decisions, check if there is a new poolingSize list
        if len(poolingSize) > 0:
            # Check it has the right length
            assert len(poolingSize) == self.L
            # And update it
            self.alpha = poolingSize
        # Now, check if we have a new list of nodes
        if len(nSelectedNodes) > 0:
            # If we do, then we need to change the pooling functions to select
            # fewer nodes. This allows us to use graphs of different sizes.
            # Note that there is nothing learnable in the pooling functions,
            # so they can easily be re-made and re-initialized.
            # The first thing we need to check is that the length of the
            # number of nodes is equal to the number of layers (this list
            # indicates the number of nodes selected at the output of each
            # layer)
            assert len(nSelectedNodes) == self.L
            # Then, update the N that we have stored
            self.N = [GSO.shape[1]] + nSelectedNodes
            # And get the new pooling functions
            for l in range(self.L):
                # For each layer, add the pooling function
                self.jARMA[3*l+2] = self.rho(self.N[l], self.N[l+1],
                                             self.alpha[l])
                self.jARMA[3*l+2].addGSO(self.S)
        else:
            # Just update the GSO
            for l in range(self.L):
                self.jARMA[3*l+2].addGSO(self.S)
        # And update the GSO in the ARMA graph filters, which are still
        # missing it
        for l in range(self.L):
            self.jARMA[3*l].addGSO(self.S) # Graph convolutional layer

    def splitForward(self, x):
        # Now we compute the forward call
        assert len(x.shape) == 3
        batchSize = x.shape[0]
        assert x.shape[1] == self.F[0]
        assert x.shape[2] == self.N[0]
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # Let's call the graph filtering layer
        y = self.jARMA(x)
        # Flatten the output
        yFlat = y.reshape(batchSize, self.F[-1] * self.N[-1])
        # And, feed it into the MLP
        return self.MLP(yFlat), y
        # If self.MLP is a sequential on an empty list, it just does nothing.

    def forward(self, x):
        # Most of the time, we just need the actual, last output. But, since
        # in this case we also want to compare with the output of the GNN
        # itself, we need this other splitForward method that returns both
        # outputs (the GNN and the MLP), while the proper forward method
        # returns only the MLP output.
        output, _ = self.splitForward(x)
        return output

    def to(self, device):
        # Because only the filter taps and the weights are registered as
        # parameters, when we do a .to(device) operation it does not move the
        # GSOs. So we need to move them ourselves.
        # Call the parent .to() method (to move the registered parameters)
        super().to(device)
        # Move the GSO
        self.S = self.S.to(device)
        # And all the other variables derived from it.
        for l in range(self.L):
            self.jARMA[3*l].addGSO(self.S)
            self.jARMA[3*l+2].addGSO(self.S)

class LocalARMA(nn.Module):
    """
    LocalARMA: implements the selection GNN architecture using ARMA graph
        filters by Jacobi's method and local operations only

    Initialization:

        LocalARMA(dimNodeSignals, nDenominatorTaps, nResidueTaps,
                  bias, # Graph Filtering
                  nonlinearity, # Nonlinearity
                  nSelectedNodes, poolingFunction, poolingSize, # Pooling
                  dimReadout, # MLP in the end
                  GSO, order = None, tMax = 5) # Structure

        Input:
            /** Graph filtering layers **/
            dimNodeSignals (list of int): dimension of the signals at each
                layer (i.e. number of features at each node, or size of the
                vector supported at each node)
            nDenominatorTaps (list of int): number of filter taps in the
                denominator polynomial at each layer
            nResidueTaps (list of int): number of filter taps in the residue
                polynomial at each layer
            bias (bool): include bias after graph filter on every layer
            >> Obs.: dimNodeSignals[0] is the number of features (the
                dimension of the node signals) of the data, where
                dimNodeSignals[l] is the dimension obtained at the output of
                layer l, l=1,...,L. Therefore, for L layers,
                len(dimNodeSignals) = L+1. Slightly different,
                nResidueTaps[l] is the number of filter taps for the filters
                implemented at layer l+1, thus len(nResidueTaps) = L. Same
                holds for nDenominatorTaps.
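            >> Obs.: For instance (hypothetical numbers), a two-layer
                LocalARMA would take nDenominatorTaps = [2, 2] and
                nResidueTaps = [3, 3]; both lists have length L = 2.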

            /** Activation function **/
            nonlinearity (torch.nn): module from torch.nn non-linear
                activations

            /** Pooling **/
            nSelectedNodes (list of int): number of nodes to keep after
                pooling on each layer
            >> Obs.: The selected nodes are the first nSelectedNodes[l]
                starting from the first element in the order specified by the
                given GSO
            poolingFunction (nn.Module in Utils.graphML): summarizing function
            poolingSize (list of int): size of the neighborhood to compute the
                summary from at each layer

            /** Readout layer **/
            dimReadout (list of int): number of output hidden units of a
                sequence of fully connected layers applied locally at each
                node (i.e. no exchange of information involved).

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice.
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes (i.e. for pooling reasons); the
                string has to be such that there is a function named
                'perm' + order in Utils.graphTools that takes as input the GSO
                and returns a new GSO ordered by the specified criteria and
                an order array

            /** Method specifics **/
            tMax (int): how many iterations in the Jacobi method (default: 5)

        Output:
            nn.Module with a Local GNN architecture with the above specified
            characteristics, implementing ARMA filters.

    Forward call:

        LocalARMA(x)

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the GNN
                with ARMA filters;
                shape: batchSize x dimReadout[-1] x nSelectedNodes[-1]

    Other methods:

        .changeGSO(S, nSelectedNodes = [], poolingSize = []): takes as input a
        new graph shift operator S as a tensor of shape
            (dimEdgeFeatures x) numberNodes x numberNodes
        Then, next time the LocalARMA is run, it will run over the graph with
        GSO S, instead of running over the original GSO S. This is
        particularly useful when training on one graph, and testing on
        another one. The number of selected nodes and the pooling size will
        not change unless they are specifically given as input. Those lists
        need to have the same length as the number of layers. There is no
        need to define both, unless they change.
        >> Obs.: The number of nodes in the GSOs need not be the same, but
            unless we want to risk zero-padding beyond the original number of
            nodes (which just results in disconnected nodes), we might want
            to update the nSelectedNodes and poolingSize accordingly, if the
            size of the new GSO is different.

        y, yGNN = .splitForward(x): gives the output of the entire GNN y,
        which is of shape batchSize x dimReadout[-1] x nSelectedNodes[-1], as
        well as the output of all the GNN layers (i.e. before the readout
        layers), yGNN of shape
        batchSize x nSelectedNodes[-1] x dimFeatures[-1]. This can be used to
        isolate the effect of graph filtering from the effect of the readout
        layer.

        y = .singleNodeForward(x, nodes): outputs the value of the last layer
        at a single node. x is the usual input of shape
        batchSize x dimFeatures x numberNodes. nodes is either a single node
        (int) or a collection of nodes (list or numpy.array) of length
        batchSize, where for each element in the batch, we get the output at
        the single specified node. The output y is of shape
        batchSize x dimReadout[-1].
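
    Example (a hedged, minimal sketch: gml.MaxPoolLocal and the numbers below
        are assumptions, not part of the original code):

        net = LocalARMA([1, 16], [2], [3], True, nn.ReLU,
                        [10], gml.MaxPoolLocal, [1], [8, 4],
                        GSO, tMax = 5)
        y = net(x)                          # batchSize x 4 x 10
        yNode = net.singleNodeForward(x, 5) # batchSize x 4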
""" def __init__(self, # Graph filtering dimNodeSignals, nDenominatorTaps, nResidueTaps, bias, # Nonlinearity nonlinearity, # Pooling nSelectedNodes, poolingFunction, poolingSize, # MLP in the end dimReadout, # Structure GSO, order = None, tMax = 5): # Initialize parent: super().__init__() # dimNodeSignals should be a list and of size 1 more than nFilter taps. assert len(dimNodeSignals) == len(nDenominatorTaps) + 1 assert len(dimNodeSignals) == len(nResidueTaps) + 1 # nSelectedNodes should be a list of size nFilterTaps, since the number # of nodes in the first layer is always the size of the graph assert len(nSelectedNodes) == len(nResidueTaps) # poolingSize also has to be a list of the same size assert len(poolingSize) == len(nResidueTaps) # Check whether the GSO has features or not. After that, always handle # it as a matrix of dimension E x N x N. assert len(GSO.shape) == 2 or len(GSO.shape) == 3 if len(GSO.shape) == 2: assert GSO.shape[0] == GSO.shape[1] GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N else: assert GSO.shape[1] == GSO.shape[2] # E x N x N # Store the values (using the notation in the paper): self.L = len(nResidueTaps) # Number of graph filtering layers self.F = dimNodeSignals # Features self.P = nDenominatorTaps # Denominator taps (order - 1) self.K = nResidueTaps # Residue taps (order - 1) self.E = GSO.shape[0] # Number of edge features self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes # See that we adding N_{0} = N as the number of nodes input the first # layer: this above is the list containing how many nodes are between # each layer. self.bias = bias # Boolean # Store the rest of the variables if order is not None: # If there's going to be reordering, then the value of the # permutation function will be given by the criteria in # self.reorder. For instance, if self.reorder = 'Degree', then # we end up calling the function Utils.graphTools.permDegree. # We need to be sure that the function 'perm' + self.reorder # is available in the Utils.graphTools module. self.permFunction = eval('Utils.graphTools.perm' + order) else: self.permFunction = alegnnss.utils.graphTools.permIdentity # This is overriden if coarsening is selected, since the ordering # function is native to that pooling method. self.S, self.order = self.permFunction(GSO) if 'torch' not in repr(self.S.dtype): self.S = torch.tensor(self.S) self.sigma = nonlinearity self.rho = poolingFunction self.alpha = poolingSize self.dimReadout = dimReadout self.tMax = tMax # And now, we're finally ready to create the architecture: #\\\ Graph filtering layers \\\ # OBS.: We could join this for with the one before, but we keep separate # for clarity of code. gfl = [] # Graph Filtering Layers for l in range(self.L): #\\ Graph filtering stage: gfl.append(gml.GraphFilterARMA(self.F[l], self.F[l+1], self.P[l], self.K[l], self.E, self.bias, self.tMax)) # There is a 3*l below here, because we have three elements per # layer: graph filter, nonlinearity and pooling, so after each layer # we're actually adding elements to the (sequential) list. 
            gfl[3*l].addGSO(self.S)
            #\\ Nonlinearity
            gfl.append(self.sigma())
            #\\ Pooling
            gfl.append(self.rho(self.N[l], self.N[l+1], self.alpha[l]))
            # Same as before, this is 3*l+2
            gfl[3*l+2].addGSO(self.S)
        # And now feed them into the sequential
        self.jARMA = nn.Sequential(*gfl) # Graph Filtering Layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimReadout) > 0: # Maybe we don't want to readout anything
            # The first layer has to connect whatever was left of the graph
            # filtering stage to create the number of features required by
            # the readout layer
            fc.append(nn.Linear(self.F[-1], dimReadout[0], bias = self.bias))
            # The last linear layer cannot be followed by a nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss, or
            # we add a softmax.)
            for l in range(len(dimReadout)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigma())
                # And add the linear layer
                fc.append(nn.Linear(dimReadout[l], dimReadout[l+1],
                                    bias = self.bias))
        # And we're done
        self.Readout = nn.Sequential(*fc)
        # so we finally have the architecture.

    def changeGSO(self, GSO, nSelectedNodes = [], poolingSize = []):
        # We use this to change the GSO, while keeping the same graph filters.
        # Check that the new GSO has the correct shape
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Get dataType and device of the current GSO, so that, when we
        # replace it, it still has the same type and lives on the same device.
        dataType = self.S.dtype
        if 'device' in dir(self.S):
            device = self.S.device
        else:
            device = None
        # Reorder the new GSO
        self.S, self.order = self.permFunction(GSO)
        # Change data type and device as required
        self.S = changeDataType(self.S, dataType)
        if device is not None:
            self.S = self.S.to(device)
        # Before making decisions, check if there is a new poolingSize list
        if len(poolingSize) > 0:
            # Check it has the right length
            assert len(poolingSize) == self.L
            # And update it
            self.alpha = poolingSize
        # Now, check if we have a new list of nodes
        if len(nSelectedNodes) > 0:
            # If we do, then we need to change the pooling functions to select
            # fewer nodes. This allows us to use graphs of different sizes.
            # Note that there is nothing learnable in the pooling functions,
            # so they can easily be re-made and re-initialized.
            # The first thing we need to check is that the length of the
            # number of nodes is equal to the number of layers (this list
            # indicates the number of nodes selected at the output of each
            # layer)
            assert len(nSelectedNodes) == self.L
            # Then, update the N that we have stored
            self.N = [GSO.shape[1]] + nSelectedNodes
            # And get the new pooling functions
            for l in range(self.L):
                # For each layer, add the pooling function
                self.jARMA[3*l+2] = self.rho(self.N[l], self.N[l+1],
                                             self.alpha[l])
                self.jARMA[3*l+2].addGSO(self.S)
        else:
            # Just update the GSO
            for l in range(self.L):
                self.jARMA[3*l+2].addGSO(self.S)
        # And update in the LSIGF that is still missing
        for l in range(self.L):
            self.jARMA[3*l].addGSO(self.S) # Graph convolutional layer

    def splitForward(self, x):
        # Now we compute the forward call
        assert len(x.shape) == 3
        assert x.shape[1] == self.F[0]
        assert x.shape[2] == self.N[0]
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # Let's call the graph filtering layer
        yARMA = self.jARMA(x)
        # Change the order, for the readout
        y = yARMA.permute(0, 2, 1) # B x N[-1] x F[-1]
        # And, feed it into the Readout layer
        y = self.Readout(y) # B x N[-1] x dimReadout[-1]
        # Reshape and return
        return y.permute(0, 2, 1), yARMA
        # B x dimReadout[-1] x N[-1], B x dimFeatures[-1] x N[-1]

    def forward(self, x):
        # Most of the time, we just need the actual, last output. But, since
        # in this case we also want to compare with the output of the GNN
        # itself, we need to create this other forward function that takes
        # both outputs (the GNN and the MLP) and returns only the MLP output
        # in the proper forward function.
        output, _ = self.splitForward(x)
        return output

    def singleNodeForward(self, x, nodes):
        # x is of shape B x F[0] x N[-1]
        batchSize = x.shape[0]
        # nodes is either an int, or a list/np.array of ints of size B
        assert type(nodes) is int \
                or type(nodes) is list \
                or type(nodes) is np.ndarray
        # Let us start by building the selection matrix
        # This selection matrix has to be a matrix of shape
        #   B x N[-1] x 1
        # so that when multiplying with the output of the forward, we get a
        #   B x dimReadout[-1] x 1
        # and we just squeeze the last dimension
        # TODO: The big question here is if multiplying by a matrix is faster
        # than doing torch.index_select
        # Let's always work with numpy arrays to make it easier.
        if type(nodes) is int:
            # Change the node number to accommodate the new order
            nodes = self.order.index(nodes)
            # If it's int, make it a list and an array
            nodes = np.array([nodes], dtype=int)
            # And repeat for the number of batches
            nodes = np.tile(nodes, batchSize)
        if type(nodes) is list:
            newNodes = [self.order.index(n) for n in nodes]
            nodes = np.array(newNodes, dtype = int)
        elif type(nodes) is np.ndarray:
            newNodes = np.array([np.where(np.array(self.order) == n)[0][0] \
                    for n in nodes])
            nodes = newNodes.astype(int)
        # Now, nodes is an integer np.ndarray with shape batchSize
        # Build the selection matrix
        selectionMatrix = np.zeros([batchSize, self.N[-1], 1])
        selectionMatrix[np.arange(batchSize), nodes, 0] = 1.
        # And convert it to a tensor
        selectionMatrix = torch.tensor(selectionMatrix,
                                       dtype = x.dtype,
                                       device = x.device)
        # Now compute the output
        y = self.forward(x)
        # This output is of size B x dimReadout[-1] x N[-1]
        # Multiply the output
        y = torch.matmul(y, selectionMatrix) # B x dimReadout[-1] x 1
        # Squeeze the last dimension and return
        return y.squeeze(2)

    def to(self, device):
        # Because only the filter taps and the weights are registered as
        # parameters, when we do a .to(device) operation it does not move the
        # GSOs. So we need to move them ourselves.
        # Call the parent .to() method (to move the registered parameters)
        super().to(device)
        # Move the GSO
        self.S = self.S.to(device)
        # And all the other variables derived from it.
        for l in range(self.L):
            self.jARMA[3*l].addGSO(self.S)
            self.jARMA[3*l+2].addGSO(self.S)

class AggregationGNN(nn.Module):
    """
    AggregationGNN: implement the aggregation GNN architecture

    Initialization:

        Input:
            /** Regular convolutional layers **/
            dimFeatures (list of int): number of features on each layer
            nFilterTaps (list of int): number of filter taps on each layer
            bias (bool): include bias after graph filter on every layer
            >> Obs.: dimFeatures[0] is the number of features (the dimension
               of the node signals) of the data, where dimFeatures[l] is the
               dimension obtained at the output of layer l, l=1,...,L.
               Therefore, for L layers, len(dimFeatures) = L+1. Slightly
               different, nFilterTaps[l] is the number of filter taps for the
               filters implemented at layer l+1, thus len(nFilterTaps) = L.

            /** Activation function **/
            nonlinearity (torch.nn): module from torch.nn non-linear
                activations

            /** Pooling **/
            poolingFunction (torch.nn): module from torch.nn pooling layers
            poolingSize (list of int): size of the neighborhood to compute
                the summary from at each layer

            /** Readout layer **/
            dimLayersMLP (list of int): number of output hidden units of a
                sequence of fully connected layers after the graph filters
                have been applied

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice.
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes (i.e. for pooling reasons); the
                string has to be such that there is a function named
                'perm' + order in Utils.graphTools that takes as input the
                GSO and returns a new GSO ordered by the specified criteria
                and an order array
            maxN (int): maximum number of neighborhood exchanges
                (default: None)

            /** Multiple nodes options (weight sharing) **/
            nNodes (int): number of nodes on which to compute the aggregation
                GNN (default: 1)
            dimLayersAggMLP (list of int): Once the information at each of
                the nNodes selected is processed, then they are aggregated
                together through this MLP (default: [] empty list); note that
                using this variable makes the architecture non-local.
            >> Obs.: The nodes selected to carry out the aggregation are
               those corresponding to the first elements in the provided GSO.
            >> Obs.: If dimLayersAggMLP = [], the output is of shape
                   batchSize x numberOfFeatures x nNodes
               where the number of features is given by the last element of
               the dimLayersMLP list (or, if this list is empty, by the last
               number of the dimFeatures list). However, if nNodes = 1, then
               the output is of shape batchSize x numberOfFeatures since we
               understand that the output is expected to be a summary of the
               entire graph signal

        Output:
            nn.Module with an Aggregation GNN architecture with the above
            specified characteristics.
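
    Example (illustrative only; every value below is hypothetical, and S is
        assumed to be a previously built N x N graph shift operator):

            aggGNN = AggregationGNN([1, 16, 32],   # dimFeatures
                                    [4, 4],        # nFilterTaps
                                    True,          # bias
                                    nn.ReLU,       # nonlinearity
                                    nn.MaxPool1d,  # poolingFunction
                                    [2, 2],        # poolingSize
                                    [10],          # dimLayersMLP
                                    S)             # GSO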

    Forward call:

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the
                aggregation GNN; shape: batchSize x dimLayersMLP[-1]
                (Obs.: if nNodes > 1 and dimLayersAggMLP = [], then the
                output is another graph signal of shape
                batchSize x dimLayersMLP[-1] x nNodes)
    """
    def __init__(self,
                 # Graph filtering
                 dimFeatures, nFilterTaps, bias,
                 # Nonlinearity
                 nonlinearity,
                 # Pooling
                 poolingFunction, poolingSize,
                 # MLP in the end
                 dimLayersMLP,
                 # Structure
                 GSO,
                 order = None,
                 maxN = None,
                 # Multiple nodes options
                 nNodes = 1,
                 dimLayersAggMLP = []):
        super().__init__()
        # dimFeatures should be a list of size 1 more than nFilterTaps.
        assert len(dimFeatures) == len(nFilterTaps) + 1
        # poolingSize also has to be a list of the same size
        assert len(poolingSize) == len(nFilterTaps)
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Store the values (using the notation in the paper):
        self.L = len(nFilterTaps) # Number of convolutional layers
        self.F = dimFeatures # Features
        self.K = nFilterTaps # Filter taps
        self.E = GSO.shape[0]
        self.bias = bias # Boolean
        if order is not None:
            # If there's going to be reordering, then the value of the
            # permutation function will be given by the criteria in
            # self.reorder. For instance, if self.reorder = 'Degree', then
            # we end up calling the function graphTools.permDegree.
            # We need to be sure that the function 'perm' + self.reorder
            # is available in the alegnnss.utils.graphTools module.
            self.permFunction = eval('alegnnss.utils.graphTools.perm' + order)
        else:
            self.permFunction = alegnnss.utils.graphTools.permIdentity
            # This is overriden if coarsening is selected, since the ordering
            # function is native to that pooling method.
        GSO, self.order = self.permFunction(GSO)
        # We need to keep GSO as the numpy version of the GSO and self.S as
        # the torch version; this is because many of the upcoming operations
        # on the GSO to define the structure are still in numpy.
        self.S = GSO.copy()
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        self.sigma = nonlinearity
        self.rho = poolingFunction
        self.alpha = poolingSize # This acts as both the kernel_size and the
        # stride, so there is no overlap on the elements over which we take
        # the maximum (this is the default behavior)
        self.dimLayersMLP = dimLayersMLP
        self.dimLayersAggMLP = dimLayersAggMLP
        self.nNodes = nNodes # Number of nodes on which to process the GNN
        # Maybe we don't want to aggregate information all the way to the
        # end, but only up to some pre-specified value maxN (for numerical
        # reasons, mostly)
        if maxN is None:
            self.maxN = GSO.shape[1]
        else:
            self.maxN = maxN if maxN < GSO.shape[1] else GSO.shape[1]
        # Let's also record the number of nodes on each layer (L+1, actually)
        self.N = [self.maxN]
        for l in range(self.L):
            # In pyTorch, the convolution is a valid correlation, instead of
            # a full one, which means that the output is smaller than the
            # input.
            # Precisely, it is this much smaller (check the documentation of
            # nn.Conv1d)
            outConvN = self.N[l] - (self.K[l] - 1) # Size of the conv output
            # The next equation to compute the number of nodes is obtained
            # from the maxPool1d help in the pytorch documentation
            self.N += [int(
                         (outConvN - (self.alpha[l]-1) - 1)/self.alpha[l] + 1
                          )]
            # int() on a float always applies floor()
        # Now, compute the necessary matrix. Recall that we want to build the
        # vector [[x]_{i}, [Sx]_{i}, [S^2x]_{i}, ..., [S^{N-1}x]_{i}] for the
        # first i=0,...,nNodes-1 elements. But instead of computing the
        # powers S^k and then keeping the ith row, we will multiply S with a
        # [delta_i]_i = 1 and 0s elsewhere, and keep each result in a row.
        delta = np.zeros([self.E, GSO.shape[1], self.nNodes]) # E x N x nNodes
        for n in range(self.nNodes):
            delta[:, n, n] = 1. # E x N x nNodes
        # And create the place where to store all of this
        SN = delta.copy().reshape([self.E, 1, GSO.shape[1], self.nNodes])
        for k in range(1, self.maxN):
            delta = GSO @ delta # E x N x nNodes
            SN = np.concatenate((SN,
                                 delta.reshape([self.E, 1, GSO.shape[1],
                                                self.nNodes])),
                                axis = 1) # E x k x N x nNodes
        # Now, we have constructed the matrix E x maxN x N x nNodes, but what
        # we want is that the signal, when multiplied by this matrix,
        # constructs the vector z for each of the nNodes. This vector z is a
        # map between the N-vector signal and a maxN-vector z, so we want to
        # map N to maxN linearly, multiplying by the left. Therefore, we want
        # an N x maxN matrix. So we reshape the dimensions
        SN = SN.transpose(3, 0, 2, 1) # nNodes x E x N x maxN
        # This matrix SN just needs to multiply the incoming x to obtain the
        # aggregated vector. And that's it.
        self.SN = torch.tensor(SN)
        # The idea to handle different features and different nodes with the
        # same 1D convolution is realizing that: for each edge feature E we
        # need a different filter, and for each node we need _the same_
        # convolutional filters. Therefore, the different nNodes will go to
        # increase the batch size, while the edge features will go to
        # increase the feature space. And since different edge features
        # increase the feature space we need to consider them now.
        # And now, we're finally ready to create the architecture:
        #\\\ Graph filtering layers \\\
        # OBS.: We could join this for with the one before, but we keep them
        # separate for clarity of code.
        convl = [] # Convolutional Layers
        for l in range(self.L):
            #\\ Graph filtering stage:
            convl.append(nn.Conv1d(self.F[l]*self.E,
                                   self.F[l+1]*self.E,
                                   self.K[l],
                                   bias = self.bias))
            #\\ Nonlinearity
            convl.append(self.sigma())
            #\\ Pooling
            convl.append(self.rho(self.alpha[l]))
        # And now feed them into the sequential
        self.ConvLayers = nn.Sequential(*convl) # Convolutional layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimLayersMLP) > 0: # Maybe we don't want an MLP at all
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            dimInputMLP = self.N[-1] * self.F[-1] * self.E
            # (i.e., we have N[-1] nodes left, each one described by F[-1]
            # features, which means this will be flattened into a vector of
            # size N[-1]*F[-1])
            fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss or we
            # add a softmax.)
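            # (E.g., with dimLayersMLP = [64, 32, 10] the loop below yields
            # Linear -> sigma -> Linear -> sigma -> Linear: a nonlinearity in
            # front of every linear layer except the first one.)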
            for l in range(len(dimLayersMLP)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigma())
                # And add the linear layer
                fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1],
                                    bias = self.bias))
        # And we're done within each node
        self.MLP = nn.Sequential(*fc)
        # Now let's aggregate the information from all nodes
        aggfc = []
        if len(self.dimLayersAggMLP) > 0:
            # If there's a final aggregation layer, then it will have to mix
            # the number of features of each of the nNodes. Note that this
            # number of features will be the output of the last layer of the
            # MLP if there was one, or the last number of features if not
            dimInputAggMLP = dimLayersMLP[-1] if len(dimLayersMLP) > 0 \
                                 else self.N[-1] * self.F[-1] * self.E
            # This is the input dimension for each node. So now we need to
            # multiply this by the number of nodes
            aggfc.append(nn.Linear(dimInputAggMLP * nNodes,
                                   dimLayersAggMLP[0],
                                   bias = self.bias))
            # And then, for the rest of the layers
            for l in range(len(dimLayersAggMLP)-1):
                # Add the nonlinearity
                aggfc.append(self.sigma())
                # And the linear layer
                aggfc.append(nn.Linear(dimLayersAggMLP[l],
                                       dimLayersAggMLP[l+1],
                                       bias = self.bias))
        # Build the sequential even if aggfc is empty, so that self.AggMLP
        # always exists (an empty nn.Sequential acts as the identity); the
        # forward call below relies on this when nNodes == 1 and
        # dimLayersAggMLP == [].
        self.AggMLP = nn.Sequential(*aggfc)
        # so we finally have the architecture.

    def forward(self, x):
        # Now we compute the forward call
        assert len(x.shape) == 3
        B = x.shape[0] # batch size
        assert x.shape[1] == self.F[0]
        assert x.shape[2] == self.SN.shape[2]
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # So, up to here, we have:
        #   x of shape B x F x N
        F = x.shape[1]
        N = x.shape[2]
        #   SN of shape nNodes x E x N x maxN
        nNodes = self.SN.shape[0]
        E = self.SN.shape[1]
        maxN = self.SN.shape[3]
        # We will consider a target shape of B x nNodes x E x F x N, so we
        # adapt
        x = x.reshape([B, 1, 1, F, N])
        SN = self.SN.reshape([1, nNodes, E, N, maxN])
        # Let's do the aggregation step
        z = torch.matmul(x, SN) # B x nNodes x E x F x maxN
        # And now, we need to join dimensions 0 and 1 (batch and nNodes), and
        # dimensions 2 and 3 (edge features and node features) before feeding
        # this into the convolution as a three-dimensional vector.
        # And since we always join the last dimensions
        z = z.permute(2, 3, 4, 0, 1).reshape([E, F, maxN, B * nNodes])
        z = z.permute(3, 2, 0, 1).reshape([B * nNodes, maxN, E * F])
        z = z.permute(0, 2, 1) # (B * nNodes) x (E * F) x maxN
        # Let's call the convolutional layers
        y = self.ConvLayers(z)
        # Flatten the output
        y = y.reshape([B * self.nNodes, self.F[-1] * self.N[-1] * self.E])
        # And, feed it into the per node MLP
        y = self.MLP(y) # (B * nNodes) x dimLayersMLP[-1]
        # And now we have to unpack it back for every node
        y = y.permute(1, 0).reshape([y.shape[1], B, nNodes]).permute(1, 0, 2)
        # So that, so far, y is a graph signal (as expected) and, as such,
        # has shape B x dimLayersMLP[-1] x nNodes
        # And now, if we have to aggregate one last time, we cannot just feed
        # it into the aggregator MLP: to do so, we need to reshape first. But
        # if there's no aggregator (and nNodes > 1), then the output has to
        # stay a graph signal, so there's no need for a reshape
        if nNodes == 1 or len(self.dimLayersAggMLP) > 0:
            y = y.reshape([B, y.shape[1] * nNodes])
            y = self.AggMLP(y)
        # And now we're done
        return y

    def to(self, device):
        # Because only the filter taps and the weights are registered as
        # parameters, when we do a .to(device) operation it does not move the
        # GSOs. So we need to move them ourselves.
        # Call the parent .to() method (to move the registered parameters)
        super().to(device)
        # Move to device the GSO and its related variables.
        self.S = self.S.to(device)
        self.SN = self.SN.to(device)

class MultiNodeAggregationGNN(nn.Module):
    """
    MultiNodeAggregationGNN: implement the multi-node aggregation GNN
        architecture

    Initialization:

        Input:
            /** External operation: Neighboring exchanges **/
            nSelectedNodes (list of int): number of selected nodes on each
                outer layer
            nShifts (list of int): number of shifts to be done by the
                selected nodes on each outer layer

            /** Internal operation: Regular convolution **/
            dimFeatures (list of list of int): the external list corresponds
                to the outer layers, the inner list to how many features to
                process on each inner layer (the aggregation GNN on each
                node)
            nFilterTaps (list of list of int): the external list corresponds
                to the outer layers, the inner list to how many filter taps
                to consider on each inner layer (the aggregation GNN on each
                node)
            bias (bool): include bias after graph filter on every layer

            /** Internal operation: Activation function **/
            nonlinearity (torch.nn): module from torch.nn non-linear
                activations

            /** Internal operation: Pooling **/
            poolingFunction (torch.nn): module from torch.nn pooling layers
            poolingSize (list of list of int): the external list corresponds
                to the outer layers, the inner list to the size of the
                neighborhood to compute the summary from at each inner layer
                (the aggregation GNN on each node)

            /** Readout layer **/
            dimLayersMLP (list of int): number of output hidden units of a
                sequence of fully connected layers after all the outer layers
                have been computed; note that using this layer makes the
                whole architecture non-local.

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice.
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes (i.e. for pooling reasons); the
                string has to be such that there is a function named
                'perm' + order in Utils.graphTools that takes as input the
                GSO and returns a new GSO ordered by the specified criteria
                and an order array

        Output:
            nn.Module with a Multi-Node Aggregation GNN architecture with the
            above specified characteristics.

    Forward call:

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the
                multi-node aggregation GNN;
                shape: batchSize x dimLayersMLP[-1]

    Example: We want to create a Multi-Node GNN with two outer layers (i.e.
        two rounds of exchanging information on the graph). In the first
        round, we select 10 nodes, and in the following round, we select 5.
        Then, we need to determine how many shifts (how far away) we are
        going to move information around. In the first round (first outer
        layer) we shift around 4 times, and in the second round, we shift
        around 8 times (i.e. we get info from up to the 4-hop neighborhood in
        the first round, and the 8-hop neighborhood in the second round.)

            nSelectedNodes = [10, 5]
            nShifts = [4, 8]

        At this point, we have finished determining the outer structure (the
        one that involves exchanging information with neighbors). Now, we
        need to determine how to process the data within each node (the
        aggregation GNN that happens at each node). Since we have two outer
        layers, each of these parameters will be a list containing two lists.
        Each of these two lists determines the parameters to use to process
        internally the data. All nodes will use the same structure during
        each round.

        Say that we step inside a single node. We start with the signal
        received at the first outer layer (r=0), i.e., we have a signal of
        length nShifts[0] = 4.
        We want to process this signal with a two-layer CNN creating 3 and 6
        features, respectively, using 2 filter taps, and with a ReLU
        nonlinearity in between and a max-pooling of size 2. This will just
        give an output with 6 features. This processing occurs at all
        nSelectedNodes[0] = 10 nodes. After the second round, we get a new
        signal, with 6 features, but of length nShifts[1] = 8 at each of the
        nSelectedNodes[1] = 5 nodes. Now we want to process it through a
        two-layer CNN that creates 12 and 18 features, with filters of size
        2, with ReLU nonlinearities (same as before) and a max pooling (same
        as before) of size 2. The setting is

            dimFeatures = [[1, 3, 6], [6, 12, 18]]
            nFilterTaps = [[2, 2], [2, 2]]
            nonlinearity = nn.ReLU
            poolingFunction = nn.MaxPool1d
            poolingSize = [[2, 2], [2, 2]]

        Recall that between the last convolutional layer (internal) and the
        output to be shared across nodes, there is an MLP layer adapting the
        number of features to the expected number of features of the next
        layer. Once we have all dimFeatures[-1][-1] = 18 features, collected
        at all nSelectedNodes[-1] = 5 nodes, we collect this information in a
        vector and feed it through two fully-connected layers of size 20
        and 10.

            dimLayersMLP = [20, 10]
    """
    def __init__(self,
                 # Outer Structure
                 nSelectedNodes, nShifts,
                 # Inner Structure
                 # Graph filtering
                 dimFeatures, nFilterTaps, bias,
                 # Nonlinearity
                 nonlinearity,
                 # Pooling
                 poolingFunction, poolingSize,
                 # MLP in the end
                 dimLayersMLP,
                 # Graph Structure
                 GSO,
                 order = None):
        # Initialize parent class
        super().__init__()
        # Check that we have an adequate GSO
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        # And create a third dimension if necessary
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        self.N = GSO.shape[1] # Store the number of nodes
        # Now, the interesting thing is that dimFeatures, nFilterTaps, and
        # poolingSize are all now lists of lists, and all of them need to
        # have the same length.
        self.R = len(nSelectedNodes) # Number of outer layers
        self.P = nSelectedNodes # Number of nodes selected on each outer layer
        # Check that the number of selected nodes does not exceed the number
        # of total nodes.
        # TODO: Should we consider that the number of nodes might not be
        # nonincreasing?
        for r in range(self.R):
            if self.P[r] > self.N:
                # If so, just force it to be the number of nodes.
                self.P[r] = self.N
        assert len(nShifts) == self.R
        self.Q = nShifts # Number of shifts of each node on each outer layer
        assert len(dimFeatures) == len(nFilterTaps) == self.R
        assert len(poolingSize) == self.R
        self.F = dimFeatures # List of lists containing the number of
        # features at each inner layer of each outer layer
        # Note that we have to add how many features we want in the ``last''
        # AggGNN layer before going into the MLP layer. Here, I will just mix
        # in the number of last specified features, but there are a lot of
        # other options, like no MLP whatsoever at the end of each
        # convolutional layer. But, why not?
        # TODO: (This adds quite the number of parameters; it would be nice
        # to do some reasonable tests to check whether these MLPs are
        # necessary or not.)
        self.F.append([dimFeatures[-1][-1]])
        self.K = nFilterTaps # List of lists containing the number of filter
        # taps at each inner layer of each outer layer.
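        # (E.g., with dimFeatures = [[1, 3, 6], [6, 12, 18]], self.F becomes
        # [[1, 3, 6], [6, 12, 18], [18]] after the append above, so the last
        # AggGNN layer outputs dimFeatures[-1][-1] = 18 features.)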
        self.bias = bias # Boolean to include bias or not
        self.sigma = nonlinearity # Pointwise nonlinear function to include
        # on each aggregation GNN
        self.rho = poolingFunction # To use on every aggregation GNN
        self.alpha = poolingSize # Pooling size on each aggregation GNN
        self.dimLayersMLP = dimLayersMLP # MLP for each inner aggregation GNN
        if order is not None:
            # If there's going to be reordering, then the value of the
            # permutation function will be given by the criteria in
            # self.reorder. For instance, if self.reorder = 'Degree', then
            # we end up calling the function graphTools.permDegree.
            # We need to be sure that the function 'perm' + self.reorder
            # is available in the alegnnss.utils.graphTools module.
            self.permFunction = eval('alegnnss.utils.graphTools.perm' + order)
        else:
            self.permFunction = alegnnss.utils.graphTools.permIdentity
            # This is overriden if coarsening is selected, since the ordering
            # function is native to that pooling method.
        GSO, self.order = self.permFunction(GSO)
        # We need to keep GSO as the numpy version of the GSO and self.S as
        # the torch version; this is because many of the upcoming operations
        # on the GSO to define the structure are still in numpy.
        self.S = GSO.copy()
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        # Now, there are several things to do next:
        # - The AggregationGNN module always selects the first node, so if we
        #   want outputs at several different nodes, we have to reorder the
        #   GSO ourselves before adding it to each AggregationGNN structure.
        # - A regular python list does not register the parameters of the
        #   corresponding nn.Module, leading to bugs and issues on
        #   optimization. For this, the class nn.ModuleList() has been
        #   created. Unlike nn.Sequential(), this class does not have a
        #   forward method, because its elements are not supposed to act in
        #   a cascade; it just keeps track of dynamically changing numbers of
        #   layers.
        # - Another interesting observation is that preliminary experiments
        #   show that nn.ModuleList() is also capable of handling lists of
        #   lists. And this is precisely what we need: the first element (the
        #   outer one) corresponds to each outer layer, and each one of these
        #   elements contains another list with the Aggregation GNNs
        #   corresponding to the number of selected nodes on each outer
        #   layer.
        #\\\ Ordering:
        # So, let us start with the ordering. P (the number of selected
        # nodes) determines how many different orders we need (it's just
        # rotating the indices so that each one of those P is first).
        # The order will be a list of lists, the outer list having as many
        # elements as the maximum of P.
        self.innerOrder = [list(range(self.N))] # This is the order for the
        # first selected node which is, clearly, the identity order
        maxP = max(self.P) # Maximum number of nodes to consider
        for p in range(1, maxP):
            allNodes = list(range(self.N)) # Create a list of all the nodes
            # in order.
            allNodes.remove(p) # Get rid of the element that we need to put
            # first
            thisOrder = [p] # Take the pth element, put it in a list
            thisOrder.extend(allNodes) # extend that list with all other
            # nodes, except for the pth one.
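            # (E.g., with self.N = 5 and p = 2 this gives
            # thisOrder = [2, 0, 1, 3, 4]: node p moves to the front and the
            # remaining nodes keep their relative order.)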
            self.innerOrder.append(thisOrder) # Store this in the order list
        #\\\ Aggregation GNN stage:
        self.aggGNNmodules = nn.ModuleList() # List to hold the AggGNN modules
        # Create the inner modules
        for r in range(self.R):
            # Add the list of inner modules
            self.aggGNNmodules.append(nn.ModuleList())
            # And start going through the inner modules
            for p in range(self.P[r]):
                thisGSO = GSO[:,self.innerOrder[p],:][:,:,self.innerOrder[p]]
                # Reorder the GSO so that the selected node comes first and
                # is thus selected by the AggGNN module.
                # Create the AggGNN module:
                self.aggGNNmodules[r].append(
                        AggregationGNN(self.F[r], self.K[r], self.bias,
                                       self.sigma,
                                       self.rho, self.alpha[r],
                                       # Now, the number of features in the
                                       # output of this AggregationGNN has to
                                       # be equal to the number of input
                                       # features required at the next AggGNN
                                       # layer.
                                       [self.F[r+1][0]],
                                       thisGSO,
                                       maxN = self.Q[r]))
        # And this should be it for the creation of the AggGNN layers of the
        # MultiNodeAggregationGNN architecture. We move onto one last MLP
        fc = []
        if len(self.dimLayersMLP) > 0: # Maybe we don't want an MLP at all
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            dimInputMLP = self.P[-1] * self.F[-1][0]
            # (i.e., we have P[-1] nodes left, each one described by
            # F[-1][0] features, which means this will be flattened into a
            # vector of size P[-1]*F[-1][0])
            fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss or we
            # add a softmax.)
            for l in range(len(dimLayersMLP)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigma())
                # And add the linear layer
                fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1],
                                    bias = self.bias))
        # And we're done
        self.MLP = nn.Sequential(*fc)
        # so we finally have the architecture.

    def forward(self, x):
        # Now we compute the forward call
        # Check all relative dimensions
        assert len(x.shape) == 3
        batchSize = x.shape[0]
        assert x.shape[1] == self.F[0][0]
        assert x.shape[2] == self.N
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # Create an empty vector to store the output of the AggGNN of each
        # node
        y = torch.empty(0).to(x.device)
        # For each outer layer (except the last one, since in the last one we
        # do not have to zero-pad)
        for r in range(self.R-1):
            # For each node
            for p in range(self.P[r]):
                # Re-order the nodes so that the selected node goes first
                xReordered = x[:, :, self.innerOrder[p]]
                # Compute the output of each GNN
                thisOutput = self.aggGNNmodules[r][p](xReordered)
                # Add it to the corresponding nodes
                y = torch.cat((y, thisOutput.unsqueeze(2)), dim = 2)
            # After this, y is of size B x F x P[r], but if we need to keep
            # going for other outer layers, we need to zero-pad so that we
            # can keep shifting around on the original graph
            if y.shape[2] < self.N:
                # We zero-pad
                zeroPad = torch.zeros(batchSize, y.shape[1],
                                      self.N - y.shape[2])
                zeroPad = zeroPad.type(y.dtype).to(y.device)
                # Save as x
                x = torch.cat((y, zeroPad), dim = 2)
                # and reset y
                y = torch.empty(0).to(x.device)
                # At this point, note that x (and, before, y) were in order:
                # the first element corresponds to the first one in the
                # original ordering and so on. This means that the self.order
                # stored for the MultiNodeAggregationGNN still holds.
            else:
                # We selected all nodes, so we do not need to zero-pad
                x = y # Save as x, and reset y
                y = torch.empty(0).to(x.device)
        # Last layer: we do not need to zero pad afterwards, so we just
        # compute the output of the GNN for each node and store that
        for p in range(self.P[-1]):
            xReordered = x[:, :, self.innerOrder[p]]
            thisOutput = self.aggGNNmodules[-1][p](xReordered)
            y = torch.cat((y, thisOutput.unsqueeze(2)), dim = 2)
        # Flatten the output
        y = y.reshape(batchSize, self.F[-1][-1] * self.P[-1])
        # And, feed it into the MLP
        return self.MLP(y)
        # If self.MLP is a sequential on an empty list it just does nothing.

    def to(self, device):
        # First, we initialize as always.
        super().to(device)
        # And then, in particular, move each architecture (which will
        # internally move the GSOs, neighbors and so on)
        for r in range(self.R):
            for p in range(self.P[r]):
                self.aggGNNmodules[r][p].to(device)

class GraphAttentionNetwork(nn.Module):
    """
    GraphAttentionNetwork: implement the graph attention network architecture

    Initialization:

        GraphAttentionNetwork(dimNodeSignals, nAttentionHeads, # Graph Filtering
                              nonlinearity, # Nonlinearity
                              nSelectedNodes, poolingFunction, poolingSize,
                              dimLayersMLP, bias, # MLP in the end
                              GSO, order = None) # Structure

        Input:
            /** Attention layers **/
            dimNodeSignals (list of int): dimension of the signals at each
                layer
            nAttentionHeads (list of int): number of attention heads on each
                layer
            >> Obs.: dimNodeSignals[0] is the number of features (the
               dimension of the node signals) of the data, where
               dimNodeSignals[l] is the dimension obtained at the output of
               layer l, l=1,...,L. Therefore, for L layers,
               len(dimNodeSignals) = L+1. Slightly different,
               nAttentionHeads[l] is the number of attention heads used at
               layer l+1, thus len(nAttentionHeads) = L.

            /** Activation function **/
            nonlinearity (torch.nn.functional): function from module
                torch.nn.functional for non-linear activations

            /** Pooling **/
            nSelectedNodes (list of int): number of nodes to keep after
                pooling on each layer
            >> Obs.: The selected nodes are the first nSelectedNodes[l]
               starting from the first element in the order specified by the
               given GSO
            poolingFunction (nn.Module in Utils.graphML): summarizing function
            poolingSize (list of int): size of the neighborhood to compute
                the summary from at each layer

            /** Readout layer **/
            dimLayersMLP (list of int): number of output hidden units of a
                sequence of fully connected layers after the graph filters
                have been applied
            bias (bool): include bias after each MLP layer

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice.
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes (i.e. for pooling reasons); the
                string has to be such that there is a function named
                'perm' + order in Utils.graphTools that takes as input the
                GSO and returns a new GSO ordered by the specified criteria
                and an order array

        Output:
            nn.Module with a Graph Attention Network architecture with the
            above specified characteristics.
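
    Example (illustrative only; all values are hypothetical, S is assumed to
        be a previously built N x N GSO, and a no-op pooling module such as
        gml.NoPool is assumed to be available in the graphML module):

            GAT = GraphAttentionNetwork([1, 8, 8],           # dimNodeSignals
                                        [4, 4],              # nAttentionHeads
                                        nn.functional.relu,  # nonlinearity
                                        [N, N],              # nSelectedNodes
                                        gml.NoPool,          # poolingFunction
                                        [1, 1],              # poolingSize
                                        [10],                # dimLayersMLP
                                        True,                # bias
                                        S)                   # GSO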

    Forward call:

        GraphAttentionNetwork(x)

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the graph
                attention network; shape: batchSize x dimLayersMLP[-1]
    """
    def __init__(self,
                 # Graph attentional layer
                 dimNodeSignals, nAttentionHeads,
                 # Nonlinearity (nn.functional)
                 nonlinearity,
                 # Pooling
                 nSelectedNodes, poolingFunction, poolingSize,
                 # MLP in the end
                 dimLayersMLP, bias,
                 # Structure
                 GSO,
                 order = None):
        # Initialize parent:
        super().__init__()
        # dimNodeSignals should be a list of size 1 more than nAttentionHeads.
        assert len(dimNodeSignals) == len(nAttentionHeads) + 1
        # nSelectedNodes should be a list of size nAttentionHeads, since the
        # number of nodes in the first layer is always the size of the graph
        assert len(nSelectedNodes) == len(nAttentionHeads)
        # poolingSize also has to be a list of the same size
        assert len(poolingSize) == len(nAttentionHeads)
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Store the values (using the notation in the paper):
        self.L = len(nAttentionHeads) # Number of graph filtering layers
        self.F = dimNodeSignals # Features
        self.K = nAttentionHeads # Attention Heads
        self.E = GSO.shape[0] # Number of edge features
        self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes
        # Note that we are adding N_{0} = N as the number of nodes input to
        # the first layer: the list above contains how many nodes there are
        # between each layer.
        if order is not None:
            # If there's going to be reordering, then the value of the
            # permutation function will be given by the criteria in
            # self.reorder. For instance, if self.reorder = 'Degree', then
            # we end up calling the function graphTools.permDegree.
            # We need to be sure that the function 'perm' + self.reorder
            # is available in the alegnnss.utils.graphTools module.
            self.permFunction = eval('alegnnss.utils.graphTools.perm' + order)
        else:
            self.permFunction = alegnnss.utils.graphTools.permIdentity
            # This is overriden if coarsening is selected, since the ordering
            # function is native to that pooling method.
        self.S, self.order = self.permFunction(GSO)
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        self.sigma = nonlinearity # This has to be a nn.functional instead of
        # just a nn
        self.rho = poolingFunction
        self.alpha = poolingSize
        self.dimLayersMLP = dimLayersMLP
        self.bias = bias
        # And now, we're finally ready to create the architecture:
        #\\\ Graph Attentional Layers \\\
        # OBS.: The last layer has to have concatenate False, whereas the
        # rest have concatenate True. So we go all the way except for the
        # last layer.
        gat = [] # Graph Attentional Layers
        if self.L > 1:
            # First layer (this goes separate because there are no attention
            # heads increasing the number of features)
            #\\ Graph attention stage:
            gat.append(gml.GraphAttentional(self.F[0], self.F[1], self.K[0],
                                            self.E, self.sigma, True))
            gat[0].addGSO(self.S)
            #\\ Pooling
            gat.append(self.rho(self.N[0], self.N[1], self.alpha[0]))
            gat[1].addGSO(self.S)
            # All the next layers (attention heads appear):
            for l in range(1, self.L-1):
                #\\ Graph attention stage:
                gat.append(gml.GraphAttentional(self.F[l] * self.K[l-1],
                                                self.F[l+1], self.K[l],
                                                self.E, self.sigma, True))
                # There is a 2*l below here, because we have two elements per
                # layer: graph filter and pooling, so after each layer we're
                # actually adding two elements to the (sequential) list.
                gat[2*l].addGSO(self.S)
                #\\ Pooling
                gat.append(self.rho(self.N[l], self.N[l+1], self.alpha[l]))
                # Same as before, this is 2*l+1
                gat[2*l+1].addGSO(self.S)
            # And the last layer (set concatenate to False):
            #\\ Graph attention stage:
            gat.append(gml.GraphAttentional(self.F[self.L-1] \
                                                          * self.K[self.L-2],
                                            self.F[self.L], self.K[self.L-1],
                                            self.E, self.sigma, False))
            gat[2*(self.L-1)].addGSO(self.S)
            #\\ Pooling
            gat.append(self.rho(self.N[self.L-1], self.N[self.L],
                                self.alpha[self.L-1]))
            gat[2*(self.L-1)+1].addGSO(self.S)
        else:
            # If there's only one layer, it just goes straight through,
            # adding a False to the concatenation and no increase in the
            # input features due to attention heads
            gat.append(gml.GraphAttentional(self.F[0], self.F[1], self.K[0],
                                            self.E, self.sigma, False))
            gat[0].addGSO(self.S)
            #\\ Pooling
            gat.append(self.rho(self.N[0], self.N[1], self.alpha[0]))
            gat[1].addGSO(self.S)
        # And now feed them into the sequential
        self.GAT = nn.Sequential(*gat) # Graph Attentional Layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimLayersMLP) > 0: # Maybe we don't want an MLP at all
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            # NOTE: Because sigma is a functional, instead of a layer, we
            # need to pick up the corresponding layer class for the MLP part
            # (we store the class, not an instance, so a fresh module is
            # instantiated for every MLP layer below).
            if str(self.sigma).find('relu') >= 0:
                self.sigmaMLP = nn.ReLU
            elif str(self.sigma).find('tanh') >= 0:
                self.sigmaMLP = nn.Tanh
            dimInputMLP = self.N[-1] * self.F[-1]
            # (i.e., we have N[-1] nodes left, each one described by F[-1]
            # features, which means this will be flattened into a vector of
            # size N[-1]*F[-1])
            fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss or we
            # add a softmax.)
            for l in range(len(dimLayersMLP)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigmaMLP())
                # And add the linear layer
                fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1],
                                    bias = self.bias))
        # And we're done
        self.MLP = nn.Sequential(*fc)
        # so we finally have the architecture.
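    # NOTE: self.GAT alternates one attention module and one pooling module
    # per layer, so layer l's attention module sits at index 2*l and its
    # pooling module at index 2*l+1; the .to() method below relies on this
    # layout to re-attach the moved GSO.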
    def forward(self, x):
        # Now we compute the forward call
        assert len(x.shape) == 3
        batchSize = x.shape[0]
        assert x.shape[1] == self.F[0]
        assert x.shape[2] == self.N[0]
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # Let's call the graph attentional layers
        y = self.GAT(x)
        # Flatten the output
        y = y.reshape(batchSize, self.F[-1] * self.N[-1])
        # And, feed it into the MLP
        return self.MLP(y)
        # If self.MLP is a sequential on an empty list it just does nothing.

    def to(self, device):
        # Because only the filter taps and the weights are registered as
        # parameters, when we do a .to(device) operation it does not move the
        # GSOs. So we need to move them ourselves.
        # Call the parent .to() method (to move the registered parameters)
        super().to(device)
        # Move the GSO
        self.S = self.S.to(device)
        # And all the other variables derived from it.
        for l in range(self.L):
            self.GAT[2*l].addGSO(self.S)
            self.GAT[2*l+1].addGSO(self.S)

class GraphConvolutionAttentionNetwork(nn.Module):
    """
    GraphConvolutionAttentionNetwork: implement the graph convolution
        attention network (GCAT) architecture

    Initialization:

        GraphConvolutionAttentionNetwork(dimNodeSignals, nFilterTaps,
                                         nAttentionHeads, bias, # Graph Filtering
                                         nonlinearity, # Nonlinearity
                                         nSelectedNodes, poolingFunction,
                                         poolingSize,
                                         dimLayersMLP, # MLP in the end
                                         GSO, order = None) # Structure

        Input:
            /** Graph attention convolutional layers **/
            dimNodeSignals (list of int): dimension of the signals at each
                layer
            nFilterTaps (list of int): number of filter taps on each layer
            nAttentionHeads (list of int): number of attention heads on each
                layer
            bias (bool): include bias after the graph filter on each layer
            >> Obs.: dimNodeSignals[0] is the number of features (the
               dimension of the node signals) of the data, where
               dimNodeSignals[l] is the dimension obtained at the output of
               layer l, l=1,...,L. Therefore, for L layers,
               len(dimNodeSignals) = L+1. Slightly different,
               nAttentionHeads[l] is the number of attention heads used at
               layer l+1, thus len(nAttentionHeads) = L. Same for
               len(nFilterTaps) = L.

            /** Activation function **/
            nonlinearity (torch.nn.functional): function from module
                torch.nn.functional for non-linear activations

            /** Pooling **/
            nSelectedNodes (list of int): number of nodes to keep after
                pooling on each layer
            >> Obs.: The selected nodes are the first nSelectedNodes[l]
               starting from the first element in the order specified by the
               given GSO
            poolingFunction (nn.Module in Utils.graphML): summarizing function
            poolingSize (list of int): size of the neighborhood to compute
                the summary from at each layer

            /** Readout layer **/
            dimLayersMLP (list of int): number of output hidden units of a
                sequence of fully connected layers after the graph filters
                have been applied

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes (i.e. for pooling reasons); the
                string has to be such that there is a function named
                'perm' + order in Utils.graphTools that takes as input the
                GSO and returns a new GSO ordered by the specified criteria
                and an order array

        Output:
            nn.Module with a Graph Convolutional Attention Network
            architecture with the above specified characteristics.
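
    Example (illustrative only; hypothetical values, with S a previously
        built N x N GSO and gml.NoPool assumed available as a no-op pooling
        module):

            GCAT = GraphConvolutionAttentionNetwork(
                    [1, 8, 8],           # dimNodeSignals
                    [5, 5],              # nFilterTaps
                    [4, 4],              # nAttentionHeads
                    True,                # bias
                    nn.functional.relu,  # nonlinearity
                    [N, N],              # nSelectedNodes
                    gml.NoPool,          # poolingFunction
                    [1, 1],              # poolingSize
                    [10],                # dimLayersMLP
                    S)                   # GSO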

    Forward call:

        GraphConvolutionAttentionNetwork(x)

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the GCAT;
                shape: batchSize x dimLayersMLP[-1]
    """
    def __init__(self,
                 # Graph attentional layer
                 dimNodeSignals, nFilterTaps, nAttentionHeads, bias,
                 # Nonlinearity (nn.functional)
                 nonlinearity,
                 # Pooling
                 nSelectedNodes, poolingFunction, poolingSize,
                 # MLP in the end
                 dimLayersMLP,
                 # Structure
                 GSO,
                 order = None):
        # Initialize parent:
        super().__init__()
        # dimNodeSignals should be a list of size 1 more than nFilterTaps
        # and nAttentionHeads
        assert len(dimNodeSignals) == len(nFilterTaps) + 1
        assert len(dimNodeSignals) == len(nAttentionHeads) + 1
        # nSelectedNodes should be a list of size L, since the number of
        # nodes in the first layer is always the size of the graph
        assert len(nSelectedNodes) == len(nAttentionHeads)
        # poolingSize also has to be a list of the same size
        assert len(poolingSize) == len(nAttentionHeads)
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Store the values (using the notation in the paper):
        self.L = len(nAttentionHeads) # Number of graph filtering layers
        self.F = dimNodeSignals # Features
        self.K = nFilterTaps # Number of filter taps
        self.P = nAttentionHeads # Attention Heads
        self.E = GSO.shape[0] # Number of edge features
        self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes
        # Note that we are adding N_{0} = N as the number of nodes input to
        # the first layer: the list above contains how many nodes there are
        # between each layer.
        if order is not None:
            # If there's going to be reordering, then the value of the
            # permutation function will be given by the criteria in
            # self.reorder. For instance, if self.reorder = 'Degree', then
            # we end up calling the function graphTools.permDegree.
            # We need to be sure that the function 'perm' + self.reorder
            # is available in the alegnnss.utils.graphTools module.
            self.permFunction = eval('alegnnss.utils.graphTools.perm' + order)
        else:
            self.permFunction = alegnnss.utils.graphTools.permIdentity
            # This is overriden if coarsening is selected, since the ordering
            # function is native to that pooling method.
        self.S, self.order = self.permFunction(GSO)
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        self.sigma = nonlinearity # This has to be a nn.functional instead of
        # just a nn
        self.rho = poolingFunction
        self.alpha = poolingSize
        self.dimLayersMLP = dimLayersMLP
        self.bias = bias
        # And now, we're finally ready to create the architecture:
        #\\\ Graph Attentional Layers \\\
        # OBS.: The last layer has to have concatenate False, whereas the
        # rest have concatenate True. So we go all the way except for the
        # last layer.
        gat = [] # Graph Attentional Layers
        if self.L > 1:
            # First layer (this goes separate because there are no attention
            # heads increasing the number of features)
            #\\ Graph attention stage:
            gat.append(gml.GraphFilterAttentional(self.F[0], self.F[1],
                                                  self.K[0], self.P[0],
                                                  self.E, self.bias,
                                                  self.sigma, True))
            gat[0].addGSO(self.S)
            #\\ Pooling
            gat.append(self.rho(self.N[0], self.N[1], self.alpha[0]))
            gat[1].addGSO(self.S)
            # All the next layers (attention heads appear):
            for l in range(1, self.L-1):
                #\\ Graph attention stage:
                gat.append(gml.GraphFilterAttentional(self.F[l] * self.P[l-1],
                                                      self.F[l+1],
                                                      self.K[l], self.P[l],
                                                      self.E, self.bias,
                                                      self.sigma, True))
                # There is a 2*l below here, because we have two elements per
                # layer: graph filter and pooling, so after each layer we're
                # actually adding two elements to the (sequential) list.
                gat[2*l].addGSO(self.S)
                #\\ Pooling
                gat.append(self.rho(self.N[l], self.N[l+1], self.alpha[l]))
                # Same as before, this is 2*l+1
                gat[2*l+1].addGSO(self.S)
            # And the last layer (set concatenate to False):
            #\\ Graph attention stage:
            gat.append(gml.GraphFilterAttentional(self.F[self.L-1] \
                                                          * self.P[self.L-2],
                                                  self.F[self.L],
                                                  self.K[self.L-1],
                                                  self.P[self.L-1],
                                                  self.E, self.bias,
                                                  self.sigma, False))
            gat[2*(self.L-1)].addGSO(self.S)
            #\\ Pooling
            gat.append(self.rho(self.N[self.L-1], self.N[self.L],
                                self.alpha[self.L-1]))
            gat[2*(self.L-1)+1].addGSO(self.S)
        else:
            # If there's only one layer, it just goes straight through,
            # adding a False to the concatenation and no increase in the
            # input features due to attention heads
            gat.append(gml.GraphFilterAttentional(self.F[0], self.F[1],
                                                  self.K[0], self.P[0],
                                                  self.E, self.bias,
                                                  self.sigma, False))
            gat[0].addGSO(self.S)
            #\\ Pooling
            gat.append(self.rho(self.N[0], self.N[1], self.alpha[0]))
            gat[1].addGSO(self.S)
        # And now feed them into the sequential
        self.GCAT = nn.Sequential(*gat) # Graph Attentional Layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimLayersMLP) > 0: # Maybe we don't want an MLP at all
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            # NOTE: Because sigma is a functional, instead of a layer, we
            # need to pick up the corresponding layer class for the MLP part
            # (we store the class, not an instance, so a fresh module is
            # instantiated for every MLP layer below).
            if str(self.sigma).find('relu') >= 0:
                self.sigmaMLP = nn.ReLU
            elif str(self.sigma).find('tanh') >= 0:
                self.sigmaMLP = nn.Tanh
            dimInputMLP = self.N[-1] * self.F[-1]
            # (i.e., we have N[-1] nodes left, each one described by F[-1]
            # features, which means this will be flattened into a vector of
            # size N[-1]*F[-1])
            fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss or we
            # add a softmax.)
            for l in range(len(dimLayersMLP)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigmaMLP())
                # And add the linear layer
                fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1],
                                    bias = self.bias))
        # And we're done
        self.MLP = nn.Sequential(*fc)
        # so we finally have the architecture.
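    # Shape bookkeeping (e.g., F = [1, 4, 8] with P = [3, 2] heads): the
    # first layer outputs F[1] = 4 features per head and concatenates its
    # P[0] = 3 heads, so the next layer takes F[1]*P[0] = 12 input features;
    # the last layer is built with concatenate = False, so its P[1] = 2 heads
    # are averaged and the final output keeps F[2] = 8 features per node.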
    def forward(self, x):
        # Now we compute the forward call
        assert len(x.shape) == 3
        batchSize = x.shape[0]
        assert x.shape[1] == self.F[0]
        assert x.shape[2] == self.N[0]
        # Reorder
        x = x[:, :, self.order] # B x F x N
        # Let's call the graph attentional layers
        y = self.GCAT(x)
        # Flatten the output
        y = y.reshape(batchSize, self.F[-1] * self.N[-1])
        # And, feed it into the MLP
        return self.MLP(y)
        # If self.MLP is a sequential on an empty list it just does nothing.

    def to(self, device):
        # Because only the filter taps and the weights are registered as
        # parameters, when we do a .to(device) operation it does not move the
        # GSOs. So we need to move them ourselves.
        # Call the parent .to() method (to move the registered parameters)
        super().to(device)
        # Move the GSO
        self.S = self.S.to(device)
        # And all the other variables derived from it.
        for l in range(self.L):
            self.GCAT[2*l].addGSO(self.S)
            self.GCAT[2*l+1].addGSO(self.S)

class EdgeVariantAttention(nn.Module):
    """
    EdgeVariantAttention: implement the edge variant graph filter, with
        coefficients learned following a parameterization given by the
        attention mechanism

    Initialization:

        EdgeVariantAttention(dimNodeSignals, nFilterTaps, nAttentionHeads,
                             bias, # Graph Filtering
                             nonlinearity, # Nonlinearity
                             nSelectedNodes, poolingFunction, poolingSize,
                             dimLayersMLP, # MLP in the end
                             GSO, order = None) # Structure

        Input:
            /** Graph attention filtering layer **/
            dimNodeSignals (list of int): dimension of the signals at each
                layer
            nFilterTaps (list of int): number of filter taps on each layer
            nAttentionHeads (list of int): number of attention heads on each
                layer
            bias (bool): include bias after the graph filter on each layer
            >> Obs.: dimNodeSignals[0] is the number of features (the
               dimension of the node signals) of the data, where
               dimNodeSignals[l] is the dimension obtained at the output of
               layer l, l=1,...,L. Therefore, for L layers,
               len(dimNodeSignals) = L+1. Slightly different,
               nAttentionHeads[l] is the number of attention heads used at
               layer l+1, thus len(nAttentionHeads) = L. Same for
               len(nFilterTaps) = L.

            /** Activation function **/
            nonlinearity (torch.nn.functional): function from module
                torch.nn.functional for non-linear activations

            /** Pooling **/
            nSelectedNodes (list of int): number of nodes to keep after
                pooling on each layer
            >> Obs.: The selected nodes are the first nSelectedNodes[l]
               starting from the first element in the order specified by the
               given GSO
            poolingFunction (nn.Module in Utils.graphML): summarizing function
            poolingSize (list of int): size of the neighborhood to compute
                the summary from at each layer

            /** Readout layer **/
            dimLayersMLP (list of int): number of output hidden units of a
                sequence of fully connected layers after the graph filters
                have been applied

            /** Graph structure **/
            GSO (np.array): graph shift operator of choice.
            order (string or None, default = None): determine the criteria to
                use when reordering the nodes
                (i.e. for pooling reasons); the string has to be such that
                there is a function named 'perm' + order in Utils.graphTools
                that takes as input the GSO and returns a new GSO ordered by
                the specified criteria and an order array

        Output:
            nn.Module with an edge variant graph filter whose coefficients
            are parameterized by an attention mechanism

    Forward call:

        EdgeVariantAttention(x)

        Input:
            x (torch.tensor): input data of shape
                batchSize x dimFeatures x numberNodes

        Output:
            y (torch.tensor): output data after being processed by the edge
                variant attention GNN; shape: batchSize x dimLayersMLP[-1]
    """
    def __init__(self,
                 # Graph attentional layer
                 dimNodeSignals, nFilterTaps, nAttentionHeads, bias,
                 # Nonlinearity (nn.functional)
                 nonlinearity,
                 # Pooling
                 nSelectedNodes, poolingFunction, poolingSize,
                 # MLP in the end
                 dimLayersMLP,
                 # Structure
                 GSO,
                 order = None):
        # Initialize parent:
        super().__init__()
        # dimNodeSignals should be a list of size 1 more than nFilterTaps
        # and nAttentionHeads.
        assert len(dimNodeSignals) == len(nFilterTaps) + 1
        assert len(dimNodeSignals) == len(nAttentionHeads) + 1
        # nSelectedNodes should be a list of size L, since the number of
        # nodes in the first layer is always the size of the graph
        assert len(nSelectedNodes) == len(nAttentionHeads)
        # poolingSize also has to be a list of the same size
        assert len(poolingSize) == len(nAttentionHeads)
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Store the values (using the notation in the paper):
        self.L = len(nAttentionHeads) # Number of graph filtering layers
        self.F = dimNodeSignals # Features
        self.K = nFilterTaps # Filter taps
        self.P = nAttentionHeads # Attention Heads
        self.E = GSO.shape[0] # Number of edge features
        self.N = [GSO.shape[1]] + nSelectedNodes # Number of nodes
        # Note that we are adding N_{0} = N as the number of nodes input to
        # the first layer: the list above contains how many nodes there are
        # between each layer.
        if order is not None:
            # If there's going to be reordering, then the value of the
            # permutation function will be given by the criteria in
            # self.reorder. For instance, if self.reorder = 'Degree', then
            # we end up calling the function graphTools.permDegree.
            # We need to be sure that the function 'perm' + self.reorder
            # is available in the alegnnss.utils.graphTools module.
            self.permFunction = eval('alegnnss.utils.graphTools.perm' + order)
        else:
            self.permFunction = alegnnss.utils.graphTools.permIdentity
            # This is overriden if coarsening is selected, since the ordering
            # function is native to that pooling method.
        self.S, self.order = self.permFunction(GSO)
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        self.sigma = nonlinearity # This has to be a nn.functional instead of
        # just a nn
        self.rho = poolingFunction
        self.alpha = poolingSize
        self.dimLayersMLP = dimLayersMLP
        self.bias = bias
        # And now, we're finally ready to create the architecture:
        #\\\ Graph Attentional Layers \\\
        # OBS.: The last layer has to have concatenate False, whereas the
        # rest have concatenate True. So we go all the way except for the
        # last layer.
        gat = [] # Graph Attentional Layers
        if self.L > 1:
            # First layer (this goes separate because there are no attention
            # heads increasing the number of features)
            #\\ Graph attention stage:
            gat.append(gml.EdgeVariantAttentional(self.F[0], self.F[1],
                                                  self.K[0], self.P[0],
                                                  self.E, self.bias,
                                                  self.sigma, True))
            gat[0].addGSO(self.S)
            #\\ Pooling
            gat.append(self.rho(self.N[0], self.N[1], self.alpha[0]))
            gat[1].addGSO(self.S)
            # All the next layers (attention heads appear):
            for l in range(1, self.L-1):
                #\\ Graph attention stage:
                gat.append(gml.EdgeVariantAttentional(self.F[l]*self.P[l-1],
                                                      self.F[l+1],
                                                      self.K[l], self.P[l],
                                                      self.E, self.bias,
                                                      self.sigma, True))
                # There is a 2*l below here, because we have two elements per
                # layer: graph filter and pooling, so after each layer we're
                # actually adding two elements to the (sequential) list.
                gat[2*l].addGSO(self.S)
                #\\ Pooling
                gat.append(self.rho(self.N[l], self.N[l+1], self.alpha[l]))
                # Same as before, this is 2*l+1
                gat[2*l+1].addGSO(self.S)
            # And the last layer (set concatenate to False); note that, as in
            # the intermediate layers, the number of input features grows
            # with the number of attention heads of the previous layer:
            #\\ Graph attention stage:
            gat.append(gml.EdgeVariantAttentional(self.F[self.L-1] \
                                                          * self.P[self.L-2],
                                                  self.F[self.L],
                                                  self.K[self.L-1],
                                                  self.P[self.L-1],
                                                  self.E, self.bias,
                                                  self.sigma, False))
            gat[2*(self.L-1)].addGSO(self.S)
            #\\ Pooling
            gat.append(self.rho(self.N[self.L-1], self.N[self.L],
                                self.alpha[self.L-1]))
            gat[2*(self.L-1)+1].addGSO(self.S)
        else:
            # If there's only one layer, it just goes straight through,
            # adding a False to the concatenation and no increase in the
            # input features due to attention heads
            gat.append(gml.EdgeVariantAttentional(self.F[0], self.F[1],
                                                  self.K[0], self.P[0],
                                                  self.E, self.bias,
                                                  self.sigma, False))
            gat[0].addGSO(self.S)
            #\\ Pooling
            gat.append(self.rho(self.N[0], self.N[1], self.alpha[0]))
            gat[1].addGSO(self.S)
        # And now feed them into the sequential
        self.EVGAT = nn.Sequential(*gat) # Graph Attentional Layers
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimLayersMLP) > 0: # Maybe we don't want an MLP at all
            # The first layer has to connect whatever was left of the graph
            # signal, flattened.
            # NOTE: Because sigma is a functional, instead of a layer, we
            # need to pick up the corresponding layer class for the MLP part
            # (we store the class, not an instance, so a fresh module is
            # instantiated for every MLP layer below).
            if str(self.sigma).find('relu') >= 0:
                self.sigmaMLP = nn.ReLU
            elif str(self.sigma).find('tanh') >= 0:
                self.sigmaMLP = nn.Tanh
            dimInputMLP = self.N[-1] * self.F[-1]
            # (i.e., we have N[-1] nodes left, each one described by F[-1]
            # features, which means this will be flattened into a vector of
            # size N[-1]*F[-1])
            fc.append(nn.Linear(dimInputMLP, dimLayersMLP[0],
                                bias = self.bias))
            # The last linear layer cannot be followed by nonlinearity,
            # because usually this nonlinearity depends on the loss function
            # (for instance, if we have a classification problem, this
            # nonlinearity is already handled by the cross entropy loss or we
            # add a softmax.)
            for l in range(len(dimLayersMLP)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.sigmaMLP())
                # And add the linear layer
                fc.append(nn.Linear(dimLayersMLP[l], dimLayersMLP[l+1],
                                    bias = self.bias))
        # And we're done
        self.MLP = nn.Sequential(*fc)
        # so we finally have the architecture.
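    # NOTE: as in the other attentional architectures, self.EVGAT stores the
    # attentional filter of layer l at index 2*l and its pooling module at
    # index 2*l+1; the .to() method below uses this layout to re-attach the
    # GSO after moving it.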
def forward(self, x): # Now we compute the forward call assert len(x.shape) == 3 batchSize = x.shape[0] assert x.shape[1] == self.F[0] assert x.shape[2] == self.N[0] # Reorder x = x[:, :, self.order] # B x F x N # Let's call the graph attentional layers y = self.EVGAT(x) # Flatten the output y = y.reshape(batchSize, self.F[-1] * self.N[-1]) # And, feed it into the MLP return self.MLP(y) # If self.MLP is a sequential on an empty list it just does nothing. def to(self, device): # Because only the filter taps and the weights are registered as # parameters, when we do a .to(device) operation it does not move the # GSOs. So we need to move them ourselves. # Call the parent .to() method (to move the registered parameters) super().to(device) # Move the GSO self.S = self.S.to(device) # And all the other variables derived from it. for l in range(self.L): self.EVGAT[2*l].addGSO(self.S) self.EVGAT[2*l+1].addGSO(self.S) class GraphRecurrentNN(nn.Module): # Luana R. Ruiz, rubruiz@seas.upenn.edu, 2021/03/04 """ GraphRecurrentNN: implements the GRNN architecture. It is a single-layer GRNN and the hidden state is initialized at random drawing from a standard gaussian. Initialization: GraphRecurrentNN(dimInputSignals, dimOutputSignals, dimHiddenSignals, nFilterTaps, bias, # Filtering nonlinearityHidden, nonlinearityOutput, nonlinearityReadout, # Nonlinearities dimReadout, # Local readout layer dimEdgeFeatures, # Structure GSO) Input: /** Graph convolutions **/ dimInputSignals (int): dimension of the input signals dimOutputSignals (int): dimension of the output signals dimHiddenSignals (int): dimension of the hidden state nFilterTaps (list of int): a list with two elements, the first one is the number of filter taps for the filters in the hidden state equation, the second one is the number of filter taps for the filters in the output bias (bool): include bias after graph filter on every layer /** Activation functions **/ nonlinearityHidden (torch.function): the nonlinearity to apply when computing the hidden state; it has to be a torch function, not a nn.Module nonlinearityOutput (torch.function): the nonlinearity to apply when computing the output signal; it has to be a torch function, not a nn.Module. nonlinearityReadout (nn.Module): the nonlinearity to apply at the end of the readout layer (if the readout layer has more than one layer); this one has to be a nn.Module, instead of just a torch function. /** Readout layer **/ dimReadout (list of int): number of output hidden units of a sequence of fully connected layers applied locally at each node (i.e. no exchange of information involved). /** Graph structure **/ dimEdgeFeatures (int): number of edge features GSO (np.array): graph shift operator of choice. Output: nn.Module with a GRNN architecture with the above specified characteristics Forward call: GraphRecurrentNN(x) Input: x (torch.tensor): input data of shape batchSize x timeSamples x dimInputSignals x numberNodes Output: y (torch.tensor): output data after being processed by the GRNN; batchSize x timeSamples x dimReadout[-1] x numberNodes Other methods: y, yGNN = .splitForward(x): gives the output of the entire GRNN y, which has shape batchSize x timeSamples x dimReadout[-1] x numberNodes, as well as the output of the GRNN (i.e. before the readout layers), yGNN of shape batchSize x timeSamples x dimInputSignals x numberNodes. This can be used to isolate the effect of the graph convolutions from the effect of the readout layer. 
    y = .singleNodeForward(x, nodes): outputs the value of the last layer
    at a single node. x is the usual input of shape
    batchSize x timeSamples x dimInputSignals x numberNodes. nodes is
    either a single node (int) or a collection of nodes (list or
    numpy.array) of length batchSize, where for each element in the batch,
    we get the output at the single specified node. The output y is of
    shape batchSize x timeSamples x dimReadout[-1].

    .changeGSO(S): takes as input a new graph shift operator S as a tensor
    of shape (dimEdgeFeatures x) numberNodes x numberNodes. Then, the next
    time the GraphRecurrentNN is run, it will run over the graph with the
    new GSO S, instead of running over the original one. This is
    particularly useful when training on one graph, and testing on another
    one.
    >> Obs.: The number of nodes in the GSOs need not be the same.
    """

    def __init__(self,
                 # Graph filtering
                 dimInputSignals,
                 dimOutputSignals,
                 dimHiddenSignals,
                 nFilterTaps, bias,
                 # Nonlinearities
                 nonlinearityHidden,
                 nonlinearityOutput,
                 nonlinearityReadout, # nn.Module
                 # Local MLP in the end
                 dimReadout,
                 # Structure
                 GSO):
        # Initialize parent:
        super().__init__()
        # nFilterTaps is a list of two ints: the first one is the number of
        # filter taps used to compute the hidden state, and the second one is
        # the number of filter taps used to compute the output
        assert len(nFilterTaps) == 2
        # Store the values (using the notation in the paper):
        self.F = dimInputSignals # Number of input features
        self.G = dimOutputSignals # Number of output features
        self.H = dimHiddenSignals # Number of hidden features
        self.K = nFilterTaps # Filter taps
        self.bias = bias # Boolean
        # Store the rest of the variables
        self.sigma = nonlinearityHidden
        self.rho = nonlinearityOutput
        self.nonlinearityReadout = nonlinearityReadout
        self.dimReadout = dimReadout
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        self.E = GSO.shape[0] # Number of edge features
        self.N = GSO.shape[1]
        self.S = GSO
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        # No reordering is applied by this architecture, so we keep the
        # identity ordering; singleNodeForward relies on self.order to map
        # node indices
        self.order = list(range(self.N))
        #\\\ Hidden State RNN \\\
        # Create the layer that generates the hidden state, and generate z0
        self.hiddenState = gml.HiddenState(self.F, self.H, self.K[0],
                                           nonlinearity = self.sigma,
                                           E = self.E, bias = self.bias)
        #\\\ Output Graph Filters \\\
        self.outputState = gml.GraphFilter(self.H, self.G, self.K[1],
                                           E = self.E, bias = self.bias)
        # Add the GSO for each graph filter
        self.hiddenState.addGSO(self.S)
        self.outputState.addGSO(self.S)
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimReadout) > 0: # Maybe we don't want a readout at all
            # The first layer has to connect whatever was left of the graph
            # filtering stage to create the number of features required by
            # the readout layer
            fc.append(nn.Linear(self.G, dimReadout[0], bias = self.bias))
            # The last linear layer cannot be followed by nonlinearity, because
            # usually, this nonlinearity depends on the loss function (for
            # instance, if we have a classification problem, this nonlinearity
            # is already handled by the cross entropy loss or we add a softmax.)
            for l in range(len(dimReadout)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.nonlinearityReadout())
                # And add the linear layer
                fc.append(nn.Linear(dimReadout[l], dimReadout[l+1],
                                    bias = self.bias))
        # And we're done
        self.Readout = nn.Sequential(*fc)
        # so we finally have the architecture.

    def splitForward(self, x):
        # Check the dimensions of the input
        #   S: E x N x N
        #   x: B x T x F[0] x N
        assert len(self.S.shape) == 3
        assert self.S.shape[0] == self.E
        N = self.S.shape[1]
        assert self.S.shape[2] == N
        assert len(x.shape) == 4
        B = x.shape[0]
        T = x.shape[1]
        assert x.shape[2] == self.F
        assert x.shape[3] == N
        # This can be generated here or outside of here; it is not clear yet
        # which is the most coherent option
        z0 = torch.randn((B, self.H, N), device = x.device)
        # Compute the trajectory of hidden states
        z, _ = self.hiddenState(x, z0)
        z = z.reshape((B*T, self.H, N))
        # Compute the output trajectory from the hidden states
        yOut = self.outputState(z)
        yOut = self.rho(yOut) # Don't forget the nonlinearity!
        yOut = yOut.reshape((B, T, self.G, N)) # B x T x G x N
        # Change the order, for the readout
        y = yOut.permute(0, 1, 3, 2) # B x T x N x G
        # And, feed it into the Readout layer
        y = self.Readout(y) # B x T x N x dimReadout[-1]
        # Reshape and return
        return y.permute(0, 1, 3, 2), yOut
        # B x T x dimReadout[-1] x N, B x T x dimFeatures[-1] x N

    def forward(self, x):
        # Most of the time we just need the actual, last output. But since in
        # this case we also want to be able to compare with the output of the
        # GNN itself, we have the splitForward function above that returns
        # both outputs (the GNN and the readout), and here we keep only the
        # readout output as the proper forward output.
        output, _ = self.splitForward(x)
        return output

    def singleNodeForward(self, x, nodes):
        # x is of shape B x T x F[0] x N
        batchSize = x.shape[0]
        N = x.shape[3]
        # nodes is either an int, or a list/np.array of ints of size B
        assert type(nodes) is int \
                    or type(nodes) is list \
                    or type(nodes) is np.ndarray
        # Let us start by building the selection matrix
        # This selection matrix has to be a matrix of shape
        #   B x 1 x N[-1] x 1
        # so that when multiplying with the output of the forward, we get a
        #   B x T x dimReadout[-1] x 1
        # and we just squeeze the last dimension
        # TODO: The big question here is if multiplying by a matrix is faster
        # than doing torch.index_select
        # Let's always work with numpy arrays to make it easier.
        if type(nodes) is int:
            # Change the node number to accommodate the new order
            nodes = self.order.index(nodes)
            # If it's int, make it a list and an array
            nodes = np.array([nodes], dtype=int)
            # And repeat for the number of batches
            nodes = np.tile(nodes, batchSize)
        if type(nodes) is list:
            newNodes = [self.order.index(n) for n in nodes]
            nodes = np.array(newNodes, dtype = int)
        elif type(nodes) is np.ndarray:
            newNodes = np.array([np.where(np.array(self.order) == n)[0][0] \
                                    for n in nodes])
            nodes = newNodes.astype(int)
        # Now, nodes is an integer np.ndarray with shape batchSize
        # Build the selection matrix
        selectionMatrix = np.zeros([batchSize, 1, N, 1])
        selectionMatrix[np.arange(batchSize), 0, nodes, 0] = 1.
# And convert it to a tensor selectionMatrix = torch.tensor(selectionMatrix, dtype = x.dtype, device = x.device) # Now compute the output y = self.forward(x) # This output is of size B x T x dimReadout[-1] x N # Multiply the output y = torch.matmul(y, selectionMatrix) # B x T x dimReadout[-1] x 1 # Squeeze the last dimension and return return y.squeeze(3) def changeGSO(self, GSO): # We use this to change the GSO, using the same graph filters. # Check that the new GSO has the correct assert len(GSO.shape) == 2 or len(GSO.shape) == 3 if len(GSO.shape) == 2: assert GSO.shape[0] == GSO.shape[1] GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N else: assert GSO.shape[1] == GSO.shape[2] # E x N x N # Get dataType and device of the current GSO, so when we replace it, it # is still located in the same type and the same device. dataType = self.S.dtype if 'device' in dir(self.S): device = self.S.device else: device = None self.S = GSO # Change data type and device as required self.S = changeDataType(self.S, dataType) if device is not None: self.S = self.S.to(device) # Add the GSO for each graph filter self.hiddenState.addGSO(self.S) self.outputState.addGSO(self.S) class GatedGraphRecurrentNN(nn.Module): # Luana R. Ruiz, rubruiz@seas.upenn.edu, 2021/03/04 """ GatedGraphRecurrentNN: implements the (time, node, edge)-gated GRNN architecture. It is a single-layer gated GRNN and the hidden state is initialized at random drawing from a standard gaussian. Initialization: GatedGraphRecurrentNN(dimInputSignals, dimOutputSignals, dimHiddenSignals, nFilterTaps, bias, # Filtering nonlinearityHidden, nonlinearityOutput, nonlinearityReadout, # Nonlinearities dimReadout, # Local readout layer dimEdgeFeatures, GSO, # Structure gateType) # Gating Input: /** Graph convolutions **/ dimInputSignals (int): dimension of the input signals dimOutputSignals (int): dimension of the output signals dimHiddenSignals (int): dimension of the hidden state nFilterTaps (list of int): a list with two elements, the first one is the number of filter taps for the filters in the hidden state equation, the second one is the number of filter taps for the filters in the output bias (bool): include bias after graph filter on every layer /** Activation functions **/ nonlinearityHidden (torch.function): the nonlinearity to apply when computing the hidden state; it has to be a torch function, not a nn.Module nonlinearityOutput (torch.function): the nonlinearity to apply when computing the output signal; it has to be a torch function, not a nn.Module. nonlinearityReadout (nn.Module): the nonlinearity to apply at the end of the readout layer (if the readout layer has more than one layer); this one has to be a nn.Module, instead of just a torch function. /** Readout layer **/ dimReadout (list of int): number of output hidden units of a sequence of fully connected layers applied locally at each node (i.e. no exchange of information involved). /** Graph structure **/ dimEdgeFeatures (int): number of edge features GSO (np.array): graph shift operator of choice. 
        /** Gating **/
        gateType (string): 'time', 'node' or 'edge' gating

    Output:
        nn.Module with a gated GRNN architecture with the above specified
        characteristics

    Forward call:

    GatedGraphRecurrentNN(x)

    Input:
        x (torch.tensor): input data of shape
            batchSize x timeSamples x dimInputSignals x numberNodes

    Output:
        y (torch.tensor): output data after being processed by the GRNN;
            batchSize x timeSamples x dimReadout[-1] x numberNodes

    Other methods:

    y, yGNN = .splitForward(x): gives the output of the entire GRNN y,
    which has shape batchSize x timeSamples x dimReadout[-1] x numberNodes,
    as well as the output of the GRNN (i.e. before the readout layers),
    yGNN of shape batchSize x timeSamples x dimInputSignals x numberNodes.
    This can be used to isolate the effect of the graph convolutions from
    the effect of the readout layer.

    y = .singleNodeForward(x, nodes): outputs the value of the last layer
    at a single node. x is the usual input of shape
    batchSize x timeSamples x dimInputSignals x numberNodes. nodes is
    either a single node (int) or a collection of nodes (list or
    numpy.array) of length batchSize, where for each element in the batch,
    we get the output at the single specified node. The output y is of
    shape batchSize x timeSamples x dimReadout[-1].

    .changeGSO(S): takes as input a new graph shift operator S as a tensor
    of shape (dimEdgeFeatures x) numberNodes x numberNodes. Then, the next
    time the GatedGraphRecurrentNN is run, it will run over the graph with
    the new GSO S, instead of running over the original one. This is
    particularly useful when training on one graph, and testing on another
    one.
    >> Obs.: The number of nodes in the GSOs need not be the same.
    """

    def __init__(self,
                 # Graph filtering
                 dimInputSignals,
                 dimOutputSignals,
                 dimHiddenSignals,
                 nFilterTaps, bias,
                 # Nonlinearities
                 nonlinearityHidden,
                 nonlinearityOutput,
                 nonlinearityReadout, # nn.Module
                 # Local MLP in the end
                 dimReadout,
                 # Structure
                 GSO,
                 # Gating
                 gateType):
        # Initialize parent:
        super().__init__()
        # nFilterTaps is a list of two ints: the first one is the number of
        # filter taps used to compute the hidden state, and the second one is
        # the number of filter taps used to compute the output
        assert len(nFilterTaps) == 2
        # Store the values (using the notation in the paper):
        self.F = dimInputSignals # Number of input features
        self.G = dimOutputSignals # Number of output features
        self.H = dimHiddenSignals # Number of hidden features
        self.K = nFilterTaps # Filter taps
        self.bias = bias # Boolean
        # Store the rest of the variables
        self.sigma = nonlinearityHidden
        self.rho = nonlinearityOutput
        self.nonlinearityReadout = nonlinearityReadout
        self.dimReadout = dimReadout
        # Check whether the GSO has features or not. After that, always handle
        # it as a matrix of dimension E x N x N.
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        self.E = GSO.shape[0] # Number of edge features
        self.N = GSO.shape[1]
        self.S = GSO
        if 'torch' not in repr(self.S.dtype):
            self.S = torch.tensor(self.S)
        # No reordering is applied by this architecture, so we keep the
        # identity ordering; singleNodeForward relies on self.order to map
        # node indices
        self.order = list(range(self.N))
        #\\\ Hidden State RNN \\\
        # Create the layer that generates the hidden state, and generate z0
        # The type of hidden layer depends on the type of gating
        assert gateType == 'time' or gateType == 'node' or gateType == 'edge'
        if gateType == 'time':
            self.hiddenState = gml.TimeGatedHiddenState(self.F, self.H,
                                                    self.K[0],
                                                    nonlinearity = self.sigma,
                                                    E = self.E,
                                                    bias = self.bias)
        elif gateType == 'node':
            self.hiddenState = gml.NodeGatedHiddenState(self.F, self.H,
                                                    self.K[0],
                                                    nonlinearity = self.sigma,
                                                    E = self.E,
                                                    bias = self.bias)
        elif gateType == 'edge':
            self.hiddenState = gml.EdgeGatedHiddenState(self.F, self.H,
                                                    self.K[0],
                                                    nonlinearity = self.sigma,
                                                    E = self.E,
                                                    bias = self.bias)
        #\\\ Output Graph Filters \\\
        self.outputState = gml.GraphFilter(self.H, self.G, self.K[1],
                                           E = self.E, bias = self.bias)
        # Add the GSO for each graph filter
        self.hiddenState.addGSO(self.S)
        self.outputState.addGSO(self.S)
        #\\\ MLP (Fully Connected Layers) \\\
        fc = []
        if len(self.dimReadout) > 0: # Maybe we don't want a readout at all
            # The first layer has to connect whatever was left of the graph
            # filtering stage to create the number of features required by
            # the readout layer
            fc.append(nn.Linear(self.G, dimReadout[0], bias = self.bias))
            # The last linear layer cannot be followed by nonlinearity, because
            # usually, this nonlinearity depends on the loss function (for
            # instance, if we have a classification problem, this nonlinearity
            # is already handled by the cross entropy loss or we add a softmax.)
            for l in range(len(dimReadout)-1):
                # Add the nonlinearity because there's another linear layer
                # coming
                fc.append(self.nonlinearityReadout())
                # And add the linear layer
                fc.append(nn.Linear(dimReadout[l], dimReadout[l+1],
                                    bias = self.bias))
        # And we're done
        self.Readout = nn.Sequential(*fc)
        # so we finally have the architecture.

    def splitForward(self, x):
        # Check the dimensions of the input
        #   S: E x N x N
        #   x: B x T x F[0] x N
        assert len(self.S.shape) == 3
        assert self.S.shape[0] == self.E
        N = self.S.shape[1]
        assert self.S.shape[2] == N
        assert len(x.shape) == 4
        B = x.shape[0]
        T = x.shape[1]
        assert x.shape[2] == self.F
        assert x.shape[3] == N
        # This can be generated here or outside of here; it is not clear yet
        # which is the most coherent option
        z0 = torch.randn((B, self.H, N), device = x.device)
        # Compute the trajectory of hidden states
        z, _ = self.hiddenState(x, z0)
        z = z.reshape((B*T, self.H, N))
        # Compute the output trajectory from the hidden states
        yOut = self.outputState(z)
        yOut = self.rho(yOut) # Don't forget the nonlinearity!
        yOut = yOut.reshape((B, T, self.G, N)) # B x T x G x N
        # Change the order, for the readout
        y = yOut.permute(0, 1, 3, 2) # B x T x N x G
        # And, feed it into the Readout layer
        y = self.Readout(y) # B x T x N x dimReadout[-1]
        # Reshape and return
        return y.permute(0, 1, 3, 2), yOut
        # B x T x dimReadout[-1] x N, B x T x dimFeatures[-1] x N

    def forward(self, x):
        # Most of the time we just need the actual, last output.
        # But since in this case we also want to be able to compare with the
        # output of the GNN itself, we have the splitForward function above
        # that returns both outputs (the GNN and the readout), and here we
        # keep only the readout output as the proper forward output.
        output, _ = self.splitForward(x)
        return output

    def singleNodeForward(self, x, nodes):
        # x is of shape B x T x F[0] x N
        batchSize = x.shape[0]
        N = x.shape[3]
        # nodes is either an int, or a list/np.array of ints of size B
        assert type(nodes) is int \
                    or type(nodes) is list \
                    or type(nodes) is np.ndarray
        # Let us start by building the selection matrix
        # This selection matrix has to be a matrix of shape
        #   B x 1 x N[-1] x 1
        # so that when multiplying with the output of the forward, we get a
        #   B x T x dimReadout[-1] x 1
        # and we just squeeze the last dimension
        # TODO: The big question here is if multiplying by a matrix is faster
        # than doing torch.index_select
        # Let's always work with numpy arrays to make it easier.
        if type(nodes) is int:
            # Change the node number to accommodate the new order
            nodes = self.order.index(nodes)
            # If it's int, make it a list and an array
            nodes = np.array([nodes], dtype=int)
            # And repeat for the number of batches
            nodes = np.tile(nodes, batchSize)
        if type(nodes) is list:
            newNodes = [self.order.index(n) for n in nodes]
            nodes = np.array(newNodes, dtype = int)
        elif type(nodes) is np.ndarray:
            newNodes = np.array([np.where(np.array(self.order) == n)[0][0] \
                                    for n in nodes])
            nodes = newNodes.astype(int)
        # Now, nodes is an integer np.ndarray with shape batchSize
        # Build the selection matrix
        selectionMatrix = np.zeros([batchSize, 1, N, 1])
        selectionMatrix[np.arange(batchSize), 0, nodes, 0] = 1.
        # And convert it to a tensor
        selectionMatrix = torch.tensor(selectionMatrix,
                                       dtype = x.dtype,
                                       device = x.device)
        # Now compute the output
        y = self.forward(x)
        # This output is of size B x T x dimReadout[-1] x N
        # Multiply the output
        y = torch.matmul(y, selectionMatrix) # B x T x dimReadout[-1] x 1
        # Squeeze the last dimension and return
        return y.squeeze(3)

    def changeGSO(self, GSO):
        # We use this to change the GSO, using the same graph filters.
        # Check that the new GSO has the correct shape
        assert len(GSO.shape) == 2 or len(GSO.shape) == 3
        if len(GSO.shape) == 2:
            assert GSO.shape[0] == GSO.shape[1]
            GSO = GSO.reshape([1, GSO.shape[0], GSO.shape[1]]) # 1 x N x N
        else:
            assert GSO.shape[1] == GSO.shape[2] # E x N x N
        # Get dataType and device of the current GSO, so when we replace it,
        # it stays with the same data type and on the same device.
        dataType = self.S.dtype
        if 'device' in dir(self.S):
            device = self.S.device
        else:
            device = None
        self.S = GSO
        # Change data type and device as required
        self.S = changeDataType(self.S, dataType)
        if device is not None:
            self.S = self.S.to(device)
        # Add the GSO for each graph filter
        self.hiddenState.addGSO(self.S)
        self.outputState.addGSO(self.S)
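

if __name__ == '__main__':

    # Minimal smoke test: an illustrative sketch, not part of the original
    # library. It assumes the gml layers used above are importable; all the
    # numbers below are arbitrary illustration values.
    _N = 10 # number of nodes
    _S = np.random.rand(_N, _N).astype(np.float32)
    _S = 0.5 * (_S + _S.T) # symmetrize the GSO
    np.fill_diagonal(_S, 0.) # no self-loops
    _grnn = GraphRecurrentNN(dimInputSignals = 3,
                             dimOutputSignals = 4,
                             dimHiddenSignals = 8,
                             nFilterTaps = [3, 3],
                             bias = True,
                             nonlinearityHidden = torch.tanh,
                             nonlinearityOutput = torch.tanh,
                             nonlinearityReadout = nn.ReLU,
                             dimReadout = [2],
                             GSO = _S)
    _x = torch.randn(5, 7, 3, _N) # batchSize x timeSamples x features x nodes
    _y = _grnn(_x)
    print(_y.shape) # expected: torch.Size([5, 7, 2, 10])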
245270
48.201805
88
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/modules/__init__.py
0
0
0
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/modules/evaluation.py
# 2020/02/25~ # Fernando Gama, fgama@seas.upenn.edu # Luana Ruiz, rubruiz@seas.upenn.edu """ evaluation.py Evaluation Module Methods for evaluating the models. evaluate: evaluate a model evaluateSingleNode: evaluate a model that has a single node forward evaluateFlocking: evaluate a model using the flocking cost """ import os import torch import pickle def evaluate(model, data, **kwargs): """ evaluate: evaluate a model using classification error Input: model (model class): class from Modules.model data (data class): a data class from the Utils.dataTools; it needs to have a getSamples method and an evaluate method. doPrint (optional, bool): if True prints results Output: evalVars (dict): 'errorBest' contains the error rate for the best model, and 'errorLast' contains the error rate for the last model """ # Get the device we're working on device = model.device if 'doSaveVars' in kwargs.keys(): doSaveVars = kwargs['doSaveVars'] else: doSaveVars = True ######## # DATA # ######## xTest, yTest = data.getSamples('test') xTest = xTest.to(device) yTest = yTest.to(device) ############## # BEST MODEL # ############## model.load(label = 'Best') with torch.no_grad(): # Process the samples yHatTest = model.archit(xTest) # yHatTest is of shape # testSize x numberOfClasses # We compute the error costBest = data.evaluate(yHatTest, yTest) ############## # LAST MODEL # ############## model.load(label = 'Last') with torch.no_grad(): # Process the samples yHatTest = model.archit(xTest) # yHatTest is of shape # testSize x numberOfClasses # We compute the error costLast = data.evaluate(yHatTest, yTest) evalVars = {} evalVars['costBest'] = costBest.item() evalVars['costLast'] = costLast.item() if doSaveVars: saveDirVars = os.path.join(model.saveDir, 'evalVars') if not os.path.exists(saveDirVars): os.makedirs(saveDirVars) pathToFile = os.path.join(saveDirVars, model.name + 'evalVars.pkl') with open(pathToFile, 'wb') as evalVarsFile: pickle.dump(evalVars, evalVarsFile) return evalVars def evaluateSingleNode(model, data, **kwargs): """ evaluateSingleNode: evaluate a model that has a single node forward Input: model (model class): class from Modules.model, needs to have a 'singleNodeForward' method data (data class): a data class from the Utils.dataTools; it needs to have a getSamples method and an evaluate method and it also needs to have a 'getLabelID' method doPrint (optional, bool): if True prints results Output: evalVars (dict): 'errorBest' contains the error rate for the best model, and 'errorLast' contains the error rate for the last model """ assert 'singleNodeForward' in dir(model.archit) assert 'getLabelID' in dir(data) # Get the device we're working on device = model.device if 'doSaveVars' in kwargs.keys(): doSaveVars = kwargs['doSaveVars'] else: doSaveVars = True ######## # DATA # ######## xTest, yTest = data.getSamples('test') xTest = xTest.to(device) yTest = yTest.to(device) targetIDs = data.getLabelID('test') ############## # BEST MODEL # ############## model.load(label = 'Best') with torch.no_grad(): # Process the samples yHatTest = model.archit.singleNodeForward(xTest, targetIDs) # yHatTest is of shape # testSize x numberOfClasses # We compute the error costBest = data.evaluate(yHatTest, yTest) ############## # LAST MODEL # ############## model.load(label = 'Last') with torch.no_grad(): # Process the samples yHatTest = model.archit.singleNodeForward(xTest, targetIDs) # yHatTest is of shape # testSize x numberOfClasses # We compute the error costLast = data.evaluate(yHatTest, yTest) evalVars = {} 
    evalVars['costBest'] = costBest.item()
    evalVars['costLast'] = costLast.item()

    if doSaveVars:
        saveDirVars = os.path.join(model.saveDir, 'evalVars')
        if not os.path.exists(saveDirVars):
            os.makedirs(saveDirVars)
        pathToFile = os.path.join(saveDirVars, model.name + 'evalVars.pkl')
        with open(pathToFile, 'wb') as evalVarsFile:
            pickle.dump(evalVars, evalVarsFile)

    return evalVars

def evaluateFlocking(model, data, **kwargs):
    """
    evaluateFlocking: evaluate a model using the flocking cost given by the
    velocity variance of the team

    Input:
        model (model class): class from Modules.model
        data (data class): the data class that generates the flocking data
        doPrint (optional; bool, default: True): if True prints results
        nVideos (optional; int, default: 3): number of videos to save
        graphNo (optional): identify the run with a number
        realizationNo (optional): identify the run with another number

    Output:
        evalVars (dict):
            'costBestFull': cost of the best model over the full trajectory
            'costBestEnd': cost of the best model at the end of the trajectory
            'costLastFull': cost of the last model over the full trajectory
            'costLastEnd': cost of the last model at the end of the trajectory
    """

    if 'doPrint' in kwargs.keys():
        doPrint = kwargs['doPrint']
    else:
        doPrint = True

    if 'nVideos' in kwargs.keys():
        nVideos = kwargs['nVideos']
    else:
        nVideos = 3

    if 'graphNo' in kwargs.keys():
        graphNo = kwargs['graphNo']
    else:
        graphNo = -1

    if 'realizationNo' in kwargs.keys():
        if 'graphNo' in kwargs.keys():
            realizationNo = kwargs['realizationNo']
        else:
            # If a realization number is given without a graph number, we
            # interpret it as the graph number
            graphNo = kwargs['realizationNo']
            realizationNo = -1
    else:
        realizationNo = -1

    #\\\\\\\\\\\\\\\\\\\\
    #\\\ TRAJECTORIES \\\
    #\\\\\\\\\\\\\\\\\\\\

    ########
    # DATA #
    ########

    # Initial data
    initPosTest = data.getData('initPos', 'test')
    initVelTest = data.getData('initVel', 'test')

    ##############
    # BEST MODEL #
    ##############

    model.load(label = 'Best')

    if doPrint:
        print("\tComputing learned trajectory for best model...",
              end = ' ', flush = True)

    posTestBest, \
    velTestBest, \
    accelTestBest, \
    stateTestBest, \
    commGraphTestBest = \
        data.computeTrajectory(initPosTest, initVelTest, data.duration,
                               archit = model.archit)

    if doPrint:
        print("OK")

    ##############
    # LAST MODEL #
    ##############

    model.load(label = 'Last')

    if doPrint:
        print("\tComputing learned trajectory for last model...",
              end = ' ', flush = True)

    posTestLast, \
    velTestLast, \
    accelTestLast, \
    stateTestLast, \
    commGraphTestLast = \
        data.computeTrajectory(initPosTest, initVelTest, data.duration,
                               archit = model.archit)

    if doPrint:
        print("OK")

    ###########
    # PREVIEW #
    ###########

    learnedTrajectoriesDir = os.path.join(model.saveDir,
                                          'learnedTrajectories')

    if not os.path.exists(learnedTrajectoriesDir):
        os.mkdir(learnedTrajectoriesDir)

    if graphNo > -1:
        learnedTrajectoriesDir = os.path.join(learnedTrajectoriesDir,
                                              '%03d' % graphNo)
        if not os.path.exists(learnedTrajectoriesDir):
            os.mkdir(learnedTrajectoriesDir)
    if realizationNo > -1:
        learnedTrajectoriesDir = os.path.join(learnedTrajectoriesDir,
                                              '%03d' % realizationNo)
        if not os.path.exists(learnedTrajectoriesDir):
            os.mkdir(learnedTrajectoriesDir)

    learnedTrajectoriesDir = os.path.join(learnedTrajectoriesDir, model.name)
    if not os.path.exists(learnedTrajectoriesDir):
        os.mkdir(learnedTrajectoriesDir)

    if doPrint:
        print("\tPreview data...", end = ' ', flush = True)

    data.saveVideo(os.path.join(learnedTrajectoriesDir,'Best'),
                   posTestBest,
                   nVideos,
                   commGraph = commGraphTestBest,
                   vel = velTestBest,
                   videoSpeed = 0.5,
                   doPrint = False)

    data.saveVideo(os.path.join(learnedTrajectoriesDir,'Last'),
                   posTestLast,
                   nVideos,
                   commGraph
= commGraphTestLast, vel = velTestLast, videoSpeed = 0.5, doPrint = False) if doPrint: print("OK", flush = True) #\\\\\\\\\\\\\\\\\\ #\\\ EVALUATION \\\ #\\\\\\\\\\\\\\\\\\ evalVars = {} evalVars['costBestFull'] = data.evaluate(vel = velTestBest) evalVars['costBestEnd'] = data.evaluate(vel = velTestBest[:,-1:,:,:]) evalVars['costLastFull'] = data.evaluate(vel = velTestLast) evalVars['costLastEnd'] = data.evaluate(vel = velTestLast[:,-1:,:,:]) return evalVars
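

if __name__ == '__main__':

    # Minimal duck-typed smoke test: an illustrative sketch, not part of the
    # original library. _DummyModel and _DummyData are hypothetical stand-ins
    # exposing just enough of the model/data interface for evaluate() to run.
    class _DummyArchit(torch.nn.Module):
        def forward(self, x):
            return x.mean(dim = 1) # collapse the feature dimension

    class _DummyModel:
        def __init__(self):
            self.archit = _DummyArchit()
            self.device = 'cpu'
        def load(self, label = ''):
            pass # a real model would restore the saved parameters here

    class _DummyData:
        def getSamples(self, samplesType):
            return torch.randn(8, 3, 5), torch.randn(8, 5)
        def evaluate(self, yHat, y):
            return torch.mean((yHat - y) ** 2)

    print(evaluate(_DummyModel(), _DummyData(), doSaveVars = False))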
9535
28.073171
80
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/utils/graphML.py
# 2021/03/04~ # Fernando Gama, fgama@seas.upenn.edu. # Luana Ruiz, rubruiz@seas.upenn.edu. # Kate Tolstaya, eig@seas.upenn.edu """ graphML.py Module for basic GSP and graph machine learning functions. Functionals LSIGF: Applies a linear shift-invariant graph filter spectralGF: Applies a linear shift-invariant graph filter in spectral form NVGF: Applies a node-variant graph filter EVGF: Applies an edge-variant graph filter jARMA: Applies an ARMA filter using Jacobi iterations learnAttentionGSO: Computes the GSO following the attention mechanism graphAttention: Applies a graph attention layer graphAttentionLSIGF: Applies a LSIGF over the learned graph graphAttentionEVGF: Applies a EVGF over the learned graph LSIGF_DB: Applies a delayed linear shift-invariant graph filter for batch GSO GRNN_DB: Computes the sequence of hidden states for batch GSO GatedGRNN: Computes the sequence of hidden states for static GSO Filtering Layers (nn.Module) GraphFilter: Creates a graph convolutional layer using LSI graph filters SpectralGF: Creates a graph convolutional layer using LSI graph filters in spectral form NodeVariantGF: Creates a graph filtering layer using node-variant graph filters EdgeVariantGF: Creates a graph filtering layer using edge-variant graph filters GraphFilterARMA: Creates a (linear) layer that applies a ARMA graph filter using Jacobi's method GraphAttentional: Creates a layer using graph attention mechanisms GraphFilterAttentional: Creates a layer using a graph convolution on a GSO learned through attention EdgeVariantAttentional: Creates a layer using an edge variant graph filter parameterized by several attention mechanisms GraphFilter_DB: Creates a graph convolutional layer using LSI graph filters that are applied to a delayed sequence of shift operators HiddenState_DB: Creates the layer for computing the hidden state of a GRNN HiddenState: Creates the layer for computing the hidden state of a GRNN (with static GSO) TimeGatedHiddenState: Creates the layer for computing the time gated hidden state of a GRNN NodeGatedHiddenState: Creates the layer for computing the node gated hidden state of a GRNN EdgeGatedHiddenState: Creates the layer for computing the edge gated hidden state of a GRNN Activation Functions - Nonlinearities (nn.Module) MaxLocalActivation: Creates a localized max activation function layer MedianLocalActivation: Creates a localized median activation function layer NoActivation: Creates a layer for no activation function Summarizing Functions - Pooling (nn.Module) NoPool: No summarizing function. MaxPoolLocal: Max-summarizing function """ import math import numpy as np import torch import torch.nn as nn import alegnnss.utils.graphTools as graphTools zeroTolerance = 1e-9 # Values below this number are considered zero. infiniteNumber = 1e12 # infinity equals this number # WARNING: Only scalar bias. ############################################################################# # # # FUNCTIONALS # # # ############################################################################# def LSIGF(h, S, x, b=None): """ LSIGF(filter_taps, GSO, input, bias=None) Computes the output of a linear shift-invariant graph filter on input and then adds bias. 
Denote as G the number of input features, F the number of output features, E the number of edge features, K the number of filter taps, N the number of nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the input data where x_{g} in R^{N} is the graph signal representing feature g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the bias for feature f. Then, the LSI-GF is computed as y_{f} = \sum_{e=1}^{E} \sum_{k=0}^{K-1} \sum_{g=1}^{G} [h_{f,g,e}]_{k} S_{e}^{k} x_{g} + b_{f} for f = 1, ..., F. Inputs: filter_taps (torch.tensor): array of filter taps; shape: output_features x edge_features x filter_taps x input_features GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes input (torch.tensor): input signal; shape: batch_size x input_features x number_nodes bias (torch.tensor): shape: output_features x number_nodes if the same bias is to be applied to all nodes, set number_nodes = 1 so that b_{f} vector becomes b_{f} \mathbf{1}_{N} Outputs: output: filtered signals; shape: batch_size x output_features x number_nodes """ # The basic idea of what follows is to start reshaping the input and the # GSO so the filter coefficients go just as a very plain and simple # linear operation, so that all the derivatives and stuff on them can be # easily computed. # h is output_features x edge_weights x filter_taps x input_features # S is edge_weighs x number_nodes x number_nodes # x is batch_size x input_features x number_nodes # b is output_features x number_nodes # Output: # y is batch_size x output_features x number_nodes # Get the parameter numbers: F = h.shape[0] E = h.shape[1] K = h.shape[2] G = h.shape[3] assert S.shape[0] == E N = S.shape[1] assert S.shape[2] == N B = x.shape[0] assert x.shape[1] == G assert x.shape[2] == N # Or, in the notation we've been using: # h in F x E x K x G # S in E x N x N # x in B x G x N # b in F x N # y in B x F x N # Now, we have x in B x G x N and S in E x N x N, and we want to come up # with matrix multiplication that yields z = x * S with shape # B x E x K x G x N. # For this, we first add the corresponding dimensions x = x.reshape([B, 1, G, N]) S = S.reshape([1, E, N, N]) z = x.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0 # We need to repeat along the E dimension, because for k=0, S_{e} = I for # all e, and therefore, the same signal values have to be used along all # edge feature dimensions. for k in range(1,K): x = torch.matmul(x, S) # B x E x G x N xS = x.reshape([B, E, 1, G, N]) # B x E x 1 x G x N z = torch.cat((z, xS), dim = 2) # B x E x k x G x N # This output z is of size B x E x K x G x N # Now we have the x*S_{e}^{k} product, and we need to multiply with the # filter taps. # We multiply z on the left, and h on the right, the output is to be # B x N x F (the multiplication is not along the N dimension), so we reshape # z to be B x N x E x K x G and reshape it to B x N x EKG (remember we # always reshape the last dimensions), and then make h be E x K x G x F and # reshape it to EKG x F, and then multiply y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E*K*G]), h.reshape([F, E*K*G]).permute(1, 0)).permute(0, 2, 1) # And permute againt to bring it from B x N x F to B x F x N. 
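    # (Worked shape example, for illustration only: with B = 2, E = 1, K = 3,
    # G = 4, F = 8, N = 10, the loop above stacks z = [x, xS, xS^2] of shape
    # 2 x 1 x 3 x 4 x 10; the permute/reshape gives a 2 x 10 x 12 tensor that
    # is multiplied by the 12 x 8 reshaped filter taps, yielding 2 x 10 x 8,
    # i.e. y in B x N x F before the final permutation back to B x F x N.)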
# Finally, add the bias if b is not None: y = y + b return y def spectralGF(h, V, VH, x, b=None): """ spectralGF(filter_coeff, eigenbasis, eigenbasis_hermitian, input, bias=None) Computes the output of a linear shift-invariant graph filter in spectral form applying filter_coefficients on the graph fourier transform of the input . Denote as G the number of input features, F the number of output features, E the number of edge features, N the number of nodes, S_{e} in R^{N x N} the GSO for edge feature e with S_{e} = V_{e} Lambda_{e} V_{e}^{H} as eigendecomposition, x in R^{G x N} the input data where x_{g} in R^{N} is the graph signal representing feature g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the bias for feature f. Then, the LSI-GF in spectral form is computed as y_{f} = \sum_{e=1}^{E} \sum_{g=1}^{G} V_{e} diag(h_{f,g,e}) V_{e}^{H} x_{g} + b_{f} for f = 1, ..., F, with h_{f,g,e} in R^{N} the filter coefficients for output feature f, input feature g and edge feature e. Inputs: filter_coeff (torch.tensor): array of filter coefficients; shape: output_features x edge_features x input_features x number_nodes eigenbasis (torch.tensor): eigenbasis of the graph shift operator;shape: edge_features x number_nodes x number_nodes eigenbasis_hermitian (torch.tensor): hermitian of the eigenbasis; shape: edge_features x number_nodes x number_nodes input (torch.tensor): input signal; shape: batch_size x input_features x number_nodes bias (torch.tensor): shape: output_features x number_nodes if the same bias is to be applied to all nodes, set number_nodes = 1 so that b_{f} vector becomes b_{f} \mathbf{1}_{N} Outputs: output: filtered signals; shape: batch_size x output_features x number_nodes Obs.: While we consider most GSOs to be normal (so that the eigenbasis is an orthonormal basis), this function would also work if V^{-1} is used as input instead of V^{H} """ # The decision to input both V and V_H is to avoid any time spent in # permuting/inverting the matrix. Because this depends on the graph and not # the data, it can be done faster if we just input it. # h is output_features x edge_weights x input_features x number_nodes # V is edge_weighs x number_nodes x number_nodes # VH is edge_weighs x number_nodes x number_nodes # x is batch_size x input_features x number_nodes # b is output_features x number_nodes # Output: # y is batch_size x output_features x number_nodes # Get the parameter numbers: F = h.shape[0] E = h.shape[1] G = h.shape[2] N = h.shape[3] assert V.shape[0] == VH.shape[0] == E assert V.shape[1] == VH.shape[1] == V.shape[2] == VH.shape[2] == N B = x.shape[0] assert x.shape[1] == G assert x.shape[2] == N # Or, in the notation I've been using: # h in F x E x G x N # V in E x N x N # VH in E x N x N # x in B x G x N # b in F x N # y in B x F x N # We will do proper matrix multiplication in this case (algebraic # multiplication using column vectors instead of CS notation using row # vectors). # We will multiply separate VH with x, and V with diag(h). # First, to multiply VH with x, we need to add one dimension for each one # of them (dimension E for x and dimension B for VH) x = x.reshape([B, 1, G, N]).permute(0, 1, 3, 2) # B x 1 x N x G VH = VH.reshape([1, E, N, N]) # 1 x E x N x N # Now we multiply. Note that we also permute to make it B x E x G x N # instead of B x E x N x G because we want to multiply for a specific e and # g, there we do not want to sum (yet) over G. 
VHx = torch.matmul(VH, x).permute(0, 1, 3, 2) # B x E x G x N # Now we want to multiply V * diag(h), both are matrices. So first, we # add the necessary dimensions (B and G for V and an extra N for h to make # it a matrix from a vector) V = V.reshape([1, E, 1, N, N]) # 1 x E x 1 x N x N # We note that multiplying by a diagonal matrix to the right is equivalent # to an elementwise multiplication in which each column is multiplied by # a different number, so we will do this to make it faster (elementwise # multiplication is faster than matrix multiplication). We need to repeat # the vector we have columnwise. diagh = h.reshape([F, E, G, 1, N]).repeat(1, 1, 1, N, 1) # F x E x G x N x N # And now we do elementwise multiplication Vdiagh = V * diagh # F x E x G x N x N # Finally, we make the multiplication of these two matrices. First, we add # the corresponding dimensions Vdiagh = Vdiagh.reshape([1, F, E, G, N, N]) # 1 x F x E x G x N x N VHx = VHx.reshape([B, 1, E, G, N, 1]) # B x 1 x E x G x N x 1 # And do matrix multiplication to get all the corresponding B,F,E,G vectors VdiaghVHx = torch.matmul(Vdiagh, VHx) # B x F x E x G x N x 1 # Get rid of the last dimension which we do not need anymore y = VdiaghVHx.squeeze(5) # B x F x E x G x N # Sum over G y = torch.sum(y, dim = 3) # B x F x E x N # Sum over E y = torch.sum(y, dim = 2) # B x F x N # Finally, add the bias if b is not None: y = y + b return y def NVGF(h, S, x, b=None): """ NVGF(filter_taps, GSO, input, bias=None) Computes the output of a node-variant graph filter on input and then adds bias. Denote as G the number of input features, F the number of output features, E the number of edge features, K the number of shifts, N the number of nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the input data where x_{g} in R^{N} is the graph signal representing feature g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the bias for feature f. Denote as h_{k}^{efg} in R^{N} the vector with the N filter taps corresponding to the efg filter for shift k. Then, the NV-GF is computed as y_{f} = \sum_{e=1}^{E} \sum_{k=0}^{K-1} \sum_{g=1}^{G} diag(h_{k}^{efg}) S_{e}^{k} x_{g} + b_{f} for f = 1, ..., F. 
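    In other words, unlike the shift-invariant filter above, each node keeps
    its own set of K filter taps, which it applies to the successively
    shifted versions of the input signal.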
Inputs: filter_taps (torch.tensor): array of filter taps; shape: output_features x edge_features x filter_taps x input_features x number_nodes GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes input (torch.tensor): input signal; shape: batch_size x input_features x number_nodes bias (torch.tensor): shape: output_features x number_nodes if the same bias is to be applied to all nodes, set number_nodes = 1 so that b_{f} vector becomes b_{f} \mathbf{1}_{N} Outputs: output: filtered signals; shape: batch_size x output_features x number_nodes """ # h is output_features x edge_weights x filter_taps x input_features # x number_nodes # S is edge_weighs x number_nodes x number_nodes # x is batch_size x input_features x number_nodes # b is output_features x number_nodes # Output: # y is batch_size x output_features x number_nodes # Get the parameter numbers: F = h.shape[0] E = h.shape[1] K = h.shape[2] G = h.shape[3] N = h.shape[4] assert S.shape[0] == E assert S.shape[1] == S.shape[2] == N B = x.shape[0] assert x.shape[1] == G assert x.shape[2] == N # Or, in the notation I've been using: # h in F x E x K x G x N # S in E x N x N # x in B x G x N # b in F x N # y in B x F x N # Now, we have x in B x G x N and S in E x N x N, and we want to come up # with matrix multiplication that yields z = x * S with shape # B x E x K x G x N. # For this, we first add the corresponding dimensions xr = x.reshape([B, 1, G, N]) Sr = S.reshape([1, E, N, N]) z = xr.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0 # We need to repeat along the E dimension, because for k=0, S_{e} = I for # all e, and therefore, the same signal values have to be used along all # edge feature dimensions. for k in range(1,K): xr = torch.matmul(xr, Sr) # B x E x G x N xS = xr.reshape([B, E, 1, G, N]) # B x E x 1 x G x N z = torch.cat((z, xS), dim = 2) # B x E x k x G x N # This output z is of size B x E x K x G x N # Now we have the x*S_{e}^{k} product, and we need to multiply with the # filter taps. # This multiplication with filter taps is ``element wise'' on N since for # each node we have a different element # First, add the extra dimension (F for z, and B for h) z = z.reshape([B, 1, E, K, G, N]) h = h.reshape([1, F, E, K, G, N]) # Now let's do elementwise multiplication zh = z * h # And sum over the dimensions E, K, G to get B x F x N y = torch.sum(zh, dim = 4) # Sum over G y = torch.sum(y, dim = 3) # Sum over K y = torch.sum(y, dim = 2) # Sum over E # Finally, add the bias if b is not None: y = y + b return y def EVGF(S, x, b=None): """ EVGF(filter_matrices, input, bias=None) Computes the output of an edge-variant graph filter on input and then adds bias. Denote as G the number of input features, F the number of output features, E the number of edge features, K the number of shifts, N the number of nodes, Phi_{efg} in R^{N x N} the filter matrix for edge feature e, output feature f and input feature g (recall that Phi_{efg}^{k} has the same sparsity pattern as the graph, except for Phi_{efg}^{0} which is expected to be a diagonal matrix), x in R^{G x N} the input data where x_{g} in R^{N} is the graph signal representing feature g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the bias for feature f. Then, the EV-GF is computed as y_{f} = \sum_{e=1}^{E} \sum_{k=0}^{K-1} \sum_{g=1}^{G} Phi_{efg}^{k:0} x_{g} + b_{f} for f = 1, ..., F, with Phi_{efg}^{k:0} = Phi_{efg}^{k} Phi_{efg}^{k-1} ... Phi_{efg}^{0}. 
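    (As a worked instance of the product convention: for K = 2, the inner sum
    for a given e and g reduces to
    Phi_{efg}^{0} x_{g} + Phi_{efg}^{1} Phi_{efg}^{0} x_{g}.)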
Inputs: filter_matrices (torch.tensor): array of filter matrices; shape: output_features x edge_features x filter_taps x input_features x number_nodes x number_nodes input (torch.tensor): input signal; shape: batch_size x input_features x number_nodes bias (torch.tensor): shape: output_features x number_nodes if the same bias is to be applied to all nodes, set number_nodes = 1 so that b_{f} vector becomes b_{f} \mathbf{1}_{N} Outputs: output: filtered signals; shape: batch_size x output_features x number_nodes """ # We just need to multiply by the filter_matrix recursively, and then # add for all E, G, and K features. # S is output_features x edge_features x filter_taps x input_features # x number_nodes x number_nodes # x is batch_size x input_features x number_nodes # b is output_features x number_nodes # Output: # y is batch_size x output_features x number_nodes # Get the parameter numbers: F = S.shape[0] E = S.shape[1] K = S.shape[2] G = S.shape[3] N = S.shape[4] assert S.shape[5] == N B = x.shape[0] assert x.shape[1] == G assert x.shape[2] == N # Or, in the notation I've been using: # S in F x E x K x G x N x N # x in B x G x N # b in F x N # y in B x F x N # We will be doing matrix multiplications in the algebraic way, trying to # multiply the N x N matrix corresponding to the appropriate e, f, k and g # dimensions, with the respective x vector (N x 1 column vector) # For this, we first add the corresponding dimensions (for x we add # dimensions F, E and the last dimension for column vector) x = x.reshape([B, 1, 1, G, N, 1]) # When we do index_select along dimension K we get rid of this dimension Sk = torch.index_select(S, 2, torch.tensor(0).to(S.device)).squeeze(2) # Sk in F x E x G x N x N # And we add one further dimension for the batch size B Sk = Sk.unsqueeze(0) # 1 x F x E x G x N x N # Matrix multiplication x = torch.matmul(Sk, x) # B x F x E x G x N x 1 # And we collect this for every k in a vector z, along the K dimension z = x.reshape([B, F, E, 1, G, N, 1]).squeeze(6) # B x F x E x 1 x G x N # Now we do all the matrix multiplication for k in range(1,K): # Extract the following k Sk = torch.index_select(S, 2, torch.tensor(k).to(S.device)).squeeze(2) # Sk in F x E x G x N x N # Give space for the batch dimension B Sk = Sk.unsqueeze(0) # 1 x F x E x G x N x N # Multiply with the previously cumulative Sk * x x = torch.matmul(Sk, x) # B x F x E x G x N x 1 # Get rid of the last dimension (of a column vector) Sx = x.reshape([B, F, E, 1, G, N, 1]).squeeze(6) # B x F x E x 1 x G x N # Add to the z z = torch.cat((z, Sx), dim = 2) # B x F x E x k x G x N # Sum over G z = torch.sum(z, dim = 4) # Sum over K z = torch.sum(z, dim = 3) # Sum over E y = torch.sum(z, dim = 2) if b is not None: y = y + b return y def jARMA(psi, varphi, phi, S, x, b=None, tMax = 5): """ jARMA(inverse_taps, direct_taps, filter_taps, GSO, input, bias = None, tMax = 5) Computes the output of an ARMA filter using Jacobi iterations. The output of an ARMA computed by means of tMax Jacobi iterations is given as follows y^{f} = \sum_{e=1}^{E} \sum_{g=1}^{G} \sum_{p=0}^{P-1} H_{p}^{1}(S) (\bar{S}_{p}^{fge})^{-1} x + H_{p}^{2}(S) x + H^{3}(S) x where E is the total number of edge features, G is the total number of input features, and P is the order of the denominator polynomial. 
    The filters are
        H_{p}^{1}(S) = \sum_{tau=0}^{t} (-1)^{tau} varphi_{p}^{fge}
                                    ((\bar{S}_{p}^{fge})^{-1} \tilde{S})^{tau}
        H_{p}^{2}(S) = (-1)^{t+1} ((\bar{S}_{p}^{fge})^{-1} \tilde{S})^{t+1}
        H^{3}(S) = \sum_{k=0}^{K-1} phi_{k}^{fge} S^{k}
    where varphi_{p}^{fge} are the direct filter taps of the order-one
    rational ARMA filter, phi_{k}^{fge} are the filter taps of the residue
    LSIGF filter, and the GSOs used derive from the GSO S and are
        \bar{S}_{p}^{fge} = Diag(S) - psi_{p}^{fge} I_{N}
        \tilde{S} = DiagOff(S)
    with psi_{p}^{fge} the inverse filter taps of the order-one rational
    ARMA filter.

    Inputs:
        inverse_taps (torch.tensor): array of filter taps psi_{p}^{fge};
            shape: out_features x edge_features x denominator_order
                                                                x in_features
        direct_taps (torch.tensor): array of taps varphi_{p}^{fge};
            shape: out_features x edge_features x denominator_order
                                                                x in_features
        filter_taps (torch.tensor): array of filter taps phi_{p}^{fge};
            shape: out_features x edge_features x residue_order x in_features
        GSO (torch.tensor): graph shift operator; shape:
            edge_features x number_nodes x number_nodes
        input (torch.tensor): input signal; shape:
            batch_size x input_features x number_nodes
        bias (torch.tensor): shape: output_features x number_nodes
            if the same bias is to be applied to all nodes, set
            number_nodes = 1 so that b_{f} vector becomes
            b_{f} \mathbf{1}_{N} (default: None)
        tMax (int): value of t for computing the Jacobi approximation
            (default: 5)

    Outputs:
        output: filtered signals; shape:
            batch_size x output_features x number_nodes
    """
    # The inputs are:
    #   psi in F x E x P x G (inverse coefficient in order-one rational)
    #   varphi in F x E x P x G (direct coefficient in order-one rational)
    #   phi in F x E x K x G (direct filter coefficients)
    #   x in B x G x N
    #   S in E x N x N
    F = psi.shape[0] # out_features
    E = psi.shape[1] # edge_features
    P = psi.shape[2] # inverse polynomial order
    G = psi.shape[3] # in_features
    assert varphi.shape[0] == F
    assert varphi.shape[1] == E
    assert varphi.shape[2] == P
    assert varphi.shape[3] == G
    assert phi.shape[0] == F
    assert phi.shape[1] == E
    assert phi.shape[3] == G
    B = x.shape[0] # batch_size
    assert x.shape[1] == G
    N = x.shape[2] # number_nodes
    assert S.shape[0] == E
    assert S.shape[1] == S.shape[2] == N
    # First, let's build Stilde and Sbar
    Stilde = torch.empty(0).to(S.device) # Will be of shape E x N x N
    DiagS = torch.empty(0).to(S.device) # Will be of shape E x N x N
    for e in range(E):
        thisS = torch.index_select(S,0,torch.tensor(e).to(S.device)).squeeze(0)
        thisDiagS = torch.diag(torch.diag(thisS))
        DiagOffS = (thisS - thisDiagS).unsqueeze(0) # E x N x N
        Stilde = torch.cat((Stilde, DiagOffS), dim = 0)
        DiagS = torch.cat((DiagS, thisDiagS.unsqueeze(0)), dim = 0)
    I = torch.eye(N).reshape([1, 1, 1, 1, N, N]).to(S.device) # (FxExPxGxNxN)
    psiI = psi.reshape([F, E, P, G, 1, 1]) * I
    DiagS = DiagS.reshape([1, E, 1, 1, N, N])
    Sbar = DiagS - psiI # F x E x P x G x N x N
    # Now, invert Sbar, which doesn't depend on t either, and multiply it by x
    # Obs.: We cannot just do 1/Sbar, because all the zero (off-diagonal)
    # elements will give inf, ruining everything.
So we will force the off-diagonal elements # to be one, and then get rid of them offDiagonalOnes = (torch.ones(N,N) - torch.eye(N)).to(Sbar.device) SbarInv = 1/(Sbar + offDiagonalOnes) # F x E x P x G x N x N SbarInv = SbarInv * torch.eye(N).to(Sbar.device) SbarInvX = torch.matmul(SbarInv.reshape([1, F, E, P, G, N, N]), x.reshape([B, 1, 1, 1, G, N, 1])).squeeze(6) # B x F x E x P x G x N # And also multiply SbarInv with Stilde which is also used in H1 and H2 SbarInvStilde = torch.matmul(SbarInv, Stilde.reshape([1, E, 1, 1, N, N])) # B x F x E x P x G x N x N # Next, filtering through H^{3}(S) also doesn't depend on t or p, so H3x = LSIGF(phi, S, x) # Last, build the output from combining all filters H1, H2 and H3 # Compute H1 SbarInvX z = SbarInvX.reshape([B, F, E, 1, P, G, N]) y = x.reshape([B, 1, 1, 1, G, N, 1]) # (B x F x E x P x G x N x 1) x1 = SbarInvX.unsqueeze(6) # B x F x E x P x G x N x 1 # (B x F x E x tau x P x G x N) for tau in range(1,tMax+1): x1 = torch.matmul(SbarInvStilde.unsqueeze(0),# 1 x F x E x P x G x N x N x1) # B x F x E x P x G x N x 1 z = torch.cat((z, x1.squeeze(6).unsqueeze(3)), dim = 3) # B x F x E x tau x P x G x N y = torch.matmul(SbarInvStilde.unsqueeze(0), # 1 x F x E x P x G x N x N y) # B x F x E x P x G x N x 1 thisCoeffs = torch.tensor((-1.) ** np.arange(0,tMax+1)).to(x.device) thisCoeffs = thisCoeffs.reshape([1, 1, 1, tMax+1, 1, 1]) * \ varphi.reshape([1, F, E, 1, P, G])\ .repeat(1, 1, 1, tMax+1, 1, 1) # 1 x F x E x (tMax+1) x P x G thisCoeffs = thisCoeffs.permute(0, 4, 1, 2, 3, 5) # 1 x P x F x E x (tMax+1) x G z = z.permute(0, 4, 1, 6, 2, 3, 5) # B x P x F x N x E x (tMax+1) x G thisCoeffs = thisCoeffs.reshape([1, P, F, E*(tMax+1)*G]).unsqueeze(4) # 1 x P x F x E(tMax+1)G x 1 z = z.reshape(B, P, F, N, E*(tMax+1)*G) # B x P x F x N x E*(tMax+1)*G H1x = torch.matmul(z, thisCoeffs).squeeze(4) # B x P x F x N # Now, to compute H2x we need y, but y went only up to value tMax, and # we need to go to tMax+1, so we need to multiply it once more y = torch.matmul(SbarInvStilde.unsqueeze(0), y).squeeze(6) # B x F x E x P x G x N H2x = -y if np.mod(tMax,2) == 0 else y H2x = torch.sum(H2x, dim = 4) # sum over G, shape: B x F x E x P x N H2x = torch.sum(H2x, dim = 2) # sume over E, shape: B x F x P x N H2x = H2x.permute(0, 2, 1, 3) # B x P x F x N # Finally, we add up H1x and H2x and sum over all p, and add to H3 to # update u u = torch.sum(H1x + H2x, dim = 1) + H3x if b is not None: u = u+b return u def learnAttentionGSO(x, a, W, S, negative_slope=0.2): """ learnAttentionGSO(x, a, W, S) Computes the GSO following the attention mechanism Denote as G the number of input features, F the number of output features, E the number of edge features, P the number of attention heads, Ji the number of nodes in N_{i}, the neighborhood of node i, and N the number of nodes. Let x_{i} in R^{G} be the feature associated to node i, W^{ep} in R^{F x G} the weight marix associated to edge feature e and attention head p, and a^{ep} in R^{2F} the mixing vector. Let alpha_{ij}^{ep} in R the attention coefficient between nodes i and j, for edge feature e and attention head p, and let s_{ij}^{e} be the value of feature e of the edge connecting nodes i and j. Each elements of the new GSO is alpha_{ij}^{ep} computed as alpha_{ij}^{ep} = softmax_{j} ( LeakyReLU_{beta} ( (a^{ep})^T [cat(W^{ep}x_{i}, W^{ep} x_{j})] )) for all j in N_{i}, and where beta is the negative slope of the leaky ReLU. 
Inputs: x (torch.tensor): input; shape: batch_size x input_features x number_nodes a (torch.tensor): mixing parameter; shape: number_heads x edge_features x 2 * output_features W (torch.tensor): linear parameter; shape: number_heads x edge_features x output_features x input_features S (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes negative_slope (float): negative slope of the leaky relu (default: 0.2) Outputs: aij: output GSO; shape: batch_size x number_heads x edge_features x number_nodes x number_nodes """ B = x.shape[0] # batch_size G = x.shape[1] # input_features N = x.shape[2] # number_nodes P = a.shape[0] # number_heads E = a.shape[1] # edge_features assert W.shape[0] == P assert W.shape[1] == E F = W.shape[2] # output_features assert a.shape[2] == int(2*F) G = W.shape[3] # input_features assert S.shape[0] == E assert S.shape[1] == S.shape[2] == N # Add ones of the GSO at all edge feature levels so that the node always # has access to itself. The fact that it's one is not so relevant, because # the attention coefficient that is learned would compensate for this S = S + torch.eye(N).reshape([1,N,N]).repeat(E,1,1).to(S.device) # WARNING: # (If the GSOs already have self-connections, then these will be added a 1, # which might be a problem if the self-connection is a -1. I will have to # think of this more carefully) # W is of size P x E x F x G # a is of size P x E x 2F # Compute Wx for all nodes x = x.reshape([B, 1, 1, G, N]) W = W.reshape([1, P, E, F, G]) Wx = torch.matmul(W, x) # B x P x E x F x N # Now, do a_1^T Wx, and a_2^T Wx to get a tensor of shape B x P x E x 1 x N # because we're applying the inner product on the F dimension. a1 = torch.index_select(a, 2, torch.arange(F).to(x.device)) # K x E x F a2 = torch.index_select(a, 2, torch.arange(F, 2*F).to(x.device)) # K x E x F a1Wx = torch.matmul(a1.reshape([1, P, E, 1, F]), Wx) # B x P x E x 1 x N a2Wx = torch.matmul(a2.reshape([1, P, E, 1, F]), Wx) # B x P x E x 1 x N # And then, use this to sum them accordingly and create a B x P x E x N x N # matrix. aWx = a1Wx + a2Wx.permute(0, 1, 2, 4, 3) # B x P x E x N x N # Obs.: In this case, we have one column vector and one row vector; then, # what the sum does, is to repeat the column and the row, respectively, # until both matrices are of the same size, and then adds up, which is # precisely what we want to do # Apply the LeakyRelu eij = nn.functional.leaky_relu(aWx, negative_slope = negative_slope) # B x P x E x N x N # Each element of this N x N matrix is, precisely, e_ij (eq. 1) in the GAT # paper. # And apply the softmax. For the softmax, we do not want to consider # the places where there are no neighbors, so we need to set them to -infty # so that they will be assigned a zero. # First, get places where we have edges maskEdges = torch.sum(torch.abs(S.data), dim = 0) # Make it a binary matrix maskEdges = (maskEdges > zeroTolerance).type(x.dtype) # Make it -infinity where there are zeros infinityMask = (1-maskEdges) * infiniteNumber # Compute the softmax plus the -infinity (we first force the places where # there is no edge to be zero, and then we add -infinity to them) aij = nn.functional.softmax(eij*maskEdges - infinityMask, dim = 4) # B x P x E x N x N # This will give me a matrix of all the alpha_ij coefficients. 
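    # (Illustration: if node i only has neighbors {j, k}, then every other
    # entry of row i of eij is shifted by -infiniteNumber before the softmax,
    # so those attention weights underflow to zero and row i of aij sums to
    # one over the positions {i, j, k} alone; i is included because of the
    # identity added to S above.)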
def graphAttention(x, a, W, S, negative_slope=0.2):
    """
    graphAttention(x, a, W, S) Computes attention following a GAT layer,
        taking into account multiple edge features.

    Denote as G the number of input features, F the number of output
    features, E the number of edge features, P the number of attention heads,
    Ji the number of nodes in N_{i}, the neighborhood of node i, and N the
    number of nodes. Let x_{i} in R^{G} be the feature associated to node i,
    W^{ep} in R^{F x G} the weight matrix associated to edge feature e and
    attention head p, and a^{ep} in R^{2F} the mixing vector. Let
    alpha_{ij}^{ep} in R be the attention coefficient between nodes i and j,
    for edge feature e and attention head p, and let s_{ij}^{e} be the value
    of feature e of the edge connecting nodes i and j. Let y_{i}^{p} in R^{F}
    be the output of the graph attention at node i for attention head p. It
    is computed as

        y_{i}^{p} = \sum_{e=1}^{E} \sum_{j in N_{i}}
                        s_{ij}^{e} alpha_{ij}^{ep} W^{ep} x_{j}

    with

        alpha_{ij}^{ep} = softmax_{j} ( LeakyReLU_{beta} (
                (a^{ep})^T [cat(W^{ep}x_{i}, W^{ep} x_{j})]
        ))

    for all j in N_{i}, and where beta is the negative slope of the leaky
    ReLU.

    Inputs:
        x (torch.tensor): input; shape: batch_size x input_features
            x number_nodes
        a (torch.tensor): mixing parameter; shape:
            number_heads x edge_features x 2 * output_features
        W (torch.tensor): linear parameter; shape:
            number_heads x edge_features x output_features x input_features
        S (torch.tensor): graph shift operator; shape:
            edge_features x number_nodes x number_nodes
        negative_slope (float): negative slope of the leaky relu
            (default: 0.2)

    Outputs:
        y: output; shape: batch_size x number_heads x output_features
            x number_nodes
    """
    B = x.shape[0]  # batch_size
    G = x.shape[1]  # input_features
    N = x.shape[2]  # number_nodes
    P = a.shape[0]  # number_heads
    E = a.shape[1]  # edge_features
    assert W.shape[0] == P
    assert W.shape[1] == E
    F = W.shape[2]  # output_features
    assert a.shape[2] == int(2*F)
    G = W.shape[3]  # input_features
    assert S.shape[0] == E
    assert S.shape[1] == S.shape[2] == N
    # First, we need to learn the attention GSO
    aij = learnAttentionGSO(x, a, W, S, negative_slope = negative_slope)
    # B x P x E x N x N
    # Then, we need to compute the high-level features
    # W is of size P x E x F x G
    # a is of size P x E x 2F
    # Compute Wx for all nodes
    x = x.reshape([B, 1, 1, G, N])
    W = W.reshape([1, P, E, F, G])
    Wx = torch.matmul(W, x)  # B x P x E x F x N
    # Finally, we just need to apply the learned GSO to the Wx we have
    # already computed, and we are done
    y = torch.matmul(Wx, S.reshape([1, 1, E, N, N]) * aij)
    # B x P x E x F x N
    # And sum over all edges
    return torch.sum(y, dim = 2)  # B x P x F x N
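
# --- Illustrative usage sketch (not part of the original module) ---
# graphAttention returns one output per attention head; a common GAT-style
# choice (an assumption here, not prescribed by this module) is to average or
# concatenate the heads afterwards.
def _demoGraphAttention():
    B, G, N, P, E, F = 2, 3, 5, 4, 1, 7
    x = torch.randn(B, G, N)
    a = torch.randn(P, E, 2*F)
    W = torch.randn(P, E, F, G)
    S = (torch.rand(E, N, N) > 0.5).float()
    y = graphAttention(x, a, W, S)  # B x P x F x N
    yAvg = torch.mean(y, dim = 1)   # B x F x N, averaging the P heads
    return yAvg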
def graphAttentionLSIGF(h, x, a, W, S, b=None, negative_slope=0.2):
    """
    graphAttentionLSIGF(h, x, a, W, S) Computes a graph convolution (LSIGF)
        over a graph shift operator learned through the attention mechanism

    Inputs:
        h (torch.tensor): array of filter taps; shape:
            edge_features x filter_taps
        x (torch.tensor): input; shape: batch_size x input_features
            x number_nodes
        a (torch.tensor): mixing parameter; shape:
            number_heads x edge_features x 2 * out_features
        W (torch.tensor): linear parameter; shape:
            number_heads x edge_features x out_features x input_features
        S (torch.tensor): graph shift operator; shape:
            edge_features x number_nodes x number_nodes
        bias (torch.tensor): shape: output_features x number_nodes
        negative_slope (float): negative slope of the leaky relu
            (default: 0.2)

    Outputs:
        y: output; shape: batch_size x number_heads x output_features
            x number_nodes
    """
    E = h.shape[0]  # edge_features
    K = h.shape[1]  # filter_taps
    B = x.shape[0]  # batch_size
    G = x.shape[1]  # input_features
    N = x.shape[2]  # number_nodes
    P = a.shape[0]  # number_heads
    E = a.shape[1]  # edge_features
    assert W.shape[0] == P
    assert W.shape[1] == E
    F = W.shape[2]  # out_features
    assert W.shape[3] == G
    assert a.shape[2] == int(2*F)
    assert S.shape[0] == E
    assert S.shape[1] == S.shape[2] == N
    # First, we need to learn the attention GSO
    aij = learnAttentionGSO(x, a, W, S, negative_slope = negative_slope)
    # B x P x E x N x N
    # And now we need to compute an LSIGF with this learned GSO, but the
    # filter taps of the LSIGF are a combination of h (along K) and W (along
    # F and G). So, we have
    #   h in E x K
    #   W in P x E x F x G
    # The combined filter taps will thus have shape
    #   h in P x F x E x K x G
    h = h.reshape([1, 1, E, K, 1])  # (P x F x E x K x G)
    W = W.permute(0, 2, 1, 3)  # P x F x E x G
    W = W.reshape([P, F, E, 1, G])  # (P x F x E x K x G)
    h = h * W  # P x F x E x K x G (broadcasting along the singleton
    # dimensions)
    x = x.reshape([B, 1, 1, G, N])  # (B x P x E x G x N)
    # The easiest would be to use the LSIGF function, but that takes as input
    # a B x F x N tensor, and while we could join together B and P into a
    # single dimension, we would still be unable to handle the E features
    # this way. So we basically need to copy the code from LSIGF, accounting
    # for the matrix multiplications with multiple edge features as Wx has
    z = x.reshape([B, 1, 1, 1, G, N]).repeat(1, P, E, 1, 1, 1)
    # add the k=0 dimension (B x P x E x K x G x N)
    # And now do the repeated multiplication with S
    for k in range(1, K):
        x = torch.matmul(x, aij)  # B x P x E x G x N
        xAij = x.reshape([B, P, E, 1, G, N])  # add the k dimension
        z = torch.cat((z, xAij), dim = 3)  # B x P x E x k x G x N
    # This output z is of shape B x P x E x K x G x N and represents the
    # product x * aij_{e}^{k} (i.e. the multiplication between x and the kth
    # power of the learned GSO).
    # Now, we need to multiply this by the filter coefficients
    #   Convert h from P x F x E x K x G to 1 x P x EKG x F to multiply from
    #   the right
    h = h.reshape([1, P, F, E*K*G])  # (B x P x F x (EKG))
    h = h.permute(0, 1, 3, 2)  # (B x P x EKG x F)
    #   And z from B x P x E x K x G x N to B x P x N x EKG to multiply from
    #   the left
    z = z.permute(0, 1, 5, 2, 3, 4).reshape([B, P, N, E*K*G])
    # And multiply
    y = torch.matmul(z, h)  # B x P x N x F
    y = y.permute(0, 1, 3, 2)  # The output needs to be B x P x F x N
    # Finally, add the bias
    if b is not None:
        y = y + b
    return y
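
# --- Illustrative usage sketch (not part of the original module) ---
# graphAttentionLSIGF runs a K-tap graph convolution over the attention GSO;
# h holds the taps per edge feature. All names below are local to the
# example.
def _demoGraphAttentionLSIGF():
    B, G, N, P, E, F, K = 2, 3, 5, 4, 1, 7, 3
    h = torch.randn(E, K)                    # filter taps
    x = torch.randn(B, G, N)
    a = torch.randn(P, E, 2*F)
    W = torch.randn(P, E, F, G)
    S = (torch.rand(E, N, N) > 0.5).float()
    y = graphAttentionLSIGF(h, x, a, W, S)
    return y  # B x P x F x N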
def graphAttentionEVGF(x, a, W, S, b=None, negative_slope=0.2):
    """
    graphAttentionEVGF(x, a, W, S) Computes an edge varying graph filter
        (EVGF) where each EVGF is learned by an attention mechanism

    Inputs:
        x (torch.tensor): input; shape: batch_size x input_features
            x number_nodes
        a (torch.tensor): mixing parameter; shape:
            number_heads x filter_taps x edge_features x 2 * out_features
        W (torch.tensor): linear parameter; shape:
            number_heads x filter_taps x edge_features x out_features
            x input_features
        S (torch.tensor): graph shift operator; shape:
            edge_features x number_nodes x number_nodes
        bias (torch.tensor): shape: output_features x number_nodes
        negative_slope (float): negative slope of the leaky relu
            (default: 0.2)

    Outputs:
        y: output; shape: batch_size x number_heads x output_features
            x number_nodes
    """
    B = x.shape[0]  # batch_size
    G = x.shape[1]  # input_features
    N = x.shape[2]  # number_nodes
    P = a.shape[0]  # number_heads
    K = a.shape[1]  # filter_taps
    E = a.shape[2]  # edge_features
    assert W.shape[0] == P
    assert W.shape[1] == K
    assert W.shape[2] == E
    F = W.shape[3]  # output_features
    assert W.shape[4] == G
    assert a.shape[3] == int(2*F)
    assert S.shape[0] == E
    assert S.shape[1] == S.shape[2] == N
    # First, we need to compute the high-level features
    # W is of size P x K x E x F x G
    # a is of size P x K x E x 2F
    # To compute Wx, we need the first element (K = 0)
    W0 = torch.index_select(W, 1, torch.tensor(0).to(S.device)).squeeze(1)
    # P x E x F x G
    W0 = W0.reshape([1, P, E, F, G])
    W0x = torch.matmul(W0, x.reshape([B, 1, 1, G, N]))  # B x P x E x F x N
    # Now we proceed to learn the rest of the EVGF.
    # That first filter coefficient (for the one-hop neighborhood) is learned
    # from the first element along the K dimension (dim = 1)
    thisa = torch.index_select(a, 1, torch.tensor(0).to(S.device)).squeeze(1)
    thisW = torch.index_select(W, 1, torch.tensor(0).to(S.device)).squeeze(1)
    aij = learnAttentionGSO(x, thisa, thisW, S,
                            negative_slope = negative_slope)
    # B x P x E x N x N (represents k = 0, 1)
    W0x = torch.matmul(W0x, S.reshape([1, 1, E, N, N]) * aij)
    # B x P x E x F x N
    y = W0x  # This is the first multiplication between Wx and Aij,
    # corresponding to the first-hop neighborhood
    # Now, we move on to the rest of the coefficients
    for k in range(1, K):
        thisa = torch.index_select(a, 1,
                                   torch.tensor(k).to(S.device)).squeeze(1)
        thisW = torch.index_select(W, 1,
                                   torch.tensor(k).to(S.device)).squeeze(1)
        aij = learnAttentionGSO(x, thisa, thisW, S,
                                negative_slope = negative_slope)
        W0x = torch.matmul(W0x, S.reshape([1, 1, E, N, N]) * aij)
        # This multiplies the previous W0x Aij^{1:k-1} with A_ij^{(k)}
        y = y + W0x  # Adds that multiplication to the running sum for all
        # other ks, shape: B x P x E x F x N
    # Sum over all edge features
    y = torch.sum(y, dim = 2)  # B x P x F x N
    # Finally, add the bias
    if b is not None:
        y = y + b
    return y
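
# --- Illustrative usage sketch (not part of the original module) ---
# graphAttentionEVGF learns one attention GSO per tap k, so a and W carry an
# extra filter_taps dimension compared to graphAttention.
def _demoGraphAttentionEVGF():
    B, G, N, P, K, E, F = 2, 3, 5, 4, 3, 1, 7
    x = torch.randn(B, G, N)
    a = torch.randn(P, K, E, 2*F)
    W = torch.randn(P, K, E, F, G)
    S = (torch.rand(E, N, N) > 0.5).float()
    y = graphAttentionEVGF(x, a, W, S)
    return y  # B x P x F x N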

#############################################################################
#                                                                           #
#                 FUNCTIONALS (Batch, and time-varying)                     #
#                                                                           #
#############################################################################

def LSIGF_DB(h, S, x, b=None):
    """
    LSIGF_DB(filter_taps, GSO, input, bias=None) Computes the output of a
        linear shift-invariant graph filter (graph convolution) on delayed
        input and then adds bias.

    Denote as G the number of input features, F the number of output
    features, E the number of edge features, K the number of filter taps,
    N the number of nodes, S_{e}(t) in R^{N x N} the GSO for edge feature e
    at time t, x(t) in R^{G x N} the input data at time t where x_{g}(t) in
    R^{N} is the graph signal representing feature g, and b in R^{F x N} the
    bias vector, with b_{f} in R^{N} representing the bias for feature f.

    Then, the LSI-GF is computed as
        y_{f} = \sum_{e=1}^{E} \sum_{k=0}^{K-1} \sum_{g=1}^{G}
                    [h_{f,g,e}]_{k} S_{e}(t) S_{e}(t-1) ... S_{e}(t-(k-1))
                        x_{g}(t-k)
                + b_{f}
    for f = 1, ..., F.

    Inputs:
        filter_taps (torch.tensor): array of filter taps; shape:
            output_features x edge_features x filter_taps x input_features
        GSO (torch.tensor): graph shift operator; shape:
            batch_size x time_samples x edge_features
            x number_nodes x number_nodes
        input (torch.tensor): input signal; shape:
            batch_size x time_samples x input_features x number_nodes
        bias (torch.tensor): shape: output_features x number_nodes
            if the same bias is to be applied to all nodes, set
            number_nodes = 1 so that the b_{f} vector becomes
            b_{f} \mathbf{1}_{N}

    Outputs:
        output: filtered signals; shape:
            batch_size x time_samples x output_features x number_nodes
    """
    # This is the LSIGF with (B)atch and (D)elay capabilities (i.e. there is
    # a different GSO for each element in the batch, and it handles time
    # sequences, both in the GSO and in the input signal; the GSO should be
    # transparent).
    # So, the input is
    #   h: F x E x K x G
    #   S: B x T x E x N x N
    #   x: B x T x G x N
    #   b: F x N
    # And the output has to be
    #   y: B x T x F x N
    # Check dimensions
    assert len(h.shape) == 4
    F = h.shape[0]
    E = h.shape[1]
    K = h.shape[2]
    G = h.shape[3]
    assert len(S.shape) == 5
    B = S.shape[0]
    T = S.shape[1]
    assert S.shape[2] == E
    N = S.shape[3]
    assert S.shape[4] == N
    assert len(x.shape) == 4
    assert x.shape[0] == B
    assert x.shape[1] == T
    assert x.shape[2] == G
    assert x.shape[3] == N
    # We would like a z of shape B x T x K x E x G x N that represents, for
    # each t: x_t, S_t x_{t-1}, S_t S_{t-1} x_{t-2}, ...,
    #   S_{t} ... S_{t-(k-1)} x_{t-k}, ..., S_{t} S_{t-1} ... x_{t-(K-1)}
    # But we don't want to do "for each t". We just want to do "for each k".
    # Let's start by reshaping x so it can be multiplied by S
    x = x.reshape([B, T, 1, G, N]).repeat(1, 1, E, 1, 1)
    # Now, for the first value of k, we just have the same signal
    z = x.reshape([B, T, 1, E, G, N])  # For k = 0, k is counted in dim = 2
    # Now we need to start multiplying with S, displacing the entire thing
    # once across time at each step
    for k in range(1, K):
        # Across dim = 1 we need to "displace the dimension down", i.e. where
        # it used to be t = 1 we now need it to be t = 0, and so on. For
        # t = 0 we add a "row" of zeros.
        x, _ = torch.split(x, [T-1, 1], dim = 1)
        # The second part is the most recent time instant, which we do not
        # need anymore (it's used only once, for the first value of k)
        # Now, we need to add a "row" of zeros at the beginning (for t = 0)
        zeroRow = torch.zeros(B, 1, E, G, N, dtype=x.dtype, device=x.device)
        x = torch.cat((zeroRow, x), dim = 1)
        # And now we multiply with S
        x = torch.matmul(x, S)
        # Add the dimension along K
        xS = x.reshape(B, T, 1, E, G, N)
        # And concatenate it with z
        z = torch.cat((z, xS), dim = 2)
    # Now we finally made it to a tensor z of shape B x T x K x E x G x N.
    # To multiply with the filter taps, we need to swap the sizes and reshape
    z = z.permute(0, 1, 5, 3, 2, 4)  # B x T x N x E x K x G
    z = z.reshape(B, T, N, E*K*G)
    # And the same with the filter taps
    h = h.reshape(F, E*K*G)
    h = h.permute(1, 0)  # E*K*G x F
    # Multiply
    y = torch.matmul(z, h)  # B x T x N x F
    # And permute
    y = y.permute(0, 1, 3, 2)  # B x T x F x N
    # Finally, add the bias
    if b is not None:
        y = y + b
    return y
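
# --- Illustrative usage sketch (not part of the original module) ---
# LSIGF_DB applies the taps h to a time sequence, composing the (possibly
# time-varying) GSOs across delays, as in the docstring above.
def _demoLSIGF_DB():
    F, E, K, G, N, B, T = 4, 1, 3, 2, 6, 5, 7
    h = torch.randn(F, E, K, G)    # filter taps
    S = torch.rand(B, T, E, N, N)  # one GSO per batch element and time step
    x = torch.randn(B, T, G, N)
    y = LSIGF_DB(h, S, x)
    return y  # B x T x F x N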
def GRNN_DB(a, b, S, x, z0, sigma, xBias=None, zBias=None):
    """
    GRNN_DB(signal_to_hidden_taps, hidden_to_hidden_taps, GSO, input,
            initial_hidden, nonlinearity, signal_bias, hidden_bias)

    Computes the sequence of hidden states for the input sequence x following
    the equation
        z_{t} = sigma(A(S) x_{t} + B(S) z_{t-1})
    with initial state z0, and where sigma is the nonlinearity, and A(S) and
    B(S) are the Input-to-Hidden and Hidden-to-Hidden filters with the
    corresponding taps.

    Inputs:
        signal_to_hidden_taps (torch.tensor): shape
            hidden_features x edge_features x filter_taps x signal_features
        hidden_to_hidden_taps (torch.tensor): shape
            hidden_features x edge_features x filter_taps x hidden_features
        GSO (torch.tensor): shape
            batch_size x time x edge_features x number_nodes x number_nodes
        input (torch.tensor): shape
            batch_size x time x signal_features x number_nodes
        initial_hidden: shape batch_size x hidden_features x number_nodes
        signal_bias (torch.tensor): shape 1 x 1 x hidden_features x 1
        hidden_bias (torch.tensor): shape 1 x 1 x hidden_features x 1

    Outputs:
        hidden_state: shape batch_size x time x hidden_features x number_nodes
    """
    # We will compute the hidden state for delayed and batch data.
    # So, the input is
    #   a: H x E x K x F (Input to Hidden filters)
    #   b: H x E x K x H (Hidden to Hidden filters)
    #   S: B x T x E x N x N (GSO)
    #   x: B x T x F x N (Input signal)
    #   z0: B x H x N (Initial state)
    #   xBias: 1 x 1 x H x 1 (bias on the Input to Hidden features)
    #   zBias: 1 x 1 x H x 1 (bias on the Hidden to Hidden features)
    # And the output has to be
    #   z: B x T x H x N (Hidden state signal)
    # Check dimensions
    H = a.shape[0]  # Number of hidden state features
    E = a.shape[1]  # Number of edge features
    K = a.shape[2]  # Number of filter taps
    F = a.shape[3]  # Number of input features
    assert b.shape[0] == H
    assert b.shape[1] == E
    assert b.shape[2] == K
    assert b.shape[3] == H
    B = S.shape[0]
    T = S.shape[1]
    assert S.shape[2] == E
    N = S.shape[3]
    assert S.shape[4] == N
    assert x.shape[0] == B
    assert x.shape[1] == T
    assert x.shape[2] == F
    assert x.shape[3] == N
    assert z0.shape[0] == B
    assert z0.shape[1] == H
    assert z0.shape[2] == N
    # The application of A(S) x(t) doesn't change (it does not depend on
    # z(t))
    Ax = LSIGF_DB(a, S, x, b = xBias)  # B x T x H x N
    # This is the filtered signal for all time instants. This also doesn't
    # split S, it only splits x.
    # We will always need the b parameters in this shape
    b = b.unsqueeze(0).reshape(1, H, E*K*H)  # 1 x H x EKH
    # so that we can multiply them with the product Sz, which should be of
    # shape B x EKH x N
    # We will also need a selection matrix that selects the first K-1
    # elements out of the original K (to avoid torch.split and
    # torch.index_select with more than one index)
    CK = torch.eye(K-1, device = S.device)  # (K-1) x (K-1)
    zeroRow = torch.zeros((1, K-1), device = CK.device)
    CK = torch.cat((CK, zeroRow), dim = 0)  # K x (K-1)
    # This matrix discards the last column when multiplying on the left
    CK = CK.reshape(1, 1, 1, K, K-1)  # 1(B) x 1(E) x 1(H) x K x (K-1)

    #\\\ Now compute the first time instant
    # We just need to multiply z0 = z(-1) by b(0) to get z(0)
    # Create the zeros that will multiply the values of b(1), b(2), ...,
    # b(K-1), since we only need b(0)
    zerosK = torch.zeros((B, K-1, H, N), device = z0.device)
    # Concatenate them after z
    zK = torch.cat((z0.unsqueeze(1), zerosK), dim = 1)  # B x K x H x N
    # Now we have a signal that has only the z(-1) and the rest are zeros, so
    # now we can go ahead and multiply it by b. For this to happen, we need
    # to reshape it as B x EKH x N, but since we are always reshaping the
    # last dimensions, we will bring EKH to the end, reshape, and then put
    # them back in the middle.
    zK = zK.reshape(B, 1, K, H, N).repeat(1, E, 1, 1, 1)  # B x E x K x H x N
    zK = zK.permute(0, 4, 1, 2, 3).reshape(B, N, E*K*H).permute(0, 2, 1)
    # B x EKH x N
    # Finally, we can go ahead and multiply with b
    zt = torch.matmul(b, zK)  # B x H x N
    # Now that we have b(0) z(0), we can add the bias, if necessary
    if zBias is not None:
        zt = zt + zBias
    # And we need to add it to a(0)x(0), which is the first element of Ax in
    # the T dimension.
    # Let's do a torch.index_select; not so sure a selection matrix isn't
    # better
    a0x0 = torch.index_select(Ax, 1,
                              torch.tensor(0, device = Ax.device))\
                                  .reshape(B, H, N)  # B x H x N
    # Recall that a0x0 already has the bias, so now we just need to add up
    # and apply the nonlinearity
    zt = sigma(a0x0 + zt)  # B x H x N
    z = zt.unsqueeze(1)  # B x 1 x H x N
    zt = zt.unsqueeze(1)  # B x 1 x H x N
    # This is where we will keep track of the product Sz
    Sz = z0.reshape(B, 1, 1, H, N).repeat(1, 1, E, 1, 1)  # B x 1 x E x H x N
    # Starting now, we need to multiply this by S every time
    for t in range(1, T):
        if t < K:
            # Get the current time instant
            St = torch.index_select(S, 1, torch.tensor(t, device = S.device))
            # B x 1 x E x N x N
            # We need to multiply this time instant by all the elements in
            # Sz now, and there are t of those
            St = St.repeat(1, t, 1, 1, 1)  # B x t x E x N x N
            # Multiply by the newly acquired St to do one more delay
            Sz = torch.matmul(Sz, St)  # B x t x E x H x N
            # Observe that these delays are backward: the last element in the
            # T dimension (dim = 1) is the latest element; this makes sense
            # since that is the element we want to multiply by the last
            # element in b.
            # Now that we have delayed, add the newest value (which requires
            # no delay)
            ztThis = zt.unsqueeze(2).repeat(1, 1, E, 1, 1)
            # B x 1 x E x H x N
            Sz = torch.cat((ztThis, Sz), dim = 1)  # B x (t+1) x E x H x N
            # Pad all those values that are not there yet (they will multiply
            # b by zero)
            zeroRow = torch.zeros((B, K-(t+1), E, H, N), device = Sz.device)
            SzPad = torch.cat((Sz, zeroRow), dim = 1)  # B x K x E x H x N
            # Reshape and permute to adapt to multiplication with b (happens
            # outside the if)
            bSz = SzPad.permute(0, 4, 2, 1, 3).reshape(B, N, E*K*H)
        else:
            # Now we have t >= K, which means that Sz is of shape
            #   B x K x E x H x N
            # and thus is full, so we need to get rid of the last element in
            # Sz before adding the new element and multiplying by St.
            # We can always get rid of the last element by multiplying by a
            # K x (K-1) selection matrix. So we do that (first we need to
            # permute to have the dimensions ready for multiplication)
            Sz = Sz.permute(0, 2, 3, 4, 1)  # B x E x H x N x K
            Sz = torch.matmul(Sz, CK)  # B x E x H x N x (K-1)
            Sz = Sz.permute(0, 4, 1, 2, 3)  # B x (K-1) x E x H x N
            # Get the current time instant
            St = torch.index_select(S, 1, torch.tensor(t, device = S.device))
            # B x 1 x E x N x N
            # We need to multiply this time instant by all the elements in
            # Sz now, and there are K-1 of those
            St = St.repeat(1, K-1, 1, 1, 1)  # B x (K-1) x E x N x N
            # Multiply by the newly acquired St to do one more delay
            Sz = torch.matmul(Sz, St)  # B x (K-1) x E x H x N
            # Now that we have delayed, add the newest value (which requires
            # no delay)
            ztThis = zt.unsqueeze(2).repeat(1, 1, E, 1, 1)
            # B x 1 x E x H x N
            Sz = torch.cat((ztThis, Sz), dim = 1)  # B x K x E x H x N
            # Reshape and permute to adapt to multiplication with b (happens
            # outside the if)
            bSz = Sz.permute(0, 4, 2, 1, 3).reshape(B, N, E*K*H)
        # Get back to the proper order
        bSz = bSz.permute(0, 2, 1)  # B x EKH x N
        # And multiply with the coefficients
        Bzt = torch.matmul(b, bSz)  # B x H x N
        # Now that we have the Bz for this time instant, add the bias
        if zBias is not None:
            Bzt = Bzt + zBias
        # Get the corresponding value of Ax
        Axt = torch.index_select(Ax, 1, torch.tensor(t, device = Ax.device))
        Axt = Axt.reshape(B, H, N)
        # Sum and apply the nonlinearity
        zt = sigma(Axt + Bzt).unsqueeze(1)  # B x 1 x H x N
        z = torch.cat((z, zt), dim = 1)  # B x (t+1) x H x N
    return z  # B x T x H x N
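
# --- Illustrative usage sketch (not part of the original module) ---
# Running GRNN_DB with a time-varying batch of GSOs and a tanh nonlinearity.
def _demoGRNN_DB():
    H, E, K, F, N, B, T = 4, 1, 3, 2, 6, 5, 7
    a = torch.randn(H, E, K, F)    # input-to-hidden taps
    b = torch.randn(H, E, K, H)    # hidden-to-hidden taps
    S = torch.rand(B, T, E, N, N)  # one GSO per batch element and time step
    x = torch.randn(B, T, F, N)
    z0 = torch.zeros(B, H, N)      # initial hidden state
    z = GRNN_DB(a, b, S, x, z0, torch.tanh)
    return z  # B x T x H x N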
def GatedGRNN(a, b, S, x, z0, sigma,
              q_hat = torch.ones(1), q_check = torch.ones(1),
              xBias=None, zBias=None):
    # Luana R. Ruiz, rubruiz@seas.upenn.edu, 2021/01/28
    """
    GatedGRNN(signal_to_hidden_taps, hidden_to_hidden_taps, GSO, input,
              initial_hidden, nonlinearity, input_gate, forget_gate,
              signal_bias, hidden_bias)

    Computes the sequence of hidden states for the input sequence x following
    the equation
        z_{t} = sigma(Q_hat{A(S) x_{t}} + Q_check{B(S) z_{t-1}})
    with initial state z0, and where sigma is the nonlinearity, A(S) and B(S)
    are the Input-to-Hidden and Hidden-to-Hidden filters with the
    corresponding taps, Q_hat is the input gate operator and Q_check the
    forget gate operator.

    Inputs:
        signal_to_hidden_taps (torch.tensor): shape
            hidden_features x edge_features x filter_taps x signal_features
        hidden_to_hidden_taps (torch.tensor): shape
            hidden_features x edge_features x filter_taps x hidden_features
        GSO (torch.tensor): shape
            edge_features x number_nodes x number_nodes
        input (torch.tensor): shape
            batch_size x time x signal_features x number_nodes
        initial_hidden: shape batch_size x hidden_features x number_nodes
        input_gate: shape depends on the type of gating
            > no gating: torch.ones(1)
            > time gating: batch_size x time x 1 x 1
            > node gating: batch_size x time x 1 x number_nodes
            > edge gating: batch_size x time x 1 x number_nodes
                x number_nodes
        forget_gate: shape depends on the type of gating
            > no gating: torch.ones(1)
            > time gating: batch_size x time x 1 x 1
            > node gating: batch_size x time x 1 x number_nodes
            > edge gating: batch_size x time x 1 x number_nodes
                x number_nodes
        signal_bias (torch.tensor): shape 1 x 1 x hidden_features x 1
        hidden_bias (torch.tensor): shape 1 x 1 x hidden_features x 1

    Outputs:
        hidden_state: shape batch_size x time x hidden_features x number_nodes
    """
    # We will compute the hidden state for batch data.
    # So, the input is
    #   a: H x E x K x F (Input to Hidden filters)
    #   b: H x E x K x H (Hidden to Hidden filters)
    #   S: E x N x N (GSO)
    #   x: B x T x F x N (Input signal)
    #   z0: B x H x N (Initial state)
    #   xBias: 1 x 1 x H x 1 (bias on the Input to Hidden features)
    #   zBias: 1 x 1 x H x 1 (bias on the Hidden to Hidden features)
    # And the output has to be
    #   z: B x T x H x N (Hidden state signal)
    # q_hat and q_check depend on the type of gating
    # Check dimensions
    H = a.shape[0]  # Number of hidden state features
    E = a.shape[1]  # Number of edge features
    K = a.shape[2]  # Number of filter taps
    F = a.shape[3]  # Number of input features
    assert b.shape[0] == H
    assert b.shape[1] == E
    assert b.shape[2] == K
    assert b.shape[3] == H
    assert S.shape[0] == E
    N = S.shape[1]
    assert S.shape[2] == N
    B = x.shape[0]
    T = x.shape[1]
    assert x.shape[2] == F
    assert x.shape[3] == N
    assert z0.shape[0] == B
    assert z0.shape[1] == H
    assert z0.shape[2] == N
    assert q_hat.shape[0] == B or q_hat.shape[0] == 1
    if len(q_hat.shape) > 1:
        assert q_hat.shape[1] == T
        assert q_hat.shape[2] == 1
        assert q_hat.shape[3] == 1 or q_hat.shape[3] == N
        if len(q_hat.shape) > 4:
            assert q_hat.shape[4] == N
    assert q_check.shape[0] == B or q_check.shape[0] == 1
    if len(q_check.shape) > 1:
        assert q_check.shape[1] == T
        assert q_check.shape[2] == 1
        assert q_check.shape[3] == 1 or q_check.shape[3] == N
        if len(q_check.shape) > 4:
            assert q_check.shape[4] == N
    # Checking if there is bias
    if xBias is not None:
        xBias = xBias.reshape(1, H, 1)
    if zBias is not None:
        zBias = zBias.reshape(1, H, 1)
    # We start by handling the input-to-state transformation Ax
    # First, we have to check if we are NOT doing edge gating
    if len(q_hat.shape) < 5:
        # The application of A(S) x(t) doesn't change (it does not depend on
        # z(t))
        Ax = LSIGF(a, S, x.reshape((B*T, F, N)), b = xBias)  # BT x H x N
        # We merge the batch and time dimensions of x to apply the linear
        # shift-invariant graph filter. Then, we re-add the time dimension
        Ax = Ax.reshape((B, T, H, N))
        # This is the filtered signal for all time instants. Finally, we
        # "gate" it
        Ax = q_hat * Ax
    else:
        # If we have edge gating, we have to add a batch and a time dimension
        # to S
        edgeS = S.unsqueeze(0)  # 1 x E x N x N
        edgeS = edgeS.repeat(B*T, 1, 1, 1)  # BT x E x N x N
        # The first step is to gate the GSO
        edgeS = q_hat.reshape([B*T, E, N, N]) * edgeS
        # Then we reshape x to multiply each batch and sequence element by
        # the corresponding gated GSO...
        x = x.reshape([B*T, F, N])
        x = x.unsqueeze(1)  # B*T x 1 x F x N
        # ... and follow a similar filtering procedure as in the LSI-GF.
        # u is the tensor used to store S^0 x, S^1 x, ..., S^{K-1} x
        u = x.reshape([B*T, 1, 1, F, N]).repeat(1, E, 1, 1, 1)
        for k in range(1, K):
            x = x.reshape((B*T, F, N))
            x = torch.matmul(x, edgeS)  # BT x BT x E x F x N
            x = x.reshape([B*T, B*T, E, F, N])  # BT x BT x E x F x N
            # We only care about the elements for which the batch-time
            # indices of x match the batch-time indices of S, therefore we
            # take the diagonal along dimensions 0 and 1
            x = torch.diagonal(x)  # E x F x N x BT
            x = x.permute(3, 0, 1, 2)  # BT x E x F x N
            xS = x.reshape([B*T, E, 1, F, N])  # BT x E x 1 x F x N
            u = torch.cat((u, xS), dim = 2)  # BT x E x k x F x N
        # This output u is of size BT x E x K x F x N
        # Now we have the x * S_{e}^{k} product, and we need to multiply it
        # with the filter taps.
        # We multiply u on the left and a on the right; the output is to be
        # B x N x H (the multiplication is not along the N dimension), so we
        # permute u to be B x N x E x K x F and reshape it to B x N x EKF
        # (remember we always reshape the last dimensions), then make a be
        # E x K x F x H, reshape it to EKF x H, and multiply
        Ax = torch.matmul(u.permute(0, 4, 1, 2, 3).reshape([B*T, N, E*K*F]),
                          a.reshape([H, E*K*F]).permute(1, 0))\
                              .permute(0, 2, 1)
        # And permute again to bring it from B x N x H to B x H x N.
        # Finally, add the bias
        if xBias is not None:
            Ax = Ax + xBias
        # We have merged the batch and time dimensions of x and S to apply
        # the linear shift-invariant graph filter. Now, we re-add the time
        # dimension
        Ax = Ax.reshape((B, T, H, N))
        # This is the filtered signal for all time instants

    # The second step is to handle the state-to-state transformation Bz
    # Assign the first state as the initial state
    zt = z0
    # Calculate the hidden states for t = 1, ..., T
    for t in range(1, T+1):
        # First, we have to check if we are NOT doing edge gating
        if len(q_check.shape) < 5:
            # We apply the filter B(S) to the hidden state at time t-1
            Bzt = LSIGF(b, S, zt.reshape((B, H, N)), b = zBias)  # B x H x N
            # Then, if there is a gate, we select the gate corresponding to
            # instant t...
            if len(q_check.shape) > 1:
                this_q_check = torch.index_select(
                        q_check, 1,
                        torch.tensor(t-1, device = q_check.device))
                this_q_check = this_q_check.squeeze(1)
                # B x 1 x (1 for time) or (N for node)
            # ... and if there is no gate, this gate is simply equal to the
            # default gate
            else:
                this_q_check = q_check
            # and apply it
            Bzt = this_q_check * Bzt
        else:
            # If we have edge gating, we first have to select the gate
            # corresponding to instant t
            this_q_check = torch.index_select(
                    q_check, 1, torch.tensor(t-1, device = q_check.device))
            this_q_check = this_q_check.squeeze(1)  # B x 1 x N x N
            # We also have to add a batch dimension to S
            edgeS = S.unsqueeze(0)  # 1 x E x N x N
            edgeS = edgeS.repeat(B, 1, 1, 1)  # B x E x N x N
            # Then we're ready to gate the GSO, ...
            edgeS = this_q_check * edgeS
            # ... and follow a similar filtering procedure as in the LSI-GF
            zt = zt.reshape([B, H, N])
            zt = zt.unsqueeze(1)  # B x 1 x H x N
            # u is the tensor used to store S^0 zt, S^1 zt, ..., S^{K-1} zt
            u = zt.reshape([B, 1, 1, H, N]).repeat(1, E, 1, 1, 1)
            for k in range(1, K):
                zt = zt.reshape((B, H, N))
                zt = torch.matmul(zt, edgeS)
                zt = zt.reshape([B, B, E, H, N])  # B x B x E x H x N
                # We only care about the elements for which the batch indices
                # of zt match the batch indices of S, therefore we take the
                # diagonal along dimensions 0 and 1
                zt = torch.diagonal(zt)  # E x H x N x B
                zt = zt.permute(3, 0, 1, 2)  # B x E x H x N
                ztS = zt.reshape([B, E, 1, H, N])  # B x E x 1 x H x N
                u = torch.cat((u, ztS), dim = 2)  # B x E x k x H x N
            # This output u is of size B x E x K x H x N
            # Now we have the zt * S_{e}^{k} product, and we need to multiply
            # it with the filter taps.
            # We multiply u on the left and b on the right; the output is to
            # be B x N x H (the multiplication is not along the N dimension),
            # so we permute u to be B x N x E x K x H and reshape it to
            # B x N x EKH (remember we always reshape the last dimensions),
            # then make b be E x K x H x H, reshape it to EKH x H, and
            # multiply
            Bzt = torch.matmul(
                    u.permute(0, 4, 1, 2, 3).reshape([B, N, E*K*H]),
                    b.reshape([H, E*K*H]).permute(1, 0)).permute(0, 2, 1)
            # And permute again to bring it from B x N x H to B x H x N.
            # Finally, add the bias
            if zBias is not None:
                Bzt = Bzt + zBias
        # Now we are able to compute the current state from Ax and Bzt
        # Get the corresponding value of Ax
        Axt = torch.index_select(Ax, 1,
                                 torch.tensor(t-1, device = Ax.device))
        Axt = Axt.reshape(B, H, N)
        # Sum and apply the nonlinearity
        zt = sigma(Axt + Bzt).unsqueeze(1)  # B x 1 x H x N
        if t == 1:
            z = zt  # initialize the hidden state tensor
        else:
            z = torch.cat((z, zt), dim = 1)  # B x t x H x N
    return z  # B x T x H x N
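
# --- Illustrative usage sketch (not part of the original module) ---
# Node gating for GatedGRNN: the gates carry a singleton feature dimension,
# and passing torch.ones(1) for both gates recovers the ungated GRNN.
def _demoGatedGRNN():
    H, E, K, F, N, B, T = 4, 1, 3, 2, 6, 5, 7
    a = torch.randn(H, E, K, F)
    b = torch.randn(H, E, K, H)
    S = torch.rand(E, N, N)
    x = torch.randn(B, T, F, N)
    z0 = torch.zeros(B, H, N)
    qHat = torch.sigmoid(torch.randn(B, T, 1, N))    # node input gate
    qCheck = torch.sigmoid(torch.randn(B, T, 1, N))  # node forget gate
    z = GatedGRNN(a, b, S, x, z0, torch.tanh, qHat, qCheck)
    return z  # B x T x H x N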

#############################################################################
#                                                                           #
#                   LAYERS (Activation Functions)                           #
#                                                                           #
#############################################################################

class MaxLocalActivation(nn.Module):
    # Luana R. Ruiz, rubruiz@seas.upenn.edu, 2019/03/15
    """
    MaxLocalActivation creates a localized activation function layer on
    graphs

    Initialization:

        MaxLocalActivation(K)

        Inputs:
            K (int): number of hops (>0)

        Output:
            torch.nn.Module for a localized max activation function layer

    Add graph shift operator:

        MaxLocalActivation.addGSO(GSO) Before applying the layer, we need to
        define the GSO that we are going to use. This allows to change the
        GSO while using the same filtering coefficients (as long as the
        number of edge features is the same; but the number of nodes can
        change).

        Inputs:
            GSO (torch.tensor): graph shift operator; shape:
                edge_features x number_nodes x number_nodes

    Forward call:

        y = MaxLocalActivation(x)

        Inputs:
            x (torch.tensor): input data; shape:
                batch_size x dim_features x number_nodes

        Outputs:
            y (torch.tensor): activated data; shape:
                batch_size x dim_features x number_nodes
    """

    def __init__(self, K):
        super().__init__()
        assert K > 0  # range has to be greater than 0
        self.K = K
        self.S = None  # no GSO assigned yet
        self.N = None  # no GSO assigned yet (N learned from the GSO)
        self.neighborhood = 'None'  # no neighborhoods calculated yet
        # Create parameters:
        self.weight = nn.parameter.Parameter(torch.Tensor(1, self.K+1))
        # Initialize parameters
        self.reset_parameters()

    def addGSO(self, S):
        # Every S has 3 dimensions.
        assert len(S.shape) == 3
        # S is of shape E x N x N
        self.N = S.shape[1]
        assert S.shape[2] == self.N
        self.S = S
        # Change tensor S to numpy now that we have saved it as a tensor in
        # self.S
        S = S.cpu().numpy()
        # The neighborhood matrix has to be a tensor of shape
        #   nOutputNodes x maxNeighborhoodSize
        neighborhood = []
        maxNeighborhoodSizes = []
        for k in range(1, self.K+1):
            # For each hop (1, ..., K) in the range K, compute the k-hop
            # neighborhood
            thisNeighborhood = graphTools.computeNeighborhood(
                    S, k, outputType='matrix')
            neighborhood.append(torch.tensor(thisNeighborhood)
                                .to(self.S.device))
            maxNeighborhoodSizes.append(thisNeighborhood.shape[1])
        self.maxNeighborhoodSizes = maxNeighborhoodSizes
        self.neighborhood = neighborhood

    def forward(self, x):
        # x should be of shape batchSize x dimNodeSignals x N
        batchSize = x.shape[0]
        dimNodeSignals = x.shape[1]
        assert x.shape[2] == self.N
        # Given that self.neighborhood is already a torch.tensor matrix, we
        # can just go ahead and use it.
        # So, x is of shape B x F x N. But we need it to be of shape
        # B x F x N x maxNeighbor, because we need to compute the maximum
        # between the value of each node and those of its neighbors; we do
        # this by applying torch.max across dim = 3, so that we end up again
        # with a B x F x N tensor, but having computed the max.
        # How to fill those extra dimensions? Well, what we have is the
        # neighborhood matrix, and we are going to use torch.gather to bring
        # in the right values (torch.index_select, while more
        # straightforward, only works along a single dimension).
        # Each row of the neighborhood matrix determines all the neighbors of
        # each node: the first row contains all the neighbors of the first
        # node, etc., and the values of the signal at those nodes are
        # contained in dim = 2 of x. Ignoring the batch and feature
        # dimensions for a moment: if the first row of the neighborhood
        # matrix is [1, 2, 0], it means we want to pick the value at row 1 of
        # x in the first column, at row 2 of x in the next column, and at
        # row 0 of x in the last column. For these values to be the
        # appropriate ones, we have to repeat x as columns to build our
        # B x F x N x maxNeighbor matrix.
        xK = x  # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
        # max's; it is initialized with the 0-hop neighborhood (x itself)
        xK = xK.unsqueeze(3)  # extra dimension added for concatenation ahead
        x = x.unsqueeze(3)  # B x F x N x 1
        # And the neighbors that we need to gather are the same across the
        # batch and feature dimensions, so we need to repeat the matrix along
        # those dimensions
        for k in range(1, self.K+1):
            x_aux = x.repeat([1, 1, 1, self.maxNeighborhoodSizes[k-1]])
            gatherNeighbor = self.neighborhood[k-1].reshape(
                    [1, 1, self.N, self.maxNeighborhoodSizes[k-1]])
            gatherNeighbor = gatherNeighbor.repeat([batchSize,
                                                    dimNodeSignals, 1, 1])
            # And finally we're in position of getting all the neighbors in
            # line
            xNeighbors = torch.gather(x_aux, 2,
                                      gatherNeighbor.long().to(x.device))
            # B x F x nOutput x maxNeighbor
            # Note that this gather function already reduces the dimension to
            # nOutputNodes.
            # And proceed to compute the maximum along this dimension
            v, _ = torch.max(xNeighbors, dim = 3)
            v = v.unsqueeze(3)  # to concatenate with xK
            xK = torch.cat((xK, v), 3)
        out = torch.matmul(xK, self.weight.unsqueeze(2))
        # multiply each k-hop max by the corresponding weight
        out = out.reshape([batchSize, dimNodeSignals, self.N])
        return out

    def reset_parameters(self):
        # Taken from _ConvNd initialization of parameters:
        stdv = 1. / math.sqrt(self.K)
        self.weight.data.uniform_(-stdv, stdv)

    def extra_repr(self):
        if self.neighborhood is not None:
            reprString = "neighborhood stored"
        else:
            reprString = "NO neighborhood stored"
        return reprString
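
# --- Illustrative usage sketch (not part of the original module) ---
# The localized activations need the GSO (to precompute neighborhoods via
# graphTools, assumed imported at the top of this module) before the first
# forward call; self-loops are added here to guarantee nonempty
# neighborhoods.
def _demoMaxLocalActivation():
    N, B, Fdim, K = 8, 4, 3, 2
    act = MaxLocalActivation(K)
    S = (torch.rand(1, N, N) > 0.5).float()  # E = 1
    S = S + torch.eye(N).reshape(1, N, N)    # add self-loops
    act.addGSO(S)
    x = torch.randn(B, Fdim, N)
    return act(x)  # B x Fdim x N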
class MedianLocalActivation(nn.Module):
    # Luana R. Ruiz, rubruiz@seas.upenn.edu, 2019/03/27
    """
    MedianLocalActivation creates a localized activation function layer on
    graphs

    Initialization:

        MedianLocalActivation(K)

        Inputs:
            K (int): number of hops (>0)

        Output:
            torch.nn.Module for a localized median activation function layer

    Add graph shift operator:

        MedianLocalActivation.addGSO(GSO) Before applying the layer, we need
        to define the GSO that we are going to use. This allows to change the
        GSO while using the same filtering coefficients (as long as the
        number of edge features is the same; but the number of nodes can
        change). This function also calculates the 0-, 1-, ..., K-hop
        neighborhoods of every node.

        Inputs:
            GSO (torch.tensor): graph shift operator; shape:
                edge_features x number_nodes x number_nodes

    Forward call:

        y = MedianLocalActivation(x)

        Inputs:
            x (torch.tensor): input data; shape:
                batch_size x dim_features x number_nodes

        Outputs:
            y (torch.tensor): activated data; shape:
                batch_size x dim_features x number_nodes
    """

    def __init__(self, K):
        super().__init__()
        assert K > 0  # range has to be greater than 0
        self.K = K
        self.S = None  # no GSO assigned yet
        self.N = None  # no GSO assigned yet (N learned from the GSO)
        self.neighborhood = 'None'  # no neighborhoods calculated yet
        self.masks = 'None'  # no mask yet
        # Create parameters:
        self.weight = nn.parameter.Parameter(torch.Tensor(1, self.K+1))
        # Initialize parameters
        self.reset_parameters()

    def addGSO(self, S):
        # Every S has 3 dimensions.
        assert len(S.shape) == 3
        # S is of shape E x N x N
        self.N = S.shape[1]
        assert S.shape[2] == self.N
        self.S = S
        # Change tensor S to numpy now that we have saved it as a tensor in
        # self.S
        S = S.cpu().numpy()
        neighborhood = []
        for k in range(1, self.K+1):
            # For each hop (1, ..., K) in the range K, compute the k-hop
            # neighborhood
            thisNeighborhood = graphTools.computeNeighborhood(
                    S, k, outputType='list')
            neighborhood.append(thisNeighborhood)
        self.neighborhood = neighborhood

    def forward(self, x):
        # x should be of shape batchSize x dimNodeSignals x N
        batchSize = x.shape[0]
        dimNodeSignals = x.shape[1]
        assert x.shape[2] == self.N
        xK = x  # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
        # medians; it is initialized with the 0-hop neighborhood (x itself)
        xK = xK.unsqueeze(3)  # extra dimension added for concatenation ahead
        #x = x.unsqueeze(3) # B x F x N x 1
        for k in range(1, self.K+1):
            kHopNeighborhood = self.neighborhood[k-1]
            # Fetching the k-hop neighborhoods of all nodes
            kHopMedian = torch.empty(0).to(x.device)
            # Initializing the vector that will contain the k-hop median for
            # every node
            for n in range(self.N):
                # Iterating over the nodes. This step is necessary because
                # here the neighborhoods are lists of lists. It is impossible
                # to pad them and feed them as a matrix, as this would impact
                # the outcome of the median operation
                nodeNeighborhood = torch.tensor(np.array(kHopNeighborhood[n]))
                neighborhoodLen = len(nodeNeighborhood)
                gatherNode = nodeNeighborhood.reshape([1, 1, neighborhoodLen])
                gatherNode = gatherNode.repeat([batchSize, dimNodeSignals, 1])
                # Reshaping the node neighborhood for the gather operation
                xNodeNeighbors = torch.gather(x, 2,
                                              gatherNode.long().to(x.device))
                # Gathering signal values in the node neighborhood
                nodeMedian, _ = torch.median(xNodeNeighbors, dim = 2,
                                             keepdim=True)
                # Computing the median in the neighborhood
                kHopMedian = torch.cat([kHopMedian, nodeMedian], 2)
                # Concatenating k-hop medians node by node
            kHopMedian = kHopMedian.unsqueeze(3)  # Extra dimension for
            # concatenation with the previous (k-1)-hop median tensor
            xK = torch.cat([xK, kHopMedian], 3)
        out = torch.matmul(xK, self.weight.unsqueeze(2))
        # Multiplying each k-hop median by the corresponding trainable weight
        out = out.reshape([batchSize, dimNodeSignals, self.N])
        return out

    def reset_parameters(self):
        # Taken from _ConvNd initialization of parameters:
        stdv = 1. / math.sqrt(self.K)
        self.weight.data.uniform_(-stdv, stdv)

    def extra_repr(self):
        if self.neighborhood is not None:
            reprString = "neighborhood stored"
        else:
            reprString = "NO neighborhood stored"
        return reprString
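
# --- Illustrative usage sketch (not part of the original module) ---
# MedianLocalActivation is used exactly like MaxLocalActivation, but
# aggregates each neighborhood with the median, which is more robust to
# outlier node values.
def _demoMedianLocalActivation():
    N, B, Fdim, K = 8, 4, 3, 2
    act = MedianLocalActivation(K)
    S = (torch.rand(1, N, N) > 0.5).float()
    S = S + torch.eye(N).reshape(1, N, N)  # self-loops: nonempty neighborhoods
    act.addGSO(S)
    x = torch.randn(B, Fdim, N)
    return act(x)  # B x Fdim x N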
class NoActivation(nn.Module):
    """
    NoActivation creates an activation layer that does nothing

    It is there for completeness, to be able to switch between linear models
    and nonlinear models without altering the entire architecture model

    Initialization:

        NoActivation()

        Output:
            torch.nn.Module for an empty activation layer

    Forward call:

        y = NoActivation(x)

        Inputs:
            x (torch.tensor): input data; shape:
                batch_size x dim_features x number_nodes

        Outputs:
            y (torch.tensor): activated data; shape:
                batch_size x dim_features x number_nodes
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x

    def extra_repr(self):
        reprString = "No Activation Function"
        return reprString

class ReLUwithS(nn.Module):
    """
    ReLUwithS creates an activation layer that takes the input data and the
    Laplacian matrix and returns nn.ReLU(x) and the Laplacian matrix

    Initialization:

        ReLUwithS()

    Forward call:

        y = ReLUwithS(x, S)

        Inputs:
            x (torch.tensor): input data; shape:
                batch_size x dim_features x number_nodes
            S (torch.tensor): input Laplacian matrix of the data, of shape
                batchSize x edgeFeatures x numberNodes x numberNodes

        Outputs:
            y (torch.tensor): activated data; shape:
                batch_size x dim_features x number_nodes
            S (torch.tensor): input Laplacian matrix of the data, of shape
                batchSize x edgeFeatures x numberNodes x numberNodes
    """

    def __init__(self):
        super().__init__()

    def forward(self, x, S):
        m = nn.ReLU()
        y = m(x)
        return y, S

    def extra_repr(self):
        reprString = "ReLU with S"
        return reprString

#############################################################################
#                                                                           #
#                          LAYERS (Pooling)                                 #
#                                                                           #
#############################################################################

class NoPool(nn.Module):
    """
    This is a pooling layer that actually does no pooling. It has the same
    input structure and methods as MaxPoolLocal() for consistency. Basically,
    this allows us to change from pooling to no pooling without necessarily
    creating a new architecture. In any case, we're pretty sure this function
    should never ship, and pooling can be avoided directly when defining the
    architecture.
    """

    def __init__(self, nInputNodes, nOutputNodes, nHops):
        super().__init__()
        self.nInputNodes = nInputNodes
        self.nOutputNodes = nOutputNodes
        self.nHops = nHops
        self.neighborhood = None

    def addGSO(self, GSO):
        # This is necessary to keep the form of the other pooling strategies
        # within the SelectionGNN framework. But we do not care about any
        # GSO.
        pass

    def forward(self, x):
        # x should be of shape batchSize x dimNodeSignals x nInputNodes
        assert x.shape[2] == self.nInputNodes
        # Check that there are at least the same number of nodes that we will
        # keep (otherwise, it would be unpooling, instead of pooling)
        assert x.shape[2] >= self.nOutputNodes
        # And do not do anything
        return x

    def extra_repr(self):
        reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % (
                self.nInputNodes, self.nOutputNodes, self.nHops)
        reprString += "no neighborhood needed"
        return reprString

class MaxPoolLocal(nn.Module):
    """
    MaxPoolLocal Creates a pooling layer on graphs by selecting nodes

    Initialization:

        MaxPoolLocal(in_dim, out_dim, number_hops)

        Inputs:
            in_dim (int): number of nodes at the input
            out_dim (int): number of nodes at the output
            number_hops (int): number of hops to pool information

        Output:
            torch.nn.Module for a local max-pooling layer.

        Observation: The selected nodes for the output are always the top
        ones.

    Add graph shift operator:

        MaxPoolLocal.addGSO(GSO) Before being used, we need to define the GSO
        that will determine the neighborhood that we are going to pool.

        Inputs:
            GSO (torch.tensor): graph shift operator; shape:
                edge_features x number_nodes x number_nodes

    Forward call:

        v = MaxPoolLocal(x)

        Inputs:
            x (torch.tensor): input data; shape:
                batch_size x dim_features x in_dim

        Outputs:
            y (torch.tensor): pooled data; shape:
                batch_size x dim_features x out_dim
    """

    def __init__(self, nInputNodes, nOutputNodes, nHops):
        super().__init__()
        self.nInputNodes = nInputNodes
        self.nOutputNodes = nOutputNodes
        self.nHops = nHops
        self.neighborhood = None

    def addGSO(self, S):
        # Every S has 3 dimensions.
        assert len(S.shape) == 3
        # S is of shape E x N x N (and we don't care about E, because the
        # computeNeighborhood function takes care of it)
        self.N = S.shape[1]
        assert S.shape[2] == self.N
        # Get the device (before operating with S and losing it, it's cheaper
        # to store the device now than to duplicate S -- i.e. keep a numpy
        # and a tensor copy of S)
        device = S.device
        # Move the GSO to cpu and to np.array so it can be handled by the
        # computeNeighborhood function
        S = np.array(S.cpu())
        # Compute the neighborhood
        neighborhood = graphTools.computeNeighborhood(S, self.nHops,
                                                      self.nOutputNodes,
                                                      self.nInputNodes,
                                                      'matrix')
        # And move the neighborhood back to a tensor
        neighborhood = torch.tensor(neighborhood).to(device)
        # The neighborhood matrix has to be a tensor of shape
        #   nOutputNodes x maxNeighborhoodSize
        assert neighborhood.shape[0] == self.nOutputNodes
        assert neighborhood.max() <= self.nInputNodes
        # Store all the relevant information
        self.maxNeighborhoodSize = neighborhood.shape[1]
        self.neighborhood = neighborhood

    def forward(self, x):
        # x should be of shape batchSize x dimNodeSignals x nInputNodes
        batchSize = x.shape[0]
        dimNodeSignals = x.shape[1]
        assert x.shape[2] == self.nInputNodes
        # Check that there are at least the same number of nodes that we will
        # keep (otherwise, it would be unpooling, instead of pooling)
        assert x.shape[2] >= self.nOutputNodes
        # Given that self.neighborhood is already a torch.tensor matrix, we
        # can just go ahead and use it.
        # So, x is of shape B x F x N. But we need it to be of shape
        # B x F x N x maxNeighbor, because we need to compute the maximum
        # between the value of each node and those of its neighbors; we do
        # this by applying torch.max across dim = 3, so that we end up again
        # with a B x F x N tensor, but having computed the max.
        # How to fill those extra dimensions? Well, what we have is the
        # neighborhood matrix, and we are going to use torch.gather to bring
        # in the right values (torch.index_select, while more
        # straightforward, only works along a single dimension).
        # Each row of the neighborhood matrix determines all the neighbors of
        # each node: the first row contains all the neighbors of the first
        # node, etc., and the values of the signal at those nodes are
        # contained in dim = 2 of x. Ignoring the batch and feature
        # dimensions for a moment: imagine we have a column vector N x 1; we
        # have to pick some of the elements of this vector and line them up
        # alongside each row, so that we can then compute the maximum along
        # these rows. When we torch.gather, we are selecting which row to
        # pick according to each column.
        # Thus, if the first row of the neighborhood matrix is [1, 2, 0], it
        # means that we want to pick the value at row 1 of x in the first
        # column, at row 2 of x in the next column, and at row 0 of x in the
        # last column. For these values to be the appropriate ones, we have
        # to repeat x as columns to build our B x F x N x maxNeighbor matrix.
        x = x.unsqueeze(3)  # B x F x N x 1
        x = x.repeat([1, 1, 1, self.maxNeighborhoodSize])
        # B x F x N x maxNeighbor
        # And the neighbors that we need to gather are the same across the
        # batch and feature dimensions, so we need to repeat the matrix along
        # those dimensions
        gatherNeighbor = self.neighborhood.reshape([1, 1,
                                                    self.nOutputNodes,
                                                    self.maxNeighborhoodSize])
        gatherNeighbor = gatherNeighbor.repeat([batchSize, dimNodeSignals,
                                                1, 1]).type(torch.int64)
        # And finally we're in position of getting all the neighbors in line
        xNeighbors = torch.gather(x, 2, gatherNeighbor)
        # B x F x nOutput x maxNeighbor
        # Note that this gather function already reduces the dimension to
        # nOutputNodes.
        # And proceed to compute the maximum along this dimension
        v, _ = torch.max(xNeighbors, dim = 3)
        return v

    def extra_repr(self):
        reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % (
                self.nInputNodes, self.nOutputNodes, self.nHops)
        if self.neighborhood is not None:
            reprString += "neighborhood stored"
        else:
            reprString += "NO neighborhood stored"
        return reprString

#############################################################################
#                                                                           #
#                          LAYERS (Filtering)                               #
#                                                                           #
#############################################################################

class GraphFilter(nn.Module):
    """
    GraphFilter Creates a (linear) layer that applies a graph filter

    Initialization:

        GraphFilter(in_features, out_features, filter_taps,
                    edge_features=1, bias=True)

        Inputs:
            in_features (int): number of input features (each feature is a
                graph signal)
            out_features (int): number of output features (each feature is a
                graph signal)
            filter_taps (int): number of filter taps
            edge_features (int): number of features over each edge
            bias (bool): add bias vector (one bias per feature) after graph
                filtering

        Output:
            torch.nn.Module for a graph filtering layer (also known as a
            graph convolutional layer).

        Observation: Filter taps have shape
            out_features x edge_features x filter_taps x in_features

    Add graph shift operator:

        GraphFilter.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows to change the GSO while
        using the same filtering coefficients (as long as the number of edge
        features is the same; but the number of nodes can change).

        Inputs:
            GSO (torch.tensor): graph shift operator; shape:
                edge_features x number_nodes x number_nodes

    Forward call:

        y = GraphFilter(x)

        Inputs:
            x (torch.tensor): input data; shape:
                batch_size x in_features x number_nodes

        Outputs:
            y (torch.tensor): output; shape:
                batch_size x out_features x number_nodes
    """

    def __init__(self, G, F, K, E = 1, bias = True):
        # K: Number of filter taps
        # GSOs will be added later.
        # This combines both weight scalars and weight vectors.
        # Bias will always be shared and scalar.
        # Initialize parent
        super().__init__()
        # Save parameters:
        self.G = G
        self.F = F
        self.K = K
        self.E = E
        self.S = None  # No GSO assigned yet
        # Create parameters:
        self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
        if bias:
            self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            self.register_parameter('bias', None)
        # Initialize parameters
        self.reset_parameters()

    def reset_parameters(self):
        # Taken from _ConvNd initialization of parameters:
        stdv = 1. / math.sqrt(self.G * self.K)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def addGSO(self, S):
        # Every S has 3 dimensions.
        assert len(S.shape) == 3
        # S is of shape E x N x N
        assert S.shape[0] == self.E
        self.N = S.shape[1]
        assert S.shape[2] == self.N
        self.S = S

    def forward(self, x):
        # x is of shape: batchSize x dimInFeatures x numberNodesIn
        B = x.shape[0]
        F = x.shape[1]
        Nin = x.shape[2]
        # And now we add the zero padding
        if Nin < self.N:
            x = torch.cat((x,
                           torch.zeros(B, F, self.N-Nin)
                                 .type(x.dtype).to(x.device)
                           ), dim = 2)
        # Compute the filter output
        u = LSIGF(self.weight, self.S, x, self.bias)
        # So far, u is of shape batchSize x dimOutFeatures x numberNodes
        # And we want to return a tensor of shape
        #   batchSize x dimOutFeatures x numberNodesIn
        # since the nodes between numberNodesIn and numberNodes are not
        # required
        if Nin < self.N:
            u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
        return u

    def extra_repr(self):
        reprString = "in_features=%d, out_features=%d, " % (
                self.G, self.F) + "filter_taps=%d, " % (
                self.K) + "edge_features=%d, " % (self.E) +\
                "bias=%s, " % (self.bias is not None)
        if self.S is not None:
            reprString += "GSO stored"
        else:
            reprString += "no GSO stored"
        return reprString
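
# --- Illustrative usage sketch (not part of the original module) ---
# A GraphFilter layer must receive the GSO through addGSO() before the first
# forward call. All names below are local to the example.
def _demoGraphFilter():
    G, F, K, N, B = 3, 4, 5, 10, 8
    gfl = GraphFilter(G, F, K)  # one edge feature, with bias
    S = torch.rand(1, N, N)     # random GSO with E = 1
    gfl.addGSO(S)               # always add the GSO before forward
    x = torch.randn(B, G, N)
    y = gfl(x)                  # B x F x N
    return y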
class SpectralGF(nn.Module):
    """
    SpectralGF Creates a (linear) layer that applies an LSI graph filter in
        the spectral domain, using a cubic spline if needed.

    Initialization:

        SpectralGF(in_features, out_features, filter_coeff,
                   edge_features=1, bias=True)

        Inputs:
            in_features (int): number of input features (each feature is a
                graph signal)
            out_features (int): number of output features (each feature is a
                graph signal)
            filter_coeff (int): number of filter spectral coefficients
            edge_features (int): number of features over each edge
            bias (bool): add bias vector (one bias per feature) after graph
                filtering

        Output:
            torch.nn.Module for a graph filtering layer (also known as a
            graph convolutional layer) implemented in the spectral domain.

        Observation: Filter taps have shape
            out_features x edge_features x in_features x filter_coeff

    Add graph shift operator:

        SpectralGF.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows to change the GSO while
        using the same filtering coefficients (as long as the number of edge
        features is the same; but the number of nodes can change).

        Inputs:
            GSO (torch.tensor): graph shift operator; shape:
                edge_features x number_nodes x number_nodes

    Forward call:

        y = SpectralGF(x)

        Inputs:
            x (torch.tensor): input data; shape:
                batch_size x in_features x number_nodes

        Outputs:
            y (torch.tensor): output; shape:
                batch_size x out_features x number_nodes
    """

    def __init__(self, G, F, M, E = 1, bias = True):
        # GSOs will be added later.
        # Bias will always be shared and scalar.
        # Initialize parent
        super().__init__()
        # Save parameters:
        self.G = G
        self.F = F
        self.M = M
        self.E = E
        self.S = None  # No GSO assigned yet
        # Create parameters:
        self.weight = nn.parameter.Parameter(torch.Tensor(F, E, G, M))
        if bias:
            self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            self.register_parameter('bias', None)
        # Initialize parameters
        self.reset_parameters()

    def reset_parameters(self):
        # Taken from _ConvNd initialization of parameters:
        stdv = 1. / math.sqrt(self.G * self.M)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def addGSO(self, S):
        # Every S has to have 3 dimensions.
        assert len(S.shape) == 3
        # S is of shape E x N x N
        assert S.shape[0] == self.E
        self.N = S.shape[1]
        assert S.shape[2] == self.N
        self.S = S  # Save S
        # Now we need to compute the eigendecomposition and save it.
        # To compute the eigendecomposition, we use numpy; so, first, get S
        # in numpy format.
        Snp = np.array(S.data.cpu())
        # We will compute the eigendecomposition for each edge feature, so we
        # create the E x N x N space for V, VH and Lambda (we need Lambda for
        # the spline kernel)
        V = np.zeros([self.E, self.N, self.N])
        VH = np.zeros([self.E, self.N, self.N])
        Lambda = np.zeros([self.E, self.N])
        # Here we save the resulting spline kernel matrix
        splineKernel = np.zeros([self.E, self.N, self.M])
        for e in range(self.E):
            # Compute the eigendecomposition
            Lambda[e, :], V[e, :, :] = np.linalg.eig(Snp[e, :, :])
            # Compute the hermitian
            VH[e, :, :] = V[e, :, :].conj().T
            # Compute the splineKernel basis matrix
            splineKernel[e, :, :] = graphTools.splineBasis(self.M,
                                                           Lambda[e, :])
        # Transform everything to tensors of the appropriate type on the
        # appropriate device, and store them.
        self.V = torch.tensor(V).type(S.dtype).to(S.device)  # E x N x N
        self.VH = torch.tensor(VH).type(S.dtype).to(S.device)  # E x N x N
        self.splineKernel = torch.tensor(splineKernel)\
                                 .type(S.dtype).to(S.device)  # E x N x M
        # Once we have computed the splineKernel, we do not need to save the
        # eigenvalues.

    def forward(self, x):
        # x is of shape: batchSize x dimInFeatures x numberNodesIn
        B = x.shape[0]
        F = x.shape[1]
        Nin = x.shape[2]
        # Check if we have enough spectral filter coefficients as needed, or
        # if we need to fill out the rest using the spline kernel.
        if self.M == self.N:
            self.h = self.weight  # F x E x G x N (because N = M)
        else:
            # Adjust dimensions for proper algebraic matrix multiplication
            splineKernel = self.splineKernel.reshape([1, self.E,
                                                      self.N, self.M])
            # We will multiply a 1 x E x N x M matrix with a F x E x M x G
            # matrix to get the proper F x E x N x G coefficients
            self.h = torch.matmul(splineKernel,
                                  self.weight.permute(0, 1, 3, 2))
            # And now we rearrange it to the same shape that the function
            # takes
            self.h = self.h.permute(0, 1, 3, 2)  # F x E x G x N
        # And now we add the zero padding (if this comes from a pooling
        # operation)
        if Nin < self.N:
            zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype)\
                           .to(x.device)
            x = torch.cat((x, zeroPad), dim = 2)
        # Compute the filter output
        u = spectralGF(self.h, self.V, self.VH, x, self.bias)
        # So far, u is of shape batchSize x dimOutFeatures x numberNodes
        # And we want to return a tensor of shape
        #   batchSize x dimOutFeatures x numberNodesIn
        # since the nodes between numberNodesIn and numberNodes are not
        # required
        if Nin < self.N:
            u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
        return u

    def extra_repr(self):
        reprString = "in_features=%d, out_features=%d, " % (
                self.G, self.F) + "filter_coeff=%d, " % (
                self.M) + "edge_features=%d, " % (self.E) +\
                "bias=%s, " % (self.bias is not None)
        if self.S is not None:
            reprString += "GSO stored"
        else:
            reprString += "no GSO stored"
        return reprString

class NodeVariantGF(nn.Module):
    """
    NodeVariantGF Creates a filtering layer that applies a node-variant graph
        filter

    Initialization:

        NodeVariantGF(in_features, out_features, shift_taps, node_taps,
                      edge_features=1, bias=True)

        Inputs:
            in_features (int): number of input features (each feature is a
                graph signal)
            out_features (int): number of output features (each feature is a
                graph signal)
            shift_taps (int): number of filter taps for shifts
            node_taps (int): number of filter taps for nodes
            edge_features (int): number of features over each edge
over each edge bias (bool): add bias vector (one bias per feature) after graph filtering Output: torch.nn.Module for a graph filtering layer using node-variant graph filters. Observation: Filter taps have shape out_features x edge_features x shift_taps x in_features x node_taps Add graph shift operator: NodeVariantGF.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = NodeVariantGF(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, F, K, M, E = 1, bias = True): # G: Number of input features # F: Number of output features # K: Number of filter shift taps # M: Number of filter node taps # GSOs will be added later. # Bias will always be shared and scalar. # Initialize parent super().__init__() # Save parameters: self.G = G self.F = F self.K = K self.M = M self.E = E self.S = None # No GSO assigned yet # Create parameters: self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G, M)) if bias: self.bias = nn.parameter.Parameter(torch.Tensor(F, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.G * self.K * self.M) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 3 dimensions. assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S npS = np.array(S.data.cpu()) # Save the GSO as a numpy array because we # are going to compute the neighbors. # And now we have to fill up the parameter vector, from M to N if self.M < self.N: # The first M nodes (under whatever ordering we have chosen) # are the ones associated with independent node taps. copyNodes = [m for m in range(self.M)] # The rest of the nodes will copy one of these M node taps. # The way we do this is: if they are connected to one of the M # independent nodes, just copy it. If they are not connected, # look at the neighbors' neighbors, and so on, until we reach one # of the independent nodes. # Ties are broken by selecting the node with the smallest index # (which, due to the ordering, is the most important node of all # the available ones) neighborList = graphTools.computeNeighborhood(npS, 1, nb = self.M) # This gets the list of 1-hop neighbors for all nodes.
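# Illustration (hypothetical values): with M = 2 independent taps, a node
# n >= 2 whose neighborhood contains the independent nodes {0, 1} copies the
# tap of node 0, since min(neighborList[n]) keeps the smallest index.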
# Find the nodes that have no neighbors nodesWithNoNeighbors = [n for n in range(self.N) \ if len(neighborList[n]) == 0] # If there are still nodes that didn't find a neighbor K = 1 # K-hop neighbor we have looked so far while len(nodesWithNoNeighbors) > 0: # Looks for the next hop K += 1 # Get the neighbors one further hop away thisNeighborList = graphTools.computeNeighborhood(npS, K, nb = self.M) # Check if we now have neighbors for those that didn't have # before for n in nodesWithNoNeighbors: # Get the neighbors of the node thisNodeList = thisNeighborList[n] # If there are neighbors if len(thisNodeList) > 0: # Add them to the list neighborList[n] = thisNodeList # Recheck if all nodes have non-empty neighbors nodesWithNoNeighbors = [n for n in range(self.N) \ if len(neighborList[n]) == 0] # Now that we have obtained the list of independent nodes connected to # each node, we keep the one with the highest score. And since the # matrix is already properly ordered, this means keeping the # smallest index in the neighborList. for m in range(self.M, self.N): copyNodes.append(min(neighborList[m])) # And, finally create the indices of nodes to copy self.copyNodes = torch.tensor(copyNodes).to(S.device) elif self.M == self.N: # In this case, all parameters go into the vector h self.copyNodes = torch.arange(self.M).to(S.device) else: # This is the rare case in which self.M > self.N, for example, if # we train on a larger network and deploy on a smaller one. Since # the matrix is ordered by score, we just keep the first N # weights self.copyNodes = torch.arange(self.N).to(S.device) # OBS.: self.weight is updated on each training step, so we cannot # define the self.h vector (i.e. the vector with N elements) here, # because otherwise it wouldn't be updated every time. So we need, in # the forward, to use index_select on the actual weights, to create the # vector h that is later fed into the NVGF computation. def forward(self, x): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # If we have fewer filter coefficients than the required ones, we need # to use the copying scheme if self.M == self.N: self.h = self.weight else: self.h = torch.index_select(self.weight, 4, self.copyNodes) # And now we add the zero padding if Nin < self.N: zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device) x = torch.cat((x, zeroPad), dim = 2) # Compute the filter output u = NVGF(self.h, self.S, x, self.bias) # So far, u is of shape batchSize x dimOutFeatures x numberNodes # And we want to return a tensor of shape # batchSize x dimOutFeatures x numberNodesIn # since the nodes between numberNodesIn and numberNodes are not required if Nin < self.N: u = torch.index_select(u, 2, torch.arange(Nin).to(u.device)) return u def extra_repr(self): reprString = "in_features=%d, out_features=%d, " % ( self.G, self.F) + "shift_taps=%d, node_taps=%d, " % ( self.K, self.M) + "edge_features=%d, " % (self.E) +\ "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class EdgeVariantGF(nn.Module): """ EdgeVariantGF Creates a (linear) layer that applies an edge-variant graph filter using the masking approach. If fewer nodes than the total number of nodes are selected, then the remaining nodes adopt an LSI filter (i.e.
it becomes a hybrid edge-variant graph filter) Initialization: EdgeVariantGF(in_features, out_features, shift_taps, selected_nodes, number_nodes, edge_features=1, bias=True) Inputs: in_features (int): number of input features (each feature is a graph signal) out_features (int): number of output features (each feature is a graph signal) shift_taps (int): number of shifts to consider selected_nodes (int): number of selected nodes to implement the EV part of the filter number_nodes (int): number of nodes edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after graph filtering Output: torch.nn.Module for a graph filtering layer using hybrid edge-variant graph filters. Observation: Filter taps have shape out_features x edge_features x shift_taps x in_features x number_nodes x number_nodes These weights are masked by the corresponding sparsity pattern of the graph and the desired number of selected nodes, so only the weights on the nonzero edges of these nodes will be trained; the rest of the parameters hold unused values. Therefore, the number of parameters will not reflect the actual number of parameters being trained. Add graph shift operator: EdgeVariantGF.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = EdgeVariantGF(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, F, K, M, N, E=1, bias = True): # Initialize parent super().__init__() # Save parameters: self.G = G self.F = F self.K = K self.E = E self.M = M # Number of selected nodes self.N = N # Total number of nodes self.S = None # Create parameters for the Edge-Variant part: self.weightEV = nn.parameter.Parameter(torch.Tensor(F, E, K, G, N, N)) # If we want a hybrid filter, create the LSI parameters if self.M < self.N: self.weightLSI = nn.parameter.Parameter(torch.Tensor(F, E, K, G)) else: self.register_parameter('weightLSI', None) if bias: self.bias = nn.parameter.Parameter(torch.Tensor(F, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.G * self.K * self.N) self.weightEV.data.uniform_(-stdv, stdv) if self.weightLSI is not None: self.weightLSI.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 3 dimensions.
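# Illustration (hypothetical values): for N = 4 and M = 2, the hybrid mask
# assembled below is
# [[1, 1, 1, 1],
#  [1, 1, 1, 1],
#  [1, 1, 0, 0],
#  [1, 1, 0, 0]]
# so only the entries coupling two non-selected nodes are zeroed out; those
# nodes are handled by the LSI part of the filter instead.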
assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S # Save the GSO # Get the identity matrix across all edge features multipleIdentity = torch.eye(self.N).reshape([1, self.N, self.N])\ .repeat(self.E, 1, 1).to(S.device) # Compute the nonzero elements of S+I_{N} sparsityPattern = ((torch.abs(S) + multipleIdentity) > zeroTolerance) # Change from byte tensors to float tensors (or the same type of data as # the GSO) sparsityPattern = sparsityPattern.type(S.dtype) # But now we need to kill everything that is between elements M and N # (only if M < N) if self.M < self.N: # Create the ones in the rows hybridMaskOnesRows = torch.ones([self.M, self.N]) # Create the ones in the columns hybridMaskOnesCols = torch.ones([self.N - self.M, self.M]) # Create the zeros hybridMaskZeros = torch.zeros([self.N - self.M, self.N - self.M]) # Concatenate the columns hybridMask = torch.cat((hybridMaskOnesCols,hybridMaskZeros), dim=1) # Concatenate the rows hybridMask = torch.cat((hybridMaskOnesRows,hybridMask), dim=0) else: hybridMask = torch.ones([self.N, self.N]) # Now that we have the hybrid mask, we need to mask the sparsityPattern # we got so far hybridMask = hybridMask.reshape([1, self.N, self.N]).to(S.device) # 1 x N x N sparsityPattern = sparsityPattern * hybridMask self.sparsityPattern = sparsityPattern.to(S.device) # E x N x N # This gives the sparsity pattern for each edge feature # Now, let's create it with the right shape, so we do not have to go # around wasting time with reshapes when called in the forward # The weights have shape F x E x K x G x N x N # The sparsity pattern has shape E x N x N. And we want to make it # 1 x E x K x 1 x N x N. The K dimension is to guarantee that for k=0 # we have the identity multipleIdentity = (multipleIdentity * hybridMask)\ .reshape([1, self.E, 1, 1, self.N, self.N]) if self.K > 1: # This gives a 1 x E x 1 x 1 x N x N identity matrix sparsityPattern = sparsityPattern\ .reshape([1, self.E, 1, 1, self.N, self.N]) # This gives a 1 x E x 1 x 1 x N x N sparsity pattern matrix sparsityPattern = sparsityPattern.repeat(1, 1, self.K-1, 1, 1, 1) # This repeats the sparsity pattern K-1 times giving a matrix of shape # 1 x E x (K-1) x 1 x N x N sparsityPattern = torch.cat((multipleIdentity,sparsityPattern), dim = 2) else: sparsityPattern = multipleIdentity # This should give me a 1 x E x K x 1 x N x N matrix with the identity # in the first element self.sparsityPatternFull = sparsityPattern.type(S.dtype).to(S.device) def forward(self, x): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # Mask the parameters self.Phi = self.weightEV * self.sparsityPatternFull # And now we add the zero padding if Nin < self.N: zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device) x = torch.cat((x, zeroPad), dim = 2) # Compute the filter output for the EV part uEV = EVGF(self.Phi, x, self.bias) # Check if we need an LSI part if self.M < self.N: # Compute the filter output for the LSI part uLSI = LSIGF(self.weightLSI, self.S, x, self.bias) else: # If we don't, just add zero uLSI = torch.tensor(0., dtype = uEV.dtype).to(uEV.device) # Add both u = uEV + uLSI # So far, u is of shape batchSize x dimOutFeatures x numberNodes # And we want to return a tensor of shape # batchSize x dimOutFeatures x numberNodesIn # since the nodes between numberNodesIn and numberNodes are not required if Nin < self.N: u = torch.index_select(u, 2,
torch.arange(Nin).to(u.device)) return u def extra_repr(self): reprString = "in_features=%d, out_features=%d, " % ( self.G, self.F) + "shift_taps=%d, " % ( self.K) + \ "selected_nodes=%d, " % (self.M) +\ "number_nodes=%d, " % (self.N) +\ "edge_features=%d, " % (self.E) +\ "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class GraphFilterARMA(nn.Module): """ GraphFilterARMA Creates a (linear) layer that applies a ARMA graph filter using Jacobi's method Initialization: GraphFilterARMA(in_features, out_features, denominator_taps, residue_taps edge_features=1, bias=True, tMax = 5) Inputs: in_features (int): number of input features (each feature is a graph signal) out_features (int): number of output features (each feature is a graph signal) denominator_taps (int): number of filter taps in the denominator polynomial residue_taps (int): number of filter taps in the residue polynomial edge_features (int): number of features over each edge (default: 1) bias (bool): add bias vector (one bias per feature) after graph filtering (default: True) tMax (int): maximum number of Jacobi iterations (default: 5) Output: torch.nn.Module for a graph filtering layer (also known as graph convolutional layer). Observation: Filter taps have shape out_features x edge_features x filter_taps x in_features Add graph shift operator: GraphFilterARMA.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = GraphFilterARMA(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, F, P, K, E = 1, bias = True, tMax = 5): # K: Number of filter taps # GSOs will be added later. # This combines both weight scalars and weight vectors. # Bias will always be shared and scalar. # Initialize parent super().__init__() # Save parameters: self.G = G self.F = F self.P = P self.K = K self.E = E self.tMax = tMax self.S = None # No GSO assigned yet # Create parameters: self.inverseWeight = nn.parameter.Parameter(torch.Tensor(F, E, P, G)) self.directWeight = nn.parameter.Parameter(torch.Tensor(F, E, P, G)) self.filterWeight = nn.parameter.Parameter(torch.Tensor(F, E, K, G)) if bias: self.bias = nn.parameter.Parameter(torch.Tensor(F, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.G * self.P) self.inverseWeight.data.uniform_(1.+1./stdv, 1.+2./stdv) self.directWeight.data.uniform_(-stdv, stdv) self.filterWeight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 3 dimensions. 
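# Note: the denominator polynomial is never inverted explicitly; jARMA (see
# the forward call) approximates the inverse's action with tMax Jacobi
# iterations, combined with the direct and residue (filter) weights.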
assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S def forward(self, x): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N-Nin)\ .type(x.dtype).to(x.device) ), dim = 2) # Compute the filter output u = jARMA(self.inverseWeight, self.directWeight, self.filterWeight, self.S, x, b = self.bias, tMax = self.tMax) # So far, u is of shape batchSize x dimOutFeatures x numberNodes # And we want to return a tensor of shape # batchSize x dimOutFeatures x numberNodesIn # since the nodes between numberNodesIn and numberNodes are not required if Nin < self.N: u = torch.index_select(u, 2, torch.arange(Nin).to(u.device)) return u def extra_repr(self): reprString = "in_features=%d, " % self.G reprString += "out_features=%d, " % self.F reprString += "denominator_taps=%d, " % self.P reprString += "residue_taps=%d, " % self.K reprString += "edge_features=%d, " % self.E reprString += "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class GraphAttentional(nn.Module): """ GraphAttentional Creates a graph attentional layer Initialization: GraphAttentional(in_features, out_features, attention_heads, edge_features=1, nonlinearity=nn.functional.relu, concatenate=True) Inputs: in_features (int): number of input features on top of each node out_features (int): number of output features on top of each node attention_heads (int): number of attention_heads edge_features (int): number of features on top of each edge (default: 1) nonlinearity (nn.functional): nonlinearity applied after features have been updated through attention (default:nn.functional.relu) concatenate (bool): If True, the output of the attention_heads attention heads are concatenated to form the output features, if False, they are averaged (default: True) Output: torch.nn.Module for a graph attentional layer. Add graph shift operator: GraphAttentional.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = GraphAttentional(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, F, K, E = 1, nonlinearity = nn.functional.relu, concatenate = True): # K: Number of filter taps # GSOs will be added later. # This combines both weight scalars and weight vectors. # Initialize parent super().__init__() # Save parameters: self.G = G self.F = F self.K = K self.E = E self.S = None # No GSO assigned yet self.nonlinearity = nonlinearity self.concatenate = concatenate # Create parameters: self.mixer = nn.parameter.Parameter(torch.Tensor(K, E, 2*F)) self.weight = nn.parameter.Parameter(torch.Tensor(K, E, F, G)) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.G * self.K) self.weight.data.uniform_(-stdv, stdv) self.mixer.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 3 dimensions. 
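# Note: with concatenate=True the layer outputs K*F features per node (one
# block of F features per attention head); with concatenate=False the K
# heads are averaged and the output keeps F features per node.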
assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S def forward(self, x): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N-Nin)\ .type(x.dtype).to(x.device) ), dim = 2) # And get the graph attention output y = graphAttention(x, self.mixer, self.weight, self.S) # This output is of size B x K x F x N. Now, we can either concatenate # them (inner layers) or average them (outer layer) if self.concatenate: # When we concatenate we first apply the nonlinearity y = self.nonlinearity(y) # Concatenate: Make it B x KF x N such that first iterates over f # and then over k: (k=0,f=0), (k=0,f=1), ..., (k=0,f=F-1), (k=1,f=0), # (k=1,f=1), ..., etc. y = y.permute(0, 3, 1, 2)\ .reshape([B, self.N, self.K*self.F])\ .permute(0, 2, 1) else: # When we don't, we first average y = torch.mean(y, dim = 1) # B x F x N # And then we apply the nonlinearity y = self.nonlinearity(y) if Nin < self.N: y = torch.index_select(y, 2, torch.arange(Nin).to(y.device)) return y def extra_repr(self): reprString = "in_features=%d, out_features=%d, " % ( self.G, self.F) + "attention_heads=%d, " % ( self.K) + "edge_features=%d, " % (self.E) if self.S is not None: reprString += "GSO stored: number_nodes=%d" % (self.N) else: reprString += "no GSO stored" return reprString class GraphFilterAttentional(nn.Module): """ GraphFilterAttentional Creates a graph convolution attentional layer Initialization: GraphFilterAttentional(in_features, out_features, filter_taps, attention_heads, edge_features=1, bias=True, nonlinearity=nn.functional.relu, concatenate=True) Inputs: in_features (int): number of input features on top of each node out_features (int): number of output features on top of each node filter_taps (int): number of filter taps (power of the GSO) attention_heads (int): number of attention_heads edge_features (int): number of features on top of each edge (default: 1) bias (bool): include a bias in the LSIGF stage (default: True) nonlinearity (nn.functional): nonlinearity applied after features have been updated through attention (default:nn.functional.relu) concatenate (bool): If True, the output of the attention_heads attention heads are concatenated to form the output features, if False, they are averaged (default: True) Output: torch.nn.Module for a graph convolution attentional layer. Add graph shift operator: GraphFilterAttentional.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = GraphFilterAttentional(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, F, K, P, E = 1, bias = True, nonlinearity = nn.functional.relu, concatenate = True): # P: Number of heads # GSOs will be added later. # This combines both weight scalars and weight vectors. 
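# Here the attention parameters (mixer, weight) learn the edge weights of
# the GSO, while filterWeight holds the K polynomial coefficients of the
# graph convolution carried out by graphAttentionLSIGF in the forward pass.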
# Initialize parent super().__init__() # Save parameters: self.G = G # in_features self.F = F # out_features self.K = K # filter_taps self.P = P # attention_heads self.E = E # edge_features self.S = None # No GSO assigned yet self.nonlinearity = nonlinearity self.concatenate = concatenate # Create parameters: self.mixer = nn.parameter.Parameter(torch.Tensor(P, E, 2*F)) self.weight = nn.parameter.Parameter(torch.Tensor(P, E, F, G)) self.filterWeight = nn.parameter.Parameter(torch.Tensor(E, K)) if bias: self.bias = nn.parameter.Parameter(torch.Tensor(F, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.G * self.P) self.weight.data.uniform_(-stdv, stdv) self.mixer.data.uniform_(-stdv, stdv) self.filterWeight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 3 dimensions. assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S def forward(self, x): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N-Nin)\ .type(x.dtype).to(x.device) ), dim = 2) # And get the graph attention output y = graphAttentionLSIGF(self.filterWeight, x, self.mixer, self.weight, self.S, b = self.bias) # This output is of size B x P x F x N. Now, we can either concatenate # them (inner layers) or average them (outer layer) if self.concatenate: # When we concatenate we first apply the nonlinearity y = self.nonlinearity(y) # Concatenate: Make it B x PF x N such that first iterates over f # and then over p: (p=0,f=0), (p=0,f=1), ..., (p=0,f=F-1), (p=1,f=0), # (p=1,f=1), ..., etc. 
y = y.permute(0, 3, 1, 2)\ .reshape([B, self.N, self.P*self.F])\ .permute(0, 2, 1) else: # When we don't, we first average y = torch.mean(y, dim = 1) # B x F x N # And then we apply the nonlinearity y = self.nonlinearity(y) if Nin < self.N: y = torch.index_select(y, 2, torch.arange(Nin).to(y.device)) return y def extra_repr(self): reprString = "in_features=%d, " % self.G reprString += "out_features=%d, " % self.F reprString += "filter_taps=%d, " % self.K reprString += "attention_heads=%d, " % self.P reprString += "edge_features=%d, " % self.E reprString += "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored: number_nodes=%d" % (self.N) else: reprString += "no GSO stored" return reprString class EdgeVariantAttentional(nn.Module): """ EdgeVariantAttentional Creates a layer using an edge variant graph filter parameterized by several attention mechanisms Initialization: EdgeVariantAttentional(in_features, out_features, filter_taps, attention_heads, edge_features=1, bias = True, nonlinearity=nn.functional.relu, concatenate=True) Inputs: in_features (int): number of input features on top of each node out_features (int): number of output features on top of each node filter_taps (int): number of filter taps attention_heads (int): number of attention heads edge_features (int): number of features on top of each edge (default: 1) bias (bool): include a bias after the EVGF (default: True) nonlinearity (nn.functional): nonlinearity applied after features have been updated through attention (default:nn.functional.relu) concatenate (bool): If True, the output of the attention_heads attention heads are concatenated to form the output features, if False, they are averaged (default: True) Output: torch.nn.Module for an edge variant GF layer, parameterized by the attention mechanism. Add graph shift operator: EdgeVariantAttentional.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients. Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = EdgeVariantAttentional(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, F, K, P, E = 1, bias = True, nonlinearity = nn.functional.relu, concatenate = True): # K: Number of filter taps # P: Number of attention heads # GSOs will be added later. # This combines both weight scalars and weight vectors. # Initialize parent super().__init__() # Save parameters: self.G = G # in_features self.F = F # out_features self.K = K # filter_taps self.P = P # attention_heads self.E = E # edge_features self.S = None # No GSO assigned yet self.nonlinearity = nonlinearity self.concatenate = concatenate # Create parameters: self.mixer = nn.parameter.Parameter(torch.Tensor(P, K, E, 2*F)) self.weight = nn.parameter.Parameter(torch.Tensor(P, K, E, F, G)) if bias: self.bias = nn.parameter.Parameter(torch.Tensor(F, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.G * self.K) self.weight.data.uniform_(-stdv, stdv) self.mixer.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 3 dimensions. 
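# Note: unlike GraphFilterAttentional, every one of the P heads here
# parameterizes each of the K taps of an edge-variant filter (the mixer and
# weight tensors carry an extra K dimension); see graphAttentionEVGF.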
assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S def forward(self, x): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N-Nin)\ .type(x.dtype).to(x.device) ), dim = 2) # And get the graph attention output y = graphAttentionEVGF(x, self.mixer, self.weight, self.S, b=self.bias) # This output is of size B x K x F x N. Now, we can either concatenate # them (inner layers) or average them (outer layer) if self.concatenate: # When we concatenate we first apply the nonlinearity y = self.nonlinearity(y) # Concatenate: Make it B x KF x N such that first iterates over f # and then over k: (k=0,f=0), (k=0,f=1), ..., (k=0,f=F-1), (k=1,f=0), # (k=1,f=1), ..., etc. y = y.permute(0, 3, 1, 2)\ .reshape([B, self.N, self.K*self.F])\ .permute(0, 2, 1) else: # When we don't, we first average y = torch.mean(y, dim = 1) # B x F x N # And then we apply the nonlinearity y = self.nonlinearity(y) if Nin < self.N: y = torch.index_select(y, 2, torch.arange(Nin).to(y.device)) return y def extra_repr(self): reprString = "in_features=%d, " % self.G reprString += "out_features=%d, " % self.F reprString += "filter_taps=%d, " % self.K reprString += "attention_heads=%d, " % self.P reprString += "edge_features=%d, " % self.E reprString += "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored: number_nodes=%d" % (self.N) else: reprString += "no GSO stored" return reprString ############################################################################# # # # LAYERS (Filtering batch-dependent time-varying) # # # ############################################################################# class GraphFilter_DB(nn.Module): """ GraphFilter_DB Creates a (linear) layer that applies a graph filter (i.e. a graph convolution) considering batches of GSO and the corresponding time delays Initialization: GraphFilter_DB(in_features, out_features, filter_taps, edge_features=1, bias=True) Inputs: in_features (int): number of input features (each feature is a graph signal) out_features (int): number of output features (each feature is a graph signal) filter_taps (int): number of filter taps edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after graph filtering Output: torch.nn.Module for a graph filtering layer (also known as graph convolutional layer). Observation: Filter taps have shape out_features x edge_features x filter_taps x in_features Add graph shift operator: GraphFilter_DB.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: batch_size x time_samples x edge_features x number_nodes x number_nodes Forward call: y = GraphFilter_DB(x) Inputs: x (torch.tensor): input data; shape: batch_size x time_samples x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x time_samples x out_features x number_nodes """ def __init__(self, G, F, K, E = 1, bias = True): # K: Number of filter taps # GSOs will be added later. # This combines both weight scalars and weight vectors. # Bias will always be shared and scalar. 
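# The _DB layers take a batch-dependent, time-varying GSO: addGSO expects
# shape batch_size x time_samples x edge_features x N x N, matching the
# B x T leading dimensions of the input signal in the forward pass.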
# Initialize parent super().__init__() # Save parameters: self.G = G self.F = F self.K = K self.E = E self.S = None # No GSO assigned yet # Create parameters: self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G)) if bias: self.bias = nn.parameter.Parameter(torch.Tensor(F, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.G * self.K) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 5 dimensions. assert len(S.shape) == 5 # S is of shape B x T x E x N x N assert S.shape[2] == self.E self.N = S.shape[3] assert S.shape[4] == self.N self.S = S def forward(self, x): # x is of shape: batchSize x time x dimInFeatures x numberNodesIn assert len(x.shape) == 4 B = x.shape[0] assert self.S.shape[0] == B T = x.shape[1] assert self.S.shape[1] == T #F = x.shape[2] assert x.shape[3] == self.N # Compute the filter output u = LSIGF_DB(self.weight, self.S, x, self.bias) # u is of shape batchSize x time x dimOutFeatures x numberNodes return u def extra_repr(self): reprString = "in_features=%d, out_features=%d, " % ( self.G, self.F) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) +\ "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class HiddenState_DB(nn.Module): """ HiddenState_DB Creates the layer for computing the hidden state of a GRNN Initialization: HiddenState_DB(signal_features, hidden_features, filter_taps, nonlinearity=torch.tanh, edge_features=1, bias=True) Inputs: signal_features (int): number of signal features hidden_features (int): number of hidden state features filter_taps (int): number of filter taps (both filters have the same number of filter taps) nonlinearity (torch function): nonlinearity applied when computing the hidden state edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after each filter Output: torch.nn.Module for a hidden state layer Observation: Input-to-Hidden Filter taps have shape hidden_features x edge_features x filter_taps x signal_features Hidden-to-Hidden FIlter taps have shape hidden_features x edge_features x filter_taps x hidden_features Add graph shift operator: HiddenState_DB.addGSO(GSO) Before applying the layer, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). 
Inputs: GSO (torch.tensor): graph shift operator; shape: batch_size x time_samples x edge_features x number_nodes x number_nodes Forward call: y = HiddenState_DB(x, z0) Inputs: x (torch.tensor): input data; shape: batch_size x time_samples x signal_features x number_nodes z0 (torch.tensor): initial hidden state; shape: batch_size x hidden_features x number_nodes Outputs: y (torch.tensor): hidden state; shape: batch_size x time_samples x hidden_features x number_nodes """ def __init__(self, F, H, K, nonlinearity = torch.tanh, E = 1, bias = True): # Initialize parent: super().__init__() # Store the values (using the notation in the paper): self.F = F # Input Features self.H = H # Hidden Features self.K = K # Filter taps self.E = E # Number of edge features self.S = None self.bias = bias # Boolean self.sigma = nonlinearity # torch.nn.functional # Create parameters: self.aWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, F)) self.bWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, H)) if self.bias: self.xBias = nn.parameter.Parameter(torch.Tensor(H, 1)) self.zBias = nn.parameter.Parameter(torch.Tensor(H, 1)) else: self.register_parameter('xBias', None) self.register_parameter('zBias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.F * self.K) self.aWeights.data.uniform_(-stdv, stdv) self.bWeights.data.uniform_(-stdv, stdv) if self.bias: self.xBias.data.uniform_(-stdv, stdv) self.zBias.data.uniform_(-stdv, stdv) def forward(self, x, z0): assert self.S is not None # Input # S: B x T (x E) x N x N # x: B x T x F x N # z0: B x H x N # Output # z: B x T x H x N # Check dimensions assert len(x.shape) == 4 B = x.shape[0] assert self.S.shape[0] == B T = x.shape[1] assert self.S.shape[1] == T assert x.shape[2] == self.F N = x.shape[3] assert len(z0.shape) == 3 assert z0.shape[0] == B assert z0.shape[1] == self.H assert z0.shape[2] == N z = GRNN_DB(self.aWeights, self.bWeights, self.S, x, z0, self.sigma, xBias = self.xBias, zBias = self.zBias) zT = torch.index_select(z, 1, torch.tensor(T-1, device = z.device)) # Give out the last one, to be used as starting point if used in # succession return z, zT.unsqueeze(1) def addGSO(self, S): # Every S has 5 dimensions. assert len(S.shape) == 5 # S is of shape B x T x E x N x N assert S.shape[2] == self.E self.N = S.shape[3] assert S.shape[4] == self.N self.S = S def extra_repr(self): reprString = "in_features=%d, hidden_features=%d, " % ( self.F, self.H) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) +\ "bias=%s, " % (self.bias) +\ "nonlinearity=%s" % (self.sigma) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class HiddenState(nn.Module): # Luana R. 
Ruiz, rubruiz@seas.upenn.edu, 2021/01/28 """ HiddenState Creates the layer for computing the hidden state of a GRNN Initialization: HiddenState(signal_features, hidden_features, filter_taps, nonlinearity=torch.tanh, edge_features=1, bias=True) Inputs: signal_features (int): number of signal features hidden_features (int): number of hidden state features filter_taps (int): number of filter taps (both filters have the same number of filter taps) nonlinearity (torch function): nonlinearity applied when computing the hidden state edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after each filter Output: torch.nn.Module for a hidden state layer Observation: Input-to-Hidden Filter taps have shape hidden_features x edge_features x filter_taps x signal_features Hidden-to-Hidden Filter taps have shape hidden_features x edge_features x filter_taps x hidden_features Add graph shift operator: HiddenState.addGSO(GSO) Before applying the layer, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = HiddenState(x, z0) Inputs: x (torch.tensor): input data; shape: batch_size x time_samples x signal_features x number_nodes z0 (torch.tensor): initial hidden state; shape: batch_size x hidden_features x number_nodes Outputs: y (torch.tensor): hidden state; shape: batch_size x time_samples x hidden_features x number_nodes """ def __init__(self, F, H, K, nonlinearity = torch.tanh, E = 1, bias = True): # Initialize parent: super().__init__() # Store the values (using the notation in the paper): self.F = F # Input Features self.H = H # Hidden Features self.K = K # Filter taps self.E = E # Number of edge features self.S = None self.bias = bias # Boolean self.sigma = nonlinearity # torch.nn.functional # Create parameters: self.aWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, F)) self.bWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, H)) if self.bias: self.xBias = nn.parameter.Parameter(torch.Tensor(H, 1)) self.zBias = nn.parameter.Parameter(torch.Tensor(H, 1)) else: self.register_parameter('xBias', None) self.register_parameter('zBias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.F * self.K) self.aWeights.data.uniform_(-stdv, stdv) self.bWeights.data.uniform_(-stdv, stdv) if self.bias: self.xBias.data.uniform_(-stdv, stdv) self.zBias.data.uniform_(-stdv, stdv) def forward(self, x, z0): assert self.S is not None # Input # S: (E) x N x N # x: B x T x F x N # z0: B x H x N # Output # z: B x T x H x N # Check dimensions assert len(x.shape) == 4 B = x.shape[0] T = x.shape[1] assert x.shape[2] == self.F N = x.shape[3] assert len(z0.shape) == 3 assert z0.shape[0] == B assert z0.shape[1] == self.H assert z0.shape[2] == N z = GatedGRNN(self.aWeights, self.bWeights, self.S, x, z0, self.sigma, xBias = self.xBias, zBias = self.zBias) zT = torch.index_select(z, 1, torch.tensor(T-1, device = z.device)) # Give out the last one, to be used as starting point if used in # succession return z, zT.unsqueeze(1) def addGSO(self, S): # Every S has 3 dimensions. 
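# Note: unlike HiddenState_DB, this layer uses a single static GSO of shape
# E x N x N, shared across the batch and across all time steps.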
assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S def extra_repr(self): reprString = "in_features=%d, hidden_features=%d, " % ( self.F, self.H) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) +\ "bias=%s, " % (self.bias) +\ "nonlinearity=%s" % (self.sigma) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class TimeGatedHiddenState(nn.Module): # Luana R. Ruiz, rubruiz@seas.upenn.edu, 2021/01/28 """ TimeGatedHiddenState Creates the layer for computing the time-gated hidden state of a GRNN Initialization: TimeGatedHiddenState(signal_features, hidden_features, filter_taps, nonlinearity=torch.tanh, edge_features=1, bias=True) Inputs: signal_features (int): number of signal features hidden_features (int): number of hidden state features filter_taps (int): number of filter taps (both filters have the same number of filter taps) nonlinearity (torch function): nonlinearity applied when computing the hidden state edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after each filter Output: torch.nn.Module for a time-gated hidden state layer Observation: Input-to-Hidden Filter taps have shape hidden_features x edge_features x filter_taps x signal_features Hidden-to-Hidden Filter taps have shape hidden_features x edge_features x filter_taps x hidden_features Add graph shift operator: TimeGatedHiddenState.addGSO(GSO) Before applying the layer, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = TimeGatedHiddenState(x, z0) Inputs: x (torch.tensor): input data; shape: batch_size x time_samples x signal_features x number_nodes z0 (torch.tensor): initial hidden state; shape: batch_size x hidden_features x number_nodes Outputs: y (torch.tensor): hidden state; shape: batch_size x time_samples x hidden_features x number_nodes """ def __init__(self, F, H, K, nonlinearity = torch.tanh, E = 1, bias = True): # Initialize parent: super().__init__() # Store the values (using the notation in the paper): self.F = F # Input Features self.H = H # Hidden Features self.K = K # Filter taps self.E = E # Number of edge features self.S = None self.bias = bias # Boolean self.sigma = nonlinearity # torch.nn.functional # Create parameters of the main GRNN self.aWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, F)) self.bWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, H)) # Create input gate GRNN self.inputGateGRNN = HiddenState(F, H, K, bias = bias) # Create forget gate GRNN self.forgetGateGRNN = HiddenState(F, H, K, bias = bias) if self.bias: self.xBias = nn.parameter.Parameter(torch.Tensor(H, 1)) self.zBias = nn.parameter.Parameter(torch.Tensor(H, 1)) else: self.register_parameter('xBias', None) self.register_parameter('zBias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. 
/ math.sqrt(self.F * self.K) self.aWeights.data.uniform_(-stdv, stdv) self.bWeights.data.uniform_(-stdv, stdv) if self.bias: self.xBias.data.uniform_(-stdv, stdv) self.zBias.data.uniform_(-stdv, stdv) def forward(self, x, z0): assert self.S is not None # Input # S: (E) x N x N # x: B x T x F x N # z0: B x H x N # Output # z: B x T x H x N # Check dimensions assert len(x.shape) == 4 B = x.shape[0] T = x.shape[1] assert x.shape[2] == self.F N = x.shape[3] assert len(z0.shape) == 3 assert z0.shape[0] == B assert z0.shape[1] == self.H assert z0.shape[2] == N # Calculating input gate zHat,_ = self.inputGateGRNN(x, z0) zHat = zHat.reshape((B,T,self.H*N)) qHat = torch.sigmoid(self.inputGateFC(zHat)) qHat = qHat.unsqueeze(2) # Calculating forget gate zCheck,_ = self.forgetGateGRNN(x, z0) zCheck = zCheck.reshape((B,T,self.H*N)) qCheck = torch.sigmoid(self.forgetGateFC(zCheck)) qCheck = qCheck.unsqueeze(2) z = GatedGRNN(self.aWeights, self.bWeights, self.S, x, z0, self.sigma, qHat, qCheck, xBias = self.xBias, zBias = self.zBias) zT = torch.index_select(z, 1, torch.tensor(T-1, device = z.device)) # Give out the last one, to be used as starting point if used in # succession return z, zT.unsqueeze(1) def addGSO(self, S): # Every S has 3 dimensions. assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S # Create fully connected layers mapping hidden states to gates self.inputGateFC = nn.Linear(self.H*self.N, 1, self.bias) self.forgetGateFC = nn.Linear(self.H*self.N, 1, self.bias) # Add GSO to gate GRNNs self.inputGateGRNN.addGSO(S) self.forgetGateGRNN.addGSO(S) def extra_repr(self): reprString = "in_features=%d, hidden_features=%d, " % ( self.F, self.H) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) +\ "bias=%s, " % (self.bias) +\ "nonlinearity=%s" % (self.sigma) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class NodeGatedHiddenState(nn.Module): # Luana R. Ruiz, rubruiz@seas.upenn.edu, 2021/01/28 """ NodeGatedHiddenState Creates the layer for computing the node-gated hidden state of a GRNN Initialization: NodeGatedHiddenState(signal_features, hidden_features, filter_taps, nonlinearity=torch.tanh, edge_features=1, bias=True) Inputs: signal_features (int): number of signal features hidden_features (int): number of hidden state features filter_taps (int): number of filter taps (both filters have the same number of filter taps) nonlinearity (torch function): nonlinearity applied when computing the hidden state edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after each filter Output: torch.nn.Module for a node-gated hidden state layer Observation: Input-to-Hidden Filter taps have shape hidden_features x edge_features x filter_taps x signal_features Hidden-to-Hidden Filter taps have shape hidden_features x edge_features x filter_taps x hidden_features Add graph shift operator: NodeGatedHiddenState.addGSO(GSO) Before applying the layer, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). 
Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = NodeGatedHiddenState(x, z0) Inputs: x (torch.tensor): input data; shape: batch_size x time_samples x signal_features x number_nodes z0 (torch.tensor): initial hidden state; shape: batch_size x hidden_features x number_nodes Outputs: y (torch.tensor): hidden state; shape: batch_size x time_samples x hidden_features x number_nodes """ def __init__(self, F, H, K, nonlinearity = torch.tanh, E = 1, bias = True): # Initialize parent: super().__init__() # Store the values (using the notation in the paper): self.F = F # Input Features self.H = H # Hidden Features self.K = K # Filter taps self.E = E # Number of edge features self.S = None self.bias = bias # Boolean self.sigma = nonlinearity # torch.nn.functional # Create parameters of the main GRNN self.aWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, F)) self.bWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, H)) # Create input gate GRNN self.inputGateGRNN = HiddenState(F, H, K, bias = bias) # Create forget gate GRNN self.forgetGateGRNN = HiddenState(F, H, K, bias = bias) if self.bias: self.xBias = nn.parameter.Parameter(torch.Tensor(H, 1)) self.zBias = nn.parameter.Parameter(torch.Tensor(H, 1)) else: self.register_parameter('xBias', None) self.register_parameter('zBias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.F * self.K) self.aWeights.data.uniform_(-stdv, stdv) self.bWeights.data.uniform_(-stdv, stdv) if self.bias: self.xBias.data.uniform_(-stdv, stdv) self.zBias.data.uniform_(-stdv, stdv) def forward(self, x, z0): assert self.S is not None # Input # S: (E) x N x N # x: B x T x F x N # z0: B x H x N # Output # z: B x T x H x N # Check dimensions assert len(x.shape) == 4 B = x.shape[0] T = x.shape[1] assert x.shape[2] == self.F N = x.shape[3] assert len(z0.shape) == 3 assert z0.shape[0] == B assert z0.shape[1] == self.H assert z0.shape[2] == N # Calculating input gate zHat,_ = self.inputGateGRNN(x, z0) zHat = zHat.reshape((B*T,self.H,N)) qHat = torch.sigmoid(self.inputGateGraphFilter(zHat)) qHat = qHat.reshape((B,T,1,N)) # Calculating forget gate zCheck,_ = self.forgetGateGRNN(x, z0) zCheck = zCheck.reshape((B*T,self.H,N)) qCheck = torch.sigmoid(self.forgetGateGraphFilter(zCheck)) qCheck = qCheck.reshape((B,T,1,N)) z = GatedGRNN(self.aWeights, self.bWeights, self.S, x, z0, self.sigma, qHat, qCheck, xBias = self.xBias, zBias = self.zBias) zT = torch.index_select(z, 1, torch.tensor(T-1, device = z.device)) # Give out the last one, to be used as starting point if used in # succession return z, zT.unsqueeze(1) def addGSO(self, S): # Every S has 3 dimensions. 
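# The node gates are computed by single-output graph filters
# (GraphFilter(H, 1, K)); they are created here, rather than in __init__,
# so that the same GSO can be attached to them right away.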
assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S # Create linear filters mapping hidden states to gates self.inputGateGraphFilter = GraphFilter(self.H, 1, self.K, bias=self.bias) self.forgetGateGraphFilter = GraphFilter(self.H, 1, self.K, bias=self.bias) # Add GSO to gate GRNNs self.inputGateGRNN.addGSO(S) self.forgetGateGRNN.addGSO(S) self.inputGateGraphFilter.addGSO(S) self.forgetGateGraphFilter.addGSO(S) def extra_repr(self): reprString = "in_features=%d, hidden_features=%d, " % ( self.F, self.H) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) +\ "bias=%s, " % (self.bias) +\ "nonlinearity=%s" % (self.sigma) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class EdgeGatedHiddenState(nn.Module): # Luana R. Ruiz, rubruiz@seas.upenn.edu, 2021/01/28 """ EdgeGatedHiddenState Creates the layer for computing the edge-gated hidden state of a GRNN Initialization: EdgeGatedHiddenState(signal_features, hidden_features, filter_taps, nonlinearity=torch.tanh, edge_features=1, bias=True) Inputs: signal_features (int): number of signal features hidden_features (int): number of hidden state features filter_taps (int): number of filter taps (both filters have the same number of filter taps) nonlinearity (torch function): nonlinearity applied when computing the hidden state edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after each filter Output: torch.nn.Module for a edge-gated hidden state layer Observation: Input-to-Hidden Filter taps have shape hidden_features x edge_features x filter_taps x signal_features Hidden-to-Hidden Filter taps have shape hidden_features x edge_features x filter_taps x hidden_features Add graph shift operator: EdgeGatedHiddenState.addGSO(GSO) Before applying the layer, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). 
Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = EdgeGatedHiddenState(x, z0) Inputs: x (torch.tensor): input data; shape: batch_size x time_samples x signal_features x number_nodes z0 (torch.tensor): initial hidden state; shape: batch_size x hidden_features x number_nodes Outputs: y (torch.tensor): hidden state; shape: batch_size x time_samples x hidden_features x number_nodes """ def __init__(self, F, H, K, nonlinearity = torch.tanh, E = 1, bias = True): # Initialize parent: super().__init__() # Store the values (using the notation in the paper): self.F = F # Input Features self.H = H # Hidden Features self.K = K # Filter taps self.E = E # Number of edge features self.S = None self.bias = bias # Boolean self.sigma = nonlinearity # torch.nn.functional # Create parameters of the main GRNN self.aWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, F)) self.bWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, H)) # Create input gate GRNN self.inputGateGRNN = HiddenState(F, H, K, bias = bias) # Create forget gate GRNN self.forgetGateGRNN = HiddenState(F, H, K, bias = bias) if self.bias: self.xBias = nn.parameter.Parameter(torch.Tensor(H, 1)) self.zBias = nn.parameter.Parameter(torch.Tensor(H, 1)) else: self.register_parameter('xBias', None) self.register_parameter('zBias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.F * self.K) self.aWeights.data.uniform_(-stdv, stdv) self.bWeights.data.uniform_(-stdv, stdv) if self.bias: self.xBias.data.uniform_(-stdv, stdv) self.zBias.data.uniform_(-stdv, stdv) def forward(self, x, z0): assert self.S is not None # Input # S: (E) x N x N # x: B x T x F x N # z0: B x H x N # Output # z: B x T x H x N # Check dimensions assert len(x.shape) == 4 B = x.shape[0] T = x.shape[1] assert x.shape[2] == self.F N = x.shape[3] assert len(z0.shape) == 3 assert z0.shape[0] == B assert z0.shape[1] == self.H assert z0.shape[2] == N # Calculating input gate zHat,_ = self.inputGateGRNN(x, z0) zHat = zHat.reshape((B*T,self.H,N)) qHat = learnAttentionGSO(zHat, self.inputGateGAT.mixer, self.inputGateGAT.weight, self.inputGateGAT.S) qHat = qHat.reshape((B,T,1,N,N)) # Calculating forget gate zCheck,_ = self.forgetGateGRNN(x, z0) zCheck = zCheck.reshape((B*T,self.H,N)) qCheck = learnAttentionGSO(zCheck, self.forgetGateGAT.mixer, self.forgetGateGAT.weight, self.forgetGateGAT.S) qCheck = qCheck.reshape((B,T,1,N,N)) z = GatedGRNN(self.aWeights, self.bWeights, self.S, x, z0, self.sigma, qHat, qCheck, xBias = self.xBias, zBias = self.zBias) zT = torch.index_select(z, 1, torch.tensor(T-1, device = z.device)) # Give out the last one, to be used as starting point if used in # succession return z, zT.unsqueeze(1) def addGSO(self, S): # Every S has 3 dimensions. 
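# The edge gates are N x N attention matrices obtained from single-head
# GATs via learnAttentionGSO in the forward pass; the GATs are created
# here, rather than in __init__, so the same GSO can be attached to them.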
assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S # Create GATs self.inputGateGAT = GraphAttentional(self.H, 1, 1) self.forgetGateGAT = GraphAttentional(self.H, 1, 1) # Add GSO to gate GRNNs self.inputGateGRNN.addGSO(S) self.forgetGateGRNN.addGSO(S) self.inputGateGAT.addGSO(S) self.forgetGateGAT.addGSO(S) def extra_repr(self): reprString = "in_features=%d, hidden_features=%d, " % ( self.F, self.H) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) +\ "bias=%s, " % (self.bias) +\ "nonlinearity=%s" % (self.sigma) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString
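# Minimal usage sketch (illustrative only): every filtering layer above
# follows the same protocol -- attach a GSO with addGSO, then call the layer
# on a batch of graph signals. Shapes and values below are hypothetical, and
# the sketch assumes the functional primitives (EVGF, LSIGF) defined earlier
# in this module.
if __name__ == '__main__':
    import torch
    N = 10                              # number of nodes
    S = torch.rand(1, N, N)             # GSO: edge_features x N x N
    S = 0.5 * (S + S.permute(0, 2, 1))  # symmetrize
    evGF = EdgeVariantGF(G=3, F=4, K=2, M=5, N=N)
    evGF.addGSO(S)                      # the GSO must be set before forward
    x = torch.rand(8, 3, N)             # batch_size x in_features x N
    y = evGF(x)                         # batch_size x out_features x N
    print(y.shape)                      # torch.Size([8, 4, 10])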
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/utils/graphTools.py
# 2018/12/03~ # Fernando Gama, fgama@seas.upenn.edu. # Luana Ruiz, rubruiz@seas.upenn.edu. """ graphTools.py Tools for handling graphs Functions: plotGraph: plots a graph from an adjacency matrix printGraph: prints (saves) a graph from an adjacency matrix adjacencyToLaplacian: transform an adjacency matrix into a Laplacian matrix normalizeAdjacency: compute the normalized adjacency normalizeLaplacian: compute the normalized Laplacian computeGFT: Computes the eigenbasis of a GSO matrixPowers: computes the matrix powers computeNonzeroRows: compute nonzero elements across rows computeNeighborhood: compute the neighborhood of a graph computeSourceNodes: compute source nodes for the source localization problem isConnected: determines if a graph is connected sparsifyGraph: sparsifies a given graph matrix createGraph: creates an adjacency matrix permIdentity: identity permutation permDegree: order nodes by degree permSpectralProxies: order nodes by spectral proxies score permEDS: order nodes by EDS score edgeFailSampling: samples the edges of a given graph splineBasis: Returns the B-spline basis (taken from github.com/mdeff) Classes: Graph: class containing a graph """ import numpy as np import scipy.sparse import scipy.spatial as sp from sklearn.cluster import SpectralClustering import os import matplotlib matplotlib.rcParams['text.usetex'] = True matplotlib.rcParams['font.family'] = 'serif' import matplotlib.pyplot as plt zeroTolerance = 1e-9 # Values below this number are considered zero. # If adjacency matrices are not symmetric these functions might not work as # desired: the degree will be the in-degree to each node, and the Laplacian # is not defined for directed graphs. Same caution is advised when using # graphs with self-loops. def plotGraph(adjacencyMatrix, **kwargs): """ plotGraph(A): plots a graph from adjacency matrix A of size N x N Optional keyword arguments: 'positions' (np.array, default: points in a circle of radius 1): size N x 2 of positions for each node 'figSize' (int, default: 5): size of the figure 'lineWidth' (int, default: 1): edge width 'markerSize' (int, default: 15): node size 'markerShape' (string, default: 'o'): node shape 'color' (hex code string, default: '#01256E'): color of the nodes 'nodeLabel' (list, default: None): list of length N where each element corresponds to the label of each node """ # Data # Adjacency matrix W = adjacencyMatrix assert W.shape[0] == W.shape[1] N = W.shape[0] # Positions (optional) if 'positions' in kwargs.keys(): pos = kwargs['positions'] else: angle = np.linspace(0, 2*np.pi*(1-1/N), num = N) radius = 1 pos = np.array([ radius * np.sin(angle), radius * np.cos(angle) ]) # Create figure # Figure size if 'figSize' in kwargs.keys(): figSize = kwargs['figSize'] else: figSize = 5 # Line width if 'lineWidth' in kwargs.keys(): lineWidth = kwargs['lineWidth'] else: lineWidth = 1 # Marker Size if 'markerSize' in kwargs.keys(): markerSize = kwargs['markerSize'] else: markerSize = 15 # Marker shape if 'markerShape' in kwargs.keys(): markerShape = kwargs['markerShape'] else: markerShape = 'o' # Marker color if 'color' in kwargs.keys(): markerColor = kwargs['color'] else: markerColor = '#01256E' # Node labeling if 'nodeLabel' in kwargs.keys(): doText = True nodeLabel = kwargs['nodeLabel'] assert len(nodeLabel) == N else: doText = False # Plot figGraph = plt.figure(figsize = (1*figSize, 1*figSize)) for i in range(N): for j in range(N): if W[i,j] > 0: plt.plot([pos[0,i], pos[0,j]], [pos[1,i], pos[1,j]], linewidth = W[i,j] * lineWidth, color =
'#A8AAAF') for i in range(N): plt.plot(pos[0,i], pos[1,i], color = markerColor, marker = markerShape, markersize = markerSize) if doText: plt.text(pos[0,i], pos[1,i], nodeLabel[i], verticalalignment = 'center', horizontalalignment = 'center', color = '#F2F2F3') return figGraph def printGraph(adjacencyMatrix, **kwargs): """ printGraph(A): Wrapper for plotGraph that directly saves the figure (with no axes or other decorations; more aesthetic, fewer changes) Optional keyword arguments: 'saveDir' (os.path, default: '.'): directory where to save the graph 'legend' (default: None): Text for a legend 'xLabel' (str, default: None): Text for the x axis 'yLabel' (str, default: None): Text for the y axis 'graphName' (str, default: 'graph'): name to save the file 'positions' (np.array, default: points in a circle of radius 1): size N x 2 of positions for each node 'figSize' (int, default: 5): size of the figure 'lineWidth' (int, default: 1): edge width 'markerSize' (int, default: 15): node size 'markerShape' (string, default: 'o'): node shape 'color' (hex code string, default: '#01256E'): color of the nodes 'nodeLabel' (list, default: None): list of length N where each element corresponds to the label of each node """ # Wrapper for plotGraph that directly saves the figure (with no axes, # nor anything else like that; more aesthetic, fewer changes) W = adjacencyMatrix assert W.shape[0] == W.shape[1] # Printing options if 'saveDir' in kwargs.keys(): saveDir = kwargs['saveDir'] else: saveDir = '.' if 'legend' in kwargs.keys(): doLegend = True legendText = kwargs['legend'] else: doLegend = False if 'xLabel' in kwargs.keys(): doXlabel = True xLabelText = kwargs['xLabel'] else: doXlabel = False if 'yLabel' in kwargs.keys(): doYlabel = True yLabelText = kwargs['yLabel'] else: doYlabel = False if 'graphName' in kwargs.keys(): graphName = kwargs['graphName'] else: graphName = 'graph' figGraph = plotGraph(adjacencyMatrix, **kwargs) plt.axis('off') if doXlabel: plt.xlabel(xLabelText) if doYlabel: plt.ylabel(yLabelText) if doLegend: plt.legend(legendText) figGraph.savefig(os.path.join(saveDir, '%s.pdf' % graphName), bbox_inches = 'tight', transparent = True) def adjacencyToLaplacian(W): """ adjacencyToLaplacian: Computes the Laplacian from an Adjacency matrix Input: W (np.array): adjacency matrix Output: L (np.array): Laplacian matrix """ # Check that the matrix is square assert W.shape[0] == W.shape[1] # Compute the degree vector d = np.sum(W, axis = 1) # And build the degree matrix D = np.diag(d) # Return the Laplacian return D - W def normalizeAdjacency(W): """ normalizeAdjacency: Computes the degree-normalized adjacency matrix Input: W (np.array): adjacency matrix Output: A (np.array): degree-normalized adjacency matrix """ # Check that the matrix is square assert W.shape[0] == W.shape[1] # Compute the degree vector d = np.sum(W, axis = 1) # Invert the square root of the degree d = 1/np.sqrt(d) # And build the square root inverse degree matrix D = np.diag(d) # Return the Normalized Adjacency return D @ W @ D def normalizeLaplacian(L): """ normalizeLaplacian: Computes the degree-normalized Laplacian matrix Input: L (np.array): Laplacian matrix Output: normL (np.array): degree-normalized Laplacian matrix """ # Check that the matrix is square assert L.shape[0] == L.shape[1] # Compute the degree vector (diagonal elements of L) d = np.diag(L) # Invert the square root of the degree d = 1/np.sqrt(d) # And build the square root inverse degree matrix D = np.diag(d) # Return the Normalized Laplacian return D @
L @ D def computeGFT(S, order = 'no'): """ computeGFT: Computes the frequency basis (eigenvectors) and frequency coefficients (eigenvalues) of a given GSO Input: S (np.array): graph shift operator matrix order (string): 'no', 'increasing', 'totalVariation' chosen order of frequency coefficients (default: 'no') Output: E (np.array): diagonal matrix with the frequency coefficients (eigenvalues) in the diagonal V (np.array): matrix with frequency basis (eigenvectors) """ # Check the correct order input assert order == 'totalVariation' or order == 'no' or order == 'increasing' # Check the matrix is square assert S.shape[0] == S.shape[1] # Check if it is symmetric symmetric = np.allclose(S, S.T, atol = zeroTolerance) # Then, compute eigenvalues and eigenvectors if symmetric: e, V = np.linalg.eigh(S) else: e, V = np.linalg.eig(S) # Sort the eigenvalues by the desired error: if order == 'totalVariation': eMax = np.max(e) sortIndex = np.argsort(np.abs(e - eMax)) elif order == 'increasing': sortIndex = np.argsort(np.abs(e)) else: sortIndex = np.arange(0, S.shape[0]) e = e[sortIndex] V = V[:, sortIndex] E = np.diag(e) return E, V def matrixPowers(S,K): """ matrixPowers(A, K) Computes the matrix powers A^k for k = 0, ..., K-1 Inputs: A: either a single N x N matrix or a collection E x N x N of E matrices. K: integer, maximum power to be computed (up to K-1) Outputs: AK: either a collection of K matrices K x N x N (if the input was a single matrix) or a collection E x K x N x N (if the input was a collection of E matrices). """ # S can be either a single GSO (N x N) or a collection of GSOs (E x N x N) if len(S.shape) == 2: N = S.shape[0] assert S.shape[1] == N E = 1 S = S.reshape(1, N, N) scalarWeights = True elif len(S.shape) == 3: E = S.shape[0] N = S.shape[1] assert S.shape[2] == N scalarWeights = False # Now, let's build the powers of S: thisSK = np.tile(np.eye(N, N).reshape(1,N,N), [E, 1, 1]) SK = thisSK.reshape(E, 1, N, N) for k in range(1,K): thisSK = thisSK @ S SK = np.concatenate((SK, thisSK.reshape(E, 1, N, N)), axis = 1) # Take out the first dimension if it was a single GSO if scalarWeights: SK = SK.reshape(K, N, N) return SK def computeNonzeroRows(S, Nl = 'all'): """ computeNonzeroRows: Find the position of the nonzero elements of each row of a matrix Input: S (np.array): matrix Nl (int or 'all'): number of rows to compute the nonzero elements; if 'all', then Nl = S.shape[0]. Rows are counted from the top. Output: nonzeroElements (list): list of size Nl where each element is an array of the indices of the nonzero elements of the corresponding row. """ # Find the position of the nonzero elements of each row of the matrix S. # Nl = 'all' means for all rows, otherwise, it will be an int. if Nl == 'all': Nl = S.shape[0] assert Nl <= S.shape[0] # Save neighborhood variable neighborhood = [] # For each of the selected nodes for n in range(Nl): neighborhood += [np.flatnonzero(S[n,:])] return neighborhood def computeNeighborhood(S, K, N = 'all', nb = 'all', outputType = 'list'): """ computeNeighborhood: compute the set of nodes within the K-hop neighborhood of a graph (i.e. all nodes that can be reached within K-hops of each node) computeNeighborhood(W, K, N = 'all', nb = 'all', outputType = 'list') Input: W (np.array): adjacency matrix K (int): K-hop neighborhood to compute the neighbors N (int or 'all'): how many nodes (from top) to compute the neighbors from (default: 'all'). nb (int or 'all'): how many nodes to consider valid when computing the neighborhood (i.e. 
nodes beyond nb are not trimmed out of the neighborhood; note that nodes smaller than nb that can be reached by nodes greater than nb, are included. default: 'all') outputType ('list' or 'matrix'): choose if the output is given in the form of a list of arrays, or a matrix with zero-padding of neighbors with neighborhoods smaller than the maximum neighborhood (default: 'list') Output: neighborhood (np.array or list): contains the indices of the neighboring nodes following the order established by the adjacency matrix. """ # outputType is either a list (a list of np.arrays) or a matrix. assert outputType == 'list' or outputType == 'matrix' # Here, we can assume S is already sparse, in which case it is a list of # sparse matrices, or that S is full, in which case it is a 3-D array. if isinstance(S, list): # If it is a list, it has to be a list of matrices, where the length # of the list has to be the number of edge weights. But we actually need # to sum over all edges to be sure we consider all reachable nodes on # at least one of the edge dimensions newS = 0. for e in range(len(S)): # First check it's a matrix, and a square one assert len(S[e].shape) == 2 assert S[e].shape[0] == S[e].shape[1] # For each edge, convert to sparse (in COO because we care about # coordinates to find the neighborhoods) newS += scipy.sparse.coo_matrix( (np.abs(S[e]) > zeroTolerance).astype(S[e].dtype)) S = (newS > zeroTolerance).astype(newS.dtype) else: # if S is not a list, check that it is either a E x N x N or a N x N # array. assert len(S.shape) == 2 or len(S.shape) == 3 if len(S.shape) == 3: assert S.shape[1] == S.shape[2] # If it has an edge feature dimension, just add over that dimension. # We only need one non-zero value along the vector to have an edge # there. (Obs.: While we normally assume that all weights are positive, # let's just add the abs() values to avoid any cancellations). S = np.sum(np.abs(S), axis = 0) S = scipy.sparse.coo_matrix((S > zeroTolerance).astype(S.dtype)) else: # In this case, if it is a 2-D array, we do not need to add over the # edge dimension, so we just sparsify it assert S.shape[0] == S.shape[1] S = scipy.sparse.coo_matrix((S > zeroTolerance).astype(S.dtype)) # Now, we finally have a sparse, binary matrix, with the connections. # Now check that K and N are correct inputs. # K is an int (target K-hop neighborhood) # N is either 'all' or an int determining how many rows assert K >= 0 # K = 0 is just the identity # Check how many nodes we want to obtain if N == 'all': N = S.shape[0] if nb == 'all': nb = S.shape[0] assert N >= 0 and N <= S.shape[0] # Cannot return more nodes than there are assert nb >= 0 and nb <= S.shape[0] # All nodes are in their own neighborhood, so allNeighbors = [ [n] for n in range(S.shape[0])] # Now, if K = 0, then these are all the neighborhoods we need. # And also keep track only about the nodes we care about neighbors = [ [n] for n in range(N)] # But if K > 0 if K > 0: # Let's start with the one-hop neighborhood of all nodes (we need this) nonzeroS = list(S.nonzero()) # This is a list with two arrays, the first one containing the row # index of the nonzero elements, and the second one containing the # column index of the nonzero elements.
# Now, we want the one-hop neighborhood of all nodes (and all nodes have # a one-hop neighborhood, since the graphs are connected) for n in range(len(nonzeroS[0])): # The list in index 0 is the nodes, the list in index 1 is the # corresponding neighbor allNeighbors[nonzeroS[0][n]].append(nonzeroS[1][n]) # Now that we have the one-hop neighbors, we just need to do a breadth- # first search looking for the one-hop neighborhood of each neighbor # and so on. oneHopNeighbors = allNeighbors.copy() # We have already visited the nodes themselves, since we already # gathered the one-hop neighbors. visitedNodes = [ [n] for n in range(N)] # Keep only the one-hop neighborhood of the ones we're interested in neighbors = [list(set(allNeighbors[n])) for n in range(N)] # For each hop for k in range(1,K): # For each of the nodes we care about for i in range(N): # Store the new neighbors to be included for node i newNeighbors = [] # Take each of the neighbors we already have for j in neighbors[i]: # and if we haven't visited those neighbors yet if j not in visitedNodes[i]: # Just look for our neighbor's one-hop neighbors and # add them to the neighborhood list newNeighbors.extend(oneHopNeighbors[j]) # And don't forget to add the node to the visited ones # (we already have its one-hop neighborhood) visitedNodes[i].append(j) # And now that we have added all the new neighbors, we add them # to the old neighbors neighbors[i].extend(newNeighbors) # And get rid of those that appear more than once neighbors[i] = list(set(neighbors[i])) # Now that all nodes have been collected, get rid of those beyond nb for i in range(N): # Get the neighborhood thisNeighborhood = neighbors[i].copy() # And get rid of the excess nodes neighbors[i] = [j for j in thisNeighborhood if j < nb] if outputType == 'matrix': # List containing all the neighborhood sizes neighborhoodSizes = [len(x) for x in neighbors] # Obtain max number of neighbors maxNeighborhoodSize = max(neighborhoodSizes) # then we have to check each neighborhood and find if we need to add # more nodes (itself) to pad it so we can build a matrix paddedNeighbors = [] for n in range(N): paddedNeighbors += [np.concatenate( (neighbors[n], n * np.ones(maxNeighborhoodSize - neighborhoodSizes[n])) )] # And now that every element in the list paddedNeighbors has the same # length, we can make it a matrix (np.int is deprecated; use int) neighbors = np.array(paddedNeighbors, dtype = int) return neighbors def computeSourceNodes(A, C): """ computeSourceNodes: compute source nodes for the source localization problem Input: A (np.array): adjacency matrix of shape N x N C (int): number of classes Output: sourceNodes (list): contains the indices of the C source nodes Uses the adjacency matrix to compute C communities by means of spectral clustering, and then selects the node with largest degree within each community """ sourceNodes = [] degree = np.sum(A, axis = 0) # degree of each node # Compute communities communityClusters = SpectralClustering(n_clusters = C, affinity = 'precomputed', assign_labels = 'discretize') communityClusters = communityClusters.fit(A) communityLabels = communityClusters.labels_ # For each community for c in range(C): communityNodes = np.nonzero(communityLabels == c)[0] degreeSorted = np.argsort(degree[communityNodes]) sourceNodes = sourceNodes + [communityNodes[degreeSorted[-1]]] return sourceNodes def isConnected(W): """ isConnected: determine if a graph is connected Input: W (np.array): adjacency matrix Output: connected (bool): True if the graph is connected, False otherwise Obs.: If the
graph is directed, we consider it is connected when there is at least one edge that would make it connected (i.e. if we drop the direction of all edges, and just keep them as undirected, then the resulting graph would be connected). """ undirected = np.allclose(W, W.T, atol = zeroTolerance) if not undirected: W = 0.5 * (W + W.T) L = adjacencyToLaplacian(W) E, V = computeGFT(L) e = np.diag(E) # only eigenvalues # Count the eigenvalues that are numerically zero (each zero eigenvalue # of the Laplacian corresponds to one connected component): nComponents = np.sum(e < zeroTolerance) # Number of connected components if nComponents == 1: connected = True else: connected = False return connected def sparsifyGraph(W, sparsificationType, p): """ sparsifyGraph: sparsifies a given graph matrix Input: W (np.array): adjacency matrix sparsificationType ('threshold' or 'NN'): threshold or nearest-neighbor p (float): sparsification parameter (value of the threshold under which edges are deleted, or the number of NN to keep) Output: W (np.array): adjacency matrix of sparsified graph Observation: - If it is an undirected graph, when computing the kNN edges, the resulting graph might be directed. Then, the graph is converted into an undirected one by taking the average of incoming and outgoing edges (this might result in a graph where some nodes have more than kNN neighbors). - If it is a directed graph, remember that element (i,j) of the adjacency matrix corresponds to edge (j,i). This means that each row of the matrix has nonzero elements on all the incoming edges. In the directed case, the number of nearest neighbors is with respect to the incoming edges (i.e. kNN incoming edges are kept). - If the original graph is connected, then thresholding might lead to a disconnected graph. If this is the case, the threshold is halved repeatedly until the resulting graph is connected. To recover the actual threshold used (lower than the one specified) do np.min(W[np.nonzero(W)]). In the case of kNN, if the resulting graph is disconnected, the parameter k is increased by 1 until the resulting graph is connected. """ # Check input arguments N = W.shape[0] assert W.shape[1] == N assert sparsificationType == 'threshold' or sparsificationType == 'NN' connected = isConnected(W) undirected = np.allclose(W, W.T, atol = zeroTolerance) # np.allclose() gives true if matrices W and W.T are the same up to # atol. # Start with thresholding if sparsificationType == 'threshold': Wnew = W.copy() Wnew[np.abs(Wnew) < p] = 0. # If the original graph was connected, we need to be sure this one is # connected as well if connected: # Check if the new graph is connected newGraphIsConnected = isConnected(Wnew) # While it is not connected while not newGraphIsConnected: # We need to reduce the size of p until we get it connected p = p/2. Wnew = W.copy() Wnew[np.abs(Wnew) < p] = 0.
# Check if it is connected now newGraphIsConnected = isConnected(Wnew) # Now, let's move to k nearest neighbors elif sparsificationType == 'NN': # We sort the values of each row (in increasing order) Wsorted = np.sort(W, axis = 1) # Pick the kth largest kthLargest = Wsorted[:, -p] # array of size N # Find the elements that are greater or equal that these values maskOfEdgesToKeep = (W >= kthLargest.reshape([N,1])).astype(W.dtype) # And keep those edges Wnew = W * maskOfEdgesToKeep # If the original graph was connected if connected: # Check if the new graph is connected newGraphIsConnected = isConnected(Wnew) # While it is not connected while not newGraphIsConnected: # Increase the number of k-NN by 1 p = p + 1 # Compute the new kth Largest kthLargest = Wsorted[:, -p] # array of size N # Find the elements that are greater or equal that these values maskOfEdgesToKeep = (W >= kthLargest.reshape([N,1]))\ .astype(W.dtype) # And keep those edges Wnew = W * maskOfEdgesToKeep # Check if it is connected now newGraphIsConnected = isConnected(Wnew) # if it's undirected, this is the moment to reconvert it as undirected if undirected: Wnew = 0.5 * (Wnew + Wnew.T) return Wnew def createGraph(graphType, N, graphOptions): """ createGraph: creates a graph of a specified type Input: graphType (string): 'SBM', 'SmallWorld', 'fuseEdges', and 'adjacency' N (int): Number of nodes graphOptions (dict): Depends on the type selected. Obs.: More types to come. Output: W (np.array): adjacency matrix of shape N x N Optional inputs (by keyword): graphType: 'SBM' 'nCommunities': (int) number of communities 'probIntra': (float) probability of drawing an edge between nodes inside the same community 'probInter': (float) probability of drawing an edge between nodes of different communities Obs.: This always results in a connected graph. graphType: 'SmallWorld' 'probEdge': probability of drawing an edge between nodes 'probRewiring': probability of rewiring an edge Obs.: This always results in a connected graph. graphType: 'fuseEdges' (Given a collection of adjacency matrices of graphs with the same number of nodes, this graph type is a fusion of the edges of the collection of graphs, following different desirable properties) 'adjacencyMatrices' (np.array): collection of matrices in a tensor np.array of dimension nGraphs x N x N 'aggregationType' ('sum' or 'avg'): if 'sum', edges are summed across the collection of matrices, if 'avg' they are averaged 'normalizationType' ('rows', 'cols' or 'no'): if 'rows', the values of the rows (after aggregated) are normalized to sum to one, if 'cols', it is for the columns, if it is 'no' there is no normalization. 
'isolatedNodes' (bool): if True, keep isolated nodes should there be any 'forceUndirected' (bool): if True, make the resulting graph undirected by replacing directed edges by the average of the outgoing and incoming edges between each pair of nodes 'forceConnected' (bool): if True, make the graph connected by taking the largest connected component 'nodeList' (list): this is an empty list that, after calling the function, will contain a list of the nodes that were kept when creating the adjacency matrix out of fusing the given ones with the desired options 'extraComponents' (list, optional): if the resulting fused adjacency matrix is not connected, and then forceConnected = True, then this list will contain two lists, the first one with the adjacency matrices of the smaller connected components, and the second one a corresponding list with the index of the nodes that were kept for each of the smaller connected components (Obs.: If a given single graph is required to be adapted with any of the options in this function, then it can just be expanded to have one dimension along axis = 0 and fed to this function to obtain the corresponding graph with the desired properties) graphType: 'adjacency' 'adjacencyMatrix' (np.array): just return the given adjacency matrix (after checking it has N nodes) """ # Check assert N >= 0 if graphType == 'SBM': assert(len(graphOptions.keys())) == 3 C = graphOptions['nCommunities'] # Number of communities assert int(C) == C # Check that the number of communities is an integer pii = graphOptions['probIntra'] # Intracommunity probability pij = graphOptions['probInter'] # Intercommunity probability assert 0 <= pii <= 1 # Check that they are valid probabilities assert 0 <= pij <= 1 # We create the SBM as follows: we generate random numbers between # 0 and 1 and then we compare them elementwise to a matrix of the # same size of pii and pij to set some of them to one and other to # zero. # Let's start by creating the matrix of pii and pij. # First, we need to know how many numbers on each community. nNodesC = [N//C] * C # Number of nodes per community: floor division c = 0 # counter for community while sum(nNodesC) < N: # If there are still nodes to put in communities # do it one for each (balanced communities) nNodesC[c] = nNodesC[c] + 1 c += 1 # So now, the list nNodesC has how many nodes are on each community. # We proceed to build the probability matrix. # We create a zero matrix probMatrix = np.zeros([N,N]) # And fill ones on the block diagonals following the number of nodes. # For this, we need the cumulative sum of the number of nodes nNodesCIndex = [0] + np.cumsum(nNodesC).tolist() # The zero is added because it is the first index for c in range(C): probMatrix[ nNodesCIndex[c] : nNodesCIndex[c+1] , \ nNodesCIndex[c] : nNodesCIndex[c+1] ] = \ np.ones([nNodesC[c], nNodesC[c]]) # The matrix probMatrix has one in the block diagonal, which should # have probabilities p_ii and 0 in the offdiagonal that should have # probabilities p_ij. So that probMatrix = pii * probMatrix + pij * (1 - probMatrix) # has pii in the intracommunity blocks and pij in the intercommunity # blocks. # Now we're finally ready to generate a connected graph connectedGraph = False while not connectedGraph: # Generate random matrix W = np.random.rand(N,N) W = (W < probMatrix).astype(np.float64) # This matrix will have a 1 if the element ij is less or equal than # p_ij, so that if p_ij = 0.8, then it will be 1 80% of the times # (on average). 
# We need to make it undirected and without self-loops, so keep the # upper triangular part after the main diagonal W = np.triu(W, 1) # And add it to the lower triangular part W = W + W.T # Now let's check that it is connected connectedGraph = isConnected(W) elif graphType == 'SmallWorld': # Function provided by Tuomo Mäki-Marttunen # Connectedness introduced by Dr. S. Segarra. # Adapted to numpy by Fernando Gama. p = graphOptions['probEdge'] # Edge probability q = graphOptions['probRewiring'] # Rewiring probability # Positions on a circle posX = np.cos(2*np.pi*np.arange(0,N)/N).reshape([N,1]) # x axis posY = np.sin(2*np.pi*np.arange(0,N)/N).reshape([N,1]) # y axis pos = np.concatenate((posX, posY), axis = 1) # N x 2 position matrix connectedGraph = False W = np.zeros([N,N], dtype = pos.dtype) # Empty adjacency matrix D = sp.distance.squareform(sp.distance.pdist(pos)) ** 2 # Squared # distance matrix while not connectedGraph: # 1. The generation of locally connected network with given # in-degree: for n in range(N): # Go through all nodes in order nn = np.random.binomial(N, p) # Possible inputs are all but the node itself: pind = np.concatenate((np.arange(0,n), np.arange(n+1, N))) sortedIndices = np.argsort(D[n,pind]) dists = D[n,pind[sortedIndices]] inds_equallyfar = np.nonzero(dists == dists[nn])[0] if len(inds_equallyfar) == 1: # if a unique farthest node to # be chosen as input W[pind[sortedIndices[0:nn]],n] = 1 # choose as inputs all # from closest to the farthest-to-be-chosen else: W[pind[sortedIndices[0:np.min(inds_equallyfar)]],n] = 1 # choose each nearer than farthest-to-be-chosen r = np.random.permutation(len(inds_equallyfar)).astype(int) # choose randomly between the ones that are as far as # the farthest-to-be-chosen W[pind[sortedIndices[np.min(inds_equallyfar)\ +r[0:nn-np.min(inds_equallyfar)+1]]],n] = 1 # 2. Watts-Strogatz perturbation: for n in range(N): A = np.nonzero(W[:,n])[0] # find the in-neighbours of n for j in range(len(A)): if np.random.rand() < q: freeind = 1 - W[:,n] # possible new candidates are # all the ones not yet outputting to n # (excluding n itself) freeind[n] = 0 freeind[A[j]] = 1 B = np.nonzero(freeind)[0] r = np.floor(np.random.rand()*len(B)).astype(int) W[A[j],n] = 0 W[B[r],n] = 1 # symmetrize W W = np.triu(W) W = W + W.T # Check that graph is connected connectedGraph = isConnected(W) elif graphType == 'fuseEdges': # This alternative assumes that there are multiple graphs that have to # be fused into one. # This will be done in two ways: average or sum. # On top, options will include: to symmetrize it or not, to make it # connected or not. # The input data is a tensor E x N x N where E are the multiple edge # features that we want to fuse.
# Argument N is ignored # Data assert 7 <= len(graphOptions.keys()) <= 8 W = graphOptions['adjacencyMatrices'] # Data in format E x N x N assert len(W.shape) == 3 N = W.shape[1] # Number of nodes assert W.shape[1] == W.shape[2] # Name the list with all nodes to keep nodeList = graphOptions['nodeList'] # This should be an empty list # If there is an 8th argument, this is where we are going to save the # extra components which are not the largest if len(graphOptions.keys()) == 8: logExtraComponents = True extraComponents = graphOptions['extraComponents'] # This will be a list with two elements, the first elements will be # the adjacency matrix of the other (smaller) components, whereas # the second elements will be a list of the same size, where each # elements is yet another list of nodes to keep from the original # graph to build such an adjacency matrix (akin to nodeList) else: logExtraComponents = False # Flag to know if we need to log the # extra components or not allNodes = np.arange(N) # What type of node aggregation aggregationType = graphOptions['aggregationType'] assert aggregationType == 'sum' or aggregationType == 'avg' if aggregationType == 'sum': W = np.sum(W, axis = 0) elif aggregationType == 'avg': W = np.mean(W, axis = 0) # Normalization (sum of rows or columns is equal to 1) normalizationType = graphOptions['normalizationType'] if normalizationType == 'rows': rowSum = np.sum(W, axis = 1).reshape([N, 1]) rowSum[np.abs(rowSum) < zeroTolerance] = 1. W = W/np.tile(rowSum, [1, N]) elif normalizationType == 'cols': colSum = np.sum(W, axis = 0).reshape([1, N]) colSum[np.abs(colSum) < zeroTolerance] = 1. W = W/np.tile(colSum, [N, 1]) # Discarding isolated nodes isolatedNodes = graphOptions['isolatedNodes'] # if True, isolated nodes # are allowed, if not, discard them if isolatedNodes == False: # A Node is isolated when it's degree is zero degVector = np.sum(np.abs(W), axis = 0) # Keep nodes whose degree is not zero keepNodes = np.nonzero(degVector > zeroTolerance) # Get the first element of the output tuple, for some reason if # we take keepNodes, _ as the output it says it cannot unpack it. keepNodes = keepNodes[0] if len(keepNodes) < N: W = W[keepNodes][:, keepNodes] # Update the nodes kept allNodes = allNodes[keepNodes] # Check if we need to make it undirected or not forceUndirected = graphOptions['forceUndirected'] # if True, make it # undirected by using the average between nodes (careful, some # edges might cancel) if forceUndirected == True: W = 0.5 * (W + W.T) # Finally, making it a connected graph forceConnected = graphOptions['forceConnected'] # if True, make the # graph connected if forceConnected == True: # Check if the given graph is already connected connectedFlag = isConnected(W) # If it is not connected if not connectedFlag: # Find all connected components nComponents, nodeLabels = \ scipy.sparse.csgraph.connected_components(W) # Now, we have to pick the connected component with the largest # number of nodes, because that's the one to output. # Momentarily store the rest. 
# Let's get the list of nodes we have so far partialNodes = np.arange(W.shape[0]) # Create the lists to store the adjacency matrices and # the official lists of nodes to keep eachAdjacency = [None] * nComponents eachNodeList = [None] * nComponents # And we want to keep the one with largest number of nodes, but # we will do only one for, so we need to be checking which one # is, so we will compare against the maximum number of nodes # registered so far nNodesMax = 0 # To start for l in range(nComponents): # Find the nodes belonging to the lth connected component thisNodesToKeep = partialNodes[nodeLabels == l] # This adjacency matrix eachAdjacency[l] = W[thisNodesToKeep][:, thisNodesToKeep] # The actual list eachNodeList[l] = allNodes[thisNodesToKeep] # Check the number of nodes thisNumberOfNodes = len(thisNodesToKeep) # And see if this is the largest if thisNumberOfNodes > nNodesMax: # Store the new number of maximum nodes nNodesMax = thisNumberOfNodes # Store the element of the list that satisfies it indexLargestComponent = l # Once we have been over all the connected components, just # output the one with largest number of nodes W = eachAdjacency.pop(indexLargestComponent) allNodes = eachNodeList.pop(indexLargestComponent) # Check that it is effectively connected assert isConnected(W) # And, if we have the extra argument, return all the other # connected components if logExtraComponents == True: extraComponents.append(eachAdjacency) extraComponents.append(eachNodeList) # To end, update the node list, so that it is returned through argument nodeList.extend(allNodes.tolist()) elif graphType == 'adjacency': assert 'adjacencyMatrix' in graphOptions.keys() W = graphOptions['adjacencyMatrix'] assert W.shape[0] == W.shape[1] == N return W # Permutation functions def permIdentity(S): """ permIdentity: determines the identity permnutation Input: S (np.array): matrix Output: permS (np.array): matrix permuted (since, there's no permutation, it's the same input matrix) order (list): list of indices to make S become permS. """ assert len(S.shape) == 2 or len(S.shape) == 3 if len(S.shape) == 2: assert S.shape[0] == S.shape[1] S = S.reshape([1, S.shape[0], S.shape[1]]) scalarWeights = True else: assert S.shape[1] == S.shape[2] scalarWeights = False # Number of nodes N = S.shape[1] # Identity order order = np.arange(N) # If the original GSO assumed scalar weights, get rid of the extra dimension if scalarWeights: S = S.reshape([N, N]) return S, order.tolist() def permDegree(S): """ permDegree: determines the permutation by degree (nodes ordered from highest degree to lowest) Input: S (np.array): matrix Output: permS (np.array): matrix permuted order (list): list of indices to permute S to turn into permS. 
""" assert len(S.shape) == 2 or len(S.shape) == 3 if len(S.shape) == 2: assert S.shape[0] == S.shape[1] S = S.reshape([1, S.shape[0], S.shape[1]]) scalarWeights = True else: assert S.shape[1] == S.shape[2] scalarWeights = False # Compute the degree d = np.sum(np.sum(S, axis = 1), axis = 0) # Sort ascending order (from min degree to max degree) order = np.argsort(d) # Reverse sorting order = np.flip(order,0) # And update S S = S[:,order,:][:,:,order] # If the original GSO assumed scalar weights, get rid of the extra dimension if scalarWeights: S = S.reshape([S.shape[1], S.shape[2]]) return S, order.tolist() def permSpectralProxies(S): """ permSpectralProxies: determines the permutation by the spectral proxies score (from highest to lowest) Input: S (np.array): matrix Output: permS (np.array): matrix permuted order (list): list of indices to permute S to turn into permS. """ # Design decisions: k = 8 # Parameter of the spectral proxies method. This is fixed for # consistency with the calls of the other permutation functions. # Design decisions: If we are given a multi-edge GSO, we're just going to # average all the edge dimensions and use that to compute the spectral # proxies. # Check S is of correct shape assert len(S.shape) == 2 or len(S.shape) == 3 # If it is a matrix, just use it if len(S.shape) == 2: assert S.shape[0] == S.shape[1] scalarWeights = True simpleS = S.copy() # If it is a tensor of shape E x N x N, average over dimension E. else: assert S.shape[1] == S.shape[2] scalarWeights = False # Average over dimension E simpleS = np.mean(S, axis = 0) N = simpleS.shape[0] # Number of nodes ST = simpleS.conj().T # Transpose of S, needed for the method Sk = np.linalg.matrix_power(simpleS,k) # S^k STk = np.linalg.matrix_power(ST,k) # (S^T)^k STkSk = STk @ Sk # (S^T)^k * S^k, needed for the method nodes = [] # Where to save the nodes, order according the criteria it = 1 M = N # This opens up the door if we want to use this code for the actual # selection of nodes, instead of just ordering while len(nodes) < M: remainingNodes = [n for n in range(N) if n not in nodes] # Computes the eigenvalue decomposition phi_eig, phi_ast_k = np.linalg.eig( STkSk[remainingNodes][:,remainingNodes]) phi_ast_k = phi_ast_k[:][:,np.argmin(phi_eig.real)] abs_phi_ast_k_2 = np.square(np.absolute(phi_ast_k)) newNodePos = np.argmax(abs_phi_ast_k_2) nodes.append(remainingNodes[newNodePos]) it += 1 if scalarWeights: S = S[nodes,:][:,nodes] else: S = S[:,nodes,:][:,:,nodes] return S, nodes def permEDS(S): """ permEDS: determines the permutation by the experimentally designed sampling score (from highest to lowest) Input: S (np.array): matrix Output: permS (np.array): matrix permuted order (list): list of indices to permute S to turn into permS. """ # Design decisions: If we are given a multi-edge GSO, we're just going to # average all the edge dimensions and use that to compute the spectral # proxies. # Check S is of correct shape assert len(S.shape) == 2 or len(S.shape) == 3 # If it is a matrix, just use it if len(S.shape) == 2: assert S.shape[0] == S.shape[1] scalarWeights = True simpleS = S.copy() # If it is a tensor of shape E x N x N, average over dimension E. 
else: assert S.shape[1] == S.shape[2] scalarWeights = False # Average over dimension E simpleS = np.mean(S, axis = 0) E, V = np.linalg.eig(simpleS) # Eigendecomposition of S kappa = np.max(np.absolute(V), axis=1) kappa2 = np.square(kappa) # The probabilities assigned to each node are # proportional to kappa2, so in the mean, the ones with largest kappa^2 # would be "sampled" more often, and as suche are more important (i.e. # they have a higher score) # Sort ascending order (from min degree to max degree) order = np.argsort(kappa2) # Reverse sorting order = np.flip(order,0) if scalarWeights: S = S[order,:][:,order] else: S = S[:,order,:][:,:,order] return S, order.tolist() def edgeFailSampling(W, p): """ edgeFailSampling: randomly delete the edges of a given graph Input: W (np.array): adjacency matrix p (float): probability of deleting an edge Output: W (np.array): adjacency matrix with some edges randomly deleted Obs.: The resulting graph need not be connected (even if the input graph is) """ assert 0 <= p <= 1 N = W.shape[0] assert W.shape[1] == N undirected = np.allclose(W, W.T, atol = zeroTolerance) maskEdges = np.random.rand(N, N) maskEdges = (maskEdges > p).astype(W.dtype) # Put a 1 with probability 1-p W = maskEdges * W if undirected: W = np.triu(W) W = W + W.T return W class Graph(): """ Graph: class to handle a graph with several of its properties Initialization: graphType (string): 'SBM', 'SmallWorld', 'fuseEdges', and 'adjacency' N (int): number of nodes [optionalArguments]: related to the specific type of graph; see createGraph() for details. Attributes: .N (int): number of nodes .M (int): number of edges .W (np.array): weighted adjacency matrix .D (np.array): degree matrix .A (np.array): unweighted adjacency matrix .L (np.array): Laplacian matrix (if graph is undirected and has no self-loops) .S (np.array): graph shift operator (weighted adjacency matrix by default) .E (np.array): eigenvalue (diag) matrix (graph frequency coefficients) .V (np.array): eigenvector matrix (graph frequency basis) .undirected (bool): True if the graph is undirected .selfLoops (bool): True if the graph has self-loops Methods: .computeGFT(): computes the GFT of the existing stored GSO and stores it internally in self.V and self.E (if this is never called, the corresponding attributes are set to None) .setGSO(S, GFT = 'no'): sets a new GSO Inputs: S (np.array): new GSO matrix (has to have the same number of nodes), updates attribute .S GFT ('no', 'increasing' or 'totalVariation'): order of eigendecomposition; if 'no', no eigendecomposition is made, and the attributes .V and .E are set to None """ # in this class we provide, easily as attributes, the basic notions of # a graph. This serve as a building block for more complex notions as well. def __init__(self, graphType, N, graphOptions): assert N > 0 #\\\ Create the graph (Outputs adjacency matrix): self.W = createGraph(graphType, N, graphOptions) # TODO: Let's start easy: make it just an N x N matrix. We'll see later # the rest of the things just as handling multiple features and stuff. #\\\ Number of nodes: self.N = (self.W).shape[0] #\\\ Bool for graph being undirected: self.undirected = np.allclose(self.W, (self.W).T, atol = zeroTolerance) # np.allclose() gives true if matrices W and W.T are the same up to # atol. 
#\\\ Bool for graph having self-loops: self.selfLoops = True \ if np.sum(np.abs(np.diag(self.W)) > zeroTolerance) > 0 \ else False #\\\ Degree matrix: self.D = np.diag(np.sum(self.W, axis = 1)) #\\\ Number of edges: self.M = int(np.sum(np.triu(self.W)) if self.undirected \ else np.sum(self.W)) #\\\ Unweighted adjacency: self.A = (np.abs(self.W) > 0).astype(self.W.dtype) #\\\ Laplacian matrix: # Only if the graph is undirected and has no self-loops if self.undirected and not self.selfLoops: self.L = adjacencyToLaplacian(self.W) else: self.L = None #\\\ GSO (Graph Shift Operator): # The weighted adjacency matrix by default self.S = self.W #\\\ GFT: Declare variables but do not compute it unless specifically # requested self.E = None # Eigenvalues self.V = None # Eigenvectors def computeGFT(self): # Compute the GFT of the stored GSO if self.S is not None: #\\ GFT: # Compute the eigenvalues (E) and eigenvectors (V) self.E, self.V = computeGFT(self.S, order = 'totalVariation') def setGSO(self, S, GFT = 'no'): # This simply sets a matrix as a new GSO. It has to have the same number # of nodes (otherwise, it's a different graph!) and it can or cannot # compute the GFT, depending on the options for GFT assert S.shape[0] == S.shape[1] == self.N assert GFT == 'no' or GFT == 'increasing' or GFT == 'totalVariation' # Set the new GSO self.S = S if GFT == 'no': self.E = None self.V = None else: self.E, self.V = computeGFT(self.S, order = GFT) def splineBasis(K, x, degree=3): # Function written by M. Defferrard, taken verbatim (except for function # name), from # https://github.com/mdeff/cnn_graph/blob/master/lib/models.py#L662 """ Return the B-spline basis. K: number of control points. x: evaluation points or number of evenly distributed evaluation points. degree: degree of the spline. Cubic spline by default. """ if np.isscalar(x): x = np.linspace(0, 1, x) # Evenly distributed knot vectors. kv1 = x.min() * np.ones(degree) kv2 = np.linspace(x.min(), x.max(), K-degree+1) kv3 = x.max() * np.ones(degree) kv = np.concatenate((kv1, kv2, kv3)) # Cox - DeBoor recursive function to compute one spline over x. def cox_deboor(k, d): # Test for end conditions, the rectangular degree zero spline. if (d == 0): return ((x - kv[k] >= 0) & (x - kv[k + 1] < 0)).astype(int) denom1 = kv[k + d] - kv[k] term1 = 0 if denom1 > 0: term1 = ((x - kv[k]) / denom1) * cox_deboor(k, d - 1) denom2 = kv[k + d + 1] - kv[k + 1] term2 = 0 if denom2 > 0: term2 = ((-(x - kv[k + d + 1]) / denom2) * cox_deboor(k + 1, d - 1)) return term1 + term2 # Compute basis for each point basis = np.column_stack([cox_deboor(k, degree) for k in range(K)]) basis[-1,-1] = 1 return basis def coarsen(A, levels, self_connections=False): # Function written by M. Defferrard, taken (almost) verbatim, from # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L5 """ Coarsen a graph, represented by its adjacency matrix A, at multiple levels. """ graphs, parents = metis(A, levels) perms = compute_perm(parents) for i, A in enumerate(graphs): M, M = A.shape if not self_connections: A = A.tocoo() A.setdiag(0) if i < levels: A = perm_adjacency(A, perms[i]) A = A.tocsr() A.eliminate_zeros() graphs[i] = A # Mnew, Mnew = A.shape # print('Layer {0}: M_{0} = |V| = {1} nodes ({2} added),' # '|E| = {3} edges'.format(i, Mnew, Mnew-M, A.nnz//2)) return graphs, perms[0] if levels > 0 else None def metis(W, levels, rid=None): # Function written by M. 
Defferrard, taken verbatim, from # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L34 """ Coarsen a graph multiple times using the METIS algorithm. INPUT W: symmetric sparse weight (adjacency) matrix levels: the number of coarsened graphs OUTPUT graph[0]: original graph of size N_1 graph[1]: coarser graph of size N_2 < N_1 graph[levels]: coarsest graph of size N_levels < ... < N_2 < N_1 parents[i] is a vector of size N_i with entries ranging from 1 to N_{i+1} which indicate the parents in the coarser graph[i+1] nd_sz{i} is a vector of size N_i that contains the size of the supernode in the graph{i} NOTE if "graph" is a list of length k, then "parents" will be a list of length k-1 """ N, N = W.shape if rid is None: rid = np.random.permutation(range(N)) parents = [] degree = W.sum(axis=0) - W.diagonal() graphs = [] graphs.append(W) #supernode_size = np.ones(N) #nd_sz = [supernode_size] #count = 0 #while N > maxsize: for _ in range(levels): #count += 1 # CHOOSE THE WEIGHTS FOR THE PAIRING # weights = ones(N,1) # metis weights weights = degree # graclus weights # weights = supernode_size # other possibility weights = np.array(weights).squeeze() # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR idx_row, idx_col, val = scipy.sparse.find(W) perm = np.argsort(idx_row) rr = idx_row[perm] cc = idx_col[perm] vv = val[perm] cluster_id = metis_one_level(rr,cc,vv,rid,weights) # rr is ordered parents.append(cluster_id) # TO DO # COMPUTE THE SIZE OF THE SUPERNODES AND THEIR DEGREE #supernode_size = full( sparse(cluster_id, ones(N,1) , # supernode_size ) ) #print(cluster_id) #print(supernode_size) #nd_sz{count+1}=supernode_size; # COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH nrr = cluster_id[rr] ncc = cluster_id[cc] nvv = vv Nnew = cluster_id.max() + 1 # CSR is more appropriate: row,val pairs appear multiple times W = scipy.sparse.csr_matrix((nvv,(nrr,ncc)), shape=(Nnew,Nnew)) W.eliminate_zeros() # Add new graph to the list of all coarsened graphs graphs.append(W) N, N = W.shape # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS) degree = W.sum(axis=0) #degree = W.sum(axis=0) - W.diagonal() # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISITED AT THE NEXT PASS #[~, rid]=sort(ss); # arthur strategy #[~, rid]=sort(supernode_size); # thomas strategy #rid=randperm(N); # metis/graclus strategy ss = np.array(W.sum(axis=0)).squeeze() rid = np.argsort(ss) return graphs, parents # Coarsen a graph given by rr,cc,vv. rr is assumed to be ordered def metis_one_level(rr,cc,vv,rid,weights): # Function written by M. Defferrard, taken verbatim, from # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L119 nnz = rr.shape[0] N = rr[nnz-1] + 1 marked = np.zeros(N, bool) # np.bool is deprecated; use the builtin rowstart = np.zeros(N, np.int32) rowlength = np.zeros(N, np.int32) cluster_id = np.zeros(N, np.int32) oldval = rr[0] count = 0 clustercount = 0 for ii in range(nnz): rowlength[count] = rowlength[count] + 1 if rr[ii] > oldval: oldval = rr[ii] rowstart[count+1] = ii count = count + 1 for ii in range(N): tid = rid[ii] if not marked[tid]: wmax = 0.0 rs = rowstart[tid] marked[tid] = True bestneighbor = -1 for jj in range(rowlength[tid]): nid = cc[rs+jj] if marked[nid]: tval = 0.0 else: tval = vv[rs+jj] * (1.0/weights[tid] + 1.0/weights[nid]) if tval > wmax: wmax = tval bestneighbor = nid cluster_id[tid] = clustercount if bestneighbor > -1: cluster_id[bestneighbor] = clustercount marked[bestneighbor] = True clustercount += 1 return cluster_id def compute_perm(parents): # Function written by M.
Defferrard, taken verbatim, from # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L167 """ Return a list of indices to reorder the adjacency and data matrices so that the union of two neighbors from layer to layer forms a binary tree. """ # Order of last layer is random (chosen by the clustering algorithm). indices = [] if len(parents) > 0: M_last = max(parents[-1]) + 1 indices.append(list(range(M_last))) for parent in parents[::-1]: #print('parent: {}'.format(parent)) # Fake nodes go after real ones. pool_singeltons = len(parent) indices_layer = [] for i in indices[-1]: indices_node = list(np.where(parent == i)[0]) assert 0 <= len(indices_node) <= 2 #print('indices_node: {}'.format(indices_node)) # Add a node to go with a singelton. if len(indices_node) == 1: indices_node.append(pool_singeltons) pool_singeltons += 1 #print('new singelton: {}'.format(indices_node)) # Add two nodes as children of a singelton in the parent. elif len(indices_node) == 0: indices_node.append(pool_singeltons+0) indices_node.append(pool_singeltons+1) pool_singeltons += 2 #print('singelton childrens: {}'.format(indices_node)) indices_layer.extend(indices_node) indices.append(indices_layer) # Sanity checks. for i,indices_layer in enumerate(indices): M = M_last*2**i # Reduction by 2 at each layer (binary tree). assert len(indices[0] == M) # The new ordering does not omit an indice. assert sorted(indices_layer) == list(range(M)) return indices[::-1] def perm_adjacency(A, indices): # Function written by M. Defferrard, taken verbatim, from # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L242 """ Permute adjacency matrix, i.e. exchange node ids, so that binary unions form the clustering tree. """ if indices is None: return A M, M = A.shape Mnew = len(indices) assert Mnew >= M A = A.tocoo() # Add Mnew - M isolated vertices. if Mnew > M: rows = scipy.sparse.coo_matrix((Mnew-M, M), dtype=np.float32) cols = scipy.sparse.coo_matrix((Mnew, Mnew-M), dtype=np.float32) A = scipy.sparse.vstack([A, rows]) A = scipy.sparse.hstack([A, cols]) # Permute the rows and the columns. perm = np.argsort(indices) A.row = np.array(perm)[A.row] A.col = np.array(perm)[A.col] # assert np.abs(A - A.T).mean() < 1e-9 assert type(A) is scipy.sparse.coo.coo_matrix return A def permCoarsening(x, indices): # Original function written by M. Defferrard, found in # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L219 # Function name has been changed, and it has been further adapted to handle # multiple features as # number_data_points x number_features x number_nodes # instead of the original # number_data_points x number_nodes """ Permute data matrix, i.e. exchange node ids, so that binary unions form the clustering tree. """ if indices is None: return x B, F, N = x.shape Nnew = len(indices) assert Nnew >= N xnew = np.empty((B, F, Nnew)) for i,j in enumerate(indices): # Existing vertex, i.e. real data. if j < N: xnew[:,:,i] = x[:,:,j] # Fake vertex because of singeltons. # They will stay 0 so that max pooling chooses the singelton. # Or -infty ? else: xnew[:,:,i] = np.zeros([B, F]) return xnew
63,587
38.373375
90
py
Tangent-Bundle-Neural-Networks
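A minimal usage sketch for the graphTools.py module above. The import path and every parameter value here are illustrative assumptions rather than part of the repository; the sketch only shows how createGraph, adjacencyToLaplacian, computeGFT and computeNeighborhood fit together.

# Hedged example: import path and parameters are assumptions for illustration.
import numpy as np
import alegnnss.utils.graphTools as graphTools

np.random.seed(42)
graphOptions = {'nCommunities': 2, 'probIntra': 0.8, 'probInter': 0.1}
W = graphTools.createGraph('SBM', 20, graphOptions)  # connected 20-node SBM
L = graphTools.adjacencyToLaplacian(W)               # combinatorial Laplacian
E, V = graphTools.computeGFT(L, order='increasing')  # frequency coeffs/basis
twoHop = graphTools.computeNeighborhood(W, 2)        # 2-hop neighborhood lists
print(W.shape, float(np.diag(E)[0]), len(twoHop[0]))  # smallest eigenvalue ~ 0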
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/utils/visualTools.py
# 2019/01/21~2018/07/12 # This function is taken almost verbatim from https://github.com/amaiasalvador # and all credit should go to Amaia Salvador. import os import glob import torchvision.utils as vutils from operator import itemgetter from tensorboardX import SummaryWriter class Visualizer(): def __init__(self, checkpoints_dir, name): self.win_size = 256 self.name = name self.saved = False self.checkpoints_dir = checkpoints_dir self.ncols = 4 # remove existing for filename in glob.glob(self.checkpoints_dir+"/events*"): os.remove(filename) self.writer = SummaryWriter(checkpoints_dir) def reset(self): self.saved = False # images: (b, c, 0, 1) array of images def image_summary(self, mode, epoch, images): images = vutils.make_grid(images, normalize=True, scale_each=True) self.writer.add_image('{}/Image'.format(mode), images, epoch) # figure (for matplotlib figures) def figure_summary(self, mode, epoch, fig): self.writer.add_figure('{}/Figure'.format(mode), fig, epoch) # text: type: ingredients/recipe def text_summary(self, mode, epoch, type, text, vocabulary, gt=True, max_length=20): for i, el in enumerate(text): # text_list if not gt: # we are printing a sample idx = el.nonzero().squeeze() + 1 else: idx = el # we are printing the ground truth words_list = itemgetter(*idx)(vocabulary) if len(words_list) <= max_length: self.writer.add_text('{}/{}_{}_{}'.format(mode, type, i, 'gt' if gt else 'prediction'), ', '.join(filter(lambda x: x != '<pad>', words_list)), epoch) else: self.writer.add_text('{}/{}_{}_{}'.format(mode, type, i, 'gt' if gt else 'prediction'), 'Number of sampled ingredients is too big: {}'.format(len(words_list)), epoch) # losses: dictionary of error labels and values def scalar_summary(self, mode, epoch, **args): for k, v in args.items(): self.writer.add_scalar('{}/{}'.format(mode, k), v, epoch) self.writer.export_scalars_to_json("{}/tensorboard_all_scalars.json".format(self.checkpoints_dir)) def histo_summary(self, model, step): """Log a histogram of the tensor of values.""" for name, param in model.named_parameters(): self.writer.add_histogram(name, param, step) def close(self): self.writer.close()
2,521
37.212121
182
py
Tangent-Bundle-Neural-Networks
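A short, hedged sketch of how the Visualizer above might be driven during training; the checkpoint directory and the logged metrics are made up for illustration, and tensorboardX plus torchvision must be installed.

# Hedged example: directory name and metric values are illustrative assumptions.
import torch
from alegnnss.utils.visualTools import Visualizer  # assumed import path

vis = Visualizer('./checkpoints', name='demoRun')
for epoch in range(3):
    # Keyword arguments become TensorBoard scalar tags under 'train/'
    vis.scalar_summary('train', epoch, loss=1.0 / (epoch + 1),
                       accuracy=0.5 + 0.1 * epoch)
# Log a (fake) batch of images as a single normalized grid
vis.image_summary('train', 0, torch.rand(8, 3, 32, 32))
vis.close()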
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/utils/miscTools.py
# 2018/10/15~ # Fernando Gama, fgama@seas.upenn.edu. # Luana Ruiz, rubruiz@seas.upenn.edu. """ miscTools Miscellaneous Tools module num2filename: change a numerical value into a string usable as a filename saveSeed: save the random state of generators loadSeed: load the saved random states of generators writeVarValues: write the specified values in the specified txt file """ import os import pickle import numpy as np import torch def num2filename(x,d): """ Takes a number and returns a string with the value of the number, but in a format that is writable into a filename. s = num2filename(x,d) Gets rid of decimal points which are usually inconvenient to have in a filename. If the number x is an integer, then s = str(int(x)). If the number x is a decimal number, then it replaces the '.' by the character specified by d. Setting d = '' erases the decimal point, setting d = '.' simply returns a string with the exact same number. Example: >> num2filename(2,'d') >> '2' >> num2filename(3.1415,'d') >> '3d1415' >> num2filename(3.1415,'') >> '31415' >> num2filename(3.1415,'.') >> '3.1415' """ if x == int(x): return str(int(x)) else: return str(x).replace('.',d) def saveSeed(randomStates, saveDir): """ Takes a list of dictionaries of random generator states of different modules and saves them in a .pkl format. Inputs: randomStates (list): The length of this list is equal to the number of modules whose states are to be saved (torch, numpy, etc.). Each element in this list is a dictionary. The dictionary has three keys: 'module' with the name of the module in string format ('numpy' or 'torch', for example), 'state' with the saved generator state and, where applicable, 'seed' with the specific seed for the generator (note that torch has both state and seed, but numpy only has state) saveDir (path): where to save the seed, it will be saved under the filename 'randomSeedUsed.pkl' """ pathToSeed = os.path.join(saveDir, 'randomSeedUsed.pkl') with open(pathToSeed, 'wb') as seedFile: pickle.dump({'randomStates': randomStates}, seedFile) def loadSeed(loadDir): """ Loads the states and seed saved in a specified path Inputs: loadDir (path): where to look for the seed to load; it is expected that the appropriate file within loadDir is named 'randomSeedUsed.pkl' Obs.: The file 'randomSeedUsed.pkl' should contain a list structured as follows. The length of this list is equal to the number of modules whose states were saved (torch, numpy, etc.). Each element in this list is a dictionary. The dictionary has three keys: 'module' with the name of the module in string format ('numpy' or 'torch', for example), 'state' with the saved generator state and, where applicable, 'seed' with the specific seed for the generator (note that torch has both state and seed, but numpy only has state) """ pathToSeed = os.path.join(loadDir, 'randomSeedUsed.pkl') with open(pathToSeed, 'rb') as seedFile: randomStates = pickle.load(seedFile) randomStates = randomStates['randomStates'] for module in randomStates: thisModule = module['module'] if thisModule == 'numpy': # Restore the global numpy RNG state (setting the state of a # freshly created RandomState object would have no effect) np.random.set_state(module['state']) elif thisModule == 'torch': torch.set_rng_state(module['state']) torch.manual_seed(module['seed']) def writeVarValues(fileToWrite, varValues): """ Write the value of several string variables specified by a dictionary into the designated .txt file. Input: fileToWrite (os.path): text file to save the specified variables varValues (dictionary): values to save in the text file. They are saved in the format "key = value".
""" with open(fileToWrite, 'a+') as file: for key in varValues.keys(): file.write('%s = %s\n' % (key, varValues[key])) file.write('\n')
4,291
37.321429
80
py
Tangent-Bundle-Neural-Networks
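A brief sketch of the miscTools helpers above; the file names and values are hypothetical. num2filename is handy when hyperparameter values go into result-folder names, and saveSeed pairs with loadSeed to make a run reproducible.

# Hedged example: paths and values are illustrative assumptions.
import numpy as np
import torch
from alegnnss.utils.miscTools import num2filename, saveSeed, writeVarValues  # assumed path

print(num2filename(0.001, 'p'))  # -> '0p001', safe to embed in a folder name
writeVarValues('hyperparams.txt', {'learningRate': 0.001, 'nEpochs': 40})
randomStates = [
    {'module': 'numpy', 'state': np.random.get_state()},
    {'module': 'torch', 'state': torch.get_rng_state(), 'seed': 42},
]
saveSeed(randomStates, '.')  # writes ./randomSeedUsed.pkl for loadSeed('.')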
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/utils/__init__.py
0
0
0
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Repo/Tangent_Bundle_NN/alegnnss/utils/dataTools.py
# 2021/03/04~ # Fernando Gama, fgama@seas.upenn.edu # Luana Ruiz, rubruiz@seas.upenn.edu # Kate Tolstaya, eig@seas.upenn.edu """ dataTools.py Data management module Functions: normalizeData: normalize data along a specified axis changeDataType: change data type of data Classes (datasets): FacebookEgo (class): loads the Facebook adjacency matrix of EgoNets SourceLocalization (class): creates the datasets for a source localization problem Authorship (class): loads and splits the dataset for the authorship attribution problem MovieLens (class): Loads and handles handles the MovieLens-100k dataset Flocking (class): creates trajectories for the problem of flocking TwentyNews (class): handles the 20NEWS dataset Epidemics (class): loads the edge list of the friendship network of the high school in Marseille and generates the epidemic spread data based on the SIR model """ import os import pickle import hdf5storage # This is required to import old Matlab(R) files. import urllib.request # To download from the internet import zipfile # To handle zip files import gzip # To handle gz files import shutil # Command line utilities import matplotlib import csv matplotlib.rcParams['text.usetex'] = True matplotlib.rcParams['font.family'] = 'serif' import matplotlib.pyplot as plt from matplotlib.animation import FFMpegWriter import numpy as np import torch import alegnnss.utils.graphTools as graph zeroTolerance = 1e-9 # Values below this number are considered zero. def normalizeData(x, ax): """ normalizeData(x, ax): normalize data x (subtract mean and divide by standard deviation) along the specified axis ax """ thisShape = x.shape # get the shape assert ax < len(thisShape) # check that the axis that we want to normalize # is there dataType = type(x) # get data type so that we don't have to convert if 'numpy' in repr(dataType): # Compute the statistics xMean = np.mean(x, axis = ax) xDev = np.std(x, axis = ax) # Add back the dimension we just took out xMean = np.expand_dims(xMean, ax) xDev = np.expand_dims(xDev, ax) elif 'torch' in repr(dataType): # Compute the statistics xMean = torch.mean(x, dim = ax) xDev = torch.std(x, dim = ax) # Add back the dimension we just took out xMean = xMean.unsqueeze(ax) xDev = xDev.unsqueeze(ax) # Subtract mean and divide by standard deviation x = (x - xMean) / xDev return x def changeDataType(x, dataType): """ changeDataType(x, dataType): change the dataType of variable x into dataType """ # So this is the thing: To change data type it depends on both, what dtype # the variable already is, and what dtype we want to make it. # Torch changes type by .type(), but numpy by .astype() # If we have already a torch defined, and we apply a torch.tensor() to it, # then there will be warnings because of gradient accounting. # All of these facts make changing types considerably cumbersome. So we # create a function that just changes type and handles all this issues # inside. # If we can't recognize the type, we just make everything numpy. # Check if the variable has an argument called 'dtype' so that we can now # what type of data type the variable is if 'dtype' in dir(x): varType = x.dtype # So, let's start assuming we want to convert to numpy if 'numpy' in repr(dataType): # Then, the variable con be torch, in which case we move it to cpu, to # numpy, and convert it to the right type. 
if 'torch' in repr(varType): x = x.cpu().numpy().astype(dataType) # Or it could be numpy, in which case we just use .astype elif 'numpy' in repr(type(x)): x = x.astype(dataType) # Now, we want to convert to torch elif 'torch' in repr(dataType): # If the variable is torch in itself if 'torch' in repr(varType): x = x.type(dataType) # But, if it's numpy elif 'numpy' in repr(type(x)): x = torch.tensor(x, dtype = dataType) # This only converts between numpy and torch. Any other thing is ignored return x def invertTensorEW(x): # Elementwise inversion of a tensor where the 0 elements are kept as zero. # Warning: Creates a copy of the tensor xInv = x.copy() # Copy the matrix to invert # Replace zeros for ones. xInv[x < zeroTolerance] = 1. # Replace zeros for ones xInv = 1./xInv # Now we can invert safely xInv[x < zeroTolerance] = 0. # Put back the zeros return xInv class _data: # Internal supraclass from which all data sets will inherit. # There are certain methods that all Data classes must have: # getSamples(), expandDims(), to() and astype(). # To avoid coding this methods over and over again, we create a class from # which the data can inherit this basic methods. # All the signals are always assumed to be graph signals that are written # nDataPoints (x nFeatures) x nNodes # If we have one feature, we have the expandDims() that adds a x1 so that # it can be readily processed by architectures/functions that always assume # a 3-dimensional signal. def __init__(self): # Minimal set of attributes that all data classes should have self.dataType = None self.device = None self.nTrain = None self.nValid = None self.nTest = None self.samples = {} self.samples['train'] = {} self.samples['train']['signals'] = None self.samples['train']['targets'] = None self.samples['valid'] = {} self.samples['valid']['signals'] = None self.samples['valid']['targets'] = None self.samples['test'] = {} self.samples['test']['signals'] = None self.samples['test']['targets'] = None def getSamples(self, samplesType, *args): # samplesType: train, valid, test # args: 0 args, give back all # args: 1 arg: if int, give that number of samples, chosen at random # args: 1 arg: if list, give those samples precisely. # Check that the type is one of the possible ones assert samplesType == 'train' or samplesType == 'valid' \ or samplesType == 'test' # Check that the number of extra arguments fits assert len(args) <= 1 # If there are no arguments, just return all the desired samples x = self.samples[samplesType]['signals'] y = self.samples[samplesType]['targets'] # If there's an argument, we have to check whether it is an int or a # list if len(args) == 1: # If it is an int, just return that number of randomly chosen # samples. if type(args[0]) == int: nSamples = x.shape[0] # total number of samples # We can't return more samples than there are available assert args[0] <= nSamples # Randomly choose args[0] indices selectedIndices = np.random.choice(nSamples, size = args[0], replace = False) # Select the corresponding samples xSelected = x[selectedIndices] y = y[selectedIndices] else: # The fact that we put else here instead of elif type()==list # allows for np.array to be used as indices as well. In general, # any variable with the ability to index. xSelected = x[args[0]] # And assign the labels y = y[args[0]] # If we only selected a single element, then the nDataPoints dim # has been left out. 
So if we have fewer dimensions,
            # we have to put it back
            if len(xSelected.shape) < len(x.shape):
                if 'torch' in repr(self.dataType):
                    x = xSelected.unsqueeze(0)
                else:
                    x = np.expand_dims(xSelected, axis = 0)
            else:
                x = xSelected

        return x, y

    def expandDims(self):

        # For each data set partition
        for key in self.samples.keys():
            # If there's something in them
            if self.samples[key]['signals'] is not None:
                # And if it has only two dimensions
                # (shape: nDataPoints x nNodes)
                if len(self.samples[key]['signals'].shape) == 2:
                    # Then add a third dimension in between so that it ends
                    # up with shape
                    #   nDataPoints x 1 x nNodes
                    # and it respects the 3-dimensional format that is taken
                    # by many of the processing functions
                    if 'torch' in repr(self.dataType):
                        self.samples[key]['signals'] = \
                                      self.samples[key]['signals'].unsqueeze(1)
                    else:
                        self.samples[key]['signals'] = np.expand_dims(
                                                  self.samples[key]['signals'],
                                                  axis = 1)
                elif len(self.samples[key]['signals'].shape) == 3:
                    if 'torch' in repr(self.dataType):
                        self.samples[key]['signals'] = \
                                      self.samples[key]['signals'].unsqueeze(2)
                    else:
                        self.samples[key]['signals'] = np.expand_dims(
                                                  self.samples[key]['signals'],
                                                  axis = 2)

    def astype(self, dataType):
        # This changes the type for the minimal attributes (samples). This
        # method should still be overridden within the data classes, if more
        # attributes are used.

        # The labels could be integers as created from the dataset, so if they
        # are, we need to be sure they are integers also after conversion.
        # To do this we need to match the desired dataType to its int
        # counterpart. Typical examples are:
        #   numpy.float64 -> numpy.int64
        #   numpy.float32 -> numpy.int32
        #   torch.float64 -> torch.int64
        #   torch.float32 -> torch.int32

        targetType = str(self.samples['train']['targets'].dtype)
        if 'int' in targetType:
            if 'numpy' in repr(dataType):
                if '64' in targetType:
                    targetType = np.int64
                elif '32' in targetType:
                    targetType = np.int32
            elif 'torch' in repr(dataType):
                if '64' in targetType:
                    targetType = torch.int64
                elif '32' in targetType:
                    targetType = torch.int32
        else: # If there is no int, just stick with the given dataType
            targetType = dataType

        # Now that we have selected the dataType, and the corresponding
        # labelType, we can proceed to convert the data into the corresponding
        # type
        for key in self.samples.keys():
            self.samples[key]['signals'] = changeDataType(
                                                  self.samples[key]['signals'],
                                                  dataType)
            self.samples[key]['targets'] = changeDataType(
                                                  self.samples[key]['targets'],
                                                  targetType)

        # Update attribute
        if dataType is not self.dataType:
            self.dataType = dataType

    def to(self, device):
        # This changes the device for the minimal attributes (samples). This
        # method should still be overridden within the data classes, if more
        # attributes are used.

        # This can only be done if they are torch tensors
        if 'torch' in repr(self.dataType):
            for key in self.samples.keys():
                for secondKey in self.samples[key].keys():
                    self.samples[key][secondKey] \
                                     = self.samples[key][secondKey].to(device)

            # If the device changed, save it.
            if device is not self.device:
                self.device = device

class _dataForSemisupervised(_data):
    # Internal supraclass from which data classes inherit when they are used
    # (also) for semisupervised regression. This renders the .evaluate()
    # method the same in all cases (MSE) so justifies the use of another
    # internal class.
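    # The inherited sampling interface can be exercised as follows (a sketch
    # with a hypothetical dataset object 'data'):
    #   >>> xAll, yAll = data.getSamples('train')            # all pairs
    #   >>> xRnd, yRnd = data.getSamples('valid', 20)        # 20 at random
    #   >>> xIdx, yIdx = data.getSamples('test', [0, 3, 7])  # given indices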
    def __init__(self):

        super().__init__()
        self.test_indices = None
        self.test_indices_torch = None
        self.evaluate_only_test = None
        self.nPoints = None

    def evaluate(self, yHat, y, tol = 1e-9):
        if yHat.shape != y.shape:
            y = y.flatten()
        if not self.evaluate_only_test:
            N = self.nPoints # alternatively, np.prod(y.shape)
            if 'torch' in repr(self.dataType):
                # And compute the error
                totalErrors = torch.sum((y - yHat)**2)/N
                errorRate = totalErrors.type(self.dataType)
            else:
                yHat = np.array(yHat)
                y = np.array(y)
                # And compute the error
                totalErrors = np.sum((yHat - y)**2)/N
                errorRate = totalErrors.astype(self.dataType)
            return errorRate
        else:
            if 'torch' in repr(self.dataType):
                # Keep only the entries at the test indices, and compute the
                # error there
                gat1 = torch.gather(yHat, 1, self.test_indices_torch)
                gat2 = torch.gather(y, 1, self.test_indices_torch)
                N = max(gat2.shape)
                totalErrors = torch.sum(torch.abs(gat1 - gat2))/N
                errorRate = totalErrors.type(self.dataType)
            else:
                yHat = np.array(yHat)
                y = np.array(y)
                # np.take_along_axis is the numpy counterpart of torch.gather
                gat1 = np.take_along_axis(yHat, self.test_indices, axis = 1)
                gat2 = np.take_along_axis(y, self.test_indices, axis = 1)
                N = max(gat2.shape)
                # And compute the error
                totalErrors = np.sum(np.abs(gat1 - gat2))/N
                errorRate = totalErrors.astype(self.dataType)
            return errorRate

class _dataForClassification(_data):
    # Internal supraclass from which data classes inherit when they are used
    # for classification. This renders the .evaluate() method the same in all
    # cases (how many examples are incorrectly labeled) so justifies the use
    # of another internal class.

    def __init__(self):

        super().__init__()

    def evaluate(self, yHat, y, tol = 1e-9):
        """
        Return the error rate (ratio of incorrectly labeled examples)
        """
        N = len(y)
        if 'torch' in repr(self.dataType):
            # We compute the target label (hardmax)
            yHat = torch.argmax(yHat, dim = 1)
            # And compute the error
            totalErrors = torch.sum(torch.abs(yHat - y) > tol)
            errorRate = totalErrors.type(self.dataType)/N
        else:
            yHat = np.array(yHat)
            y = np.array(y)
            # We compute the target label (hardmax)
            yHat = np.argmax(yHat, axis = 1)
            # And compute the error
            totalErrors = np.sum(np.abs(yHat - y) > tol)
            errorRate = totalErrors.astype(self.dataType)/N
        # And return the error rate
        return errorRate

class FacebookEgo:
    """
    FacebookEgo: Loads the adjacency matrix of the Facebook Egonets available
        in https://snap.stanford.edu/data/ego-Facebook.html by
        J. McAuley and J. Leskovec. Learning to Discover Social Circles in Ego
        Networks. NIPS, 2012.

    Initialization:

    Input:
        dataDir (string): path for the directory where to look for the data
            (if the data is not found, it will be downloaded to this
            directory)
        use234 (bool): if True, load a smaller subnetwork of 234 users with
            two communities (one big, and one small)

    Methods:

    .loadData(filename, use234): load the data in self.dataDir/filename, if it
        does not exist, then download it and save it as filename in
        self.dataDir. If use234 is True, load the 234-user subnetwork as well.

    adjacencyMatrix = .getAdjacencyMatrix([use234]): return the nNodes x
        nNodes np.array with the adjacency matrix. If use234 is True, then
        return the smaller nNodes = 234 user subnetwork (default:
        use234 = False).
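
    Example (illustrative only; the directory name here is hypothetical):
        >>> fbData = FacebookEgo('datasets/facebookEgo', use234 = True)
        >>> A = fbData.getAdjacencyMatrix(use234 = True)  # 234 x 234 np.array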
""" def __init__(self, dataDir, use234 = False): # Dataset directory self.dataDir = dataDir # Empty attributes self.adjacencyMatrix = None self.adjacencyMatrix234 = None # Load data self.loadData('facebookEgo.pkl', use234) def loadData(self, filename, use234): # Check if the dataDir exists, and if not, create it if not os.path.exists(self.dataDir): os.makedirs(self.dataDir) # Create the filename to save/load datasetFilename = os.path.join(self.dataDir, filename) if use234: datasetFilename234 = os.path.join(self.dataDir,'facebookEgo234.pkl') if os.path.isfile(datasetFilename234): with open(datasetFilename234, 'rb') as datasetFile234: datasetDict = pickle.load(datasetFile234) self.adjacencyMatrix234 = datasetDict['adjacencyMatrix'] # Check if the file does exist, load it if os.path.isfile(datasetFilename): # If it exists, load it with open(datasetFilename, 'rb') as datasetFile: datasetDict = pickle.load(datasetFile) # And save the corresponding variable self.adjacencyMatrix = datasetDict['adjacencyMatrix'] else: # If it doesn't exist, load it # There could be three options here: that we have the raw data # already there, that we have the zip file and need to unzip it, # or that we do not have nothing and we need to download it. existsRawData = \ os.path.isfile(os.path.join(self.dataDir, 'facebook_combined.txt')) # And the zip file existsZipFile = os.path.isfile(os.path.join( self.dataDir,'facebook_combined.txt.gz')) if not existsRawData and not existsZipFile: # We have to download it fbURL='https://snap.stanford.edu/data/facebook_combined.txt.gz' urllib.request.urlretrieve(fbURL, filename = os.path.join( self.dataDir,'facebook_combined.txt.gz')) existsZipFile = True if not existsRawData and existsZipFile: # Unzip it zipFile = os.path.join(self.dataDir, 'facebook_combined.txt.gz') txtFile = os.path.join(self.dataDir, 'facebook_combined.txt') with gzip.open(zipFile, 'rb') as f_in: with open(txtFile, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) # Now that we have the data, we can get their filenames rawDataFilename = os.path.join(self.dataDir,'facebook_combined.txt') assert os.path.isfile(rawDataFilename) # And we can load it and store it. adjacencyMatrix = np.empty([0, 0]) # Start with an empty matrix and # then we slowly add the number of nodes, which we do not assume # to be known beforehand. # Let's start with the data. # Open it. with open(rawDataFilename, 'r') as rawData: # The file consists of a succession of lines, each line # corresponds to an edge for dataLine in rawData: # For each line, we split it in the different fields dataLineSplit = dataLine.rstrip('\n').split(' ') # Keep the ones we care about here node_i = int(dataLineSplit[0]) node_j = int(dataLineSplit[1]) node_max = max(node_i, node_j) # Get the largest node # Now we have to add this information to the adjacency # matrix. # We need to check whether we need to add more elements if node_max+1 > max(adjacencyMatrix.shape): colDiff = node_max+1 - adjacencyMatrix.shape[1] zeroPadCols = np.zeros([adjacencyMatrix.shape[0],\ colDiff]) adjacencyMatrix = np.concatenate((adjacencyMatrix, zeroPadCols), axis = 1) rowDiff = node_max+1 - adjacencyMatrix.shape[0] zeroPadRows = np.zeros([rowDiff,\ adjacencyMatrix.shape[1]]) adjacencyMatrix = np.concatenate((adjacencyMatrix, zeroPadRows), axis = 0) # Now that we have assured appropriate dimensions adjacencyMatrix[node_i, node_j] = 1. # And because it is undirected by construction adjacencyMatrix[node_j, node_i] = 1. 
# Now that it is loaded, let's store it self.adjacencyMatrix = adjacencyMatrix # And save it in a pickle file for posterity with open(datasetFilename, 'wb') as datasetFile: pickle.dump( {'adjacencyMatrix': self.adjacencyMatrix}, datasetFile ) def getAdjacencyMatrix(self, use234 = False): return self.adjacencyMatrix234 if use234 else self.adjacencyMatrix class SourceLocalization(_dataForClassification): """ SourceLocalization: Creates the dataset for a source localization problem Initialization: Input: G (class): Graph on which to diffuse the process, needs an attribute .N with the number of nodes (int) and attribute .W with the adjacency matrix (np.array) nTrain (int): number of training samples nValid (int): number of validation samples nTest (int): number of testing samples sourceNodes (list of int): list of indices of nodes to be used as sources of the diffusion process tMax (int): maximum diffusion time, if None, the maximum diffusion time is the size of the graph (default: None) dataType (dtype): datatype for the samples created (default: np.float64) device (device): if torch.Tensor datatype is selected, this is on what device the data is saved. Methods: signals, labels = .getSamples(samplesType[, optionalArguments]) Input: samplesType (string): 'train', 'valid' or 'test' to determine from which dataset to get the samples from optionalArguments: 0 optional arguments: get all the samples from the specified set 1 optional argument (int): number of samples to get (at random) 1 optional argument (list): specific indices of samples to get Output: signals (dtype.array): numberSamples x numberNodes labels (dtype.array): numberSamples >> Obs.: The 0th dimension matches the corresponding signal to its respective label .expandDims(): Adds the feature dimension to the graph signals (i.e. for graph signals of shape nSamples x nNodes, turns them into shape nSamples x 1 x nNodes, so that they can be handled by general graph signal processing techniques that take into account a feature dimension by default) .astype(type): change the type of the data matrix arrays. Input: type (dtype): target type of the variables (e.g. torch.float64, numpy.float64, etc.) .to(device): if dtype is torch.tensor, move them to the specified device. Input: device (string): target device to move the variables to (e.g. 'cpu', 'cuda:0', etc.) errorRate = .evaluate(yHat, y, tol = 1e-9) Input: yHat (dtype.array): unnormalized probability of each label (shape: nDataPoints x nClasses) y (dtype.array): correct labels (1-D binary vector, shape: nDataPoints) tol (float, default = 1e-9): numerical tolerance to consider two numbers to be equal Output: errorRate (float): proportion of incorrect labels """ def __init__(self, G, nTrain, nValid, nTest, sourceNodes, tMax = None, dataType = np.float64, device = 'cpu'): # Initialize parent super().__init__() # store attributes self.dataType = dataType self.device = device self.nTrain = nTrain self.nValid = nValid self.nTest = nTest # If no tMax is specified, set it the maximum possible. 
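        # The samples below follow the diffusion model x_t = Wnorm^t * delta_s:
        # a delta at source node s, diffused t times by the normalized
        # adjacency. A sketch of one such signal (hypothetical 3-node path
        # graph):
        #   >>> Wnorm = np.array([[0., .5, 0.], [.5, 0., .5], [0., .5, 0.]])
        #   >>> delta = np.eye(3)[:, 0]                       # source at node 0
        #   >>> x = np.linalg.matrix_power(Wnorm, 2) @ delta  # diffused, t = 2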
if tMax == None: tMax = G.N #\\\ Generate the samples # Get the largest eigenvalue of the weighted adjacency matrix EW, VW = graph.computeGFT(G.W, order = 'totalVariation') eMax = np.max(EW) # Normalize the matrix so that it doesn't explode Wnorm = G.W / eMax # total number of samples nTotal = nTrain + nValid + nTest # sample source nodes sampledSources = np.random.choice(sourceNodes, size = nTotal) # sample diffusion times sampledTimes = np.random.choice(tMax, size = nTotal) # Since the signals are generated as W^t * delta, this reduces to the # selection of a column of W^t (the column corresponding to the source # node). Therefore, we generate an array of size tMax x N x N with all # the powers of the matrix, and then we just simply select the # corresponding column for the corresponding time lastWt = np.eye(G.N, G.N) Wt = lastWt.reshape([1, G.N, G.N]) for t in range(1,tMax): lastWt = lastWt @ Wnorm Wt = np.concatenate((Wt, lastWt.reshape([1, G.N, G.N])), axis = 0) x = Wt[sampledTimes, :, sampledSources] # Now, we have the signals and the labels signals = x # nTotal x N # Finally, we have to match the source nodes to the corresponding labels # which start at 0 and increase in integers. nodesToLabels = {} for it in range(len(sourceNodes)): nodesToLabels[sourceNodes[it]] = it labels = [nodesToLabels[x] for x in sampledSources] # nTotal # Split and save them self.samples['train']['signals'] = signals[0:nTrain, :] self.samples['train']['targets'] = np.array(labels[0:nTrain]) self.samples['valid']['signals'] = signals[nTrain:nTrain+nValid, :] self.samples['valid']['targets'] =np.array(labels[nTrain:nTrain+nValid]) self.samples['test']['signals'] = signals[nTrain+nValid:nTotal, :] self.samples['test']['targets'] =np.array(labels[nTrain+nValid:nTotal]) # Change data to specified type and device self.astype(self.dataType) self.to(self.device) class Authorship(_dataForClassification): """ Authorship: Loads the dataset of 19th century writers for the authorship attribution problem Credits for this dataset to Mark Eisen. Please, refer to this paper for details, and whenever using this dataset: S. Segarra, M. Eisen and A. Ribeiro, Authorship Attribution through Function Word Adjacency Networks, IEEE Trans. Signal Process., vol. 63, Issue 20, Oct 2015. Possible authors: jacob 'abbott', robert louis 'stevenson', louisa may 'alcott', horatio 'alger', james 'allen', jane 'austen', emily 'bronte', james 'cooper', charles 'dickens', hamlin 'garland', nathaniel 'hawthorne', henry 'james', herman 'melville', 'page', henry 'thoreau', mark 'twain', arthur conan 'doyle', washington 'irving', edgar allan 'poe', sarah orne 'jewett', edith 'wharton' Initialization: Input: authorName (string): which is the selected author to attribute plays to ratioTrain (float): ratio of the total texts to be part of the training set ratioValid (float): ratio of the train texts to be part of the validation set dataPath (string): path to where the authorship data is located graphNormalizationType ('rows' or 'cols'): how to normalize the created graph from combining all the selected author WANs keepIsolatedNodes (bool): If False, get rid of isolated nodes forceUndirected (bool): If True, create an undirected graph forceConnected (bool): If True, ensure that the resulting graph is connected dataType (dtype): type of loaded data (default: np.float64) device (device): where to store the data (e.g., 'cpu', 'cuda:0', etc.) 
Methods: .loadData(dataPath): load the data found in dataPath and store it in attributes .authorData and .functionWords authorData = .getAuthorData(samplesType, selectData, [, optionalArguments]) Input: samplesType (string): 'train', 'valid', 'test' or 'all' to determine from which dataset to get the raw author data from selectData (string): 'WAN' or 'wordFreq' to decide if we want to retrieve either the WAN of each excerpt or the word frequency count of each excerpt optionalArguments: 0 optional arguments: get all the samples from the specified set 1 optional argument (int): number of samples to get (at random) 1 optional argument (list): specific indices of samples to get Output: Either the WANs or the word frequency count of all the excerpts of the selected author .createGraph(): creates a graph from the WANs of the excerpt written by the selected author available in the training set. The fusion of this WANs is done in accordance with the input options following graphTools.createGraph(). The resulting adjacency matrix is stored. .getGraph(): fetches the stored adjacency matrix and returns it .getFunctionWords(): fetches the list of functional words. Returns a tuple where the first element correspond to all the functional words in use, and the second element consists of all the functional words available. Obs.: When we created the graph, some of the functional words might have been dropped in order to make it connected, for example. signals, labels = .getSamples(samplesType[, optionalArguments]) Input: samplesType (string): 'train', 'valid' or 'test' to determine from which dataset to get the samples from optionalArguments: 0 optional arguments: get all the samples from the specified set 1 optional argument (int): number of samples to get (at random) 1 optional argument (list): specific indices of samples to get Output: signals (dtype.array): numberSamples x numberNodes labels (dtype.array): numberSamples >> Obs.: The 0th dimension matches the corresponding signal to its respective label .expandDims(): Adds the feature dimension to the graph signals (i.e. for graph signals of shape nSamples x nNodes, turns them into shape nSamples x 1 x nNodes, so that they can be handled by general graph signal processing techniques that take into account a feature dimension by default) .astype(type): change the type of the data matrix arrays. Input: type (dtype): target type of the variables (e.g. torch.float64, numpy.float64, etc.) .to(device): if dtype is torch.tensor, move them to the specified device. Input: device (string): target device to move the variables to (e.g. 'cpu', 'cuda:0', etc.) 
errorRate = .evaluate(yHat, y, tol = 1e-9) Input: yHat (dtype.array): estimated labels (1-D binary vector) y (dtype.array): correct labels (1-D binary vector) >> Obs.: both arrays are of the same length tol (float): numerical tolerance to consider two numbers to be equal Output: errorRate (float): proportion of incorrect labels """ def __init__(self, authorName, ratioTrain, ratioValid, dataPath, graphNormalizationType, keepIsolatedNodes, forceUndirected, forceConnected, dataType = np.float64, device = 'cpu'): # Initialize parent super().__init__() # Store self.authorName = authorName self.ratioTrain = ratioTrain self.ratioValid = ratioValid self.dataPath = dataPath self.dataType = dataType self.device = device # Store characteristics of the graph to be created self.graphNormalizationType = graphNormalizationType self.keepIsolatedNodes = keepIsolatedNodes self.forceUndirected = forceUndirected self.forceConnected = forceConnected self.adjacencyMatrix = None # Other data to save self.authorData = None self.selectedAuthor = None self.allFunctionWords = None self.functionWords = None # Load data self.loadData(dataPath) # Check that the authorName is a valid name assert authorName in self.authorData.keys() # Get the selected author's data thisAuthorData = self.authorData[authorName].copy() nExcerpts = thisAuthorData['wordFreq'].shape[0] # Number of excerpts # by the selected author nTrainAuthor = int(round(ratioTrain * nExcerpts)) nValidAuthor = int(round(ratioValid * nTrainAuthor)) nTestAuthor = nExcerpts - nTrainAuthor nTrainAuthor = nTrainAuthor - nValidAuthor # Now, we know how many training, validation and testing samples from # the required author. But we will also include an equal amount of # other authors, therefore self.nTrain = round(2 * nTrainAuthor) self.nValid = round(2 * nValidAuthor) self.nTest = round(2 * nTestAuthor) # Now, let's get the corresponding signals for the author xAuthor = thisAuthorData['wordFreq'] # Get a random permutation of these works, and split them accordingly randPerm = np.random.permutation(nExcerpts) # Save the indices corresponding to each split randPermTrain = randPerm[0:nTrainAuthor] randPermValid = randPerm[nTrainAuthor:nTrainAuthor+nValidAuthor] randPermTest = randPerm[nTrainAuthor+nValidAuthor:nExcerpts] xAuthorTrain = xAuthor[randPermTrain, :] xAuthorValid = xAuthor[randPermValid, :] xAuthorTest = xAuthor[randPermTest, :] # And we will store this split self.selectedAuthor = {} # Copy all data self.selectedAuthor['all'] = thisAuthorData.copy() # Copy word frequencies self.selectedAuthor['train'] = {} self.selectedAuthor['train']['wordFreq'] = xAuthorTrain.copy() self.selectedAuthor['valid'] = {} self.selectedAuthor['valid']['wordFreq'] = xAuthorValid.copy() self.selectedAuthor['test'] = {} self.selectedAuthor['test']['wordFreq'] = xAuthorTest.copy() # Copy WANs self.selectedAuthor['train']['WAN'] = \ thisAuthorData['WAN'][randPermTrain, :, :].copy() self.selectedAuthor['valid']['WAN'] = \ thisAuthorData['WAN'][randPermValid, :, :].copy() self.selectedAuthor['test']['WAN'] = \ thisAuthorData['WAN'][randPermTest, :, :].copy() # Now we need to get an equal amount of works from the rest of the # authors. xRest = np.empty([0, xAuthorTrain.shape[1]]) # Create an empty matrix # to store all the works by the rest of the authors. 
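        # A worked example of the split arithmetic above (hypothetical
        # counts): with nExcerpts = 100, ratioTrain = 0.8 and ratioValid = 0.1,
        #   nTrainAuthor = round(0.8 * 100) = 80,
        #   nValidAuthor = round(0.1 * 80) = 8,
        #   nTestAuthor = 100 - 80 = 20, and finally nTrainAuthor = 80 - 8 = 72,
        # so that nTrain, nValid, nTest = 144, 16, 40 after doubling with the
        # samples from the other authors.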
# Now go author by author gathering all works for key in self.authorData.keys(): # Only for authors that are not the selected author if key is not authorName: thisAuthorTexts = self.authorData[key]['wordFreq'] xRest = np.concatenate((xRest, thisAuthorTexts), axis = 0) # After obtaining all works, xRest is of shape nRestOfData x nWords # We now need to select at random from this other data, but only up # to nExcerpts. Therefore, we will randperm all the indices, but keep # only the first nExcerpts indices. randPerm = np.random.permutation(xRest.shape[0]) randPerm = randPerm[0:nExcerpts] # nExcerpts x nWords # And now we should just get the appropriate number of texts from these # other authors. # Compute how many samples for each case nTrainRest = self.nTrain - nTrainAuthor nValidRest = self.nValid - nValidAuthor nTestRest = self.nTest - nTestAuthor # And obtain those xRestTrain = xRest[randPerm[0:nTrainRest], :] xRestValid = xRest[randPerm[nTrainRest:nTrainRest + nValidRest], :] xRestTest = xRest[randPerm[nTrainRest+nValidRest:nExcerpts], :] # Now construct the signals and labels. Signals is just the # concatenation of each of these excerpts. Labels is just a bunch of # 1s followed by a bunch of 0s # Obs.: The fact that the dataset is ordered now, it doesn't matter, # since it will be shuffled at each epoch. xTrain = np.concatenate((xAuthorTrain, xRestTrain), axis = 0) labelsTrain = np.concatenate((np.ones(nTrainAuthor), np.zeros(nTrainRest)), axis = 0) xValid = np.concatenate((xAuthorValid, xRestValid), axis = 0) labelsValid = np.concatenate((np.ones(nValidAuthor), np.zeros(nValidRest)), axis = 0) xTest = np.concatenate((xAuthorTest, xRestTest), axis = 0) labelsTest = np.concatenate((np.ones(nTestAuthor), np.zeros(nTestRest)), axis = 0) # And assign them to the required attribute samples self.samples['train']['signals'] = xTrain self.samples['train']['targets'] = labelsTrain.astype(np.int) self.samples['valid']['signals'] = xValid self.samples['valid']['targets'] = labelsValid.astype(np.int) self.samples['test']['signals'] = xTest self.samples['test']['targets'] = labelsTest.astype(np.int) # Create graph self.createGraph() # Change data to specified type and device self.astype(self.dataType) self.to(self.device) def loadData(self, dataPath): # Load data (from a .mat file) rawData = hdf5storage.loadmat(dataPath) # rawData is a dictionary with four keys: # 'all_authors': contains the author list # 'all_freqs': contains the word frequency count for each excerpt # 'all_wans': contains the WANS for each excerpt # 'function_words': a list of the functional words # The issue is that hdf5storage, while necessary to load old # Matlab(R) files, gives the data in a weird format, that we need # to adapt and convert. # The data will be structured as follows. We will have an # authorData dictionary of dictionaries: the first key will be the # author name, the second key will be either freqs or wans to # access either one or another. # We will also clean up and save the functional word list, although # we do not need to use it. authorData = {} # Create dictionary for it in range(len(rawData['all_authors'])): thisAuthor = str(rawData['all_authors'][it][0][0][0]) # Each element in rawData['all_authors'] is nested in a couple # of lists, so that's why we need the three indices [0][0][0] # to reach the string with the actual author name. 
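            # Shape bookkeeping for the conversions below (hypothetical sizes):
            # with nData = 50 excerpts over nWords = 211 function words,
            #   >>> np.zeros([1, 211, 50]).squeeze(0).T.shape         # (50, 211)
            #   >>> np.zeros([211, 211, 50]).transpose(2, 0, 1).shape # (50, 211, 211)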
# Get the word frequency thisWordFreq = rawData['all_freqs'][0][it] # 1 x nWords x nData # Again, the [0] is due to the structure of the data # Let us get rid of that extra 1, and then transpose this to be # stored as nData x nWords (since nWords is the dimension of # the number of nodes the network will have; CS notation) thisWordFreq = thisWordFreq.squeeze(0).T # nData x nWords # Finally, get the WANs thisWAN = rawData['all_wans'][0][it] # nWords x nWords x nData thisWAN = thisWAN.transpose(2, 0, 1) # nData x nWords x nWords # Obs.: thisWAN is likely not symmetric, so the way this is # transposed matters. In this case, since thisWAN was intended # to be a tensor in matlab (where the last index is the # collection of matrices), we just throw that last dimension to # the front (since numpy consider the first index as the # collection index). # Now we can create the dictionary and save the corresopnding # data. authorData[thisAuthor] = {} authorData[thisAuthor]['wordFreq'] = thisWordFreq authorData[thisAuthor]['WAN'] = thisWAN # And at last, gather the list of functional words functionWords = [] # empty list to store the functional words for word in rawData['function_words']: functionWords.append(str(word[0][0][0])) # Store all the data recently collected self.authorData = authorData self.allFunctionWords = functionWords self.functionWords = functionWords.copy() def getAuthorData(self, samplesType, dataType, *args): # dataType: train, valid, test # args: 0 args, give back all # args: 1 arg: if int, give that number of samples, chosen at random # args: 1 arg: if list, give those samples precisely. # Check that the type is one of the possible ones assert samplesType == 'train' or samplesType == 'valid' \ or samplesType == 'test' or samplesType == 'all' # Check that the dataType is either wordFreq or WAN assert dataType == 'WAN' or dataType == 'wordFreq' # Check that the number of extra arguments fits assert len(args) <= 1 # If there are no arguments, just return all the desired samples x = self.selectedAuthor[samplesType][dataType] # If there's an argument, we have to check whether it is an int or a # list if len(args) == 1: # If it is an int, just return that number of randomly chosen # samples. if type(args[0]) == int: nSamples = x.shape[0] # total number of samples # We can't return more samples than there are available assert args[0] <= nSamples # Randomly choose args[0] indices selectedIndices = np.random.choice(nSamples, size = args[0], replace = False) # The reshape is to avoid squeezing if only one sample is # requested (because x can have two or three dimension, we # need to take a longer path here, so we will only do it # if args[0] is equal to 1.) if args[0] == 1: newShape = [1] newShape.extend(list(x.shape[1:])) x = x[selectedIndices].reshape(newShape) else: # The fact that we put else here instead of elif type()==list # allows for np.array to be used as indices as well. In general, # any variable with the ability to index. xNew = x[args[0]] # If only one element is selected, avoid squeezing. 
Given that the element
            # can be a list (which has property len) or an np.array (which
            # doesn't have len, but shape), then we can only avoid squeezing
            # if we check whether it has been squeezed (or not)
            if len(xNew.shape) < len(x.shape):
                newShape = [1]
                newShape.extend(list(x.shape[1:]))
                x = xNew.reshape(newShape)

        return x

    def createGraph(self):

        # Save list of nodes to keep to later update the datasets with the
        # appropriate words
        nodesToKeep = []
        # Number of nodes (so far) = Number of functional words
        N = self.selectedAuthor['all']['wordFreq'].shape[1]
        # Create graph
        graphOptions = {}
        graphOptions['adjacencyMatrices'] = self.selectedAuthor['train']['WAN']
        graphOptions['nodeList'] = nodesToKeep
        graphOptions['aggregationType'] = 'sum'
        graphOptions['normalizationType'] = self.graphNormalizationType
        graphOptions['isolatedNodes'] = self.keepIsolatedNodes
        graphOptions['forceUndirected'] = self.forceUndirected
        graphOptions['forceConnected'] = self.forceConnected
        W = graph.createGraph('fuseEdges', N, graphOptions)
        # Obs.: We do not need to read back graphOptions['nodeList'] into
        # nodesToKeep since these are all passed as pointers that point to the
        # same list, so modifying graphOptions also modifies nodesToKeep.
        # Store adjacency matrix
        self.adjacencyMatrix = W.astype(np.float64)

        # Update data
        # For each dataset split
        for key in self.samples.keys():
            # Check the signals have been loaded
            if self.samples[key]['signals'] is not None:
                # And check which is the dimension of the nodes (i.e. whether
                # it was expanded or not, since we always need to keep the
                # entries of the last dimension)
                if len(self.samples[key]['signals'].shape) == 2:
                    self.samples[key]['signals'] = \
                                   self.samples[key]['signals'][:, nodesToKeep]
                elif len(self.samples[key]['signals'].shape) == 3:
                    self.samples[key]['signals'] = \
                                self.samples[key]['signals'][:, :, nodesToKeep]

        if self.allFunctionWords is not None:
            self.functionWords = [self.allFunctionWords[w]
                                                         for w in nodesToKeep]

    def getGraph(self):

        return self.adjacencyMatrix

    def getFunctionWords(self):

        return self.functionWords, self.allFunctionWords

    def astype(self, dataType):
        # This changes the type for the selected author as well as the samples
        for key in self.selectedAuthor.keys():
            for secondKey in self.selectedAuthor[key].keys():
                self.selectedAuthor[key][secondKey] = changeDataType(
                                          self.selectedAuthor[key][secondKey],
                                          dataType)
        self.adjacencyMatrix = changeDataType(self.adjacencyMatrix, dataType)

        # And now, call the inherited method to change the samples as well
        # (and also save the data type)
        super().astype(dataType)

    def to(self, device):
        # If the dataType is 'torch'
        if 'torch' in repr(self.dataType):
            # Change the selected author ('test', 'train', 'valid', 'all';
            # 'WAN', 'wordFreq')
            for key in self.selectedAuthor.keys():
                for secondKey in self.selectedAuthor[key].keys():
                    self.selectedAuthor[key][secondKey] \
                                = self.selectedAuthor[key][secondKey].to(device)
            # Note that .to() is not in place for tensors, so the result has
            # to be assigned back
            self.adjacencyMatrix = self.adjacencyMatrix.to(device)
            # And call the inherited method to move the samples (and save the
            # device)
            super().to(device)

class MovieLens(_data):
    """
    MovieLens: Loads and handles the MovieLens-100k dataset

    The setting is that of regression on a specific node of the graph. That
    is, given a graph, and an incomplete graph signal on that graph, we want
    to estimate the value of the signal on a specific node. If, for instance,
    we have a movie-based graph, then the graph signal corresponds to the
    ratings that a given user gave to some of the movies.
The objective is to estimate how that particular user would rate one of the other available movies. (Same holds by interchanging 'movie' with 'user' in this paragraph) Initialization: Input: graphType('user' or 'movie'): which underlying graph to build; 'user' for user-based graph (each node is a user), and 'movie' for movie-based (each node is a movie); this also determines the data, on a user-based graph, each data sample (each graph signal) corresponds to a movie, and on the movie-based graph, each data sample corresponds to a user. labelID (list of int or 'all'): these are the specific nodes on which we will be looking to interpolate; this has effect in the building of the training, validation and test sets, since only data samples that have a value at that node can be used ratioTrain (float): ratio of the total samples to be part of the validation set ratioValid (float): ratio of the train samples to be part of the validation set dataDir (string): directory where to download the movie-lens dataset to/ to check if it has already been downloaded keepIsolatedNodes (bool): If False, get rid of isolated nodes forceUndirected (bool): If True, create an undirected graph forceConnected (bool): If True, ensure that the resulting graph is connected kNN (int): sparsify this graph keeping kNN nearest neighbors maxNodes (int, default: None): consider only the maxNodes nodes with largest number of ratings minRatings (int, default: 0): get rid of all columns and rows with less than minRatings ratings (for minRatings = 0, just keep all the matrix) interpolate (bool, default: False): if True, interpolates the matrix by means of a nearest-neighbor rule before creating the graph signals (i.e. all the graph signals will have complete ratings) >> Obs.: Just using these signals to interpolate the remaining rating can be interpreted as a typical baseline. dataType (dtype): type of loaded data (default: np.float64) device (device): where to store the data (e.g., 'cpu', 'cuda:0', etc.) The resulting dataset consists of triads (signal, target, labelID) where: - the signal contains the ratings given by some data sample to all nodes with a 0 for the rating corresponding to the labelID node (note that, if interpolate = False, then there will also be zeros in other nodes that have not been rated) - target is the value of the rating at the corresponding labelID node - labelID is the label of the node whose rating has been removed In other words, we want to use signal to estimate the value target at the node labelID. Methods: .loadData(filename, [dataDir]): loads the data from dataDir (if not provided, the internally stored one is used) and saves it as filename; if the data has already been processed and saved as 'filename', then it will be just loaded. .createGraph(): creates a graphType-based graph with the previously established options (undirected, isolated, connected, etc.); this graph is always sparsified by means of a nearest-neighbor rule. The graph is created containing only data samples in the training set. .interpolateRatings(): uses a nearest-neighbor rule to interpolate the ratings in the graph signal; this means that all zero values that do not correspond to labelID are replaced by the average of ratings of the closest neighbors with nonzero ratings. .getGraph(): fetches the adjacency matrix of the stored graph. .getIncompleteMatrix(): fetches the incomplete matrix as it was loaded from the data. 
.getMovieTitles(): fetches a dictionary, where each key is the movieID (starting from zero, so that it matches the index of the columns of the incomplete matrix; subtract 1 from the movieID of movieLens to get this movieID) and each value is the title of the movie (in string format). .getLabelID(): the index of the node whose data will be regressed; this might differ from the input labelID in that: its count starts from zero, its count might have been modified after getting rid of nodes in order to build a graph with the desired characteristics .evaluate(yHat, y): computes the RMSE between the estimated ratings yHat and the actual ratings given in y. lossValue = .evaluate(yHat, y) Input: yHat (dtype.array): estimated target y (dtype.array): target representation Output: lossValue (float): regression loss chosen .astype(type): change the type of the data matrix arrays. Input: type (dtype): target type of the variables (e.g. torch.float64, numpy.float64, etc.) .to(device): if dtype is torch.tensor, move them to the specified device. Input: device (string): target device to move the variables to (e.g. 'cpu', 'cuda:0', etc.) """ def __init__(self, graphType, labelID, ratioTrain, ratioValid, dataDir, keepIsolatedNodes, forceUndirected, forceConnected, kNN, maxNodes = None, maxDataPoints = None, minRatings = 0, interpolate = False, dataType = np.float64, device = 'cpu'): super().__init__() # This creates the attributes: dataType, device, nTrain, nTest, nValid, # and samples, and fills them all with None, and also creates the # methods: getSamples, astype, and to. self.dataType = dataType self.device = device # Store attributes # GraphType assert graphType == 'user' or graphType == 'movie' # This is because what are the graph signals depends on the graph we # want to use. self.graphType = graphType # Label ID assert type(labelID) is list or labelID == 'all' # Label ID is the user ID or the movie ID following the MovieLens # nomenclature. This determines how we build the labels in the # dataset. If it's all, then we want to estimate for all users/movies. # Dataset partition self.ratioTrain = ratioTrain self.ratioValid = ratioValid # Dataset directory self.dataDir = dataDir # Where the data is, or where it should be saved # to. # Graph preferences self.keepIsolatedNodes = keepIsolatedNodes self.forceUndirected = forceUndirected self.forceConnected = forceConnected self.kNN = kNN # Reduce the graph to have maxNodes self.maxNodes = maxNodes # Discard samples with less than minRatings ratings self.minRatings = minRatings # Interpolate nonexisting ratings (i.e. get rid of zeros and replace # them by the nearest neighbor rating) self.doInterpolate = interpolate # Empty attributes for now self.incompleteMatrix = None self.movieTitles = {} self.adjacencyMatrix = None self.indexDataPoints = {} # Now, we should be ready to load the data and build the (incomplete) # matrix self.loadData('movielens100kIncompleteMatrix.pkl') # This has loaded the incompleteMatrix and movieTitles attributes. 
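        # The minRatings filter below can be sanity-checked on a toy matrix
        # (hypothetical ratings; rows are users, columns are movies):
        #   >>> X = np.array([[5., 0., 3.], [0., 0., 1.], [4., 2., 0.]])
        #   >>> keep = np.nonzero(np.sum(X > 0, axis = 1) > 1)[0] # minRatings = 1
        #   >>> X = X[keep]  # drops the middle user, who rated only one movie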
# First check if we might need to get rid of columns and rows to get # the minimum number of ratings requested if self.minRatings > 0: incompleteMatrix = self.incompleteMatrix # Get a one where there are ratings, and a 0 where there are not binaryIncompleteMatrix = (incompleteMatrix>0)\ .astype(incompleteMatrix.dtype) # Count the number of ratings in each row nRatingsPerRow = np.sum(binaryIncompleteMatrix, axis = 1) # Count the number of ratings in each column nRatingsPerCol = np.sum(binaryIncompleteMatrix, axis = 0) # Indices of rows and columns to keep indexRowsToKeep = np.nonzero(nRatingsPerRow > self.minRatings)[0] indexColsToKeep = np.nonzero(nRatingsPerCol > self.minRatings)[0] # Reduce the size of the matrix incompleteMatrix = \ incompleteMatrix[indexRowsToKeep][:, indexColsToKeep] # Store it self.incompleteMatrix = incompleteMatrix # Also, we need to consider that, if we have the movie graph, # then we need to update the movie list as well (all the columns # we lost -the nodes we lost- are part of a movie list that # has a one-to-one correspondence) if self.graphType == 'movie': if len(self.movieTitles) > 0: # Non empty movieList # Where to save the new movie list movieTitles = {} # Because nodes are now numbered sequentially, we need to # do the same with the movieID to keep them matched (i.e. # node n corresponds to movieList[n] title) newMovieID = 0 for movieID in indexColsToKeep: movieTitles[newMovieID] = self.movieTitles[movieID] newMovieID = newMovieID + 1 # Update movieList self.movieTitles = movieTitles else: # If there was no need to reduce the columns or rows indexRowsToKeep = np.arange(self.incompleteMatrix.shape[0]) indexColsToKeep = np.arange(self.incompleteMatrix.shape[1]) # To simplify code, we will work always with each row being a data # sample. The incompleteMatrix is User x Movies if graphType == 'user': # If we want to reduce the number of nodes (i.e. is not None), and # we want less nodes than the ones that actually there if maxNodes is not None and maxNodes<self.incompleteMatrix.shape[0]: # The number of columns in the matrix is the number of nodes, # therefore, each column is a node, and the number of nonzero # elements in each node is the number of ratings for each movie nRatings = np.sum((self.incompleteMatrix > zeroTolerance), axis = 1) # Order the nodes in decreasing order of number of ratings indexRowsToKeep = np.argsort(-nRatings) # Keep only the first nNodes indexRowsToKeep = indexRowsToKeep[0:maxNodes] # And reduce the size of the matrix self.incompleteMatrix = self.incompleteMatrix[indexRowsToKeep,:] # If the graph type is user-based, then the graph signals are the # movies, scored for every user. This means that each column of the # incompleteMatrix is a graph signal, but since we're working with # rows, we have to transpose it workingMatrix = self.incompleteMatrix.T # Movies x User # Which one correspond to the nodes indexNodesToKeep = indexRowsToKeep # Now, each row is a movie score for all users, so that it is a # graph signal in the user-based graph. 
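            # The node-capping rule above keeps the maxNodes busiest nodes; a
            # compact sketch (hypothetical rating counts per node):
            #   >>> nRatings = np.array([12, 45, 3, 27])
            #   >>> keep = np.argsort(-nRatings)[0:2]  # two busiest -> [1, 3]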
else: if maxNodes is not None and maxNodes<self.incompleteMatrix.shape[1]: nRatings = np.sum((self.incompleteMatrix > zeroTolerance), axis = 0) indexColsToKeep = np.argsort(-nRatings) indexColsToKeep = indexColsToKeep[0:maxNodes] self.incompleteMatrix = self.incompleteMatrix[:,indexColsToKeep] workingMatrix = self.incompleteMatrix # In this case, each row is a user (how that user scored all movies) # and this is the kind of data samples we need for movie-based # graphs indexNodesToKeep = indexColsToKeep # Determine the number of nodes nNodes = workingMatrix.shape[1] assert len(indexNodesToKeep) == nNodes # And we need to map the original IDs to the new ones (note that # each column is a node now -each row is a graph signal- so we # care about matching the labels to the corresponding new ones) # First check, that, unless we wanted all indices (so we don't # care much about the ones we just dropped), we have them in the # new indices (i.e. we didn't drop them) if labelID != 'all': # For each of the introduced IDs, check: self.labelID = np.empty(0, dtype = np.int) for i in labelID: # Recall that labelID they start with 1, but indexNodesToKeep # starts with zero assert (i-1) in indexNodesToKeep newIndex = np.argwhere(indexNodesToKeep == (i-1))[0] self.labelID = np.concatenate((self.labelID, newIndex)) else: self.labelID = np.arange(nNodes) # Up to this point, we just have an array of IDs of nodes we care about # This could be all, one or a few, but is a numpy.array # So, now we just select a number of rows (graph signals) at random # to make the train and valid and test set. But we need to keep # track of the ID (the node) # The total number of points is now the number of nonzero elements # of the matrix. The problem is that we cannot get a random number # of nonzero elements of the matrix, because we're risking selecting # all rows (graph signals), and thus not leaving anything for the # train and test set. In other words, the rows determine the graph # signals, and all the nonzero elements of each row will make up # for the points in each training set. # Next we reduce the size of the matrix to the ones that we are # interested in selectedMatrix = workingMatrix[:, self.labelID] # So far we've got the value of all graph signals only on the nodes # of interest (some of these might just be zero, if the nodes of # interest weren't rated by that given graph signal) # Get rid of those rows that have no ratings for the labels of # interest # We sum all the rows: since all the ratings are positive, those # rows that are zero is because they have no ratings nonzeroRows = np.sum(selectedMatrix, axis = 1) nonzeroRows = np.nonzero(nonzeroRows)[0] selectedMatrix = selectedMatrix[nonzeroRows,:] # Now, we move on to count the total number of graph signals that # we have (number of rows) nRows = selectedMatrix.shape[0] # Permute the indices at random randPerm = np.random.permutation(nRows) # This gives me a random way of going through all the rows. So we # will do that, going row by row, picking all the nonzero elements # in said row, until we reach the (closest possible) number to the # amount of training samples we want. # The point of this is that each row might have more than one # data point: i.e. 
some graph signal might have rated more than one # of the nodes of interest; therefore this would amount to having # more than one data point stemming from that graph signal -by # zero-ing out each of the nodes separately- # Total number of available samples (whether to take the 0 or the # 1 element of the set is indistinct, they both have the same len) nDataPoints = len(np.nonzero(selectedMatrix)[0]) # Check if the total number of desired samples has been defined # (a max number of data points could have been set if we want # to randomly select a subset of all available datapoints, for # running a faster training) if maxDataPoints is None: maxDataPoints = nDataPoints # and if it was designed, if it is not greater than the total # number of data points available elif maxDataPoints > nDataPoints: maxDataPoints = nDataPoints self.maxDataPoints = maxDataPoints # Target number of train, valid and test samples nTrain = round(ratioTrain * maxDataPoints) nValid = round(ratioValid * nTrain) nTrain = nTrain - nValid nTest = maxDataPoints - nTrain - nValid # TODO: There has to be a way of accelerating this thing below # Training count nTrainSoFar = 0 rowCounter = 0 # Save variables trainSignals = np.empty([0, nNodes]) trainLabels = np.empty(0) trainIDs = np.empty(0).astype(np.int) while nTrainSoFar < nTrain and rowCounter < nRows: # Get the corresponding selected row thisRow = selectedMatrix[randPerm[rowCounter], :] # Get the indices of the nonzero elements of interest (i.e # of all the nodes of interest, which ones have a nonzero # rating on this graph signal) thisNZcols = np.nonzero(thisRow)[0] # Nonzero Cols # And now we can match this to the corresponding columns in the # original matrix thisIDs = self.labelID[thisNZcols] thisNpoints = len(thisIDs) # Get the labels thisLabels = thisRow[thisNZcols] # Get the signals thisSignals = workingMatrix[nonzeroRows[randPerm[rowCounter]],:] # From this signal (taken from the original working matrix) we # will obtain as many signals as nonzero ratings of the nodes of # interest. Therefore, we need to repeat it to that point thisSignals = np.tile(thisSignals, [thisNpoints, 1]) # thisNpoints x nNodes # We need to zero-out those elements that will be part of # the samples thisSignals[np.arange(thisNpoints), thisIDs] = 0 # And now we should be able to concatenate trainSignals = np.concatenate((trainSignals, thisSignals), axis = 0) trainLabels = np.concatenate((trainLabels, thisLabels)) trainIDs = np.concatenate((trainIDs, thisIDs)) # Add how many new data points we have just got nTrainSoFar += thisNpoints # And increase the counter rowCounter += 1 # We have finalized the training set. 
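        # The masking idiom in the loop above replicates one graph signal once
        # per rated node of interest and zeroes out the rating being
        # predicted; a sketch (hypothetical 4-node signal rated at nodes 1
        # and 3):
        #   >>> sig = np.array([0., 4., 0., 5.])
        #   >>> ids = np.array([1, 3])
        #   >>> tiled = np.tile(sig, [2, 1])  # 2 x 4, one row per data point
        #   >>> tiled[np.arange(2), ids] = 0  # row 0 hides node 1, row 1 node 3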
Now, we have to count how # many training samples we actually have self.nTrain = len(trainLabels) # We also want to know which rows we have selected so far indexTrainPoints = nonzeroRows[randPerm[0:rowCounter]] nRowsTrain = rowCounter # Now, repeat for validation set: nValidSoFar = 0 rowCounter = nRowsTrain # Initialize where the other one left off # Save variables validSignals = np.empty([0, nNodes]) validLabels = np.empty(0) validIDs = np.empty(0).astype(np.int) while nValidSoFar < nValid and rowCounter < nRows: # Get the corresponding selected row thisRow = selectedMatrix[randPerm[rowCounter], :] # Get the indices of the nonzero elements of interest (i.e # of all the nodes of interest, which ones have a nonzero # rating on this graph signal) thisNZcols = np.nonzero(thisRow)[0] # Nonzero Cols # And now we can match this to the corresponding columns in the # original matrix thisIDs = self.labelID[thisNZcols] thisNpoints = len(thisIDs) # Get the labels thisLabels = thisRow[thisNZcols] # Get the signals thisSignals = workingMatrix[nonzeroRows[randPerm[rowCounter]],:] # From this signal (taken from the original working matrix) we # will obtain as many signals as nonzero ratings of the nodes of # interest. Therefore, we need to repeat it to that point thisSignals = np.tile(thisSignals, [thisNpoints, 1]) # thisNpoints x nNodes # We need to zero-out those elements that will be part of # the samples thisSignals[np.arange(thisNpoints), thisIDs] = 0 # And now we should be able to concatenate validSignals = np.concatenate((validSignals, thisSignals), axis = 0) validLabels = np.concatenate((validLabels, thisLabels)) validIDs = np.concatenate((validIDs, thisIDs)) # Add how many new data points we have just got nValidSoFar += thisNpoints # And increase the counter rowCounter += 1 # We have finalized the validation set. Now, we have to count how # many validation samples we actually have self.nValid = len(validLabels) # We also want to know which rows we have selected so far indexValidPoints = nonzeroRows[randPerm[nRowsTrain:rowCounter]] nRowsValid = rowCounter - nRowsTrain # And, finally the test set nTestSoFar = 0 rowCounter = nRowsTrain + nRowsValid # Save variables testSignals = np.empty([0, nNodes]) testLabels = np.empty(0) testIDs = np.empty(0).astype(np.int) while nTestSoFar < nTest and rowCounter < nRows: # Get the corresponding selected row thisRow = selectedMatrix[randPerm[rowCounter], :] # Get the indices of the nonzero elements of interest (i.e # of all the nodes of interest, which ones have a nonzero # rating on this graph signal) thisNZcols = np.nonzero(thisRow)[0] # Nonzero Cols # And now we can match this to the corresponding columns in the # original matrix thisIDs = self.labelID[thisNZcols] thisNpoints = len(thisIDs) # Get the labels thisLabels = thisRow[thisNZcols] # Get the signals thisSignals = workingMatrix[nonzeroRows[randPerm[rowCounter]],:] # From this signal (taken from the original working matrix) we # will obtain as many signals as nonzero ratings of the nodes of # interest. 
            # Therefore, we need to repeat it that many times.
            thisSignals = np.tile(thisSignals, [thisNpoints, 1])
            #   thisNpoints x nNodes
            # We need to zero out those elements that will be part of
            # the samples
            thisSignals[np.arange(thisNpoints), thisIDs] = 0
            # And now we should be able to concatenate
            testSignals = np.concatenate((testSignals, thisSignals),
                                         axis = 0)
            testLabels = np.concatenate((testLabels, thisLabels))
            testIDs = np.concatenate((testIDs, thisIDs))
            # Add how many new data points we have just got
            nTestSoFar += thisNpoints
            # And increase the counter
            rowCounter += 1
        # We have finalized the test set. Now, we have to count how
        # many test samples we actually have
        self.nTest = len(testLabels)
        # We also want to know which rows we have selected so far
        indexTestPoints = \
                    nonzeroRows[randPerm[nRowsTrain+nRowsValid:rowCounter]]
        # And we also need all the data points (all the rows), so:
        indexDataPoints = np.concatenate((indexTrainPoints,
                                          indexValidPoints,
                                          indexTestPoints))
        # This finalizes the data split, so we have all we need:
        # signals, labels, and IDs.
        # So far, either by selecting a node, or by selecting all nodes, we
        # have the variables we need: signals, labels, IDs and index points.
        self.samples['train']['signals'] = trainSignals
        self.samples['train']['targets'] = trainLabels
        self.samples['valid']['signals'] = validSignals
        self.samples['valid']['targets'] = validLabels
        self.samples['test']['signals'] = testSignals
        self.samples['test']['targets'] = testLabels
        self.targetIDs = {}
        self.targetIDs['train'] = trainIDs
        self.targetIDs['valid'] = validIDs
        self.targetIDs['test'] = testIDs
        # And update the index of the data points (which are the rows
        # selected)
        self.indexDataPoints['all'] = indexDataPoints
        self.indexDataPoints['train'] = indexTrainPoints
        self.indexDataPoints['valid'] = indexValidPoints
        self.indexDataPoints['test'] = indexTestPoints
        # Now that the data has been loaded, and the training/test partition
        # has been made, create the graph
        self.createGraph()
        # Observe that this graph also adjusts the signals to reflect any
        # change in the number of nodes
        # Finally, check if we want to interpolate the useless zeros
        if self.doInterpolate:
            self.interpolateRatings()
        # Change data to the specified type and device
        self.astype(self.dataType)
        self.to(self.device)

    def loadData(self, filename, *args):
        # Here we offer the option of including an additional dir; if not,
        # use the internally stored one.
        if len(args) == 1:
            dataDir = args[0]
        else:
            assert self.dataDir is not None
            dataDir = self.dataDir
        # Check if the dataDir exists, and if not, create it
        if not os.path.exists(dataDir):
            os.makedirs(dataDir)
        # Create the filename to save/load
        datasetFilename = os.path.join(dataDir, filename)
        # Check if the file exists; if so, load it
        if os.path.isfile(datasetFilename):
            # If it exists, load it
            with open(datasetFilename, 'rb') as datasetFile:
                datasetDict = pickle.load(datasetFile)
                # And save the corresponding variables
                self.incompleteMatrix = datasetDict['incompleteMatrix']
                self.movieTitles = datasetDict['movieTitles']
        else: # If it doesn't exist, create it
            # There are three options here: we have the raw data already
            # there, we have the zip file and need to decompress it, or we
            # have nothing and need to download it.
            existsRawData = \
                  os.path.isfile(os.path.join(dataDir, 'ml-100k', 'u.data')) \
                  and os.path.isfile(os.path.join(dataDir, 'ml-100k', 'u.item'))
            # Actually, we're only interested in the ratings, but we're also
            # getting the movie list, just in case. Other information that
            # we're not considering at the moment includes: genres, user
            # demographics
            existsZipFile = os.path.isfile(os.path.join(dataDir, 'ml-100k.zip'))
            if not existsRawData and not existsZipFile:
                # We have to download it
                mlURL = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
                urllib.request.urlretrieve(mlURL,
                                filename = os.path.join(dataDir, 'ml-100k.zip'))
                existsZipFile = True
            if not existsRawData and existsZipFile:
                # Unzip it
                zipObject = zipfile.ZipFile(os.path.join(dataDir, 'ml-100k.zip'))
                zipObject.extractall(dataDir)
                zipObject.close()
            # Now that we have the data, we can get the filenames
            rawDataFilename = os.path.join(dataDir, 'ml-100k', 'u.data')
            assert os.path.isfile(rawDataFilename)
            rawMovieListFilename = os.path.join(dataDir, 'ml-100k', 'u.item')
            assert os.path.isfile(rawMovieListFilename)
            # And we can load it and store it.
            rawMatrix = np.empty([0, 0]) # Start with an empty matrix and
                # then slowly add the number of users and movies, which we
                # do not assume to be known beforehand
            # Let's start with the data. Open it.
            with open(rawDataFilename, 'r') as rawData:
                # The file consists of a succession of lines, where each
                # line corresponds to a data sample
                for dataLine in rawData:
                    # For each line, we split it into the different fields
                    dataLineSplit = dataLine.rstrip('\n').split('\t')
                    # Keep the ones we care about here
                    userID = int(dataLineSplit[0])
                    movieID = int(dataLineSplit[1])
                    rating = int(dataLineSplit[2])
                    # Now we have to add this information to the matrix.
                    # The matrix is of size Users x Movies (U x M).
                    # We need to check whether we need to add more rows
                    # or more columns
                    if userID > rawMatrix.shape[0]:
                        rowDiff = userID - rawMatrix.shape[0]
                        zeroPadRows = np.zeros([rowDiff, rawMatrix.shape[1]])
                        rawMatrix = np.concatenate((rawMatrix, zeroPadRows),
                                                   axis = 0)
                    if movieID > rawMatrix.shape[1]:
                        colDiff = movieID - rawMatrix.shape[1]
                        zeroPadCols = np.zeros([rawMatrix.shape[0], colDiff])
                        rawMatrix = np.concatenate((rawMatrix, zeroPadCols),
                                                   axis = 1)
                    # Now that we have assured appropriate dimensions
                    rawMatrix[userID - 1, movieID - 1] = rating
                    # Recall that the count of user and movie IDs starts at
                    # 1 for the MovieLens dataset, but we need to start
                    # indexing at 0 for Python
            # Now that we have created the matrix, we store it
            self.incompleteMatrix = rawMatrix
            # And we move on to load the movie names
            with open(rawMovieListFilename, 'r', encoding = "ISO-8859-1") \
                    as rawMovieList:
                # Go line by line (each line corresponds to a movie)
                for movieLine in rawMovieList:
                    movieLineSplit = movieLine.rstrip('\n').split('|')
                    movieID = int(movieLineSplit[0]) - 1
                    # Note that, in this case, we're making the movie IDs
                    # match the column indexing (so it starts at zero)
                    movieTitle = movieLineSplit[1]
                    self.movieTitles[movieID] = movieTitle
            # And now that we're done, we save this in a pickle file for
            # posterity
            with open(datasetFilename, 'wb') as datasetFile:
                pickle.dump(
                        {'incompleteMatrix': self.incompleteMatrix,
                         'movieTitles': self.movieTitles},
                        datasetFile
                        )

    def createGraph(self):
        # Here we can choose to create the movie or the user graph.
        # Let's start with the incomplete matrix, and randomly get some of
        # the elements from it to use as training data to build the graph.
        # Recall that the data points that have already been split following
        # the user/movie ID selection (or 'all', for all it matters) have to
        # be taken into account.
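        # In outline, the steps below are: (i) select a training subset of
        # ratings that contains the training samples and excludes the
        # validation and test samples, (ii) compute the pairwise correlation
        # matrix from co-rated items, (iii) normalize it and remove the
        # diagonal, and (iv) fuse and sparsify it into the final
        # nearest-neighbor graph.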
        # So, check that these points have been determined
        assert 'all' in self.indexDataPoints.keys()
        assert 'train' in self.indexDataPoints.keys()
        assert 'valid' in self.indexDataPoints.keys()
        assert 'test' in self.indexDataPoints.keys()
        assert self.nTrain is not None \
                    and self.nValid is not None \
                    and self.nTest is not None
        # To follow the paper by Huang et al., where the data is given by
        # Y in U x M, and which goes into full detail on how to build the
        # U x U user-based graph, we will stick with this formulation
        if self.graphType == 'user':
            workingMatrix = self.incompleteMatrix # Users x Movies
        else:
            workingMatrix = self.incompleteMatrix.T # Movies x Users
        # Note that this is the opposite of the arrangement we considered
        # before when loading the data into samples; back then, we
        # considered samples to be rows, and the data to build the graph was
        # therefore in the columns; in this case, it is the opposite, since
        # we still want to use the data located in the rows.
        # Now, the indices in self.indexDataPoints essentially determine the
        # data samples (the graph signals) that we put in each set. These
        # graph signals are now the columns, because the nodes are the rows.
        # So, these indexDataPoints are the columns in the new workingMatrix.
        # In essence, we need to add more points to complete the train set,
        # but we have to be sure that (i) these points are not the ones in
        # the valid and test sets, and (ii) the training points are included
        # already.
        # Now, out of all possible graph signals (number of columns in this
        # workingMatrix), we have selected some to be part of the training
        # set. But each of these graph signals has a different number of
        # training points (because they have a different number of nonzero
        # elements). And, when building the graph, we only care about the
        # nonzero elements of the graph signals.
        # So, let's count the number of training points that we actually
        # have. To do this, we count the number of nonzero elements in the
        # samples that we have selected
        trainSamples = self.indexDataPoints['train']
        nTrainPointsActual = len(np.nonzero(workingMatrix[:, trainSamples])[0])
        # And the total number of points that we have already partitioned
        # into the different sets
        validSamples = self.indexDataPoints['valid']
        nValidPointsActual = len(np.nonzero(workingMatrix[:, validSamples])[0])
        testSamples = self.indexDataPoints['test']
        nTestPointsActual = len(np.nonzero(workingMatrix[:, testSamples])[0])
        # Total number of points already considered
        nPointsActual = nTrainPointsActual+nValidPointsActual+nTestPointsActual
        # The total number of data points in the entire dataset is
        indexDataPoints = np.nonzero(workingMatrix)
        # This is a tuple, where the first element holds the row indices of
        # the nonzero elements, and the second element holds the column
        # indices.
        nDataPoints = len(indexDataPoints[0]) # or [1], it doesn't matter
        # Note that every nonzero point belonging to labelID has already
        # been assigned to one dataset or another, so when we split these
        # datasets, we cannot consider those.
        # The total number of expected training points is
        nTrainPointsAll = int(round(self.ratioTrain * nDataPoints))
        # Discard the (expected) number of validation points
        nTrainPointsAll = int(nTrainPointsAll \
                                    - round(self.ratioValid*nTrainPointsAll))
        # Now, we only need to add more points if the expected number of
        # training points is greater than the number we already have.
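        # As a hypothetical numeric example of these counts: with
        # nDataPoints = 100000, ratioTrain = 0.9 and ratioValid = 0.1, we
        # get nTrainPointsAll = 90000 - 9000 = 81000; if the training graph
        # signals only contain nTrainPointsActual = 60000 nonzero ratings,
        # then 21000 more points still have to be collected below.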
        # If we have more training points than what we originally intended,
        # we just use those (they will be part of the training samples
        # regardless). This could happen, for instance, if, by chance, the
        # graph signals picked for the training set are the denser ones,
        # giving a lot of training points:
        #   nTrainPointsAll > nTrainPointsActual
        # Likewise, we can only collect the remaining needed points if there
        # are still points left to take from (i.e., if not all the other
        # graph signals have already been taken for the validation and test
        # sets):
        #   nPointsActual < nDataPoints
        if nTrainPointsAll > nTrainPointsActual and nPointsActual < nDataPoints:
            # So, now, the number of points that we still need to get is
            nTrainPointsRest = nTrainPointsAll - nTrainPointsActual
            # Next, we need to determine the pool of indices where we can
            # get the samples from (it cannot be samples that have already
            # been considered in any of the graph signals)
            nTotalCols = workingMatrix.shape[1] # Total number of columns
            # Note that self.indexDataPoints['all'] has all the columns that
            # have already been selected. So the remaining columns are the
            # ones that are not there
            indexRemainingCols = [i for i in range(nTotalCols) \
                                        if i not in self.indexDataPoints['all']]
            indexRemainingCols = np.array(indexRemainingCols)
            # So the total number of points left is
            indexDataPointsRest = np.nonzero(workingMatrix[:, indexRemainingCols])
            nDataPointsRest = len(indexDataPointsRest[0])
            # Now, check that we have enough points to complete the total
            # desired. If not, just use all of them
            if nDataPointsRest < nTrainPointsRest:
                nTrainPointsRest = nDataPointsRest
            # Now, we need to select, at random from these points, those
            # that will be part of the training set to build the graph.
            randPerm = np.random.permutation(nDataPointsRest)
            # Pick the needed number of subindices
            subIndexRandomRest = randPerm[0:nTrainPointsRest]
            # And select the points (both rows and columns).
            # Remember that the columns indexed by indexDataPointsRest
            # actually refer to the submatrix of remaining columns, so
            indexDataPointsRestCols = indexDataPointsRest[1][subIndexRandomRest]
            indexDataPointsRestCols = indexRemainingCols[indexDataPointsRestCols]
            indexDataPointsRestRows = indexDataPointsRest[0][subIndexRandomRest]
            indexTrainPointsRest = (indexDataPointsRestRows,
                                    indexDataPointsRestCols)
            # So, so far, we have all the needed training points: (i) those
            # in the original training set, and (ii) those in the remaining
            # graph signals to complete the number of desired training
            # points.
            # Now, we need to merge these points with the ones already in
            # the training set of graph signals
            indexTrainPointsID = np.nonzero(
                                workingMatrix[:, self.indexDataPoints['train']])
            # And put them together with the ones we already had
            indexTrainPoints = (
                np.concatenate((indexTrainPointsRest[0], indexTrainPointsID[0])),
                np.concatenate((indexTrainPointsRest[1],
                           self.indexDataPoints['train'][indexTrainPointsID[1]]))
                )
        else:
            # If we already had all the points we wanted, which are those
            # that were already in the training set, we need to get them, so
            # it's just a renaming
            indexTrainPoints = np.nonzero(
                                workingMatrix[:, self.indexDataPoints['train']])
            # But the columns in this indexTrainPoints are actually the
            # columns of the smaller matrix evaluated only on
            # self.indexDataPoints['train'].
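            # As a hypothetical example of this remapping: if
            # self.indexDataPoints['train'] = [4, 9] and np.nonzero returned
            # column indices [0, 0, 1], then the corresponding columns in
            # the full matrix are [4, 4, 9].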
            # So we need to map them into the full column numbers
            indexTrainPoints = (
                    indexTrainPoints[0],
                    self.indexDataPoints['train'][indexTrainPoints[1]]
                    )
            # And state that there are no new extra points
            nTrainPointsRest = 0
        # Record the actual number of training points that we are left with
        nTrainPoints = len(indexTrainPoints[0])
        assert nTrainPoints == nTrainPointsRest + nTrainPointsActual
        # And this is it! We got all the necessary training samples,
        # including those that we were already using.
        # Finally, set every other element not in the training set in the
        # workingMatrix to zero
        workingMatrixZeroedTrain = workingMatrix.copy()
        workingMatrixZeroedTrain[indexTrainPoints] = 0.
        workingMatrix = workingMatrix - workingMatrixZeroedTrain
        assert len(np.nonzero(workingMatrix)[0]) == nTrainPoints
        # This checks that the total number of nonzero elements of the
        # matrix is the total number of training samples that we're supposed
        # to have.
        # Now, we finally have the incompleteMatrix with only the
        # corresponding elements: a ratioTrain proportion of training
        # samples that, for sure, includes the ones that we will use in the
        # graph signals dataset and, for sure, excludes those that are in
        # the validation and test sets.
        # Finally, on to compute the correlation matrix.
        # The mean required for the (u,v)th element of the correlation
        # matrix is the sum of the ratings of row u, but only over those
        # columns where there is also a rating in row v. So we care about
        # the values in row u, but we need to know which nonzero positions
        # coincide between rows u and v. In order to do this, we create a
        # template that signals the position of the elements.
        binaryTemplate = (workingMatrix > 0).astype(workingMatrix.dtype)
        # Then, when we multiply the matrix with the actual ratings by the
        # transpose of this template, we will be summing the values of one
        # matrix (in rows), but only in the places where there was an
        # element in the other row (now a column, because it is transposed).
        # This gives us the sum part of the mean.
        sumMatrix = workingMatrix.dot(binaryTemplate.T)
        # To count the number of elements that are shared by both rows u and
        # v, we simply multiply the binary template by its transpose.
        countMatrix = binaryTemplate.dot(binaryTemplate.T)
        # Note that there might be pairs with empty intersection; we need to
        # set those counts to 1 so that division by 0 doesn't create a NaN
        # (the end result will still be zero, since the sumMatrix will have
        # a zero in those same positions)
        countMatrix[countMatrix == 0] = 1
        # And now we can compute this (u,v)-dependent mean
        avgMatrix = sumMatrix / countMatrix
        # Note that this matrix is not supposed to be symmetric, due to the
        # use of only the sum of item u over the set S_uv, instead of using
        # the sum over both u and v.
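        # As a toy illustration of this asymmetry: with
        #   workingMatrix = [[5, 3, 0],
        #                    [4, 0, 2]]
        # the binary template is [[1, 1, 0], [1, 0, 1]], and rows 0 and 1
        # only share item 0; then countMatrix[0,1] = countMatrix[1,0] = 1,
        # but sumMatrix[0,1] = 5 while sumMatrix[1,0] = 4, so that
        # avgMatrix[0,1] = 5 != 4 = avgMatrix[1,0].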
        # More specifically, the definition is
        #   mu_{uv} = 1/|S_{uv}| * \sum_{i \in S_{uv}} Y_{ui}
        # Since the sum is over the elements of u, when we compute mu_{vu}
        # we will get a different sum.
        # Now, to compute the correlation, we need to compute the square sum
        # matrix \sum_{i \in S_{uv}} Y_{ui}^{2}
        sqSumMatrix = (workingMatrix ** 2).dot(binaryTemplate.T)
        # And compute the correlation matrix as
        #   1/|S_{uv}| \sum_{i \in S_{uv}} Y_{ui}^{2} - \mu_{uv}^{2}
        # where \mu_{uv} is the mean we computed before
        correlationMatrix = sqSumMatrix / countMatrix - avgMatrix ** 2
        # Finally, normalize the individual user variances and get rid of
        # the diagonal (subtract the identity matrix)
        # Compute the square root of the diagonal elements
        sqrtDiagonal = np.sqrt(np.diag(correlationMatrix))
        # Find the places where the nonzero elements are
        nonzeroSqrtDiagonalIndex = (sqrtDiagonal > zeroTolerance)\
                                                    .astype(sqrtDiagonal.dtype)
        # Set the zero elements to 1
        sqrtDiagonal[sqrtDiagonal < zeroTolerance] = 1.
        # Invert safely
        invSqrtDiagonal = 1/sqrtDiagonal
        # Get rid of the fake 1 inversions
        invSqrtDiagonal = invSqrtDiagonal * nonzeroSqrtDiagonalIndex
        # Make it a matrix again
        normalizationMatrix = np.diag(invSqrtDiagonal)
        # And normalize
        normalizedMatrix = normalizationMatrix.dot(
                               correlationMatrix.dot(normalizationMatrix)) \
                               - np.eye(correlationMatrix.shape[0])
        # There could be isolated nodes, which already have a 0 in the
        # diagonal, so when subtracting the identity they end up with -1 in
        # the diagonal element.
        # If this is the case, we just put back a zero in those nodes. But
        # the real problem comes if the labelID is within those isolated
        # nodes. If that's the case, then we just stop the processing;
        # there's nothing else to do.
        diagNormalizedMatrix = np.diag(np.diag(normalizedMatrix))
        isolatedNodes = np.nonzero(np.abs(diagNormalizedMatrix + 1) \
                                                              < zeroTolerance)
        normalizedMatrix[isolatedNodes] = 0.
        # Get rid of the "quasi-zeros" that could have appeared through
        # division.
        normalizedMatrix[np.abs(normalizedMatrix) < zeroTolerance] = 0.
        # Finally, create the graph
        # Number of nodes so far
        N = normalizedMatrix.shape[0]
        # Add the necessary extra dimension (because 'fuseEdges' expects a
        # set of matrices, instead of just one)
        normalizedMatrix = normalizedMatrix.reshape([1, N, N])
        # Use 'fuseEdges' to handle several desirable properties that could
        # be enforced on the graph
        nodesToKeep = [] # List of nodes to keep after some of them might
            # have been removed to satisfy the constraints
        extraComponents = [] # List where we save the rest of the isolated
            # components, if there were any
        W = graph.createGraph('fuseEdges', N,
                              {'adjacencyMatrices': normalizedMatrix,
                               'aggregationType': 'sum',
                               'normalizationType': 'no',
                               'isolatedNodes': self.keepIsolatedNodes,
                               'forceUndirected': self.forceUndirected,
                               'forceConnected': self.forceConnected,
                               'nodeList': nodesToKeep,
                               'extraComponents': extraComponents})
        # So far, the matrix output is the adjacency matrix of the largest
        # connected component, and nodesToKeep refers to those nodes.
        # At this point, it can happen that some (or all) of the selected
        # nodes are not in the graph. If none of the selected nodes is
        # there, we should stop (we have no useful problem anymore)
        IDnodesKept = 0 # Number of selected ID nodes that we are keeping
        for i in self.labelID:
            if i in nodesToKeep:
                IDnodesKept += 1
        assert IDnodesKept > 0
        # Update samples and labelID, if necessary
        if len(nodesToKeep) < N:
            # Update the node IDs
            # Get signals, IDs and labels
            trainSignals = self.samples['train']['signals']
            trainIDs = self.targetIDs['train']
            trainLabels = self.samples['train']['targets']
            validSignals = self.samples['valid']['signals']
            validIDs = self.targetIDs['valid']
            validLabels = self.samples['valid']['targets']
            testSignals = self.samples['test']['signals']
            testIDs = self.targetIDs['test']
            testLabels = self.samples['test']['targets']
            # Update the IDs
            # Train set
            trainIDsToKeep = [] # Which samples from the train set we need
                # to keep (note that if some of the nodes that were labeled
                # in the trainIDs have vanished, then we need to get rid of
                # those training samples)
            newTrainIDs = [] # We need to match the old node numbering (with
                # all the nodes) to the new numbering
            for i in range(len(trainIDs)):
                # If the train ID of the sample is in the nodes to keep
                if trainIDs[i] in nodesToKeep:
                    # We add its position to the list of samples to keep
                    # (samples whose node is not there have to be discarded)
                    trainIDsToKeep.append(i)
                    # And we have to update the ID to the new one
                    # (considering that not all nodes have been kept)
                    newTrainIDs.append(nodesToKeep.index(trainIDs[i]))
            trainIDsToKeep = np.array(trainIDsToKeep) # Convert to numpy
            newTrainIDs = np.array(newTrainIDs) # Convert to numpy
            # Valid set
            validIDsToKeep = []
            newValidIDs = []
            for i in range(len(validIDs)):
                if validIDs[i] in nodesToKeep:
                    validIDsToKeep.append(i)
                    newValidIDs.append(nodesToKeep.index(validIDs[i]))
            validIDsToKeep = np.array(validIDsToKeep) # Convert to numpy
            newValidIDs = np.array(newValidIDs) # Convert to numpy
            # Test set
            testIDsToKeep = []
            newTestIDs = []
            for i in range(len(testIDs)):
                if testIDs[i] in nodesToKeep:
                    testIDsToKeep.append(i)
                    newTestIDs.append(nodesToKeep.index(testIDs[i]))
            testIDsToKeep = np.array(testIDsToKeep) # Convert to numpy
            newTestIDs = np.array(newTestIDs) # Convert to numpy
            # And, finally, we update the signals
            trainSignals = trainSignals[trainIDsToKeep][:, nodesToKeep]
            validSignals = validSignals[validIDsToKeep][:, nodesToKeep]
            testSignals = testSignals[testIDsToKeep][:, nodesToKeep]
            # and the IDs
            trainIDs = newTrainIDs
            validIDs = newValidIDs
            testIDs = newTestIDs
            # Also update the labels (some of the samples are gone)
            trainLabels = trainLabels[trainIDsToKeep]
            validLabels = validLabels[validIDsToKeep]
            testLabels = testLabels[testIDsToKeep]
            # and store them where they belong
            self.nTrain = trainSignals.shape[0]
            self.nValid = validSignals.shape[0]
            self.nTest = testSignals.shape[0]
            self.samples['train']['signals'] = trainSignals
            self.samples['train']['targets'] = trainLabels
            self.targetIDs['train'] = trainIDs
            self.samples['valid']['signals'] = validSignals
            self.samples['valid']['targets'] = validLabels
            self.targetIDs['valid'] = validIDs
            self.samples['test']['signals'] = testSignals
            self.samples['test']['targets'] = testLabels
            self.targetIDs['test'] = testIDs
            # If the graph type is 'movie', then any removed node has a
            # repercussion in the movie list, and therefore we need to
            # update that as well
            if self.graphType == 'movie':
                if len(self.movieTitles) > 0: # Non-empty movieList
                    # Where to save the new movie list
                    movieTitles = {}
                    # Because nodes are now numbered sequentially, we need
                    # to do the same with the movieID to keep them matched
                    # (i.e., node n corresponds to movieTitles[n])
                    newMovieID = 0
                    for movieID in nodesToKeep:
                        movieTitles[newMovieID] = self.movieTitles[movieID]
                        newMovieID = newMovieID + 1
                    # Update the movie list
                    self.movieTitles = movieTitles
        # And finally, sparsify it (nearest neighbors)
        self.adjacencyMatrix = graph.sparsifyGraph(W, 'NN', self.kNN)

    def interpolateRatings(self):
        # For the zero-valued entries, we will average the value of the
        # closest nonzero elements.
        # So we need to find the neighborhood, iteratively, until we find
        # nodes in the neighborhood that have nonzero elements. And then
        # average those.
        # There are three sets of signals, so for each one of them
        for key in self.samples.keys():
            # Get the signals
            thisSignal = self.samples[key]['signals'] # B (x F) x N
            if len(thisSignal.shape) == 3: # If B x 1 x N
                assert thisSignal.shape[1] == 1
                thisSignal = thisSignal.squeeze(1) # B x N
            # Look for the elements in the signal that are zero
            zeroLocations = np.nonzero(np.abs(thisSignal) < zeroTolerance)
            # This is a tuple with two elements; each element is a 1-D
            # np.array with the row and column indices of the zero elements,
            # respectively.
            # The columns are the nodes, so we should iterate by nodes. We
            # do not want to find the neighborhood anew for each element,
            # nor go element by element; iterating node by node would take
            # at most N searches, but it is even better to proceed by
            # neighborhood size, since we can get all the neighbors at once.
            # While there are zero locations that need to be interpolated
            K = 1
            while len(zeroLocations[0]) > 0:
                # Location of nodes with zero value
                zeroNodes = np.unique(zeroLocations[1])
                # If we want to make this faster, we only want the
                # neighborhoods of the nodes that don't have a value yet.
                # The problem is that the computeNeighborhood function only
                # works on the first N values, so we need to reorder the
                # matrix so that the first elements are the nodes we
                # actually want.
                # To do this, we need to add the rest of the nodes to the
                # list of zeroNodes and then reorder the matrix
                # Full nodes
                fullNodes = [n for n in range(thisSignal.shape[1]) \
                                                       if n not in zeroNodes]
                fullNodes = np.array(fullNodes, dtype = int)
                # Complete list of nodes (concatenate them)
                allNodes = np.concatenate((zeroNodes, fullNodes))
                # Reorder the matrix
                A = self.adjacencyMatrix[allNodes, :][:, allNodes]
                # Get the neighborhood
                nbList = graph.computeNeighborhood(A, K, N = len(zeroNodes))
                # This is a list of lists. Each node has an associated list
                # of neighboring nodes.
                # But the indices of these neighboring nodes are not
                # correct, because they belong to the allNodes ordering and
                # not the original one.
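                # As a hypothetical example of this mapping: if
                # zeroNodes = [3, 7] in the original numbering, then
                # allNodes = [3, 7, 0, 1, 2, 4, ...], so a neighbor index n
                # returned for those first entries refers to node
                # allNodes[n] in the original numbering, which is why the
                # loop below maps every index through allNodes.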
                # Go for each node, and pick up the neighboring values
                # (it is more likely that we will have more samples than
                # nodes, so it should be faster to iterate through nodes).
                # For each element in the neighborhood list
                for i in range(len(nbList)):
                    # Get the actual node
                    thisNode = zeroNodes[i]
                    # Get the neighborhood (and map it to the corresponding
                    # nodes in the original ordering)
                    thisNB = [allNodes[n] for n in nbList[i]]
                    # Now, get the values at the neighborhood (which is now
                    # in the original ordering)
                    nbValues = thisSignal[:, thisNB]
                    # This gives all the neighboring values of each batch.
                    # Average the nonzero elements:
                    # Sum of the elements
                    sumSignal = np.sum(nbValues, axis = 1)
                    # Count of nonzero elements
                    countNZ = np.count_nonzero(nbValues, axis = 1)
                    # Get rid of the zero elements for division
                    countNZ[countNZ == 0] = 1
                    # Compute the average and round to an integer
                    meanSignal = np.round(sumSignal / countNZ)
                    # And now we need to place this newly computed mean
                    # signal back in the zero elements
                    zeroBatches = zeroLocations[0][zeroLocations[1] == thisNode]
                    # Add it to the signal
                    thisSignal[zeroBatches, thisNode] = meanSignal[zeroBatches]
                # Now that we have finished all nodes for the K-hop
                # neighbors, we need to update the zero elements
                zeroLocations = np.nonzero(np.abs(thisSignal) < zeroTolerance)
                # and move on to a wider neighborhood
                K += 1
            # And put it back where it goes
            self.samples[key]['signals'] = thisSignal

    def getIncompleteMatrix(self):
        return self.incompleteMatrix

    def getGraph(self):
        return self.adjacencyMatrix

    def getMovieTitles(self):
        return self.movieTitles

    def getLabelID(self, *args):
        # So, here are the options:
        #   No arguments: return the list in self.labelID
        #   One argument: it has to be samplesType, and then return all
        #       labelIDs for that sample type
        #   Two arguments: the second one can either be a list or an int,
        #       and we return IDs at random, like the getSamples() method
        if len(args) == 0:
            returnID = self.labelID
        else:
            # The first argument has to be the sample type
            samplesType = args[0]
            # Check that it is one of the possibilities
            assert samplesType == 'train' or samplesType == 'valid' \
                        or samplesType == 'test'
            returnID = self.targetIDs[samplesType]
            if len(args) == 2:
                # If it is an int, just return that number of randomly
                # chosen IDs
                if type(args[1]) is int:
                    # Total number of samples
                    nSamples = returnID.shape[0]
                    # Check that we are asked to return a number of samples
                    # that we actually have
                    assert args[1] <= nSamples
                    # Randomly choose args[1] indices
                    selectedIndices = np.random.choice(nSamples,
                                                       size = args[1],
                                                       replace = False)
                    # Select the corresponding IDs
                    returnID = returnID[selectedIndices]
                else:
                    # This has to be a list or an np.array which can serve
                    # as an indexing function
                    returnID = returnID[args[1]]
        return returnID

    def evaluate(self, yHat, y):
        # y and yHat should be of the same dimension, where dimension 0 is
        # the number of samples
        N = y.shape[0] # number of samples
        assert yHat.shape[0] == N
        # And now, get rid of any extra '1' dimension that might appear
        # involuntarily from some vectorization issues.
        y = y.squeeze()
        yHat = yHat.squeeze()
        # Yet, if there was only one sample, then the sample dimension was
        # also removed during the squeeze, so we need to add it back
        if N == 1:
            y = y.unsqueeze(0)
            yHat = yHat.unsqueeze(0)
        # Now, we compute the RMSE
        if 'torch' in repr(self.dataType):
            mse = torch.nn.functional.mse_loss(yHat, y)
            rmse = torch.sqrt(mse)
        else:
            mse = np.mean((yHat - y) ** 2)
            rmse = np.sqrt(mse)
        return rmse

    def astype(self, dataType):
        # This changes the type of the incomplete and adjacency matrices.
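        # A hypothetical usage sketch: data.astype(torch.float64) converts
        # both matrices below and, through the parent class call, all the
        # stored samples as well.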
        self.incompleteMatrix = changeDataType(self.incompleteMatrix,
                                               dataType)
        self.adjacencyMatrix = changeDataType(self.adjacencyMatrix, dataType)
        # And now, call the parent class to change the samples as well (and
        # also save the data type)
        super().astype(dataType)

    def to(self, device):
        # If the dataType is 'torch'
        if 'torch' in repr(self.dataType):
            # Change the stored attributes that are not handled by the
            # inherited method to().
            self.incompleteMatrix = self.incompleteMatrix.to(device)
            self.adjacencyMatrix = self.adjacencyMatrix.to(device)
            # And call the inherited method to move the samples (and save
            # the device)
            super().to(device)

class Flocking(_data):
    """
    Flocking: Creates synthetic trajectories for the problem of coordinating
        a team of robots to fly together while avoiding collisions. See the
        following paper for details

        E. Tolstaya, F. Gama, J. Paulos, G. Pappas, V. Kumar, and A. Ribeiro,
        "Learning Decentralized Controllers for Robot Swarms with Graph
        Neural Networks," in Conf. Robot Learning 2019. Osaka, Japan:
        Int. Found. Robotics Res., 30 Oct.-1 Nov. 2019.

    Initialization:

    Input:
        nAgents (int): number of agents
        commRadius (float): communication radius (in meters)
        repelDist (float): minimum target separation of agents (in meters)
        nTrain (int): number of training trajectories
        nValid (int): number of validation trajectories
        nTest (int): number of testing trajectories
        duration (float): duration of each trajectory (in seconds)
        samplingTime (float): time between consecutive time instants (in
            seconds)
        initGeometry ('circular', 'rectangular'): initial positioning
            geometry (default: 'circular')
        initVelValue (float): maximum initial velocity (in meters/second,
            default: 3.)
        initMinDist (float): minimum initial distance between agents (in
            meters, default: 0.1)
        accelMax (float): maximum possible acceleration (in meters/second^2,
            default: 10.)
        normalizeGraph (bool): if True, normalize the communication graph
            adjacency matrix by the maximum eigenvalue (default: True)
        doPrint (bool): if True, print messages (default: True)
        dataType (dtype): datatype for the samples created (default:
            np.float64)
        device (device): if the torch.Tensor datatype is selected, this is
            the device on which the data is saved (default: 'cpu')

    Methods:

    signals, targets = .getSamples(samplesType[, optionalArguments])
        Input:
            samplesType (string): 'train', 'valid' or 'test' to determine
                from which dataset to get the samples
            optionalArguments:
                0 optional arguments: get all the samples from the specified
                    set
                1 optional argument (int): number of samples to get (at
                    random)
                1 optional argument (list): specific indices of samples to
                    get
        Output:
            signals (dtype.array): numberSamples x 6 x numberNodes
            targets (dtype.array): numberSamples x 2 x numberNodes
            'signals' are the state variables as described in the
            corresponding paper; 'targets' is the 2-D acceleration for each
            node

    cost = .evaluate(vel = None, accel = None, initVel = None,
                     samplingTime = None)
        Input:
            vel (array): velocities; nSamples x tSamples x 2 x nAgents
            accel (array): accelerations; nSamples x tSamples x 2 x nAgents
            initVel (array): initial velocities; nSamples x 2 x nAgents
            samplingTime (float): sampling time
            >> Obs.: Either vel or (accel and initVel) have to be specified
            for the cost to be computed; if all of them are specified, only
            vel is used
        Output:
            cost (float): flocking cost as specified in eq. (13)

    .astype(dataType): change the type of the data matrix arrays.
        Input:
            dataType (dtype): target type of the variables (e.g.
                torch.float64, numpy.float64, etc.)

    .to(device): if dtype is torch.tensor, move them to the specified
        device.
        Input:
            device (string): target device to move the variables to (e.g.
                'cpu', 'cuda:0', etc.)

    state = .computeStates(pos, vel, graphMatrix, ['doPrint'])
        Input:
            pos (array): positions; nSamples x tSamples x 2 x nAgents
            vel (array): velocities; nSamples x tSamples x 2 x nAgents
            graphMatrix (array): matrix description of the communication
                graph; nSamples x tSamples x nAgents x nAgents
            'doPrint' (bool): optional argument to print outputs; if not
                used, it uses the same status set for the entire class in
                the initialization
        Output:
            state (array): states; nSamples x tSamples x 6 x nAgents

    graphMatrix = .computeCommunicationGraph(pos, commRadius, normalizeGraph,
                    ['kernelType' = 'gaussian', 'weighted' = False,
                     'doPrint'])
        Input:
            pos (array): positions; nSamples x tSamples x 2 x nAgents
            commRadius (float): communication radius (in meters)
            normalizeGraph (bool): if True, normalize the adjacency matrix
                by the largest eigenvalue
            'kernelType' ('gaussian'): kernel to apply to the distance in
                order to compute the weights of the adjacency matrix; the
                default is the 'gaussian' kernel; other kernels have to be
                coded, and their parameters have to be included as well; in
                the case of the gaussian kernel, 'kernelScale' determines
                the scale (default: 1.)
            'weighted' (bool): if True, the graph is weighted according to
                the kernel type; if False, it's just a binary adjacency
                matrix
            'doPrint' (bool): optional argument to print outputs; if not
                used, it uses the same status set for the entire class in
                the initialization
        Output:
            graphMatrix (array): adjacency matrix of the communication
                graph; nSamples x tSamples x nAgents x nAgents

    thisData = .getData(name, samplesType[, optionalArguments])
        Input:
            name (string): variable name to get (for example, 'pos', 'vel',
                etc.)
            samplesType ('train', 'test' or 'valid')
            optionalArguments:
                0 optional arguments: get all the samples from the specified
                    set
                1 optional argument (int): number of samples to get (at
                    random)
                1 optional argument (list): specific indices of samples to
                    get
        Output:
            thisData (array): specific type of data requested

    pos, vel[, accel, state, graph] = .computeTrajectory(initPos, initVel,
                    duration[, 'archit', 'accel', 'doPrint'])
        Input:
            initPos (array): initial positions; nSamples x 2 x nAgents
            initVel (array): initial velocities; nSamples x 2 x nAgents
            duration (float): duration of the trajectory (in seconds)
            Optional arguments: (either 'accel' or 'archit' have to be there)
            'archit' (nn.Module): torch architecture that computes the
                output from the states
            'accel' (array): accelerations; nSamples x tSamples x 2 x nAgents
            'doPrint' (bool): optional argument to print outputs; if not
                used, it uses the same status set for the entire class in
                the initialization
        Output:
            pos (array): positions; nSamples x tSamples x 2 x nAgents
            vel (array): velocities; nSamples x tSamples x 2 x nAgents
            Optional outputs (only if 'archit' was used):
            accel (array): accelerations; nSamples x tSamples x 2 x nAgents
            state (array): states; nSamples x tSamples x 6 x nAgents
            graph (array): adjacency matrix of the communication graph;
                nSamples x tSamples x nAgents x nAgents

    uDiff, uDistSq = .computeDifferences(u)
        Input:
            u (array): nSamples (x tSamples) x 2 x nAgents
        Output:
            uDiff (array): pairwise differences between the agent entries of
                u; nSamples (x tSamples) x 2 x nAgents x nAgents
            uDistSq (array): squared distances between agent entries of u;
                nSamples (x tSamples) x nAgents x nAgents

    pos, vel, accel = .computeOptimalTrajectory(initPos, initVel, duration,
                    samplingTime, repelDist, accelMax = 100.)
        Input:
            initPos (array): initial positions; nSamples x 2 x nAgents
            initVel (array): initial velocities; nSamples x 2 x nAgents
            duration (float): duration of the trajectory (in seconds)
            samplingTime (float): time elapsed between consecutive time
                instants (in seconds)
            repelDist (float): minimum desired distance between agents (in
                meters)
            accelMax (float, default = 100.): maximum possible acceleration
        Output:
            pos (array): positions; nSamples x tSamples x 2 x nAgents
            vel (array): velocities; nSamples x tSamples x 2 x nAgents
            accel (array): accelerations; nSamples x tSamples x 2 x nAgents

    initPos, initVel = .computeInitialPositions(nAgents, nSamples,
                    commRadius, minDist = 0.1, geometry = 'rectangular',
                    xMaxInitVel = 3., yMaxInitVel = 3.)
        Input:
            nAgents (int): number of agents
            nSamples (int): number of sample trajectories
            commRadius (float): communication radius (in meters)
            minDist (float): minimum initial distance between agents (in
                meters)
            geometry ('rectangular', 'circular'): initial geometry
            xMaxInitVel (float): maximum velocity in the x-axis
            yMaxInitVel (float): maximum velocity in the y-axis
        Output:
            initPos (array): initial positions; nSamples x 2 x nAgents
            initVel (array): initial velocities; nSamples x 2 x nAgents

    .saveVideo(saveDir, pos[, optionalArguments], commGraph = None,
               [optionalKeyArguments])
        Input:
            saveDir (os.path, string): directory where to save the
                trajectory videos
            pos (array): positions; nSamples x tSamples x 2 x nAgents
            optionalArguments:
                0 optional arguments: use all the samples
                1 optional argument (int): number of samples to use (at
                    random)
                1 optional argument (list): specific indices of samples to
                    use
            commGraph (array): adjacency matrix of the communication graph;
                nSamples x tSamples x nAgents x nAgents; if not None, then
                this array is used to produce snapshots of the video that
                include the communication graph at that time instant
            'doPrint' (bool): optional argument to print outputs; if not
                used, it uses the same status set for the entire class in
                the initialization
            'videoSpeed' (float): how much faster or slower the video is
                reproduced (default: 1.)
            'showVideoSpeed' (bool): if True, show the legend with the video
                speed in the video; by default it is shown whenever the
                video speed is different from 1.
            'vel' (array): velocities; nSamples x tSamples x 2 x nAgents
            'showCost' (bool): if True and velocities are set, the snapshots
                will show the instantaneous cost (default: True)
            'showArrows' (bool): if True and velocities are set, the
                snapshots will show the arrows of the velocities
                (default: True)
    """

    def __init__(self, nAgents, commRadius, repelDist,
                 nTrain, nValid, nTest,
                 duration, samplingTime,
                 initGeometry = 'circular', initVelValue = 3.,
                 initMinDist = 0.1, accelMax = 10.,
                 normalizeGraph = True, doPrint = True,
                 dataType = np.float64, device = 'cpu'):
        # Initialize parent class
        super().__init__()
        # Save the relevant input information
        #   Number of nodes
        self.nAgents = nAgents
        self.commRadius = commRadius
        self.repelDist = repelDist
        #   Number of samples
        self.nTrain = nTrain
        self.nValid = nValid
        self.nTest = nTest
        nSamples = nTrain + nValid + nTest
        #   Geometry
        self.mapWidth = None
        self.mapHeight = None
        #   Agents
        self.initGeometry = initGeometry
        self.initVelValue = initVelValue
        self.initMinDist = initMinDist
        self.accelMax = accelMax
        #   Duration of the trajectory
        self.duration = float(duration)
        self.samplingTime = samplingTime
        #   Data
        self.normalizeGraph = normalizeGraph
        self.dataType = dataType
        self.device = device
        #   Options
        self.doPrint = doPrint
        #   Places to store the data
        self.initPos = None
        self.initVel = None
        self.pos = None
        self.vel = None
        self.accel = None
        self.commGraph = None
        self.state = None

        if self.doPrint:
            print("\tComputing initial conditions...",
                  end = ' ', flush = True)
        # Compute the initial positions
        initPosAll, initVelAll = self.computeInitialPositions(
                                          self.nAgents, nSamples,
                                          self.commRadius,
                                          minDist = self.initMinDist,
                                          geometry = self.initGeometry,
                                          xMaxInitVel = self.initVelValue,
                                          yMaxInitVel = self.initVelValue
                                          )
        # Once we have all positions and velocities, we will need to split
        # them into the corresponding datasets (train, valid and test)
        self.initPos = {}
        self.initVel = {}

        if self.doPrint:
            print("OK", flush = True)
            # Erase the previous label, then print the new one
            print("\tComputing the optimal trajectories...",
                  end = ' ', flush = True)
        # Compute the optimal trajectory
        posAll, velAll, accelAll = self.computeOptimalTrajectory(
                                           initPosAll, initVelAll,
                                           self.duration, self.samplingTime,
                                           self.repelDist,
                                           accelMax = self.accelMax)
        self.pos = {}
        self.vel = {}
        self.accel = {}

        if self.doPrint:
            print("OK", flush = True)
            print("\tComputing the communication graphs...",
                  end = ' ', flush = True)
        # Compute the communication graph
        commGraphAll = self.computeCommunicationGraph(posAll,
                                                      self.commRadius,
                                                      self.normalizeGraph)
        self.commGraph = {}

        if self.doPrint:
            print("OK", flush = True)
            print("\tComputing the agent states...", end = ' ', flush = True)
        # Compute the states
        stateAll = self.computeStates(posAll, velAll, commGraphAll)
        self.state = {}

        if self.doPrint:
            # Erase the label
            print("OK", flush = True)

        # Separate the states into training, validation and testing samples
        # and save them
        # Training set
        self.samples['train']['signals'] = stateAll[0:self.nTrain].copy()
        self.samples['train']['targets'] = accelAll[0:self.nTrain].copy()
        self.initPos['train'] = initPosAll[0:self.nTrain]
        self.initVel['train'] = initVelAll[0:self.nTrain]
        self.pos['train'] = posAll[0:self.nTrain]
        self.vel['train'] = velAll[0:self.nTrain]
        self.accel['train'] = accelAll[0:self.nTrain]
        self.commGraph['train'] = commGraphAll[0:self.nTrain]
        self.state['train'] = stateAll[0:self.nTrain]
        # Validation set
        startSample = self.nTrain
        endSample = self.nTrain + self.nValid
        self.samples['valid']['signals'] = \
                                       stateAll[startSample:endSample].copy()
        self.samples['valid']['targets'] = \
                                       accelAll[startSample:endSample].copy()
        self.initPos['valid'] = initPosAll[startSample:endSample]
        self.initVel['valid'] = initVelAll[startSample:endSample]
        self.pos['valid'] = posAll[startSample:endSample]
        self.vel['valid'] = velAll[startSample:endSample]
        self.accel['valid'] = accelAll[startSample:endSample]
        self.commGraph['valid'] = commGraphAll[startSample:endSample]
        self.state['valid'] = stateAll[startSample:endSample]
        # Testing set
        startSample = self.nTrain + self.nValid
        endSample = self.nTrain + self.nValid + self.nTest
        self.samples['test']['signals'] = \
                                       stateAll[startSample:endSample].copy()
        self.samples['test']['targets'] = \
                                       accelAll[startSample:endSample].copy()
        self.initPos['test'] = initPosAll[startSample:endSample]
        self.initVel['test'] = initVelAll[startSample:endSample]
        self.pos['test'] = posAll[startSample:endSample]
        self.vel['test'] = velAll[startSample:endSample]
        self.accel['test'] = accelAll[startSample:endSample]
        self.commGraph['test'] = commGraphAll[startSample:endSample]
        self.state['test'] = stateAll[startSample:endSample]

        # Change data to the specified type and device
        self.astype(self.dataType)
        self.to(self.device)

    def astype(self, dataType):
        # Change all the stored arrays to the correct type
        datasetType = ['train', 'valid', 'test']
        for key in datasetType:
            self.initPos[key] = changeDataType(self.initPos[key], dataType)
            self.initVel[key] = changeDataType(self.initVel[key], dataType)
            self.pos[key] = changeDataType(self.pos[key], dataType)
            self.vel[key] = changeDataType(self.vel[key], dataType)
            self.accel[key] = changeDataType(self.accel[key], dataType)
            self.commGraph[key] = changeDataType(self.commGraph[key],
                                                 dataType)
            self.state[key] = changeDataType(self.state[key], dataType)
        # And call the parent
        super().astype(dataType)

    def to(self, device):
        # Check that the data is actually torch
        if 'torch' in repr(self.dataType):
            datasetType = ['train', 'valid', 'test']
            # Move the data
            for key in datasetType:
                self.initPos[key] = self.initPos[key].to(device)
                self.initVel[key] = self.initVel[key].to(device)
                self.pos[key] = self.pos[key].to(device)
                self.vel[key] = self.vel[key].to(device)
                self.accel[key] = self.accel[key].to(device)
                self.commGraph[key] = self.commGraph[key].to(device)
                self.state[key] = self.state[key].to(device)
            super().to(device)

    def expandDims(self):
        # Just avoid the 'expandDims' method of the parent class
        pass

    def computeStates(self, pos, vel, graphMatrix, **kwargs):
        # We get the following inputs:
        #   positions: nSamples x tSamples x 2 x nAgents
        #   velocities: nSamples x tSamples x 2 x nAgents
        #   graphMatrix: nSamples x tSamples x nAgents x nAgents
        # And we want to build the state, which is a vector of dimension 6
        # on each node; that is, the output shape is
        #   nSamples x tSamples x 6 x nAgents
        # The printing for this method can be set independently; if it is
        # not, use the default of the data object
        if 'doPrint' in kwargs.keys():
            doPrint = kwargs['doPrint']
        else:
            doPrint = self.doPrint

        # Check correct dimensions
        assert len(pos.shape) == len(vel.shape) == len(graphMatrix.shape) == 4
        nSamples = pos.shape[0]
        tSamples = pos.shape[1]
        assert pos.shape[2] == 2
        nAgents = pos.shape[3]
        assert vel.shape[0] == graphMatrix.shape[0] == nSamples
        assert vel.shape[1] == graphMatrix.shape[1] == tSamples
        assert vel.shape[2] == 2
        assert vel.shape[3] == graphMatrix.shape[2] == graphMatrix.shape[3] \
                    == nAgents

        # If we have a lot of samples and a particularly long sequence, this
        # is bound to fail, memory-wise, so let's do it time instant by time
        # instant if we have a large number of time instants, and also split
        # the batches
        maxTimeSamples = 200 # Maximum number of time samples before
            # starting to do this time by time
        maxBatchSize = 100 # Maximum number of samples to process at a given
            # time
        # Compute the number of samples, and split the indices accordingly
        if nSamples < maxBatchSize:
            nBatches = 1
            batchSize = [nSamples]
        elif nSamples % maxBatchSize != 0:
            # If we know it's not divisible, then do floor division and
            # add one more batch
            nBatches = nSamples // maxBatchSize + 1
            batchSize = [maxBatchSize] * nBatches
            # But the last batch is actually smaller, so just keep the
            # remaining samples there
            batchSize[-1] = nSamples - sum(batchSize[0:-1])
        # If they fit evenly, then just do so.
        else:
            nBatches = int(nSamples/maxBatchSize)
            batchSize = [maxBatchSize] * nBatches
        # batchIndex is used to determine the first and last element of each
        # batch. We need to add the 0 because it's the first index.
        batchIndex = np.cumsum(batchSize).tolist()
        batchIndex = [0] + batchIndex

        # Create the output state variable
        state = np.zeros((nSamples, tSamples, 6, nAgents))

        for b in range(nBatches):
            # Pick the batch elements
            posBatch = pos[batchIndex[b]:batchIndex[b+1]]
            velBatch = vel[batchIndex[b]:batchIndex[b+1]]
            graphMatrixBatch = graphMatrix[batchIndex[b]:batchIndex[b+1]]

            if tSamples > maxTimeSamples:
                # For each time instant
                for t in range(tSamples):
                    # Now, we need to compute the differences, in velocities
                    # and in positions, for each agent, for each time
                    # instant
                    posDiff, posDistSq = \
                                    self.computeDifferences(posBatch[:,t,:,:])
                    #   posDiff: batchSize[b] x 2 x nAgents x nAgents
                    #   posDistSq: batchSize[b] x nAgents x nAgents
                    velDiff, _ = self.computeDifferences(velBatch[:,t,:,:])
                    #   velDiff: batchSize[b] x 2 x nAgents x nAgents
                    # Next, we need to get rid of all those places where
                    # there are no neighborhoods. That is given by the
                    # nonzero elements of the graph matrix.
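                    # As an illustrative case: in a hypothetical two-agent
                    # pair that is out of communication range at time t, the
                    # corresponding entry of the graph matrix is zero, so
                    # their position and velocity differences are zeroed out
                    # below and do not contribute to the state sums.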
                    graphMatrixTime = (np.abs(graphMatrixBatch[:,t,:,:]) \
                                                             > zeroTolerance)\
                                                           .astype(pos.dtype)
                    #   graphMatrixTime: batchSize[b] x nAgents x nAgents
                    # We also need to invert the squares of the distances
                    posDistSqInv = invertTensorEW(posDistSq)
                    #   posDistSqInv: batchSize[b] x nAgents x nAgents
                    # Now we add the extra dimensions so that all the
                    # multiplications are adequate
                    graphMatrixTime = np.expand_dims(graphMatrixTime, 1)
                    #   graphMatrixTime: batchSize[b] x 1 x nAgents x nAgents
                    # Then, we can get rid of non-neighbors
                    posDiff = posDiff * graphMatrixTime
                    posDistSqInv = np.expand_dims(posDistSqInv, 1) \
                                                             * graphMatrixTime
                    velDiff = velDiff * graphMatrixTime
                    # Finally, we can compute the states
                    stateVel = np.sum(velDiff, axis = 3)
                    #   stateVel: batchSize[b] x 2 x nAgents
                    statePosFourth = np.sum(posDiff * (posDistSqInv ** 2),
                                            axis = 3)
                    #   statePosFourth: batchSize[b] x 2 x nAgents
                    statePosSq = np.sum(posDiff * posDistSqInv, axis = 3)
                    #   statePosSq: batchSize[b] x 2 x nAgents
                    # Concatenate the states and save the result
                    state[batchIndex[b]:batchIndex[b+1],t,:,:] = \
                                            np.concatenate((stateVel,
                                                            statePosFourth,
                                                            statePosSq),
                                                           axis = 1)
                    #   batchSize[b] x 6 x nAgents

                    if doPrint:
                        # Sample percentage count
                        percentageCount = int(100*(t+1+b*tSamples) \
                                                         /(nBatches*tSamples))
                        if t == 0 and b == 0:
                            # It's the first one, so just print it
                            print("%3d%%" % percentageCount,
                                  end = '', flush = True)
                        else:
                            # Erase the previous characters
                            print('\b \b' * 4 + "%3d%%" % percentageCount,
                                  end = '', flush = True)
            else:
                # Now, we need to compute the differences, in velocities and
                # in positions, for each agent, for each time instant
                posDiff, posDistSq = self.computeDifferences(posBatch)
                #   posDiff: batchSize[b] x tSamples x 2 x nAgents x nAgents
                #   posDistSq: batchSize[b] x tSamples x nAgents x nAgents
                velDiff, _ = self.computeDifferences(velBatch)
                #   velDiff: batchSize[b] x tSamples x 2 x nAgents x nAgents
                # Next, we need to get rid of all those places where there
                # are no neighborhoods. That is given by the nonzero
                # elements of the graph matrix.
                graphMatrixBatch = (np.abs(graphMatrixBatch) > zeroTolerance)\
                                                           .astype(pos.dtype)
                #   graphMatrixBatch: batchSize[b] x tSamples x nAgents
                #       x nAgents
                # We also need to invert the squares of the distances
                posDistSqInv = invertTensorEW(posDistSq)
                #   posDistSqInv: batchSize[b] x tSamples x nAgents x nAgents
                # Now we add the extra dimensions so that all the
                # multiplications are adequate
                graphMatrixBatch = np.expand_dims(graphMatrixBatch, 2)
                #   graphMatrixBatch: batchSize[b] x tSamples x 1 x nAgents
                #       x nAgents
                # Then, we can get rid of non-neighbors
                posDiff = posDiff * graphMatrixBatch
                posDistSqInv = np.expand_dims(posDistSqInv, 2) \
                                                            * graphMatrixBatch
                velDiff = velDiff * graphMatrixBatch
                # Finally, we can compute the states
                stateVel = np.sum(velDiff, axis = 4)
                #   stateVel: batchSize[b] x tSamples x 2 x nAgents
                statePosFourth = np.sum(posDiff * (posDistSqInv ** 2),
                                        axis = 4)
                #   statePosFourth: batchSize[b] x tSamples x 2 x nAgents
                statePosSq = np.sum(posDiff * posDistSqInv, axis = 4)
                #   statePosSq: batchSize[b] x tSamples x 2 x nAgents
                # Concatenate the states and save the result
                state[batchIndex[b]:batchIndex[b+1]] = \
                                        np.concatenate((stateVel,
                                                        statePosFourth,
                                                        statePosSq),
                                                       axis = 2)
                #   state: batchSize[b] x tSamples x 6 x nAgents

                if doPrint:
                    # Sample percentage count
                    percentageCount = int(100*(b+1)/nBatches)
                    if b == 0:
                        # It's the first one, so just print it
                        print("%3d%%" % percentageCount,
                              end = '', flush = True)
                    else:
                        # Erase the previous characters
                        print('\b \b' * 4 + "%3d%%" % percentageCount,
                              end = '', flush = True)

        # Print
        if doPrint:
            # Erase the percentage
            print('\b \b' * 4, end = '', flush = True)

        return state

    def computeCommunicationGraph(self, pos, commRadius, normalizeGraph,
                                  **kwargs):
        # Take in the positions and the communication radius, and return the
        # trajectory of communication graphs
        # Input will be of shape
        #   nSamples x tSamples x 2 x nAgents
        # Output will be of shape
        #   nSamples x tSamples x nAgents x nAgents

        assert commRadius > 0
        assert len(pos.shape) == 4
        nSamples = pos.shape[0]
        tSamples = pos.shape[1]
        assert pos.shape[2] == 2
        nAgents = pos.shape[3]

        # Graph type options
        #   Kernel type (only Gaussian implemented so far)
        if 'kernelType' in kwargs.keys():
            kernelType = kwargs['kernelType']
        else:
            kernelType = 'gaussian'
        #   Decide whether the graph is weighted or not
        if 'weighted' in kwargs.keys():
            weighted = kwargs['weighted']
        else:
            weighted = False
        #   If it is a Gaussian kernel, we need to determine the scale
        if kernelType == 'gaussian':
            if 'kernelScale' in kwargs.keys():
                kernelScale = kwargs['kernelScale']
            else:
                kernelScale = 1.
        # The printing for this method can be set independently; if it is
        # not, use the default of the data object
        if 'doPrint' in kwargs.keys():
            doPrint = kwargs['doPrint']
        else:
            doPrint = self.doPrint

        # If we have a lot of samples and a particularly long sequence, this
        # is bound to fail, memory-wise, so let's do it time instant by time
        # instant if we have a large number of time instants, and also split
        # the batches
        maxTimeSamples = 200 # Maximum number of time samples before
            # starting to do this time by time
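        # As a hypothetical batching example: nSamples = 250 with
        # maxBatchSize = 100 gives nBatches = 3, batchSize = [100, 100, 50]
        # and batchIndex = [0, 100, 200, 250], so batch b spans
        # batchIndex[b]:batchIndex[b+1].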
        maxBatchSize = 100 # Maximum number of samples to process at a given
            # time
        # Compute the number of samples, and split the indices accordingly
        if nSamples < maxBatchSize:
            nBatches = 1
            batchSize = [nSamples]
        elif nSamples % maxBatchSize != 0:
            # If we know it's not divisible, then do floor division and
            # add one more batch
            nBatches = nSamples // maxBatchSize + 1
            batchSize = [maxBatchSize] * nBatches
            # But the last batch is actually smaller, so just keep the
            # remaining samples there
            batchSize[-1] = nSamples - sum(batchSize[0:-1])
        # If they fit evenly, then just do so.
        else:
            nBatches = int(nSamples/maxBatchSize)
            batchSize = [maxBatchSize] * nBatches
        # batchIndex is used to determine the first and last element of each
        # batch. We need to add the 0 because it's the first index.
        batchIndex = np.cumsum(batchSize).tolist()
        batchIndex = [0] + batchIndex

        # Create the output graph variable
        graphMatrix = np.zeros((nSamples, tSamples, nAgents, nAgents))

        for b in range(nBatches):
            # Pick the batch elements
            posBatch = pos[batchIndex[b]:batchIndex[b+1]]

            if tSamples > maxTimeSamples:
                # If the trajectories are longer than maxTimeSamples points,
                # then do it time by time.
                # For each time instant
                for t in range(tSamples):
                    # Let's start by computing the squared distances
                    _, distSq = self.computeDifferences(posBatch[:,t,:,:])
                    # Apply the kernel
                    if kernelType == 'gaussian':
                        graphMatrixTime = np.exp(-kernelScale * distSq)
                    else:
                        graphMatrixTime = distSq
                    # Now let's place zeros in all places whose distance is
                    # greater than the radius
                    graphMatrixTime[distSq > (commRadius ** 2)] = 0.
                    # Set the diagonal elements to zero
                    graphMatrixTime[:,
                                    np.arange(0,nAgents),
                                    np.arange(0,nAgents)] = 0.
                    # If it is unweighted, force all nonzero values to be 1
                    if not weighted:
                        graphMatrixTime = (graphMatrixTime > zeroTolerance)\
                                                         .astype(distSq.dtype)
                    if normalizeGraph:
                        isSymmetric = np.allclose(graphMatrixTime,
                                                  np.transpose(graphMatrixTime,
                                                               axes = [0,2,1]))
                        # This tries to make the computation faster: only
                        # the eigenvalues are needed, and while there is a
                        # cost involved in checking whether the matrix is
                        # symmetric, experiments found that it is still
                        # faster to use the symmetric eigenvalue algorithm
                        # whenever possible
                        if isSymmetric:
                            W = np.linalg.eigvalsh(graphMatrixTime)
                        else:
                            W = np.linalg.eigvals(graphMatrixTime)
                        maxEigenvalue = np.max(np.real(W), axis = 1)
                        #   maxEigenvalue: batchSize[b]
                        # Reshape to be able to divide by the graph matrix
                        maxEigenvalue = maxEigenvalue.reshape((batchSize[b],
                                                               1, 1))
                        # Normalize
                        graphMatrixTime = graphMatrixTime / maxEigenvalue
                    # And put it in the corresponding time instant
                    graphMatrix[batchIndex[b]:batchIndex[b+1],t,:,:] = \
                                                               graphMatrixTime

                    if doPrint:
                        # Sample percentage count
                        percentageCount = int(100*(t+1+b*tSamples) \
                                                         /(nBatches*tSamples))
                        if t == 0 and b == 0:
                            # It's the first one, so just print it
                            print("%3d%%" % percentageCount,
                                  end = '', flush = True)
                        else:
                            # Erase the previous characters
                            print('\b \b' * 4 + "%3d%%" % percentageCount,
                                  end = '', flush = True)
            else:
                # Let's start by computing the squared distances
                _, distSq = self.computeDifferences(posBatch)
                # Apply the kernel
                if kernelType == 'gaussian':
                    graphMatrixBatch = np.exp(-kernelScale * distSq)
                else:
                    graphMatrixBatch = distSq
                # Now let's place zeros in all places whose distance is
                # greater than the radius
                graphMatrixBatch[distSq > (commRadius ** 2)] = 0.
                # Set the diagonal elements to zero
                graphMatrixBatch[:, :,
                                 np.arange(0,nAgents),
                                 np.arange(0,nAgents)] = 0.
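                # A design note on the normalization below: dividing by the
                # largest eigenvalue keeps the spectral radius of the graph
                # matrix at 1, so repeated applications of it inside graph
                # filters do not blow up; e.g., a hypothetical binary
                # triangle graph (three mutually connected agents) has
                # maximum eigenvalue 2, so all its entries become 1/2.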
# If it is unweighted, force all nonzero values to be 1 if not weighted: graphMatrixBatch = (graphMatrixBatch > zeroTolerance)\ .astype(distSq.dtype) if normalizeGraph: isSymmetric = np.allclose(graphMatrixBatch, np.transpose(graphMatrixBatch, axes = [0,1,3,2])) # Tries to make the computation faster if isSymmetric: W = np.linalg.eigvalsh(graphMatrixBatch) else: W = np.linalg.eigvals(graphMatrixBatch) maxEigenvalue = np.max(np.real(W), axis = 2) # batchSize[b] x tSamples # Reshape to be able to divide by the graph matrix maxEigenvalue = maxEigenvalue.reshape((batchSize[b], tSamples, 1, 1)) # Normalize graphMatrixBatch = graphMatrixBatch / maxEigenvalue # Store graphMatrix[batchIndex[b]:batchIndex[b+1]] = graphMatrixBatch if doPrint: # Sample percentage count percentageCount = int(100*(b+1)/nBatches) if b == 0: # It's the first one, so just print it print("%3d%%" % percentageCount, end = '', flush = True) else: # Erase the previous characters print('\b \b' * 4 + "%3d%%" % percentageCount, end = '', flush = True) # Print if doPrint: # Erase the percentage print('\b \b' * 4, end = '', flush = True) return graphMatrix def getData(self, name, samplesType, *args): # samplesType: train, valid, test # args: 0 args, give back all # args: 1 arg: if int, give that number of samples, chosen at random # args: 1 arg: if list, give those samples precisely. # Check that the type is one of the possible ones assert samplesType == 'train' or samplesType == 'valid' \ or samplesType == 'test' # Check that the number of extra arguments fits assert len(args) <= 1 # Check that the name is actually an attribute assert name in dir(self) # Get the desired attribute thisDataDict = getattr(self, name) # Check it's a dictionary and that it has the corresponding key assert type(thisDataDict) is dict assert samplesType in thisDataDict.keys() # Get the data now thisData = thisDataDict[samplesType] # Get the dimension length thisDataDims = len(thisData.shape) # Check that it has at least two dimension, where the first one is # always the number of samples assert thisDataDims > 1 if len(args) == 1: # If it is an int, just return that number of randomly chosen # samples. if type(args[0]) == int: nSamples = thisData.shape[0] # total number of samples # We can't return more samples than there are available assert args[0] <= nSamples # Randomly choose args[0] indices selectedIndices = np.random.choice(nSamples, size = args[0], replace = False) # Select the corresponding samples thisData = thisData[selectedIndices] else: # The fact that we put else here instead of elif type()==list # allows for np.array to be used as indices as well. In general, # any variable with the ability to index. thisData = thisData[args[0]] # If we only selected a single element, then the nDataPoints dim # has been left out. So if we have less dimensions, we have to # put it back if len(thisData.shape) < thisDataDims: if 'torch' in repr(thisData.dtype): thisData =thisData.unsqueeze(0) else: thisData = np.expand_dims(thisData, axis = 0) return thisData def evaluate(self, vel = None, accel = None, initVel = None, samplingTime = None): # It is optional to add a different sampling time, if not, it uses # the internal one if samplingTime is None: # If there's no argument use the internal sampling time samplingTime = self.samplingTime # Check whether we have vel, or accel and initVel (i.e. 
we are either # given the velocities, or we are given the elements to compute them) if vel is not None: assert len(vel.shape) == 4 nSamples = vel.shape[0] tSamples = vel.shape[1] assert vel.shape[2] == 2 nAgents = vel.shape[3] elif accel is not None and initVel is not None: assert len(accel.shape) == 4 and len(initVel.shape) == 3 nSamples = accel.shape[0] tSamples = accel.shape[1] assert accel.shape[2] == 2 nAgents = accel.shape[3] assert initVel.shape[0] == nSamples assert initVel.shape[1] == 2 assert initVel.shape[2] == nAgents # Now that we know we have a accel and init velocity, compute the # velocity trajectory # Compute the velocity trajectory if 'torch' in repr(accel.dtype): # Check that initVel is also torch assert 'torch' in repr(initVel.dtype) # Create the tensor to save the velocity trajectory vel = torch.zeros(nSamples,tSamples,2,nAgents, dtype = accel.dtype, device = accel.device) # Add the initial velocity vel[:,0,:,:] = initVel.clone().detach() else: # Create the space vel = np.zeros((nSamples, tSamples, 2, nAgents), dtype=accel.dtype) # Add the initial velocity vel[:,0,:,:] = initVel.copy() # Go over time for t in range(1,tSamples): # Compute velocity vel[:,t,:,:] = accel[:,t-1,:,:] * samplingTime + vel[:,t-1,:,:] # Check that I did enter one of the if clauses assert vel is not None # And now that we have the velocities, we can compute the cost if 'torch' in repr(vel.dtype): # Average velocity for time t, averaged across agents avgVel = torch.mean(vel, dim = 3) # nSamples x tSamples x 2 # Compute the difference in velocity between each agent and the # mean velocity diffVel = vel - avgVel.unsqueeze(3) # nSamples x tSamples x 2 x nAgents # Compute the MSE velocity diffVelNorm = torch.sum(diffVel ** 2, dim = 2) # nSamples x tSamples x nAgents # Average over agents diffVelAvg = torch.mean(diffVelNorm, dim = 2) # nSamples x tSamples # Sum over time costPerSample = torch.sum(diffVelAvg, dim = 1) # nSamples # Final average cost cost = torch.mean(costPerSample) else: # Repeat for numpy avgVel = np.mean(vel, axis = 3) # nSamples x tSamples x 2 diffVel = vel - np.tile(np.expand_dims(avgVel, 3), (1, 1, 1, nAgents)) # nSamples x tSamples x 2 x nAgents diffVelNorm = np.sum(diffVel ** 2, axis = 2) # nSamples x tSamples x nAgents diffVelAvg = np.mean(diffVelNorm, axis = 2) # nSamples x tSamples costPerSample = np.sum(diffVelAvg, axis = 1) # nSamples cost = np.mean(costPerSample) # scalar return cost def computeTrajectory(self, initPos, initVel, duration, **kwargs): # Check initPos is of shape batchSize x 2 x nAgents assert len(initPos.shape) == 3 batchSize = initPos.shape[0] assert initPos.shape[1] nAgents = initPos.shape[2] # Check initVel is of shape batchSize x 2 x nAgents assert len(initVel.shape) == 3 assert initVel.shape[0] == batchSize assert initVel.shape[1] == 2 assert initVel.shape[2] == nAgents # Check what kind of data it is # This is because all the functions are numpy, but if this was # torch, we need to return torch, to make it consistent if 'torch' in repr(initPos.dtype): assert 'torch' in repr(initVel.dtype) useTorch = True device = initPos.device assert initVel.device == device else: useTorch = False # Create time line time = np.arange(0, duration, self.samplingTime) tSamples = len(time) # Here, we have two options, or we're given the acceleration or the # architecture assert 'archit' in kwargs.keys() or 'accel' in kwargs.keys() # Flags to determine which method to use useArchit = False useAccel = False if 'archit' in kwargs.keys(): archit = kwargs['archit'] # This is a 
torch.nn.Module architecture architDevice = list(archit.parameters())[0].device useArchit = True elif 'accel' in kwargs.keys(): accel = kwargs['accel'] # accel has to be of shape batchSize x tSamples x 2 x nAgents assert len(accel.shape) == 4 assert accel.shape[0] == batchSize assert accel.shape[1] == tSamples assert accel.shape[2] == 2 assert accel.shape[3] == nAgents if useTorch: assert 'torch' in repr(accel.dtype) useAccel = True # Decide on printing or not: if 'doPrint' in kwargs.keys(): doPrint = kwargs['doPrint'] else: doPrint = self.doPrint # Use default # Now create the outputs that will be filled afterwards pos = np.zeros((batchSize, tSamples, 2, nAgents), dtype = np.float64) vel = np.zeros((batchSize, tSamples, 2, nAgents), dtype = np.float64) if useArchit: accel = np.zeros((batchSize, tSamples, 2, nAgents), dtype=np.float64) state = np.zeros((batchSize, tSamples, 6, nAgents), dtype=np.float64) graph = np.zeros((batchSize, tSamples, nAgents, nAgents), dtype = np.float64) # Assign the initial positions and velocities if useTorch: pos[:,0,:,:] = initPos.cpu().numpy() vel[:,0,:,:] = initVel.cpu().numpy() if useAccel: accel = accel.cpu().numpy() else: pos[:,0,:,:] = initPos.copy() vel[:,0,:,:] = initVel.copy() if doPrint: # Sample percentage count percentageCount = int(100/tSamples) # Print new value print("%3d%%" % percentageCount, end = '', flush = True) # Now, let's get started: for t in range(1, tSamples): # If it is architecture-based, we need to compute the state, and # for that, we need to compute the graph if useArchit: # Adjust pos value for graph computation thisPos = np.expand_dims(pos[:,t-1,:,:], 1) # Compute graph thisGraph = self.computeCommunicationGraph(thisPos, self.commRadius, True, doPrint = False) # Save graph graph[:,t-1,:,:] = thisGraph.squeeze(1) # Adjust vel value for state computation thisVel = np.expand_dims(vel[:,t-1,:,:], 1) # Compute state thisState = self.computeStates(thisPos, thisVel, thisGraph, doPrint = False) # Save state state[:,t-1,:,:] = thisState.squeeze(1) # Compute the output of the architecture # Note that we need the collection of all time instants up # to now, because when we do the communication exchanges, # it involves past times.
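# (Shape note, for illustration: with batch size B, state[:,0:t] below gives x of shape B x t x 6 x nAgents and graph[:,0:t] gives S of shape B x t x nAgents x nAgents; the architecture's output keeps the time dimension, so only its last time step is used.)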
x = torch.tensor(state[:,0:t,:,:], device = architDevice) S = torch.tensor(graph[:,0:t,:,:], device = architDevice) with torch.no_grad(): thisAccel = archit(x, S) # Now that we have computed the acceleration, we only care # about the last element in time thisAccel = thisAccel.cpu().numpy()[:,-1,:,:] thisAccel[thisAccel > self.accelMax] = self.accelMax thisAccel[thisAccel < -self.accelMax] = -self.accelMax # And save it accel[:,t-1,:,:] = thisAccel # Now that we have the acceleration, we can update position and # velocity vel[:,t,:,:] = accel[:,t-1,:,:] * self.samplingTime + vel[:,t-1,:,:] pos[:,t,:,:] = accel[:,t-1,:,:] * (self.samplingTime ** 2)/2 + \ vel[:,t-1,:,:] * self.samplingTime + pos[:,t-1,:,:] if doPrint: # Sample percentage count percentageCount = int(100*(t+1)/tSamples) # Erase previous value and print new value print('\b \b' * 4 + "%3d%%" % percentageCount, end = '', flush = True) # And we're missing the last values of graph, state and accel, so # let's compute them for completeness # Graph thisPos = np.expand_dims(pos[:,-1,:,:], 1) thisGraph = self.computeCommunicationGraph(thisPos, self.commRadius, True, doPrint = False) graph[:,-1,:,:] = thisGraph.squeeze(1) # State thisVel = np.expand_dims(vel[:,-1,:,:], 1) thisState = self.computeStates(thisPos, thisVel, thisGraph, doPrint = False) state[:,-1,:,:] = thisState.squeeze(1) # Accel x = torch.tensor(state).to(architDevice) S = torch.tensor(graph).to(architDevice) with torch.no_grad(): thisAccel = archit(x, S) thisAccel = thisAccel.cpu().numpy()[:,-1,:,:] thisAccel[thisAccel > self.accelMax] = self.accelMax thisAccel[thisAccel < -self.accelMax] = -self.accelMax # And save it accel[:,-1,:,:] = thisAccel # Print if doPrint: # Erase the percentage print('\b \b' * 4, end = '', flush = True) # After we have finished, turn it back into tensor, if required if useTorch: pos = torch.tensor(pos).to(device) vel = torch.tensor(vel).to(device) accel = torch.tensor(accel).to(device) # And return it if useArchit: return pos, vel, accel, state, graph elif useAccel: return pos, vel def computeDifferences(self, u): # Takes as input a tensor of shape # nSamples x tSamples x 2 x nAgents # or of shape # nSamples x 2 x nAgents # And returns the elementwise difference u_i - u_j of shape # nSamples (x tSamples) x 2 x nAgents x nAgents # And the distance squared ||u_i - u_j||^2 of shape # nSamples (x tSamples) x nAgents x nAgents # Check dimensions assert len(u.shape) == 3 or len(u.shape) == 4 # If it has shape 3, which means it's only a single time instant, then # add the extra dimension so we move along assuming we have multiple # time instants if len(u.shape) == 3: u = np.expand_dims(u, 1) hasTimeDim = False else: hasTimeDim = True # Now we have that pos always has shape # nSamples x tSamples x 2 x nAgents nSamples = u.shape[0] tSamples = u.shape[1] assert u.shape[2] == 2 nAgents = u.shape[3] # Compute the difference along each axis. For this, we subtract a # column vector from a row vector.
The difference tensor on each # position will have shape nSamples x tSamples x nAgents x nAgents # and then we add the extra dimension to concatenate and obtain a final # tensor of shape nSamples x tSamples x 2 x nAgents x nAgents # First, axis x # Reshape as column and row vector, respectively uCol_x = u[:,:,0,:].reshape((nSamples, tSamples, nAgents, 1)) uRow_x = u[:,:,0,:].reshape((nSamples, tSamples, 1, nAgents)) # Subtract them uDiff_x = uCol_x - uRow_x # nSamples x tSamples x nAgents x nAgents # Second, for axis y uCol_y = u[:,:,1,:].reshape((nSamples, tSamples, nAgents, 1)) uRow_y = u[:,:,1,:].reshape((nSamples, tSamples, 1, nAgents)) uDiff_y = uCol_y - uRow_y # nSamples x tSamples x nAgents x nAgents # Third, compute the distance tensor of shape # nSamples x tSamples x nAgents x nAgents uDistSq = uDiff_x ** 2 + uDiff_y ** 2 # Finally, concatenate to obtain the tensor of differences # Add the extra dimension in the position uDiff_x = np.expand_dims(uDiff_x, 2) uDiff_y = np.expand_dims(uDiff_y, 2) # And concatenate them uDiff = np.concatenate((uDiff_x, uDiff_y), 2) # nSamples x tSamples x 2 x nAgents x nAgents # Get rid of the time dimension if we don't need it if not hasTimeDim: # (tSamples == 1 in this case, so the squeeze is safe) uDistSq = uDistSq.squeeze(1) # nSamples x nAgents x nAgents uDiff = uDiff.squeeze(1) # nSamples x 2 x nAgents x nAgents return uDiff, uDistSq def computeOptimalTrajectory(self, initPos, initVel, duration, samplingTime, repelDist, accelMax = 100.): # The optimal trajectory is given by # u_{i} = - \sum_{j=1}^{N} (v_{i} - v_{j}) # + 2 \sum_{j=1}^{N} (r_{i} - r_{j}) * # (1/\|r_{ij}\|^{4} + 1/\|r_{ij}\|^{2}) * # 1{\|r_{ij}\| < R} # for each agent i=1,...,N, where v_{i} is the velocity and r_{i} the # position. # Check that initPos and initVel are nSamples x 2 x nAgents arrays assert len(initPos.shape) == len(initVel.shape) == 3 nSamples = initPos.shape[0] assert initPos.shape[1] == initVel.shape[1] == 2 nAgents = initPos.shape[2] assert initVel.shape[0] == nSamples assert initVel.shape[2] == nAgents # time time = np.arange(0, duration, samplingTime) tSamples = len(time) # number of time samples # Create arrays to store the trajectory pos = np.zeros((nSamples, tSamples, 2, nAgents)) vel = np.zeros((nSamples, tSamples, 2, nAgents)) accel = np.zeros((nSamples, tSamples, 2, nAgents)) # Initial settings pos[:,0,:,:] = initPos vel[:,0,:,:] = initVel if self.doPrint: # Sample percentage count percentageCount = int(100/tSamples) # Print new value print("%3d%%" % percentageCount, end = '', flush = True) # For each time instant for t in range(1,tSamples): # Compute the optimal acceleration # Compute the distance between all elements (positions) ijDiffPos, ijDistSq = self.computeDifferences(pos[:,t-1,:,:]) # ijDiffPos: nSamples x 2 x nAgents x nAgents # ijDistSq: nSamples x nAgents x nAgents # And also the difference in velocities ijDiffVel, _ = self.computeDifferences(vel[:,t-1,:,:]) # ijDiffVel: nSamples x 2 x nAgents x nAgents # The last element we need to compute the acceleration is the # gradient.
Note that the gradient only counts when the distance # is smaller than the repel distance # This is the mask to consider each of the differences repelMask = (ijDistSq < (repelDist**2)).astype(ijDiffPos.dtype) # Apply the mask to the relevant differences ijDiffPos = ijDiffPos * np.expand_dims(repelMask, 1) # Compute the constant (1/||r_ij||^4 + 1/||r_ij||^2) ijDistSqInv = invertTensorEW(ijDistSq) # Add the extra dimension ijDistSqInv = np.expand_dims(ijDistSqInv, 1) # Compute the acceleration accel[:,t-1,:,:] = \ -np.sum(ijDiffVel, axis = 3) \ +2* np.sum(ijDiffPos * (ijDistSqInv ** 2 + ijDistSqInv), axis = 3) # Finally, note that if the agents are too close together, the # acceleration will be very large, pushing them apart as fast as # possible, which is physically unrealistic. # So let's add a limitation to the maximum acceleration # Find the places where the acceleration is big thisAccel = accel[:,t-1,:,:].copy() # Values that exceed accelMax, force them to be accelMax thisAccel[accel[:,t-1,:,:] > accelMax] = accelMax # Values that are smaller than -accelMax, force them to be -accelMax thisAccel[accel[:,t-1,:,:] < -accelMax] = -accelMax # And put it back accel[:,t-1,:,:] = thisAccel # Update the values # Update velocity vel[:,t,:,:] = accel[:,t-1,:,:] * samplingTime + vel[:,t-1,:,:] # Update the position pos[:,t,:,:] = accel[:,t-1,:,:] * (samplingTime ** 2)/2 + \ vel[:,t-1,:,:] * samplingTime + pos[:,t-1,:,:] if self.doPrint: # Sample percentage count percentageCount = int(100*(t+1)/tSamples) # Erase previous percentage and print new value print('\b \b' * 4 + "%3d%%" % percentageCount, end = '', flush = True) # Print if self.doPrint: # Erase the percentage print('\b \b' * 4, end = '', flush = True) return pos, vel, accel def computeInitialPositions(self, nAgents, nSamples, commRadius, minDist = 0.1, geometry = 'rectangular', **kwargs): # It will always be uniform. We can select whether it is rectangular # or circular (or some other shape) and the corresponding parameters assert geometry == 'rectangular' or geometry == 'circular' assert minDist * (1.+zeroTolerance) <= commRadius * (1.-zeroTolerance) # We use a zeroTolerance buffer zone, just in case minDist = minDist * (1. + zeroTolerance) commRadius = commRadius * (1. - zeroTolerance) # If there are other keys in the kwargs argument, they will just be # ignored # We will first create the grid, whether it is rectangular or # circular. # Let's start by setting the fixed position if geometry == 'rectangular': # This grid has a distance that depends on the desired minDist and # the commRadius distFixed = (commRadius + minDist)/(2.*np.sqrt(2)) # This is the fixed distance between points in the grid distPerturb = (commRadius - minDist)/(4.*np.sqrt(2)) # This is the half-width of a uniform perturbation around # the fixed point. # This should guarantee that, even after the perturbations, there # are no agents below minDist, and that all agents have at least # one other agent within commRadius.
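# (Worked example, for illustration: commRadius = 2 and minDist = 0.2 give # distFixed = 2.2/(2*sqrt(2)) ~ 0.78 and distPerturb = 1.8/(4*sqrt(2)) ~ 0.32; # the perturbation below is drawn independently for each coordinate of each # agent.)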
# How many agents per axis nAgentsPerAxis = int(np.ceil(np.sqrt(nAgents))) axisFixedPos = np.arange(-(nAgentsPerAxis * distFixed)/2, (nAgentsPerAxis * distFixed)/2, step = distFixed) # Repeat the positions in the same order (x coordinate) xFixedPos = np.tile(axisFixedPos, nAgentsPerAxis) # Repeat each element (y coordinate) yFixedPos = np.repeat(axisFixedPos, nAgentsPerAxis) # Concatenate this to obtain the positions fixedPos = np.concatenate((np.expand_dims(xFixedPos, 0), np.expand_dims(yFixedPos, 0)), axis = 0) # Get rid of unnecessary agents fixedPos = fixedPos[:, 0:nAgents] # And repeat for the number of samples we want to generate fixedPos = np.repeat(np.expand_dims(fixedPos, 0), nSamples, axis = 0) # nSamples x 2 x nAgents # Now generate the noise perturbPos = np.random.uniform(low = -distPerturb, high = distPerturb, size = (nSamples, 2, nAgents)) # Initial positions initPos = fixedPos + perturbPos elif geometry == 'circular': # Radius for the grid rFixed = (commRadius + minDist)/2. rPerturb = (commRadius - minDist)/4. fixedRadius = np.arange(0, rFixed * nAgents, step = rFixed)+rFixed # Angles for the grid aFixed = (commRadius/fixedRadius + minDist/fixedRadius)/2. for a in range(len(aFixed)): # How many times does aFixed[a] fit within 2pi? nAgentsPerCircle = 2 * np.pi // aFixed[a] # And now divide 2*np.pi by this number aFixed[a] = 2 * np.pi / nAgentsPerCircle # Fixed angle difference for each value of fixedRadius # Now, let's get the radius, angle coordinates for each agent initRadius = np.empty((0)) initAngles = np.empty((0)) agentsSoFar = 0 # Number of agents located so far n = 0 # Index for radius while agentsSoFar < nAgents: thisRadius = fixedRadius[n] thisAngles = np.arange(0, 2*np.pi, step = aFixed[n]) agentsSoFar += len(thisAngles) initRadius = np.concatenate((initRadius, np.repeat(thisRadius, len(thisAngles)))) initAngles = np.concatenate((initAngles, thisAngles)) n += 1 assert len(initRadius) == agentsSoFar # Restrict to the number of agents we need initRadius = initRadius[0:nAgents] initAngles = initAngles[0:nAgents] # Add the number of samples initRadius = np.repeat(np.expand_dims(initRadius, 0), nSamples, axis = 0) initAngles = np.repeat(np.expand_dims(initAngles, 0), nSamples, axis = 0) # Add the noise # First, to the angles for n in range(nAgents): # Get the radius (the angle noise depends on the radius); so # far the radius is the same for all samples thisRadius = initRadius[0,n] aPerturb = (commRadius/thisRadius - minDist/thisRadius)/4.
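# (Note, for illustration: thisRadius*aPerturb = (commRadius - minDist)/4 = rPerturb, # so the angular noise perturbs the arc length on the same scale as the radial # noise added below.)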
# Add the noise to the angles initAngles[:,n] += np.random.uniform(low = -aPerturb, high = aPerturb, size = (nSamples)) # Then, to the radius initRadius += np.random.uniform(low = -rPerturb, high = rPerturb, size = (nSamples, nAgents)) # And finally, get the positions in the cartesian coordinates initPos = np.zeros((nSamples, 2, nAgents)) initPos[:, 0, :] = initRadius * np.cos(initAngles) initPos[:, 1, :] = initRadius * np.sin(initAngles) # Now, check that the conditions are met: # Compute square distances _, distSq = self.computeDifferences(np.expand_dims(initPos, 1)) # Get rid of the "time" dimension that arises from using the # method to compute distances distSq = distSq.squeeze(1) # Compute the minimum distance (don't forget to add something in # the diagonal, which otherwise is zero) minDistSq = np.min(distSq + \ 2 * commRadius\ *np.eye(distSq.shape[1]).reshape(1, distSq.shape[1], distSq.shape[2]) ) assert minDistSq >= minDist ** 2 # Now the number of neighbors graphMatrix = self.computeCommunicationGraph(np.expand_dims(initPos,1), self.commRadius, False, doPrint = False) graphMatrix = graphMatrix.squeeze(1) # nSamples x nAgents x nAgents # Binarize the matrix graphMatrix = (np.abs(graphMatrix) > zeroTolerance)\ .astype(initPos.dtype) # And check that we always have initially connected graphs for n in range(nSamples): assert graph.isConnected(graphMatrix[n,:,:]) # We move to compute the initial velocities. Velocities can be # either positive or negative, so we do not need separate lower # and upper bounds, just a range around zero if 'xMaxInitVel' in kwargs.keys(): xMaxInitVel = kwargs['xMaxInitVel'] else: xMaxInitVel = 3. # Takes five seconds to traverse half the map # Same for the other axis if 'yMaxInitVel' in kwargs.keys(): yMaxInitVel = kwargs['yMaxInitVel'] else: yMaxInitVel = 3. # And sample the velocities xInitVel = np.random.uniform(low = -xMaxInitVel, high = xMaxInitVel, size = (nSamples, 1, nAgents)) yInitVel = np.random.uniform(low = -yMaxInitVel, high = yMaxInitVel, size = (nSamples, 1, nAgents)) # Add bias xVelBias = np.random.uniform(low = -xMaxInitVel, high = xMaxInitVel, size = (nSamples)) yVelBias = np.random.uniform(low = -yMaxInitVel, high = yMaxInitVel, size = (nSamples)) # And stack them so each sample gets its own (x, y) bias pair velBias = np.stack((xVelBias, yVelBias), axis = 1).reshape((nSamples,2,1)) initVel = np.concatenate((xInitVel, yInitVel), axis = 1) + velBias # nSamples x 2 x nAgents return initPos, initVel def saveVideo(self, saveDir, pos, *args, commGraph = None, **kwargs): # Check that pos is a position of shape nSamples x tSamples x 2 x nAgents assert len(pos.shape) == 4 nSamples = pos.shape[0] tSamples = pos.shape[1] assert pos.shape[2] == 2 nAgents = pos.shape[3] if 'torch' in repr(pos.dtype): pos = pos.cpu().numpy() # Check if there's the need to plot a graph if commGraph is not None: # If there's a communication graph, then it has to have shape # nSamples x tSamples x nAgents x nAgents assert len(commGraph.shape) == 4 assert commGraph.shape[0] == nSamples assert commGraph.shape[1] == tSamples assert commGraph.shape[2] == commGraph.shape[3] == nAgents if 'torch' in repr(commGraph.dtype): commGraph = commGraph.cpu().numpy() showGraph = True else: showGraph = False if 'doPrint' in kwargs.keys(): doPrint = kwargs['doPrint'] else: doPrint = self.doPrint # This number determines how much faster or slower to reproduce the video if 'videoSpeed' in kwargs.keys(): videoSpeed = kwargs['videoSpeed'] else: videoSpeed = 1.
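# (videoSpeed only scales playback: the video writer below uses # fps = videoSpeed/samplingTime, so videoSpeed = 2 plays the trajectory at # twice real time.)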
if 'showVideoSpeed' in kwargs.keys(): showVideoSpeed = kwargs['showVideoSpeed'] else: if videoSpeed != 1: showVideoSpeed = True else: showVideoSpeed = False if 'vel' in kwargs.keys(): vel = kwargs['vel'] if 'showCost' in kwargs.keys(): showCost = kwargs['showCost'] else: showCost = True if 'showArrows' in kwargs.keys(): showArrows = kwargs['showArrows'] else: showArrows = True else: showCost = False showArrows = False # Check that the number of extra arguments fits assert len(args) <= 1 # If there's an argument, we have to check whether it is an int or a # list if len(args) == 1: # If it is an int, just return that number of randomly chosen # samples. if type(args[0]) == int: # We can't return more samples than there are available assert args[0] <= nSamples # Randomly choose args[0] indices selectedIndices = np.random.choice(nSamples, size = args[0], replace = False) else: # The fact that we put else here instead of elif type()==list # allows for np.array to be used as indices as well. In general, # any variable with the ability to index. selectedIndices = args[0] # Select the corresponding samples pos = pos[selectedIndices] # Finally, observe that if pos has shape only 3, then that's # because we selected a single sample, so we need to add the extra # dimension back again if len(pos.shape) < 4: pos = np.expand_dims(pos, 0) if showGraph: commGraph = commGraph[selectedIndices] if len(commGraph.shape)< 4: commGraph = np.expand_dims(commGraph, 0) # Where to save the video if not os.path.exists(saveDir): os.mkdir(saveDir) videoName = 'sampleTrajectory' xMinMap = np.min(pos[:,:,0,:]) * 1.2 xMaxMap = np.max(pos[:,:,0,:]) * 1.2 yMinMap = np.min(pos[:,:,1,:]) * 1.2 yMaxMap = np.max(pos[:,:,1,:]) * 1.2 # Create video object videoMetadata = dict(title = 'Sample Trajectory', artist = 'Flocking', comment='Flocking example') videoWriter = FFMpegWriter(fps = videoSpeed/self.samplingTime, metadata = videoMetadata) if doPrint: print("\tSaving video(s)...", end = ' ', flush = True) # For each sample now for n in range(pos.shape[0]): # If there's more than one video to create, enumerate them if pos.shape[0] > 1: thisVideoName = videoName + '%03d.mp4' % n else: thisVideoName = videoName + '.mp4' # Select the corresponding position trajectory thisPos = pos[n] # Create figure videoFig = plt.figure(figsize = (5,5)) # Set limits plt.xlim((xMinMap, xMaxMap)) plt.ylim((yMinMap, yMaxMap)) plt.axis('equal') if showVideoSpeed: plt.text(xMinMap, yMinMap, r'Speed: $%.2f$' % videoSpeed) # Create plot handle plotAgents, = plt.plot([], [], marker = 'o', markersize = 3, linewidth = 0, color = '#01256E', scalex = False, scaley = False) # Create the video with videoWriter.saving(videoFig, os.path.join(saveDir,thisVideoName), tSamples): for t in range(tSamples): # Plot the agents plotAgents.set_data(thisPos[t,0,:], thisPos[t,1,:]) videoWriter.grab_frame() # Print if doPrint: # Sample percentage count percentageCount = int( 100*(t+1+n*tSamples)/(tSamples * pos.shape[0]) ) if n == 0 and t == 0: print("%3d%%" % percentageCount, end = '', flush = True) else: print('\b \b' * 4 + "%3d%%" % percentageCount, end = '', flush = True) plt.close(fig=videoFig) # Print if doPrint: # Erase the percentage and the label print('\b \b' * 4 + "OK", flush = True) if showGraph: # Normalize velocity if showArrows: # vel is of shape nSamples x tSamples x 2 x nAgents velNormSq = np.sum(vel ** 2, axis = 2) # nSamples x tSamples x nAgents maxVelNormSq = np.max(np.max(velNormSq, axis = 2), axis = 1) # nSamples maxVelNormSq = 
maxVelNormSq.reshape((nSamples, 1, 1, 1)) # nSamples x 1 x 1 x 1 normVel = 2*vel/np.sqrt(maxVelNormSq) if doPrint: print("\tSaving graph snapshots...", end = ' ', flush = True) # Essentially, we will print nSnapshots snapshots and save them # as images with the graph. This is the best we can do in a # reasonable processing time (adding the graph to the video takes # forever). time = np.arange(0, self.duration, step = self.samplingTime) assert len(time) == tSamples nSnapshots = 5 # The number of snapshots we will consider tSnapshots = np.linspace(0, tSamples-1, num = nSnapshots) # This gives us nSnapshots equally spaced in time. Now, we need # to be sure these are integers tSnapshots = np.unique(tSnapshots.astype(int)).astype(int) # Directory to save the snapshots snapshotDir = os.path.join(saveDir,'graphSnapshots') # Base name of the snapshots snapshotName = 'graphSnapshot' for n in range(pos.shape[0]): if pos.shape[0] > 1: thisSnapshotDir = snapshotDir + '%03d' % n thisSnapshotName = snapshotName + '%03d' % n else: thisSnapshotDir = snapshotDir thisSnapshotName = snapshotName if not os.path.exists(thisSnapshotDir): os.mkdir(thisSnapshotDir) # Get the corresponding positions thisPos = pos[n] thisCommGraph = commGraph[n] for t in tSnapshots: # Get the edge pairs # Get the graph for this time instant thisCommGraphTime = thisCommGraph[t] # Check if it is symmetric isSymmetric = np.allclose(thisCommGraphTime, thisCommGraphTime.T) if isSymmetric: # Use only half of the matrix thisCommGraphTime = np.triu(thisCommGraphTime) # Find the position of all edges outEdge, inEdge = np.nonzero(np.abs(thisCommGraphTime) \ > zeroTolerance) # Create the figure thisGraphSnapshotFig = plt.figure(figsize = (5,5)) # Set limits (to be the same as the video) plt.xlim((xMinMap, xMaxMap)) plt.ylim((yMinMap, yMaxMap)) plt.axis('equal') # Plot the edges plt.plot([thisPos[t,0,outEdge], thisPos[t,0,inEdge]], [thisPos[t,1,outEdge], thisPos[t,1,inEdge]], color = '#A8AAAF', linewidth = 0.75, scalex = False, scaley = False) # Plot the arrows if showArrows: for i in range(nAgents): plt.arrow(thisPos[t,0,i], thisPos[t,1,i], normVel[n,t,0,i], normVel[n,t,1,i]) # Plot the nodes plt.plot(thisPos[t,0,:], thisPos[t,1,:], marker = 'o', markersize = 3, linewidth = 0, color = '#01256E', scalex = False, scaley = False) # Add the cost value if showCost: totalCost = self.evaluate(vel = vel[:,t:t+1,:,:]) plt.text(xMinMap,yMinMap, r'Cost: $%.4f$' % totalCost) # Add title plt.title("Time $t=%.4f$s" % time[t]) # Save figure thisGraphSnapshotFig.savefig(os.path.join(thisSnapshotDir, thisSnapshotName + '%03d.pdf' % t)) # Close figure plt.close(fig = thisGraphSnapshotFig) # Print percentage completion if doPrint: # Sample percentage count percentageCount = int( 100*(t+1+n*tSamples)/(tSamples * pos.shape[0]) ) if n == 0 and t == 0: # Print new value print("%3d%%" % percentageCount, end = '', flush = True) else: # Erase the previous characters print('\b \b' * 4 + "%3d%%" % percentageCount, end = '', flush = True) # Print if doPrint: # Erase the percentage and the label print('\b \b' * 4 + "OK", flush = True) class TwentyNews(_dataForClassification): """ TwentyNews: Loads and handles the 20NEWS dataset Initialization: Input: ratioValid (float): ratio of the train texts to be part of the validation set nWords (int): number of words to consider (i.e. the nWords most frequent words in the news articles are kept, the rest, discarded) nWordsShortDocs (int): any article with fewer words than nWordsShortDocs is discarded.
nEdges (int): how many edges to keep after creating a geometric graph considering the graph embedding of each new article. distMetric (string): function to use to compute the distance between articles in the embedded space. dataDir (string): directory where to download the 20News dataset to, or where to check if it has already been downloaded dataType (dtype): type of loaded data (default: np.float64) device (device): where to store the data (e.g., 'cpu', 'cuda:0', etc.) Methods: .getData(dataSubset): loads the data belonging to dataSubset (i.e. 'train' or 'test') .embedData(): compute the graph embedding of the training dataset after it has been loaded .normalizeData(normType): normalize the data in the embedded space following a normType norm. .createValidationSet(ratio): stores ratio% of the training set as validation set. .createGraph(): uses the word2vec embedding of the training set to compute a geometric graph .getGraph(): fetches the adjacency matrix of the stored graph .getNumberOfClasses(): fetches the number of classes .reduceDataset(nTrain, nValid, nTest): reduces the dataset by randomly selecting nTrain, nValid and nTest samples from the training, validation and testing datasets, respectively. signals, labels = .getSamples(samplesType[, optionalArguments]) Input: samplesType (string): 'train', 'valid' or 'test' to determine from which dataset to get the samples from optionalArguments: 0 optional arguments: get all the samples from the specified set 1 optional argument (int): number of samples to get (at random) 1 optional argument (list): specific indices of samples to get Output: signals (dtype.array): numberSamples x numberNodes labels (dtype.array): numberSamples >> Obs.: The 0th dimension matches the corresponding signal to its respective label .astype(type): change the type of the data matrix arrays. Input: type (dtype): target type of the variables (e.g. torch.float64, numpy.float64, etc.) .to(device): if dtype is torch.tensor, move them to the specified device. Input: device (string): target device to move the variables to (e.g. 'cpu', 'cuda:0', etc.)
accuracy = .evaluate(yHat, y, tol = 1e-9) Input: yHat (dtype.array): estimated labels (1-D binary vector) y (dtype.array): correct labels (1-D binary vector) >> Obs.: both arrays are of the same length tol (float): numerical tolerance to consider two numbers to be equal Output: accuracy (float): proportion of correct labels """ def __init__(self, ratioValid, nWords, nWordsShortDocs, nEdges, distMetric, dataDir, dataType = np.float64, device = 'cpu'): super().__init__() # This creates the attributes: dataType, device, nTrain, nTest, nValid, # and samples, and fills them all with None, and also creates the # methods: getSamples, astype, to, and evaluate. self.dataType = dataType self.device = device # Other relevant information we need to store: self.dataDir = dataDir # Where the data is self.N = nWords # Number of nodes self.nWordsShortDocs = nWordsShortDocs # Number of words under which # a document is too short to be taken into consideration self.M = nEdges # Number of edges self.distMetric = distMetric # Distance metric to use self.dataset = {} # Here we save the dataset classes as they are # handled by mdeff's code self.nClasses = None # Number of classes self.vocab = None # Words considered self.graphData = None # Store the data (word2vec embeddings) required # to build the graph self.adjacencyMatrix = None # Store the graph built from the loaded # data # Get the training dataset. Saves vocab, dataset, and samples self.getData('train') # Embeds the data following the N words and a word2vec approach, saves # the embedded vectors in graphData, and updates vocab to keep only # the N words selected self.embedData() # Get the testing dataset, only for the words stored in vocab. self.getData('test') # Normalize self.normalizeData() # Save number of samples self.nTrain = self.samples['train']['targets'].shape[0] self.nTest = self.samples['test']['targets'].shape[0] # Create validation set self.createValidationSet(ratioValid) # Create graph self.createGraph() # Only after data has been embedded # Change data to specified type and device self.astype(self.dataType) self.to(self.device) def getData(self, dataSubset): # Load dataset dataset = Text20News(data_home = self.dataDir, subset = dataSubset, remove = ('headers','footers','quotes'), shuffle = True) # Get rid of numbers and other stuff dataset.clean_text(num='substitute') # If there's some vocabulary already defined, vectorize (count the # frequencies) of the words in vocab, if not, count all of them if self.vocab is None: dataset.vectorize(stop_words='english') self.vocab = dataset.vocab else: dataset.vectorize(vocabulary = self.vocab) # Get rid of short documents if dataSubset == 'train': dataset.remove_short_documents(nwords = self.nWordsShortDocs, vocab = 'full') # Get rid of images dataset.remove_encoded_images() self.nClasses = len(dataset.class_names) else: dataset.remove_short_documents(nwords = self.nWordsShortDocs, vocab = 'selected') # Save them in the corresponding places self.samples[dataSubset]['signals'] = dataset.data.toarray() self.samples[dataSubset]['targets'] = dataset.labels self.dataset[dataSubset] = dataset def embedData(self): # We need to have loaded the training dataset first. 
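# (Pipeline summary, for orientation: embed the training vocabulary with # word2vec, keep the N most frequent words, drop documents that become too # short under the reduced vocabulary, and re-vectorize the test split # against the new vocabulary.)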
assert 'train' in self.dataset.keys() # Embed them (word2vec embedding) self.dataset['train'].embed() # Keep only the top words (which determine the number of nodes) self.dataset['train'].keep_top_words(self.N) # Update the vocabulary self.vocab = self.dataset['train'].vocab # Get rid of short documents when considering only the specific # vocabulary self.dataset['train'].remove_short_documents( nwords = self.nWordsShortDocs, vocab = 'selected') # Save the embeddings, which are necessary to build a graph self.graphData = self.dataset['train'].embeddings # Update the samples self.samples['train']['signals'] = self.dataset['train'].data.toarray() self.samples['train']['targets'] = self.dataset['train'].labels # If there's an existing dataset, update it to the new vocabulary if 'test' in self.dataset.keys(): self.dataset['test'].vectorize(vocabulary = self.vocab) # Update the samples self.samples['test']['signals'] = self.dataset['test'].data.toarray() self.samples['test']['targets'] = self.dataset['test'].labels def normalizeData(self, normType = 'l1'): for key in self.dataset.keys(): # Normalize the frequencies on the l1 norm. self.dataset[key].normalize(norm = normType) # And save it self.samples[key]['signals'] = self.dataset[key].data.toarray() self.samples[key]['targets'] = self.dataset[key].labels def createValidationSet(self, ratio): # How many valid samples self.nValid = int(ratio * self.nTrain) # Shuffle indices randomIndices = np.random.permutation(self.nTrain) validationIndices = randomIndices[0:self.nValid] trainIndices = randomIndices[self.nValid:] # Fetch those samples and put them in the validation set self.samples['valid']['signals'] = self.samples['train']['signals']\ [validationIndices, :] self.samples['valid']['targets'] = self.samples['train']['targets']\ [validationIndices] # And update the training set self.samples['train']['signals'] = self.samples['train']['signals']\ [trainIndices, :] self.samples['train']['targets'] = self.samples['train']['targets']\ [trainIndices] # Update the numbers self.nValid = self.samples['valid']['targets'].shape[0] self.nTrain = self.samples['train']['targets'].shape[0] def createGraph(self, *args): assert self.graphData is not None assert len(args) == 0 or len(args) == 2 if len(args) == 2: self.M = args[0] # Number of edges self.distMetric = args[1] # Distance metric dist, idx = distance_sklearn_metrics(self.graphData, k = self.M, metric = self.distMetric) self.adjacencyMatrix = adjacency(dist, idx).toarray() def getGraph(self): return self.adjacencyMatrix def getNumberOfClasses(self): return self.nClasses def reduceDataset(self, nTrain, nValid, nTest): if nTrain < self.nTrain: randomIndices = np.random.permutation(self.nTrain) trainIndices = randomIndices[0:nTrain] # And update the training set self.samples['train']['signals'] = self.samples['train']\ ['signals']\ [trainIndices, :] self.samples['train']['targets'] = self.samples['train']\ ['targets']\ [trainIndices] self.nTrain = nTrain if nValid < self.nValid: randomIndices = np.random.permutation(self.nValid) validIndices = randomIndices[0:nValid] # And update the validation set self.samples['valid']['signals'] = self.samples['valid']\ ['signals']\ [validIndices, :] self.samples['valid']['targets'] = self.samples['valid']\ ['targets']\ [validIndices] self.nValid = nValid if nTest < self.nTest: randomIndices = np.random.permutation(self.nTest) testIndices = randomIndices[0:nTest] # And update the test set self.samples['test']['signals'] = self.samples['test']\ ['signals']\
[testIndices, :] self.samples['test']['targets'] = self.samples['test']\ ['targets']\ [testIndices] self.nTest = nTest def astype(self, dataType): # This changes the type for the graph data, as well as the adjacency # matrix. We are going to leave the dataset attribute as it is, since # this is the most accurate reflection of mdeff's code. self.graphData = changeDataType(self.graphData, dataType) self.adjacencyMatrix = changeDataType(self.adjacencyMatrix, dataType) # And now, initialize to change the samples as well (and also save the # data type) super().astype(dataType) def to(self, device): # If the dataType is 'torch' if repr(self.dataType).find('torch') >= 0: # Change the stored attributes that are not handled by the inherited # method to(). self.graphData = self.graphData.to(device) self.adjacencyMatrix = self.adjacencyMatrix.to(device) # And call the inherited method to initialize samples (and save to # device) super().to(device) # Copied almost verbatim from the code by Michaël Defferrard, available at # http://github.com/mdeff/cnn_graph import gensim import sklearn, sklearn.datasets, sklearn.metrics import scipy.sparse import re def distance_sklearn_metrics(z, k=4, metric='euclidean'): """Compute exact pairwise distances.""" d = sklearn.metrics.pairwise.pairwise_distances( z, metric=metric) # k-NN graph. idx = np.argsort(d)[:, 1:k+1] d.sort() d = d[:, 1:k+1] return d, idx def adjacency(dist, idx): """Return the adjacency matrix of a kNN graph.""" M, k = dist.shape assert (M, k) == idx.shape assert dist.min() >= 0 # Weights. sigma2 = np.mean(dist[:, -1])**2 dist = np.exp(- dist**2 / sigma2) # Weight matrix. I = np.arange(0, M).repeat(k) J = idx.reshape(M*k) V = dist.reshape(M*k) W = scipy.sparse.coo_matrix((V, (I, J)), shape=(M, M)) # No self-connections. W.setdiag(0) # Non-directed graph. bigger = W.T > W W = W - W.multiply(bigger) + W.T.multiply(bigger) assert W.nnz % 2 == 0 assert np.abs(W - W.T).mean() < 1e-10 assert type(W) is scipy.sparse.csr.csr_matrix return W def replace_random_edges(A, noise_level): """Replace randomly chosen edges by random edges.""" M, M = A.shape n = int(noise_level * A.nnz // 2) indices = np.random.permutation(A.nnz//2)[:n] rows = np.random.randint(0, M, n) cols = np.random.randint(0, M, n) vals = np.random.uniform(0, 1, n) assert len(indices) == len(rows) == len(cols) == len(vals) A_coo = scipy.sparse.triu(A, format='coo') assert A_coo.nnz == A.nnz // 2 assert A_coo.nnz >= n A = A.tolil() for idx, row, col, val in zip(indices, rows, cols, vals): old_row = A_coo.row[idx] old_col = A_coo.col[idx] A[old_row, old_col] = 0 A[old_col, old_row] = 0 A[row, col] = 1 A[col, row] = 1 A.setdiag(0) A = A.tocsr() A.eliminate_zeros() return A class TextDataset(object): def clean_text(self, num='substitute'): # TODO: stemming, lemmatisation for i,doc in enumerate(self.documents): # Digits. if num == 'spell': doc = doc.replace('0', ' zero ') doc = doc.replace('1', ' one ') doc = doc.replace('2', ' two ') doc = doc.replace('3', ' three ') doc = doc.replace('4', ' four ') doc = doc.replace('5', ' five ') doc = doc.replace('6', ' six ') doc = doc.replace('7', ' seven ') doc = doc.replace('8', ' eight ') doc = doc.replace('9', ' nine ') elif num == 'substitute': # All numbers are equal. Useful for embedding # (countable words) ? doc = re.sub('(\\d+)', ' NUM ', doc) elif num == 'remove': # Numbers are uninformative (they are all over the place). # Useful for bag-of-words ? # But maybe some kind of documents contain more numbers, # e.g. finance. # Some documents are indeed full of numbers. At least # in 20NEWS.
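# (For example, re.sub('[0-9]', ' ', 'mp3 file') gives 'mp  file'; the # extra whitespace is collapsed by the ' '.join(doc.split()) further below.)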
doc = re.sub('[0-9]', ' ', doc) # Remove everything except a-z characters and single space. doc = doc.replace('$', ' dollar ') doc = doc.lower() doc = re.sub('[^a-z]', ' ', doc) doc = ' '.join(doc.split()) # same as # doc = re.sub('\s{2,}', ' ', doc) self.documents[i] = doc def vectorize(self, **params): # TODO: count or tf-idf. Or in normalize ? vectorizer = sklearn.feature_extraction.text.CountVectorizer(**params) self.data = vectorizer.fit_transform(self.documents) self.vocab = vectorizer.get_feature_names() assert len(self.vocab) == self.data.shape[1] def keep_documents(self, idx): """Keep the documents given by the index, discard the others.""" self.documents = [self.documents[i] for i in idx] self.labels = self.labels[idx] self.data = self.data[idx,:] def keep_words(self, idx): """Keep the words given by the index, discard the others.""" self.data = self.data[:,idx] self.vocab = [self.vocab[i] for i in idx] try: self.embeddings = self.embeddings[idx,:] except AttributeError: pass def remove_short_documents(self, nwords, vocab='selected'): """Remove a document if it contains fewer than nwords words.""" if vocab == 'selected': # Word count with selected vocabulary. wc = self.data.sum(axis=1) wc = np.squeeze(np.asarray(wc)) elif vocab == 'full': # Word count with full vocabulary. wc = np.empty(len(self.documents), dtype=int) for i,doc in enumerate(self.documents): wc[i] = len(doc.split()) idx = np.argwhere(wc >= nwords).squeeze() self.keep_documents(idx) def keep_top_words(self, M): """Keep in the vocabulary the M words that appear most often.""" freq = self.data.sum(axis=0) freq = np.squeeze(np.asarray(freq)) idx = np.argsort(freq)[::-1] idx = idx[:M] self.keep_words(idx) def normalize(self, norm='l1'): """Normalize data to unit length.""" # TODO: TF-IDF. data = self.data.astype(np.float64) self.data = sklearn.preprocessing.normalize(data, axis=1, norm=norm) def embed(self, filename=None, size=100): """Embed the vocabulary using pre-trained vectors.""" if filename: model = gensim.models.Word2Vec.load_word2vec_format(filename, binary=True) size = model.vector_size else: class Sentences(object): def __init__(self, documents): self.documents = documents def __iter__(self): for document in self.documents: yield document.split() model = gensim.models.Word2Vec(Sentences(self.documents), size=size) self.embeddings = np.empty((len(self.vocab), size)) keep = [] not_found = 0 for i,word in enumerate(self.vocab): try: self.embeddings[i,:] = model[word] keep.append(i) except KeyError: not_found += 1 self.keep_words(keep) def remove_encoded_images(self, freq=1e3): widx = self.vocab.index('ax') wc = self.data[:,widx].toarray().squeeze() idx = np.argwhere(wc < freq).squeeze() self.keep_documents(idx) class Text20News(TextDataset): def __init__(self, **params): dataset = sklearn.datasets.fetch_20newsgroups(**params) self.documents = dataset.data self.labels = dataset.target self.class_names = dataset.target_names assert max(self.labels) + 1 == len(self.class_names) class Epidemics(_data): # Luana R.
Ruiz, rubruiz@seas.upenn.edu, 2021/03/04 def __init__(self, seqLen, seedProb, infectionProb, recoveryTime, nTrain, nValid, nTest, x0 = None, dataType = np.float64, device = 'cpu'): super().__init__() self.seqLen = seqLen self.seedProb = seedProb self.infectionProb = infectionProb self.recoveryTime = recoveryTime self.nTrain = nTrain self.nValid = nValid self.nTest = nTest nSamples = nTrain + nValid + nTest self.dataType = dataType self.device = device self.Adj = self.createGraph() self.N = self.Adj.shape[0] if x0 is not None: self.x0 = x0 else: x0 = np.random.binomial(1,self.seedProb,(nSamples,self.N)) while np.sum(np.sum(x0,axis=1)>0) < nSamples: x0 = np.random.binomial(1,self.seedProb,(nSamples,self.N)) self.x0 = x0 horizon = 2*seqLen x_t = x0 x = np.expand_dims(x_t, axis=1) timeInfection = np.zeros((self.N,nSamples)) for t in range(1,horizon): x_tplus1 = x_t.copy() for n in range(nSamples): for i in range(self.N): if x_t[n,i] == 1: for j in list(np.argwhere(self.Adj[i,:]>0)): if x_t[n,j] == 0: x_tplus1[n,j] = np.random.binomial(1, infectionProb*t/horizon) timeInfection[j,n] = t if t - timeInfection[i,n] >= recoveryTime: x_tplus1[n,i] = 2 x_t = x_tplus1 x = np.concatenate((x,np.expand_dims(x_t, axis=1)),axis=1) y = x[:,seqLen:horizon,:] == 1 x = x[:,:seqLen,:] self.samples['train']['signals'] = x[0:nTrain,:,:] self.samples['train']['targets'] = y[0:nTrain,:,:] self.samples['valid']['signals'] = x[nTrain:nTrain+nValid,:,:] self.samples['valid']['targets'] = y[nTrain:nTrain+nValid,:,:] self.samples['test']['signals'] = x[nTrain+nValid:nSamples,:,:] self.samples['test']['targets'] = y[nTrain+nValid:nSamples,:,:] @staticmethod def createGraph(): edge_list = [] with open('datasets/epidemics/edge_list.txt') as csv_file: csv_reader = csv.reader(csv_file, delimiter='\t') for row in csv_reader: aux_list = [] aux_list.append(int(row[0])-1) aux_list.append(int(row[1])-1) edge_list.append(aux_list) nNodes = max(max(edge_list))+1 Adj = [[0]*nNodes for _ in range(nNodes)] for sink, source in edge_list: Adj[sink][source] = 1 Adj = np.array(Adj) Adj = Adj + np.transpose(Adj) > 0 idx_0 = np.argwhere(np.matmul(Adj,np.ones(nNodes))>0).squeeze() Adj = Adj[idx_0,:] Adj = Adj[:,idx_0] return Adj def evaluate(self, yHat, y, tol = 1e-9): dimensions = len(yHat.shape) C = yHat.shape[dimensions-2] N = yHat.shape[dimensions-1] yHat = yHat.reshape((-1,C,N)) yHat = torch.nn.functional.log_softmax(yHat, dim=1) yHat = torch.exp(yHat) yHat = torch.argmax(yHat,dim=1) yHat = yHat.double() y = y.reshape((-1,N)) tp = torch.sum(y*yHat,1) #tn = torch.sum((1-y)*(1-yHat),1) fp = torch.sum((1-y)*yHat,1) fn = torch.sum(y*(1-yHat),1) p = tp / (tp + fp) r = tp / (tp + fn) idx_p = p!=p idx_tp = tp<tol idx_p1 = idx_p*idx_tp p[idx_p] = 0 p[idx_p1] = 1 idx_r = r!=r idx_r1 = idx_r*idx_tp r[idx_r] = 0 r[idx_r1] = 1 f1 = 2*p*r / (p+r) f1[f1!=f1] = 0 return 1 - torch.mean(f1)
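# ---------------------------------------------------------------------------
# Illustration (a minimal sketch, not part of the original module): the
# velocity-variance flocking cost computed by the evaluate() method earlier
# in this file, restated as a self-contained numpy function. It assumes a
# velocity array of shape nSamples x tSamples x 2 x nAgents.
import numpy as np

def velocity_variance_cost(vel):
    # Deviation of each agent's velocity from the per-sample mean velocity
    avgVel = np.mean(vel, axis=3, keepdims=True)  # nSamples x tSamples x 2 x 1
    diffVel = vel - avgVel                        # nSamples x tSamples x 2 x nAgents
    diffVelNorm = np.sum(diffVel ** 2, axis=2)    # nSamples x tSamples x nAgents
    diffVelAvg = np.mean(diffVelNorm, axis=2)     # nSamples x tSamples
    costPerSample = np.sum(diffVelAvg, axis=1)    # nSamples
    return np.mean(costPerSample)                 # scalar

# A perfectly aligned flock (identical velocities) has zero cost:
assert velocity_variance_cost(np.ones((2, 5, 2, 4))) == 0.0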
223,644
46.594169
95
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/architecture.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ @author: Claudio Battiloro """ #import sys import torch import torch.nn as nn import pytorch_lightning as pl from layers import GNNLayer, RGNNLayer import numpy as np # Tangent Bundle Neural Network class TNN(pl.LightningModule): def __init__(self, in_features, L, features, lr, weight_decay, sigma, readout_sigma, kappa, n, loss_function, device): """ Parameters ---------- in_features : Input features L : List of Shift Operators (one per layer) features : List of hidden features lr: optimizer's learning rate weight_decay: Weight decay multiplier sigma : Non-linearity readout_sigma: Non-linearity of the last layer kappa : List of filters order n: Number of manifold points loss_function: Loss function device : device """ super(TNN, self).__init__() self.lr = lr self.n = n self.weight_decay = weight_decay self.L = [l.to(device) for l in L] ops = [] self.sigma = sigma self.readout_sigma = readout_sigma in_features = [in_features] + [features[l] for l in range(len(features))] self.N_layers = len(in_features) self.min_mse_train = 1e20 self.min_mse_val = 1e20 self.loss_fn = loss_function for l in range(self.N_layers-1): if l == self.N_layers-2: hparams = {"F_in": in_features[l], "F_out": in_features[l+1], "L": self.L[l], "kappa": kappa[l], "device": device, "sigma": self.readout_sigma} else: hparams = {"F_in": in_features[l], "F_out": in_features[l+1], "L": self.L[l], "kappa": kappa[l], "device": device, "sigma": self.sigma} tnn_layer = GNNLayer(**hparams).to(device) ops.extend([tnn_layer]) self.tnn = nn.Sequential(*ops) def forward(self, x): return self.tnn(x) def training_step(self, batch, batch_idx): try: x, y, mask = batch y_hat = self(x) y_trim = y[mask, :] y_hat_trim = y_hat[mask, :] loss = self.loss_fn(y_hat_trim, y_trim) except: x, y = batch y_hat = self(x) y_trim = y y_hat_trim = y_hat loss = self.loss_fn(y_hat_trim, x) self.mse_train = ((y_trim - y_hat_trim).square()).sum() / self.n self.min_mse_train = min(self.mse_train, self.min_mse_train) self.log('train_mse', self.mse_train, on_step=True, on_epoch=True, prog_bar=True) self.log('train_loss', loss.item(), on_step=True, on_epoch=True, prog_bar=False) return loss def validation_step(self, batch, batch_idx): try: x, y, mask = batch y_hat = self(x) y_trim = y[mask, :] y_hat_trim = y_hat[mask, :] except: x, y = batch y_hat = self(x) y_trim = y y_hat_trim = y_hat loss = self.loss_fn(y_hat_trim, y_trim) self.mse_val = ((y_trim - y_hat_trim).square()).sum() / self.n self.min_mse_val = min(self.mse_val, self.min_mse_val) self.log('test_mse', self.mse_val, on_step=True, on_epoch=True, prog_bar=True) self.log('test_loss', loss.item(), on_step=True, on_epoch=True, prog_bar=False) return loss def test_step(self, batch, batch_idx): return self.validation_step(batch, batch_idx) def training_epoch_end(self, outs): pass def validation_epoch_end(self, outs): pass def configure_optimizers(self): optimizer = torch.optim.Adam( self.parameters(), lr=self.lr, weight_decay=self.weight_decay) return {'optimizer': optimizer, 'monitor': 'train_loss'} # Manifold Neural Network (redundant in our experiments, just to keep things separated) class MNN(pl.LightningModule): def __init__(self, in_features, L, features, lr, weight_decay, sigma, readout_sigma, kappa, n, loss_function, device): """ Parameters ---------- in_features : Input features L : List of Shift Operators (one per layer) features : List of hidden features lr: optimizer's learning rate weight_decay: Weight decay multiplier sigma : Non-linearity 
readout_sigma: Non-linearity of the last layer kappa : List of filters order n: Number of manifold points loss_function: Loss function device : device """ super(MNN, self).__init__() self.lr = lr self.n = n self.weight_decay = weight_decay self.L = [l.to(device) for l in L] ops = [] self.sigma = sigma self.readout_sigma = readout_sigma in_features = [in_features] + [features[l] for l in range(len(features))] self.N_layers = len(in_features) self.min_mse_train = 1e20 self.min_mse_val = 1e20 self.loss_fn = loss_function for l in range(self.N_layers-1): if l == self.N_layers-2: hparams = {"F_in": in_features[l], "F_out": in_features[l+1], "L": self.L[l], "kappa": kappa[l], "device": device, "sigma": self.readout_sigma} else: hparams = {"F_in": in_features[l], "F_out": in_features[l+1], "L": self.L[l], "kappa": kappa[l], "device": device, "sigma": self.sigma} mnn_layer = GNNLayer(**hparams).to(device) ops.extend([mnn_layer]) self.mnn = nn.Sequential(*ops) def forward(self, x): return self.mnn(x) def training_step(self, batch, batch_idx): try: x, y, mask = batch y_hat = self(x) y_trim = y[mask, :] y_hat_trim = y_hat[mask, :] loss = self.loss_fn(y_hat_trim, y_trim) except: x, y = batch y_hat = self(x) y_trim = y y_hat_trim = y_hat loss = self.loss_fn(y_hat_trim, x) self.mse_train = ((y_trim - y_hat_trim).square()).sum() / self.n self.min_mse_train = min(self.mse_train, self.min_mse_train) self.log('train_mse', self.mse_train, on_step=True, on_epoch=True, prog_bar=True) self.log('train_loss', loss.item(), on_step=True, on_epoch=True, prog_bar=False) return loss def validation_step(self, batch, batch_idx): try: x, y, mask = batch y_hat = self(x) y_trim = y[mask, :] y_hat_trim = y_hat[mask, :] except: x, y = batch y_hat = self(x) y_trim = y y_hat_trim = y_hat loss = self.loss_fn(y_hat_trim, y_trim) self.mse_val = ((y_trim - y_hat_trim).square()).sum() / self.n self.min_mse_val = min(self.mse_val, self.min_mse_val) self.log('test_mse', self.mse_val, on_step=True, on_epoch=True, prog_bar=True) self.log('test_loss', loss.item(), on_step=True, on_epoch=True, prog_bar=False) return loss def test_step(self, batch, batch_idx): return self.validation_step(batch, batch_idx) def training_epoch_end(self, outs): pass def validation_epoch_end(self, outs): pass def configure_optimizers(self): optimizer = torch.optim.Adam( self.parameters(), lr=self.lr, weight_decay=self.weight_decay) return {'optimizer': optimizer, 'monitor': 'train_loss'} # Recurrent Tangent Bundle Neural Network class RTNN(pl.LightningModule): def __init__(self, in_features, time_window, L, lr, weight_decay, sigma, kappa, loss_function, device): """ Parameters ---------- in_features : Input features time_window: Prediction time window L : List of Shift Operators (one per layer) features : List of hidden features lr: optimizer's learning rate weight_decay: Weight decay multiplier sigma : Non-linearity readout_sigma: Non-linearity of the last layer kappa : List of filters order n: Number of manifold points loss_function: Loss function device : device """ super(RTNN, self).__init__() self.lr = lr self.weight_decay = weight_decay self.L = [l.to(device) for l in L] ops = [] self.sigma = sigma self.N_layers = len(in_features) self.min_mse_train = 1e20 self.min_mse_val = 1e20 self.loss_fn = loss_function for l in range(self.N_layers-1): if l == self.N_layers-2: hparams = {"F_in": in_features[l], "F_out": in_features[l+1], "L": self.L[l], "kappa": kappa[l], "device": device, "time_window": time_window, "sigma": self.sigma} else: hparams = {"F_in": 
in_features[l], "F_out": in_features[l+1], "L": self.L[l], "kappa": kappa[l], "device": device, "time_window": time_window, "sigma": self.sigma} rtnn_layer = RGNNLayer(**hparams).to(device) ops.extend([rtnn_layer]) self.rtnn = nn.Sequential(*ops) def forward(self, x): return self.rtnn(x) def training_step(self, batch, batch_idx): xt, xT = batch xT_hat = self(xt).to(self.device).double() loss = self.loss_fn(xT_hat, xT) self.mse_train = ((xT - xT_hat).square()).sum() / np.prod(xT.shape) self.min_mse_train = min(self.mse_train, self.min_mse_train) self.log('train_mse', self.mse_train, on_step=True, on_epoch=True, prog_bar=True) self.log('train_loss', loss.item(), on_step=True, on_epoch=True, prog_bar=False) return loss def validation_step(self, batch, batch_idx): xt, xT = batch xT_hat = self(xt).to(self.device) loss = self.loss_fn(xT_hat, xT) self.mse_val = ((xT - xT_hat).square()).sum() / np.prod(xT.shape) self.min_mse_val = min(self.mse_val, self.min_mse_val) self.log('test_mse', self.mse_val, on_step=True, on_epoch=True, prog_bar=True) self.log('test_loss', loss.item(), on_step=True, on_epoch=True, prog_bar=False) return loss def test_step(self, batch, batch_idx): return self.validation_step(batch, batch_idx) def training_epoch_end(self, outs): pass def validation_epoch_end(self, outs): pass def configure_optimizers(self): optimizer = torch.optim.Adam( self.parameters(), lr=self.lr, weight_decay=self.weight_decay) return {'optimizer': optimizer, 'monitor': 'train_loss'} # Recurrent Manifold Neural Network (redundant in our experiments, just to keep things separated) class RMNN(pl.LightningModule): def __init__(self, in_features, time_window, L, lr, weight_decay, sigma, kappa, loss_function, device): """ Parameters ---------- in_features : Input features time_window: Prediction time window L : List of Shift Operators (one per layer) features : List of hidden features lr: optimizer's learning rate weight_decay: Weight decay multiplier sigma : Non-linearity readout_sigma: Non-linearity of the last layer kappa : List of filters order n: Number of manifold points loss_function: Loss function device : device """ super(RMNN, self).__init__() self.lr = lr self.weight_decay = weight_decay self.L = [l.to(device) for l in L] ops = [] self.sigma = sigma self.N_layers = len(in_features) self.min_mse_train = 1e20 self.min_mse_val = 1e20 self.loss_fn = loss_function for l in range(self.N_layers-1): if l == self.N_layers-2: hparams = {"F_in": in_features[l], "F_out": in_features[l+1], "L": self.L[l], "kappa": kappa[l], "device": device, "time_window": time_window, "sigma": self.sigma} else: hparams = {"F_in": in_features[l], "F_out": in_features[l+1], "L": self.L[l], "kappa": kappa[l], "device": device, "time_window": time_window, "sigma": self.sigma} simplicial_attention_layer = RGNNLayer(**hparams).to(device) ops.extend([simplicial_attention_layer]) self.rmnn = nn.Sequential(*ops) def forward(self, x): return self.rmnn(x) def training_step(self, batch, batch_idx): xt, xT = batch xT_hat = self(xt).to(self.device).double() loss = self.loss_fn(xT_hat, xT) self.mse_train = ((xT - xT_hat).square()).sum() / np.prod(xT.shape) self.min_mse_train = min(self.mse_train, self.min_mse_train) self.log('train_mse', self.mse_train, on_step=True, on_epoch=True, prog_bar=True) self.log('train_loss', loss.item(), on_step=True, on_epoch=True, prog_bar=False) return loss def validation_step(self, batch, batch_idx): xt, xT = batch xT_hat = self(xt).to(self.device) loss = self.loss_fn(xT_hat, xT) self.mse_val = ((xT - 
xT_hat).square()).sum() / np.prod(xT.shape) self.min_mse_val = min(self.mse_val, self.min_mse_val) self.log('test_mse', self.mse_val, on_step=True, on_epoch=True, prog_bar=True) self.log('test_loss', loss.item(), on_step=True, on_epoch=True, prog_bar=False) return loss def test_step(self, batch, batch_idx): return self.validation_step(batch, batch_idx) def training_epoch_end(self, outs): pass def validation_epoch_end(self, outs): pass def configure_optimizers(self): optimizer = torch.optim.Adam( self.parameters(), lr=self.lr, weight_decay=self.weight_decay) return {'optimizer': optimizer, 'monitor': 'train_loss'}
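# ---------------------------------------------------------------------------
# Illustration (a hypothetical instantiation, not part of the original
# module): constructing the TNN defined above. The identity shift operators,
# feature widths, filter orders and MSE loss are placeholder assumptions;
# input/output shapes ultimately depend on layers.GNNLayer, which is not
# shown in this file.
import torch
import torch.nn as nn

n = 100  # number of manifold points
model = TNN(in_features=1,
            L=[torch.eye(n), torch.eye(n)],  # one shift operator per layer
            features=[16, 1],                # hidden widths; last entry is the output width
            lr=1e-3,
            weight_decay=1e-5,
            sigma=nn.Tanh(),
            readout_sigma=nn.Identity(),
            kappa=[3, 3],                    # filter order per layer
            n=n,
            loss_function=nn.MSELoss(),
            device='cpu')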
15,535
35.384075
97
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/utils.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ @author: Clabat """ import torch import scipy.sparse as sp import scipy.sparse.linalg as spl import numpy as np from scipy.linalg import expm import sys # %% Sheaf Laplacian Utils def compute_neighbours(data,epsilon,epsilon_pca, option = 'mean_shift'): n = data.shape[0] X_i_collection = [] neighbours_collection = np.zeros((n,n)) distances_collection = [] complete_distance_collection = [] for point in range(n): x_i = data[point,:] x_i_dists = np.sum((x_i - data)**2,1)**.5 neigh = (x_i_dists > 0.0) *\ (x_i_dists < epsilon_pca**.5) tmp_neigh = data[neigh,:] tmp_dist_trim_scaled_pcs = x_i_dists[neigh]/epsilon_pca**.5 if option == 'point_shift': X_i_collection.append((tmp_neigh - x_i).T) if option == 'mean_shift': X_i_collection.append((tmp_neigh - np.mean(tmp_neigh,0)).T) distances_collection.append(tmp_dist_trim_scaled_pcs) complete_distance_collection.append(x_i_dists/epsilon**.5) neighbours_collection[point,:] = neigh return X_i_collection, distances_collection, neighbours_collection, complete_distance_collection def truncated_gaussian_kernel(distances_collection): n = len(distances_collection) D_i_collection = [] for point in range(n): dist = distances_collection[point] kernel_dist = np.sqrt(np.exp(-dist**2)) * (dist < 1.0) * (dist > 0.0) D_i_collection.append(kernel_dist) return D_i_collection def epanechnikov_kernel(distances_collection): n = len(distances_collection) D_i_collection = [] for point in range(n): dist = distances_collection[point] kernel_dist = np.sqrt((1-dist**2)) * (dist < 1.0) * (dist > 0.0) D_i_collection.append(kernel_dist) return D_i_collection def compute_weighted_X_i(X_i_collection,distances_collection,option = 'epanechnikov'): n = len(X_i_collection) B_i_collection = [] if option == 'epanechnikov': D_i_collection = epanechnikov_kernel(distances_collection) if option == 'gaussian': D_i_collection = truncated_gaussian_kernel(distances_collection) for point in range(n): B_i = X_i_collection[point]@np.diag(D_i_collection[point]) B_i_collection.append(B_i) return B_i_collection def local_pca(B_i_collection, gamma): n = len(B_i_collection) U_i_collection = [] dhat_i_collection = [] for point in range(n): U_i,sigma_i,_ = np.linalg.svd(B_i_collection[point],full_matrices=False) U_i_collection.append(U_i) tmp_cumsum = np.sort(np.cumsum(sigma_i)/np.sum(sigma_i)) d_hat_i = np.where(tmp_cumsum>gamma)[0][0]+1 dhat_i_collection.append(d_hat_i) d_hat = int(np.median(dhat_i_collection)) O_i_collection = [] for point in range(n): O_i = U_i_collection[point][:,:d_hat] O_i_collection.append(O_i) return O_i_collection, d_hat def build_S_W(O_i_collection, complete_distance_collection, option = 'gaussian'): n = len(O_i_collection) d_hat = O_i_collection[0].shape[1] S = np.zeros((n*d_hat,n*d_hat)) if option == 'epanechnikov': D_i_collection = epanechnikov_kernel(complete_distance_collection) if option == 'gaussian': D_i_collection = truncated_gaussian_kernel(complete_distance_collection) for point_i in range(n): for point_j in range(n): w_ij = D_i_collection[point_i][point_j]**2 O_ij_tilde = O_i_collection[point_i].T@O_i_collection[point_j] U_i,_,Vt_i = np.linalg.svd(O_ij_tilde,full_matrices=False) O_ij = U_i@Vt_i S[point_i*d_hat:(point_i+1)*d_hat,point_j*d_hat:(point_j+1)*d_hat,]=w_ij*O_ij W = np.array(D_i_collection) return S, (W+W.T)/2 def build_SheafLaplacian(S,W,d_hat,epsilon): D_cal_inv = np.diag(1/np.sum(W,1)) W_1 = D_cal_inv@W@D_cal_inv D_cal_inv_block = np.kron(D_cal_inv, np.eye(d_hat)) S_1 = D_cal_inv_block@S@D_cal_inv_block D_1_cal_inv =
np.diag(1/np.sum(W_1,1)) D_1_inv = np.kron(D_1_cal_inv, np.eye(d_hat)) Delta_n = (1/epsilon)*(D_1_inv@S_1 - np.eye(S.shape[0])) Delta_n = expm(Delta_n) #Delta_n[Delta_n < 1e-10] = 0 return Delta_n def get_laplacians(data, epsilon, epsilon_pca, gamma_svd,tnn_or_gnn): if tnn_or_gnn == "tnn": X_i_collection, distances_collection, _, complete_distance_collection = compute_neighbours(data,epsilon,epsilon_pca) B_i_collection = compute_weighted_X_i(X_i_collection,distances_collection) O_i_collection, d_hat = local_pca(B_i_collection, gamma_svd) S,W = build_S_W(O_i_collection, complete_distance_collection) Delta_n = (1/epsilon)*build_SheafLaplacian(S,W, d_hat, epsilon) return Delta_n, S,W,O_i_collection, d_hat, B_i_collection else: Delta_n = build_CloudLaplacian(data, heat_kernel_t = epsilon) return Delta_n def project_data(data,O_i_collection): d_hat = O_i_collection[0].shape[1] data_proj = np.zeros((data.shape[0]*d_hat,1)) for point in range(len(O_i_collection)): if data.shape[1] == d_hat: data_proj[point*d_hat:(point+1)*d_hat,:] = np.expand_dims(data[point,:],1) else: data_proj[point*d_hat:(point+1)*d_hat,:] = np.expand_dims(O_i_collection[point].T@data[point,:],1) return data_proj def topk(input, k, axis=None, ascending=False): if not ascending: input *= -1 ind = np.argsort(input, axis=axis) ind = np.take(ind, np.arange(k), axis=axis) if not ascending: input *= -1 val = np.take_along_axis(input, ind, axis=axis) return val # %% Cloud Laplacian Utils from sklearn.metrics import pairwise_distances # From https://github.com/tegusi/RGCNN def get_pairwise_euclidean_distance_matrix(tensor): """Compute pairwise distance of a tensor. Args: tensor: tensor (batch_size, num_points, num_dims) Returns: pairwise distance: (batch_size, num_points, num_points) """ tensor = torch.tensor(tensor) adj_matrix = torch.cdist(tensor,tensor) return adj_matrix def get_pairwise_distance_matrix(tensor, t): """Compute pairwise distance of a tensor. 
Args: tensor: tensor (batch_size, num_points, num_dims) t: scalar Returns: pairwise distance: (batch_size, num_points, num_points) """ # t = 10.55 # Average distance of CIFAR10 # t = 10.55**2 # Average distance square of CIFAR10 if len(tensor.shape)== 2: tensor = np.expand_dims(tensor,0) tensor = torch.tensor(tensor) adj_matrix = torch.squeeze(torch.cdist(tensor,tensor)) adj_matrix = torch.square(adj_matrix) adj_matrix = torch.div( adj_matrix, -4*t) adj_matrix = torch.exp(adj_matrix) adj_matrix = adj_matrix.fill_diagonal_(0) # Delete the diagonal elements return adj_matrix def build_CloudLaplacian(imgs, normalize_exp = True, heat_kernel_t = 10, clamp_value=None): adj_matrix = get_pairwise_distance_matrix(imgs, heat_kernel_t) # Remove large values if clamp_value!=None: zero_tensor = torch.zeros(adj_matrix.size()).to('cuda') adj_matrix = torch.where(adj_matrix > clamp_value, adj_matrix, zero_tensor) if normalize_exp: D = torch.sum(adj_matrix, axis=1) # (batch_size,num_points) eye = torch.eye(adj_matrix.size()[0]) D = torch.diag(1 / torch.sqrt(D)) L = (torch.matmul(torch.matmul(D, adj_matrix), D) - eye).numpy() L = expm(-L) else: D = torch.sum(adj_matrix, axis=1) # (batch_size,num_points) # eye = tf.ones_like(D) # eye = tf.matrix_diag(eye) # D = 1 / tf.sqrt(D) D = torch.diag(D) L = (D - adj_matrix).numpy() return L def get_laplacian_from_adj(adj_matrix, normalize = False, heat_kernel_t = 10, clamp_value=None): # Remove small values adj_matrix = torch.square(adj_matrix) adj_matrix = torch.div( adj_matrix, -4*heat_kernel_t) adj_matrix = torch.exp(adj_matrix) adj_matrix = adj_matrix.fill_diagonal_(0) # Delete the diagonal elements if clamp_value!=None: # remove large values zero_tensor = torch.zeros(adj_matrix.size()).to('cuda') adj_matrix = torch.where(adj_matrix < clamp_value, adj_matrix, zero_tensor) if normalize: D = torch.sum(adj_matrix, axis=1) # (batch_size,num_points) eye = torch.eye(adj_matrix.size()[0]).to('cuda') # Juan Modified This D = torch.diag(1 / torch.sqrt(D)) L = eye - torch.matmul(torch.matmul(D, adj_matrix), D) else: D = torch.sum(adj_matrix, axis=1) # (batch_size,num_points) # eye = tf.ones_like(D) # eye = tf.matrix_diag(eye) # D = 1 / tf.sqrt(D) D = torch.diag(D) L = D - adj_matrix L= L.fill_diagonal_(0) return L def get_gau_adj_from_adj(X_unlab, adj_matrix, normalize = False, heat_kernel_t = 10, clamp_value=None): # Remove small values adj_matrix = torch.square(adj_matrix) # adj_matrix = torch.div( adj_matrix, 4*heat_kernel_t) # adj_matrix = torch.div( adj_matrix, 0.4) adj_matrix = torch.div( adj_matrix, 0.2) # adj_matrix= torch.div(adj_matrix, torch.max(adj_matrix)) adj_matrix = torch.exp(-adj_matrix) # adj_matrix= torch.div(adj_matrix, torch.max(adj_matrix)) # e, V = np.linalg.eig(adj_matrix.cpu().detach().numpy()) adj_matrix = adj_matrix.fill_diagonal_(0) # Delete the diagonal elements # adj_matrix= torch.div(adj_matrix, torch.max(adj_matrix)) zero_tensor = torch.zeros(adj_matrix.size()).to('cuda') # adj_matrix = torch.where(adj_matrix < 1e-5, zero_tensor, adj_matrix) if clamp_value!=None: # remove large values zero_tensor = torch.zeros(adj_matrix.size()).to('cuda') adj_matrix = torch.where(adj_matrix < clamp_value, adj_matrix, zero_tensor) # Remove path through obstacles zero_tensor = torch.zeros(adj_matrix.size()).to('cuda') for i in range(X_unlab.shape[0]): for j in range(i+1, X_unlab.shape[0]): x1 = X_unlab[i, 0] y1 = X_unlab[i, 1] x2 = X_unlab[j, 0] y2 = X_unlab[j, 1] kk = (y2 - y1) / (x2 - x1) if (5- x1) * kk + y1 <=10 and (5- x1) * kk + y1 >= 3 and x1 < 5 and x2 
> 5: # print(adj_matrix[i, j]) adj_matrix[i, j] = 0 adj_matrix[j, i] = 0 if (5- x1) * kk + y1 <=10 and (5- x1) * kk + y1 >= 3 and x2 < 5 and x1 > 5: # print(adj_matrix[i, j]) adj_matrix[i, j] = 0 adj_matrix[j, i] = 0 if (15- x1) * kk + y1 <= 7 and (15- x1) * kk + y1 >= 0 and x2 < 15 and x1 > 15: # print(adj_matrix[i, j]) adj_matrix[i, j] = 0 adj_matrix[j, i] = 0 if (15- x1) * kk + y1 <= 7 and (15- x1) * kk + y1 >= 0 and x1 < 15 and x2 > 15: # print(adj_matrix[i, j]) adj_matrix[i, j] = 0 adj_matrix[j, i] = 0 return adj_matrix def get_euclidean_laplacian_from_adj(adj_matrix, normalize = False, clamp_value=None): # Remove small values adj_matrix = torch.square(adj_matrix) # adj_matrix = torch.div( adj_matrix, -4*heat_kernel_t) # adj_matrix = torch.exp(adj_matrix) # adj_matrix = adj_matrix.fill_diagonal_(0) # Delete the diagonal elements if clamp_value!=None: zero_tensor = torch.zeros(adj_matrix.size()).to('cuda') adj_matrix = torch.where(adj_matrix < clamp_value, adj_matrix, zero_tensor) if normalize: D = torch.sum(adj_matrix, axis=1) # (batch_size,num_points) eye = torch.eye(adj_matrix.size()[0]).to('cuda') # Juan Modified This D = torch.diag(1 / torch.sqrt(D)) L = eye - torch.matmul(torch.matmul(D, adj_matrix), D) else: D = torch.sum(adj_matrix, axis=1) # (batch_size,num_points) # eye = tf.ones_like(D) # eye = tf.matrix_diag(eye) # D = 1 / tf.sqrt(D) D = torch.diag(D) L = D - adj_matrix return L def projsplx(tensor): hk1 = np.argsort(tensor) vals = tensor[hk1] n = len(vals) Flag = True i = n - 1 while Flag: ti = (torch.sum(vals[i + 1:]) - 1) / (n - i) if ti >= vals[i]: Flag = False that = ti else: i = i - 1 if i == 0: Flag = False that = (torch.sum(vals) - 1) / n vals = torch.nn.functional.relu(vals - that) vals = vals/torch.sum(vals).item() return vals[np.argsort(hk1)]
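# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original module).
# It exercises the sheaf-Laplacian pipeline above on a random point cloud;
# the cloud and the bandwidths epsilon/epsilon_pca/gamma_svd are illustrative
# assumptions, not tuned values.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    pts = rng.random((80, 3))*.5  # dense cloud, so every point has neighbours
    Delta_n, S, W, O_i_collection, d_hat, _ = get_laplacians(
        pts, epsilon=.5, epsilon_pca=.8, gamma_svd=.8, tnn_or_gnn="tnn")
    pts_proj = project_data(pts, O_i_collection)  # signal in tangent coordinates
    print(Delta_n.shape, d_hat, pts_proj.shape)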
12,816
36.043353
125
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/mainWindSampling.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Claudio Battiloro
"""
# import warnings
# warnings.filterwarnings("ignore")  # to suppress warnings
import sys
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import torch
from architecture import TNN, MNN
from data_util import WindSampling
device = torch.device("cuda" if torch.cuda.is_available() else torch.device("cpu"))
import numpy as np
from utils import get_laplacians, project_data, topk
from tensorboard import program
import webbrowser
import numpy.ma as ma
import pickle as pkl

# Set Seeds
np.random.seed(0)
pl.seed_everything(0)


# Custom activation function: identity activation
class linear_act(torch.nn.Module):

    def __init__(self):
        super(linear_act, self).__init__()

    def forward(self, x):
        return x


# Open Tensorboard
open_tb = 0
# Select Architecture
tnn_or_mnn = sys.argv[1]

# %% Data Importing
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/data/windfields/data2016.pkl', 'rb') as file:
    data_all = pkl.load(file)
# In the sampling and reconstruction experiment we take a single day
data_all = data_all[0, :, :]
# Normalize the coordinates by the nominal earth radius to avoid numerical instability
R = 6356.8
data_all[:, :3] = data_all[:, :3]/R
# Scale the data to facilitate training
data_all[:, 3:] = (data_all[:, 3:])/(np.max(data_all[:, 3:]) - np.min(data_all[:, 3:]))  # - np.min(data_all[:, 3:])
n_max = data_all.shape[0]
p = 3  # Ambient space dimension
d = 2  # Manifold dimension

# Monte Carlo simulation parameters
outer_num_rel = 8
inner_num_rel = 8
num_avg_samples_coll = [100, 150, 200, 300, 400]  # 1st sampling: to reduce the initial dimensionality -> let us assume that the complete dataset is the complete manifold
avg_sample_pctg_coll = [.5, .7, .9]  # 2nd sampling: the actual mask

# Architecture parameters
in_features = int((data_all.shape[1]-p)/d) if tnn_or_mnn == 'tnn' or tnn_or_mnn == 'ftnn' else data_all.shape[1]-p
features = [8, 4, 1]  # The last number is the output features; the length is the number of layers
if tnn_or_mnn == "mnn" or tnn_or_mnn == "fmnn":
    features[-1] = features[-1]*d
dense = []
lr = 4e-4
if tnn_or_mnn == "fmnn" or tnn_or_mnn == "ftnn":
    readout_sigma = linear_act()  # torch.nn.Tanh()
    sigma = linear_act()
else:
    readout_sigma = linear_act()  # torch.nn.Tanh()
    sigma = torch.nn.Tanh()
kappa = [2]*len(features)
loss_function = torch.nn.MSELoss(reduction='sum')  # reduction='mean'
weight_decay = 1e-3
max_epochs = 500
opt_step_per_epoch = 100  # Total optimization steps = step_per_epoch*max_epochs; the division is useful for logging

# Logging parameters
string = "Wind_Reconstruction"  # Experiment name
save_dir_ = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results'  # Saving directory

# Sheaf Laplacian parameters
epsilon_pca = .8  # .2  # n**(-2/(true_d+1))  # n^{-2/(d+1)}
gamma = .8
epsilon = .5

open_tb = 0  # Opens TensorBoard in the default browser
tracking_address = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string  # TB tracking folder

for num_avg_samples in num_avg_samples_coll:
    print()
    print("Testing with average number of points: "+str(num_avg_samples))
    print()
    p_samp = num_avg_samples/n_max
    for avg_sample_pctg in avg_sample_pctg_coll:
        print()
        print("Testing with masking probability: "+str(avg_sample_pctg))
        print()
        min_mse = np.zeros((outer_num_rel, inner_num_rel))
        # 1st sampling
        for outer_rel in range(outer_num_rel):
            sampling_set = np.random.binomial(1, p_samp, n_max) > 0
            data = data_all[sampling_set, -2:]
            coord = data_all[sampling_set, :3]
            n = coord.shape[0]
            print()
            print("Outer Realization number "+str(outer_rel)+": "+str(n)+" samples!")
            print()
            # Build the Sheaf Laplacian
            if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                Delta_n_numpy, S, W, O_i_collection, d_hat, B_i_collection = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                data_proj = project_data(data, O_i_collection)
            else:
                Delta_n_numpy = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                data_proj = data
            # Replicate the Laplacian for each layer
            Delta_n = len(features)*[torch.from_numpy(Delta_n_numpy)]
            # Net parameters
            hparams = {'in_features': in_features,
                       'L': Delta_n,
                       'features': features,
                       'lr': lr,
                       'weight_decay': weight_decay,
                       'sigma': sigma,
                       'readout_sigma': readout_sigma,
                       'kappa': kappa,
                       'n': n,
                       'loss_function': loss_function,
                       'device': device}
            for inner_rel in range(inner_num_rel):
                # 2nd sampling
                bern = np.random.binomial(1, avg_sample_pctg, n)
                if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                    mask = np.kron(np.ones((1, d)), np.expand_dims(bern, 1)).flatten() > 0
                else:
                    mask = bern > 0
                val_mask = mask == 0
                print()
                print("Inner Realization number "+str(inner_rel)+": "+str(sum(mask))+" masked points!")
                print()
                # Data and net instantiation
                data_torch = WindSampling(data_proj, mask, opt_step_per_epoch, device)
                data_torch_val = WindSampling(data_proj, val_mask, 1, device)
                if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                    net = TNN(**hparams).to(device)
                else:
                    net = MNN(**hparams).to(device)
                train_loader = torch.utils.data.DataLoader(
                    data_torch, batch_size=None, batch_sampler=None,
                    shuffle=True, num_workers=0)
                val_loader = torch.utils.data.DataLoader(
                    data_torch_val, batch_size=None, batch_sampler=None,
                    shuffle=False, num_workers=0)
                logger = pl.loggers.TensorBoardLogger(name=string, save_dir=save_dir_)
                early_stop_callback = EarlyStopping(monitor="test_mse", min_delta=1e-6,
                                                    patience=5, verbose=False, mode="min")
                trainer = pl.Trainer(max_epochs=max_epochs, logger=logger,
                                     log_every_n_steps=1, accelerator='gpu', devices=1,
                                     auto_select_gpus=False, callbacks=[early_stop_callback])
                trainer.fit(net, train_loader, val_loader)
                min_mse[outer_rel, inner_rel] = net.min_mse_val
        min_mse = min_mse[~np.isnan(min_mse).any(axis=1), :]  # Removes eventual corrupted runs (divergent, outliers, etc.)
        to_delete = topk(min_mse, 2)  # Removes the worst 2 (redundant in case of divergent but not NaN runs, see results_aggregator.py)
        mask = np.logical_or(min_mse == to_delete[0], min_mse == to_delete[1])
        min_mse = ma.masked_array(min_mse, mask=mask)
        try:
            with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string+'/res_'+tnn_or_mnn+'.pkl', 'rb') as file:
                mse_dic = pkl.load(file)
            print("Results file already existing... Updating!")
            try:
                tmp = mse_dic["avg_points"+str(num_avg_samples)]
                tmp["avg_mask"+str(avg_sample_pctg)] = {"avg_mse": min_mse.mean(), "std_mse": min_mse.std(), "complete_coll": min_mse}
                mse_dic["avg_points"+str(num_avg_samples)] = tmp
            except:
                mse_dic["avg_points"+str(num_avg_samples)] = {"avg_mask"+str(avg_sample_pctg): {"avg_mse": min_mse.mean(), "std_mse": min_mse.std(), "complete_coll": min_mse}}
            with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string+'/res_'+tnn_or_mnn+'.pkl', 'wb') as file:
                pkl.dump(mse_dic, file)
            print("Updated!")
        except:
            print("Results file not found... Creating!")
            mse_dic = {"avg_points"+str(num_avg_samples): {"avg_mask"+str(avg_sample_pctg): {"avg_mse": min_mse.mean(), "std_mse": min_mse.std(), "complete_coll": min_mse}}}
            with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string+'/res_'+tnn_or_mnn+'.pkl', 'wb') as file:
                pkl.dump(mse_dic, file)
        print(mse_dic)

# TensorBoard monitoring
if open_tb:
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', tracking_address])
    url = tb.launch()
    print(f"Tensorflow listening on {url}")
    webbrowser.open_new(url)
    input("Press Enter to Exit")

"""
print("Minimum TEST MSE: " + str(net.min_mse_val))
print("Misc. Metrics:")
print(trainer.callback_metrics)
"""
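# ---------------------------------------------------------------------------
# Editor's note (addition): the script is driven from the command line; the
# architecture flag is the only argument, and the hard-coded absolute data and
# result paths above are machine-specific and would need editing. Assuming the
# pickled wind-field data and the `architecture`/`data_util` modules are on
# the path, a typical invocation would be e.g.
#
#   python mainWindSampling.py tnn    # tangent-bundle network
#   python mainWindSampling.py mnn    # manifold (cloud-Laplacian) baseline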
9,380
46.619289
171
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/data_util.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Clabat
"""
import torch
import pickle
import pandas as pd
import numpy as np
from collections import defaultdict
import torch.nn.functional as F


class WindSampling(torch.utils.data.Dataset):

    def __init__(self, data_proj_numpy, mask, step_per_epoch, device):
        self.device = device
        torch_data = torch.from_numpy(data_proj_numpy).to(self.device)
        self.X = torch.clone(torch_data)
        self.X[mask, :] = torch.mean(self.X[mask == 0, :])
        self.y = torch.clone(torch_data)
        self.length = data_proj_numpy.shape[0]
        self.mask = torch.from_numpy(mask).to(self.device)
        self.step_per_epoch = step_per_epoch

    def __getitem__(self, index):
        return self.X, self.y, self.mask

    def __len__(self):  # Returns length
        return self.step_per_epoch


class TorusDenoising(torch.utils.data.Dataset):

    def __init__(self, data_clean, data_noisy, step_per_epoch, device):
        self.device = device
        torch_data_clean = torch.from_numpy(data_clean).to(self.device)
        torch_data_noisy = torch.from_numpy(data_noisy).to(self.device)
        self.X = torch.clone(torch_data_noisy)
        self.y = torch.clone(torch_data_clean)
        self.step_per_epoch = step_per_epoch

    def __getitem__(self, index):
        return self.X, self.y

    def __len__(self):  # Returns length
        return self.step_per_epoch


class WindPrediction(torch.utils.data.Dataset):

    def __init__(self, data_proj_numpy, time_window, device):
        self.device = device
        self.X = torch.from_numpy(data_proj_numpy).to(self.device)
        self.time_window = time_window

    def __getitem__(self, idx):
        x = self.X[idx:(idx + self.time_window), :, :]
        y = self.X[(idx + self.time_window):(idx + 2*self.time_window), :, :]
        return x, y

    def __len__(self):
        return self.X.shape[0] - 2*self.time_window
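# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original module).
# WindSampling returns the full (projected) signal at every step: masked
# entries of X are imputed with the mean of the observed ones, y keeps the
# ground truth, and `step_per_epoch` only sets how many optimization steps one
# "epoch" contains. The shapes below are illustrative.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    data_proj = rng.standard_normal((200, 1))  # stacked n*d_hat signal
    mask = rng.random(200) > .5                # entries hidden from the net
    ds = WindSampling(data_proj, mask, step_per_epoch=10, device='cpu')
    X, y, m = ds[0]
    print(len(ds), X.shape, y.shape, int(m.sum().item()))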
1,956
30.063492
71
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/layers.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Claudio Battiloro
"""
import torch
import torch.nn as nn


# Graph Convolutional Neural Network Layer
class GNNLayer(nn.Module):

    def __init__(self, F_in, F_out, L, kappa, device, sigma):
        """
        Parameters
        ----------
        F_in: Number of input signals
        F_out: Number of output signals
        L: Shift operator
        kappa: Filter order
        device: Device
        sigma: Non-linearity
        """
        super(GNNLayer, self).__init__()
        self.K = kappa
        self.F_in = F_in
        self.F_out = F_out
        self.sigma = sigma
        self.L = L
        if self.L.type() == 'torch.cuda.DoubleTensor':
            self.W = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device).double())
            self.b = nn.Parameter(torch.empty(size=(1, 1)).to(device).double())
        else:
            self.W = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device))
            self.b = nn.Parameter(torch.empty(size=(1, 1)).to(device))
        self.reset_parameters()
        self.device = device

    def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_uniform_(self.W.data, gain=gain)
        nn.init.xavier_uniform_(self.b.data, gain=gain)

    def forward(self, x):
        # Polynomial filter in the shift operator: out = sigma(sum_k L^{k+1} x W_k)
        alpha_zero = torch.clone(self.L)
        data = torch.clone(x)
        alpha_k = torch.clone(alpha_zero)
        try:
            z_i = alpha_k @ torch.clone(data @ self.W[0])
        except:
            alpha_k = alpha_k.to(data.device)
            z_i = alpha_k @ torch.clone(data @ self.W[0])
        for k in range(1, self.K):
            alpha_k = alpha_k @ alpha_zero
            z_i += alpha_k @ data @ self.W[k]
        out = self.sigma(z_i)
        return out


# Recurrent Graph Convolutional Neural Network Layer
class RGNNLayer(nn.Module):

    def __init__(self, F_in, F_out, L, kappa, device, sigma, time_window):
        """
        Parameters
        ----------
        F_in: Number of input signals
        F_out: Number of output signals
        L: Shift operator
        kappa: Filter order
        device: Device
        sigma: Non-linearity
        time_window: Prediction time window
        """
        super(RGNNLayer, self).__init__()
        self.K = kappa
        self.F_in = F_in
        self.F_out = F_out
        self.sigma = sigma
        self.time_window = time_window
        self.L = L
        if self.L.type() == 'torch.cuda.DoubleTensor':
            self.W = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device).double())
            self.H = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device).double())
            self.b = nn.Parameter(torch.empty(size=(1, 1)).to(device).double())
        else:
            self.W = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device))
            self.H = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device))
            self.b = nn.Parameter(torch.empty(size=(1, 1)).to(device))
        self.reset_parameters()
        self.device = device

    def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_uniform_(self.W.data, gain=gain)
        nn.init.xavier_uniform_(self.H.data, gain=gain)
        nn.init.xavier_uniform_(self.b.data, gain=gain)

    def forward(self, x):
        # x is batch_size x how_many_time_slots x number_of_nodes x number_of_features
        alpha_zero = torch.clone(self.L)
        data = torch.clone(x).to(self.device).double()
        out = torch.zeros(data.shape)
        for data_point in range(data.shape[0]):  # Batch loop: inefficient, can be improved with PyTorch Geometric
            hidden_state = torch.zeros(data.shape[2:])
            for t in range(self.time_window):  # Time loop
                alpha_k = torch.clone(alpha_zero)
                hidden_state = hidden_state.to(self.device).double()
                try:
                    z_i = alpha_k @ torch.clone(data[data_point, t, :, :] @ self.W[0]) \
                        + alpha_k @ torch.clone(hidden_state @ self.H[0])
                except:
                    alpha_k = alpha_k.to(data.device)
                    z_i = alpha_k @ torch.clone(data[data_point, t, :, :] @ self.W[0]) \
                        + alpha_k @ torch.clone(hidden_state @ self.H[0])
                for k in range(1, self.K):
                    alpha_k = alpha_k @ alpha_zero
                    z_i += alpha_k @ data[data_point, t, :, :] @ self.W[k] \
                        + alpha_k @ torch.clone(hidden_state @ self.H[k])
                hidden_state = self.sigma(z_i)
                out[data_point, t, :, :] = hidden_state
        return out
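# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original module):
# a single GNNLayer applied to a random signal over a random symmetric shift
# operator. All sizes are illustrative assumptions.
if __name__ == '__main__':
    n, F_in, F_out, K = 20, 4, 8, 2
    A = torch.rand(n, n)
    L = (A + A.T)/2                               # symmetric shift operator
    layer = GNNLayer(F_in, F_out, L, K, 'cpu', nn.Tanh())
    x = torch.rand(n, F_in)                       # one signal per node
    print(layer(x).shape)                         # -> torch.Size([20, 8])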
4,752
37.959016
135
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/result_aggregator.py
import pickle as pkl
import sys
import numpy as np
import numpy.ma as ma

string = sys.argv[1]

with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/Journal_repo/results/'+string+'/res_tnn.pkl', 'rb') as file:
    mse_dic_tnn = pkl.load(file)
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/Journal_repo/results/'+string+'/res_mnn.pkl', 'rb') as file:
    mse_dic_mnn = pkl.load(file)

try:
    assert mse_dic_tnn.keys() == mse_dic_mnn.keys()
    print("All Sample Sizes Coincide!")
    keys = mse_dic_tnn.keys()
except:
    print("Not all Sample Sizes Coincide! Using the intersection...")
    keys = set(mse_dic_tnn.keys()).intersection(set(mse_dic_mnn.keys()))

thresh = float(sys.argv[2])
verbose = int(sys.argv[3])
keys_mask = {}
for sample_size in keys:
    try:
        assert mse_dic_tnn[sample_size].keys() == mse_dic_mnn[sample_size].keys()
        print("All Masks/Noise Variances Coincide for Sample Size "+sample_size+"!")
        keys_mask[sample_size] = mse_dic_tnn[sample_size].keys()
    except:
        print("Not all Masks/Noise Variances Coincide for Sample Size "+sample_size+"! Using the intersection...")
        keys_mask[sample_size] = set(mse_dic_tnn[sample_size].keys()).intersection(set(mse_dic_mnn[sample_size].keys()))
    for mask_size in keys_mask[sample_size]:
        # Delete runs over the threshold (divergent or badly trained)
        if np.sum(mse_dic_tnn[sample_size][mask_size]["complete_coll"] > thresh) > 0:
            if verbose:
                print("Architecture: TNN")
                print("Sample Size:")
                print(sample_size)
                print("Mask Size/Noise Variance:")
                print(mask_size)
                print("Before:")
                print(mse_dic_tnn[sample_size][mask_size])
            tmp = mse_dic_tnn[sample_size][mask_size]["complete_coll"]
            mask = tmp > thresh
            min_mse = ma.masked_array(tmp, mask=mask)
            mse_dic_tnn[sample_size][mask_size]["complete_coll"] = min_mse
            mse_dic_tnn[sample_size][mask_size]["avg_mse"] = min_mse.mean()
            mse_dic_tnn[sample_size][mask_size]["std_mse"] = min_mse.std()
            if verbose:
                print("After:")
                print(mse_dic_tnn[sample_size][mask_size])
        if np.sum(mse_dic_mnn[sample_size][mask_size]["complete_coll"] > thresh) > 0:
            if verbose:
                print("Architecture: MNN")
                print("Sample Size:")
                print(sample_size)
                print("Mask Size/Noise Variance:")
                print(mask_size)
                print("Before:")
                print(mse_dic_mnn[sample_size][mask_size])
            tmp = mse_dic_mnn[sample_size][mask_size]["complete_coll"]
            mask = tmp > thresh
            min_mse = ma.masked_array(tmp, mask=mask)
            mse_dic_mnn[sample_size][mask_size]["complete_coll"] = min_mse
            mse_dic_mnn[sample_size][mask_size]["avg_mse"] = min_mse.mean()
            mse_dic_mnn[sample_size][mask_size]["std_mse"] = min_mse.std()
            if verbose:
                print("After:")
                print(mse_dic_mnn[sample_size][mask_size])

aggregated_results = {}
for sample_size in keys:
    for mask_size in keys_mask[sample_size]:
        print("Sample Size:")
        print(sample_size)
        print("Mask Size/Noise Variance:")
        print(mask_size)
        print("Who's Better?")
        print("TNN" if mse_dic_tnn[sample_size][mask_size]['avg_mse'] < mse_dic_mnn[sample_size][mask_size]['avg_mse']
              else "MNN")
        tmp = {'avg_mse_tnn': mse_dic_tnn[sample_size][mask_size]['avg_mse'],
               'std_mse_tnn': mse_dic_tnn[sample_size][mask_size]['std_mse'],
               'avg_mse_mnn': mse_dic_mnn[sample_size][mask_size]['avg_mse'],
               'std_mse_mnn': mse_dic_mnn[sample_size][mask_size]['std_mse']}
        if sample_size in aggregated_results.keys():
            aggregated_results[sample_size][mask_size] = tmp
        else:
            aggregated_results[sample_size] = {mask_size: tmp}
        # print(mse_dic_tnn[sample_size][mask_size])
        # print(mse_dic_mnn[sample_size][mask_size])
        print(tmp)

print(aggregated_results.keys())
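# ---------------------------------------------------------------------------
# Editor's note (addition): intended invocation, assuming the two result
# pickles produced by the main scripts already exist under results/<EXPERIMENT>/:
#
#   python result_aggregator.py Wind_Reconstruction 1.0 1
#
# argv[1] is the experiment name, argv[2] the MSE threshold above which a run
# is treated as divergent, and argv[3] a 0/1 verbosity flag.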
4,357
46.369565
156
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/mainTorusDenoising.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Claudio Battiloro
"""
# import warnings
# warnings.filterwarnings("ignore")  # to suppress warnings
import sys
import pickle as pkl
import numpy.ma as ma
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import torch
from architecture import TNN, MNN
from data_util import TorusDenoising
device = torch.device("cuda" if torch.cuda.is_available() else torch.device("cpu"))
import numpy as np
from utils import get_laplacians, project_data, topk
from tensorboard import program
import webbrowser

# Set Seeds
np.random.seed(0)
pl.seed_everything(0)


# Custom activation function: identity activation
class linear_act(torch.nn.Module):

    def __init__(self):
        super(linear_act, self).__init__()

    def forward(self, x):
        return x


# Open Tensorboard
open_tb = 0
# Select Architecture
tnn_or_mnn = sys.argv[1]

# %% Synthetic Data Generation
res = 100  # The torus is sampled on a regular grid of res^2 points
p = 3  # Ambient space dimension
d = 2  # Manifold dimension
# Torus sampling
phi = np.linspace(0, 2*np.pi, res)
theta = np.linspace(0, 2*np.pi, res)
phi, theta = np.meshgrid(phi, theta)
phi = phi.flatten()
theta = theta.flatten()
r = .1
b = .3
x = np.expand_dims((r*np.cos(theta) + b)*np.cos(phi), 1)
y = np.expand_dims((r*np.cos(theta) + b)*np.sin(phi), 1)
z = np.expand_dims((r*np.sin(theta)), 1)
coord_max = np.concatenate((x, y, z), 1)
# Smooth tangent vector field on the torus
X = np.expand_dims(-np.sin(theta), 1)
Y = np.expand_dims(np.cos(theta), 1)
Z = np.expand_dims(np.zeros(len(theta)), 1)
data_all = np.concatenate((X, Y, Z), 1)
n_max = data_all.shape[0]

# Monte Carlo simulation parameters
outer_num_rel = 8
inner_num_rel = 8
num_avg_samples_coll = [400]  # [100, 200, 150, 300, 450]  # 1st sampling: to reduce the initial dimensionality -> let us assume that the complete dataset is the complete manifold
noise_sds_coll = [7e-2, 1e-1, 3e-1]  # 2nd stage: noise standard deviations

# Architecture parameters
in_features = int(data_all.shape[1]/d) if tnn_or_mnn == 'tnn' or tnn_or_mnn == 'ftnn' else data_all.shape[1]
features = [8, 4, 1]  # The last number is the output features; the length is the number of layers
if tnn_or_mnn == "mnn" or tnn_or_mnn == "fmnn":
    features[-1] = features[-1]*p
dense = []
lr = 1e-3
if tnn_or_mnn == "fmnn" or tnn_or_mnn == "ftnn":
    readout_sigma = linear_act()
    sigma = linear_act()  # torch.nn.ReLU()
else:
    readout_sigma = linear_act()
    sigma = torch.nn.Tanh()  # torch.nn.ReLU()
kappa = [2]*len(features)
loss_function = torch.nn.MSELoss(reduction='sum')
weight_decay = 0.0
step_per_epoch = 100
max_epochs = 500

# Logging parameters
string = "Torus_Denoising"  # Experiment name
save_dir_ = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results'  # Saving directory

# Sheaf Laplacian parameters
epsilon_pca = .8  # .2  # n**(-2/(true_d+1))  # n^{-2/(d+1)}
epsilon = .5
gamma = .8

open_tb = 0  # Opens TensorBoard in the default browser
tracking_address = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string  # TB tracking folder

for num_avg_samples in num_avg_samples_coll:
    print()
    print("Testing with average number of points: "+str(num_avg_samples))
    print()
    p_samp = num_avg_samples/n_max
    for noise_sd in noise_sds_coll:
        print()
        print("Testing with noise standard deviation: "+str(noise_sd))
        print()
        min_mse = np.zeros((outer_num_rel, inner_num_rel))
        # 1st sampling (to reduce the initial dimensionality and ensure random
        # sampling -> let us assume that the complete dataset is the complete manifold)
        for outer_rel in range(outer_num_rel):
            sampling_set = np.random.binomial(1, p_samp, n_max) > 0
            data = data_all[sampling_set, :]
            coord = coord_max[sampling_set, :]
            n = coord.shape[0]
            print()
            print("Outer Realization number "+str(outer_rel)+": "+str(n)+" samples!")
            print()
            for inner_rel in range(inner_num_rel):
                print()
                print("Inner Realization number "+str(inner_rel))
                print()
                # Add noise to the data
                data_noisy = data + np.random.normal(0.0, noise_sd, size=data.shape)
                # Build the Sheaf Laplacian
                if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                    Delta_n_numpy, S, W, O_i_collection, _, B_i_collection = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                    data_proj = project_data(data, O_i_collection)
                    data_proj_noisy = project_data(data_noisy, O_i_collection)
                else:
                    Delta_n_numpy = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                    data_proj = data
                    data_proj_noisy = data_noisy
                # Replicate the Laplacian for each layer
                Delta_n = len(features)*[torch.from_numpy(Delta_n_numpy)]
                # Net parameters
                hparams = {'in_features': in_features,
                           'L': Delta_n,
                           'features': features,
                           'lr': lr,
                           'weight_decay': weight_decay,
                           'sigma': sigma,
                           'readout_sigma': readout_sigma,
                           'kappa': kappa,
                           'n': n,
                           'loss_function': loss_function,
                           'device': device}
                # Data and net instantiation
                data_torch = TorusDenoising(data_proj, data_proj_noisy, step_per_epoch, device)
                if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                    net = TNN(**hparams).to(device)
                else:
                    net = MNN(**hparams).to(device)
                train_loader = torch.utils.data.DataLoader(
                    data_torch, batch_size=None, batch_sampler=None,
                    shuffle=True, num_workers=0)
                logger = pl.loggers.TensorBoardLogger(name=string, save_dir=save_dir_)
                early_stop_callback = EarlyStopping(monitor="train_mse", min_delta=1e-6,
                                                    patience=5, verbose=False, mode="min")
                trainer = pl.Trainer(max_epochs=max_epochs, logger=logger,
                                     log_every_n_steps=1, accelerator='gpu', devices=1,
                                     auto_select_gpus=False, callbacks=[early_stop_callback])
                trainer.fit(net, train_loader)
                min_mse[outer_rel, inner_rel] = net.min_mse_train
        min_mse = min_mse[~np.isnan(min_mse).any(axis=1), :]  # Removes eventual corrupted runs (divergent, outliers, etc.)
        to_delete = topk(min_mse, 2)  # Removes the worst 2 (redundant in case of divergent but not NaN runs, see results_aggregator.py)
        mask = np.logical_or(min_mse == to_delete[0], min_mse == to_delete[1])
        min_mse = ma.masked_array(min_mse, mask=mask)
        try:
            with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string+'/res_'+tnn_or_mnn+'.pkl', 'rb') as file:
                mse_dic = pkl.load(file)
            print("Results file already existing... Updating!")
            try:
                tmp = mse_dic["avg_points"+str(num_avg_samples)]
                tmp["noise_sd"+str(noise_sd)] = {"avg_mse": min_mse.mean(), "std_mse": min_mse.std(), "complete_coll": min_mse}
                mse_dic["avg_points"+str(num_avg_samples)] = tmp
            except:
                mse_dic["avg_points"+str(num_avg_samples)] = {"noise_sd"+str(noise_sd): {"avg_mse": min_mse.mean(), "std_mse": min_mse.std(), "complete_coll": min_mse}}
            with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string+'/res_'+tnn_or_mnn+'.pkl', 'wb') as file:
                pkl.dump(mse_dic, file)
            print("Updated!")
        except:
            print("Results file not found... Creating!")
            mse_dic = {"avg_points"+str(num_avg_samples): {"noise_sd"+str(noise_sd): {"avg_mse": min_mse.mean(), "std_mse": min_mse.std(), "complete_coll": min_mse}}}
            with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string+'/res_'+tnn_or_mnn+'.pkl', 'wb') as file:
                pkl.dump(mse_dic, file)
        print(mse_dic)

# TensorBoard monitoring
if open_tb:
    tracking_address = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', tracking_address])
    url = tb.launch()
    print(f"Tensorflow listening on {url}")
    webbrowser.open_new(url)
    input("Press Enter to Exit")
9,188
45.64467
172
py