repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/dpn.py | '''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """Dual-path bottleneck: a residual (summed) branch plus a densely-growing
    (concatenated) branch, as in Dual Path Networks."""

    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth
        total = out_planes + dense_depth  # residual channels + new dense channels
        # 1x1 reduce -> 3x3 grouped conv (cardinality 32) -> 1x1 expand.
        self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
                               padding=1, groups=32, bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes, total, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(total)
        # Projection shortcut only on the first block of a stage, where the
        # spatial size and/or channel count change.
        if first_layer:
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes, total, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(total),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        res = self.shortcut(x)
        d = self.out_planes
        # First d channels are summed (ResNet path); the remaining channels of
        # both tensors are concatenated (DenseNet path).
        merged = torch.cat([res[:, :d] + y[:, :d], res[:, d:], y[:, d:]], 1)
        return F.relu(merged)
class DPN(nn.Module):
    """Dual Path Network for 32x32 inputs (e.g. CIFAR-10, 10 classes)."""

    def __init__(self, cfg):
        super(DPN, self).__init__()
        in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
        num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.last_planes = 64
        # Four stages; only the first keeps the input resolution.
        self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
        self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
        self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
        self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
        # Final width: residual channels + accumulated dense channels.
        self.linear = nn.Linear(out_planes[3] + (num_blocks[3] + 1) * dense_depth[3], 10)

    def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
        # Only the first block of a stage downsamples / projects the shortcut.
        blocks = []
        for idx in range(num_blocks):
            blk_stride = stride if idx == 0 else 1
            blocks.append(Bottleneck(self.last_planes, in_planes, out_planes,
                                     dense_depth, blk_stride, idx == 0))
            # Dense channels accumulate: after block idx the stage width is
            # out_planes + (idx + 2) * dense_depth.
            self.last_planes = out_planes + (idx + 2) * dense_depth
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def DPN26():
    """DPN-26: four stages of two dual-path blocks each."""
    cfg = {
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (2, 2, 2, 2),
        'dense_depth': (16, 32, 24, 128),
    }
    return DPN(cfg)
def DPN92():
    """DPN-92: stage depths (3, 4, 20, 3)."""
    cfg = {
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (3, 4, 20, 3),
        'dense_depth': (16, 32, 24, 128),
    }
    return DPN(cfg)
def test():
    # Smoke test: push one random CIFAR-sized batch through DPN-92 and print
    # the (1, 10) logits.
    net = DPN92()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y)
# test()
| 3,562 | 34.989899 | 116 | py |
SBM-Transformer | SBM-Transformer-main/code/model_wrapper.py | import torch
import torch.nn as nn
import math
from model import Model
def pooling(inp, mode):
    """Pool a (batch, seq, dim) tensor down to (batch, dim).

    mode "CLS" takes the first token's representation; "MEAN" averages over
    the sequence dimension.

    Raises:
        ValueError: for any other mode (the original raised a bare, message-
        less Exception, which made config errors hard to diagnose; ValueError
        is a subclass of Exception so existing broad handlers still work).
    """
    if mode == "CLS":
        pooled = inp[:, 0, :]
    elif mode == "MEAN":
        pooled = inp.mean(dim = 1)
    else:
        raise ValueError(f"unsupported pooling mode: {mode!r}")
    return pooled
def append_cls(inp, mask, vocab_size):
    """Prepend a [CLS] token (id = vocab_size - 1) to every sequence.

    The last position of each sequence is dropped so the length stays fixed;
    the attention mask is shifted the same way (the CLS slot is always 1).
    """
    n = inp.size(0)
    cls_column = torch.full((n, 1), vocab_size - 1, dtype=torch.long, device=inp.device)
    mask_column = torch.ones((n, 1), dtype=torch.float, device=mask.device)
    shifted_inp = torch.cat([cls_column, inp[:, :-1]], dim=-1)
    shifted_mask = torch.cat([mask_column, mask[:, :-1]], dim=-1)
    return shifted_inp, shifted_mask
class SCHead(nn.Module):
    """Single-sequence classification head: pool, then a 2-layer MLP."""

    def __init__(self, config):
        super().__init__()
        self.pooling_mode = config["pooling_mode"]
        hidden = config["transformer_hidden_dim"]
        self.mlpblock = nn.Sequential(
            nn.Linear(config["transformer_dim"], hidden),
            nn.ReLU(),
            nn.Linear(hidden, config["num_classes"]),
        )

    def forward(self, inp):
        pooled = pooling(inp, self.pooling_mode)
        return self.mlpblock(pooled)
class ModelForSC(nn.Module):
    """Single-input sequence classifier: backbone Model + SCHead.

    "sbm" attention types also report a per-layer attention sparsity, which
    is averaged into outputs["sparsity"].
    """

    def __init__(self, config):
        super().__init__()
        self.enable_amp = config["mixed_precision"]  # torch.cuda.amp autocast flag
        self.pooling_mode = config["pooling_mode"]
        self.vocab_size = config["vocab_size"]
        self.attn_type = config["attn_type"]
        self.model = Model(config)
        self.seq_classifer = SCHead(config)

    def forward(self, input_ids_0, mask_0, label):
        with torch.cuda.amp.autocast(enabled = self.enable_amp):
            if self.pooling_mode == "CLS":
                # CLS pooling needs the [CLS] token prepended first.
                input_ids_0, mask_0 = append_cls(input_ids_0, mask_0, self.vocab_size)
            if self.attn_type.startswith("sbm"):
                # SBM attention returns (tokens, per-layer sparsity tuple).
                token_out, all_sparsity = self.model(input_ids_0, mask_0)
            else:
                token_out = self.model(input_ids_0, mask_0)
            seq_scores = self.seq_classifer(token_out)

            # Per-example loss/accuracy (reduction happens in the trainer).
            seq_loss = torch.nn.CrossEntropyLoss(reduction = "none")(seq_scores, label)
            seq_accu = (seq_scores.argmax(dim = -1) == label).to(torch.float32)
            outputs = {}
            outputs["loss"] = seq_loss
            outputs["accu"] = seq_accu
            if self.attn_type.startswith("sbm"):
                # Mean sparsity across layers; stack handles 0-dim entries.
                outputs["sparsity"] = torch.mean(torch.stack((all_sparsity)))
        return outputs
class SCHeadDual(nn.Module):
    """Two-sequence classification head (retrieval task).

    Pools both sequences and feeds [X0, X1, X0*X1, X0-X1] to a 2-layer MLP.
    """

    def __init__(self, config):
        super().__init__()
        self.pooling_mode = config["pooling_mode"]
        hidden = config["transformer_hidden_dim"]
        self.mlpblock = nn.Sequential(
            nn.Linear(config["transformer_dim"] * 4, hidden),
            nn.ReLU(),
            nn.Linear(hidden, config["num_classes"]),
        )

    def forward(self, inp_0, inp_1):
        X_0 = pooling(inp_0, self.pooling_mode)
        X_1 = pooling(inp_1, self.pooling_mode)
        features = torch.cat([X_0, X_1, X_0 * X_1, X_0 - X_1], dim = -1)
        return self.mlpblock(features)
class ModelForSCDual(nn.Module):
    """Dual-input sequence classifier (retrieval): shared Model + SCHeadDual.

    Both inputs pass through the same backbone. For "sbm" attention types the
    per-layer attention sparsities of the two passes are averaged into
    outputs["sparsity"].
    """

    def __init__(self, config):
        super().__init__()
        self.enable_amp = config["mixed_precision"]  # torch.cuda.amp autocast flag
        self.pooling_mode = config["pooling_mode"]
        self.vocab_size = config["vocab_size"]
        self.attn_type = config["attn_type"]
        self.model = Model(config)
        self.seq_classifer = SCHeadDual(config)

    def forward(self, input_ids_0, input_ids_1, mask_0, mask_1, label):
        with torch.cuda.amp.autocast(enabled = self.enable_amp):
            if self.pooling_mode == "CLS":
                input_ids_0, mask_0 = append_cls(input_ids_0, mask_0, self.vocab_size)
                input_ids_1, mask_1 = append_cls(input_ids_1, mask_1, self.vocab_size)
            if self.attn_type.startswith("sbm"):
                token_out_0, all_sparsity_0 = self.model(input_ids_0, mask_0)
                token_out_1, all_sparsity_1 = self.model(input_ids_1, mask_1)
            else:
                token_out_0 = self.model(input_ids_0, mask_0)
                token_out_1 = self.model(input_ids_1, mask_1)
            seq_scores = self.seq_classifer(token_out_0, token_out_1)

            # Per-example loss/accuracy (reduction happens in the trainer).
            seq_loss = torch.nn.CrossEntropyLoss(reduction = "none")(seq_scores, label)
            seq_accu = (seq_scores.argmax(dim = -1) == label).to(torch.float32)
            outputs = {}
            outputs["loss"] = seq_loss
            outputs["accu"] = seq_accu
            if self.attn_type.startswith("sbm"):
                # Bug fix: `torch.cat` cannot concatenate the 0-dim sparsity
                # scalars produced by SBMAttention's fastRG path; use
                # `torch.stack`, matching ModelForSC. The mean is identical
                # for the 1-dim (per-head) case.
                outputs["sparsity"] = (torch.mean(torch.stack(all_sparsity_0))
                                       + torch.mean(torch.stack(all_sparsity_1))) / 2
        return outputs
| 4,761 | 35.630769 | 119 | py |
SBM-Transformer | SBM-Transformer-main/code/attention_sbm.py | import torch
import torch.nn as nn
import math
import json
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from fastRG import fastRG
from STE import *
import time
from dgl.nn.functional import edge_softmax
import dgl.function as fn
import dgl.ops as DF
import dgl
@torch.no_grad()
def block_diag(m):
    """Build a block-diagonal matrix from a batch of blocks.

    `m` has shape (*batch, n, r, c); the n blocks of shape (r, c) are placed
    on the diagonal of an (n*r, n*c) output per batch element (off-diagonal
    entries are zero).
    """
    ndim = m.dim()
    n_blocks = m.shape[-3]
    batch_shape = m.shape[:-3]
    block_shape = m.shape[-2:]
    # Broadcast each block against a one-hot selector so block i only
    # survives at diagonal slot i.
    expanded = m.unsqueeze(-2)
    selector = torch.eye(n_blocks, device=m.device).unsqueeze(-2)
    selector = selector.reshape(
        torch.Size([1] * (ndim - 3)) + selector.shape + torch.Size([1])
    )
    out_shape = batch_shape + torch.Size(torch.tensor(block_shape) * n_blocks)
    return (expanded * selector).reshape(out_shape)
@torch.no_grad()
def attach_dim(v, n_dim_to_prepend=0, n_dim_to_append=0):
    """Reshape `v` with extra singleton dims before and after its shape."""
    new_shape = (1,) * n_dim_to_prepend + tuple(v.shape) + (1,) * n_dim_to_append
    return v.reshape(new_shape)
class SBMAttention(nn.Module):
def __init__(self, config):
    """Stochastic-block-model (SBM) attention.

    Learns `num_clusters` cluster embeddings per head; projected queries and
    keys are softly assigned to clusters to parameterize a random bipartite
    graph over token pairs (sampled in forward).
    """
    super().__init__()
    self.drop_attn = nn.Dropout(p = config["attention_dropout"])
    self.head_dim = config["head_dim"]
    self.type = config["sbm_type"]  # 'fastRG' selects the sampled sparse-graph path
    self.num_head = config["num_head"]
    self.num_clusters = config["num_clusters"]
    # Per-head cluster centers: (num_head, num_clusters, head_dim).
    self.clusters = nn.Parameter(torch.empty(self.num_head, self.num_clusters, self.head_dim))
    # Small MLP applied to Q/K before the cluster-membership sigmoid.
    self.proj = nn.Sequential(
        nn.Linear(self.head_dim, self.head_dim),
        nn.ReLU(),
        nn.Linear(self.head_dim, self.head_dim)
    )
    nn.init.kaiming_normal_(self.clusters)
def forward(self, Q, K, V, mask):
b, h, n, d = Q.shape
_, _, m, _ = V.shape
k = self.num_clusters
dist = torch.matmul(self.clusters, torch.transpose(self.clusters, -1, -2))
# Activation for inter-cluster correlations
S = nn.Softmax(dim=-1)(dist.reshape(self.num_head, self.num_clusters**2)).reshape(self.num_head,k,k).unsqueeze(0).repeat((b,1,1,1))
Qhat = nn.Sigmoid()(torch.matmul(self.proj(Q), self.clusters.transpose(-1, -2))) # Original
Khat = nn.Sigmoid()(torch.matmul(self.proj(K), self.clusters.transpose(-1, -2)))
if self.type == 'fastRG':
src, dst = fastRG(block_diag(Qhat.view(b*h,n,k)),
block_diag(S.view(b*h,k,k)),
block_diag(Khat.view(b*h,n,k)))
graph = dgl.graph((src, dst), num_nodes=b*h*n)
graph.dstdata.update({'v':V.reshape(b*h*n, d)})
edata = DF.v_dot_u(graph, Q.reshape(b*h*n, d), K.reshape(b*h*n, d))
# Compute probs of sampled edges
eprobs = DF.u_dot_v(graph,
Qhat.reshape(b*h*n, k),
torch.matmul(Khat, S.transpose(-1,-2)).reshape(b*h*n,k))
# Pass through STE
edata = EdgeSample.apply(eprobs, edata)
# Compute attention per edge
graph.edata['a'] = edge_softmax(graph, edata, norm_by='dst')
# Attention via Message Passing
graph.update_all(fn.u_mul_e('v','a','m'), fn.sum('m', 'y'))
del src, dst
return graph.dstdata['y'].view(b,h,n,d), torch.sum(torch.ones_like(eprobs))/(b*h*(n**2))
else:
expA = torch.matmul(Qhat, torch.matmul(S, Khat.transpose(-1, -2)))
graph = SampleGraphSparseGraph.apply(expA)
dot = torch.matmul(Q, torch.transpose(K, -2, -1))
dot = dot / math.sqrt(self.head_dim)
dot.masked_fill_(mask[:,None,None,:] == 0, float('-inf')) # first apply user-provided mask
attn = F.normalize(nn.Softmax(dim=-1)(dot)*graph, p=1, dim=-1)
X = torch.matmul(self.drop_attn(attn), V) # apply dropout then matmul
sparsity = torch.sum(graph, dim=(0,-1,-2))/(b*n*m) # head-wise sparsity
return X, sparsity | 4,000 | 33.791304 | 139 | py |
SBM-Transformer | SBM-Transformer-main/code/attention_linformer.py | import torch
import torch.nn as nn
import math
class LinformerAttention(nn.Module):
    """Linformer attention: project keys/values down to `linformer_k` rows.

    The (num_head, k, seq_len) projection matrix is created once and shared
    by every instance of this class (class attribute), per the Linformer
    parameter-sharing scheme.
    """

    projection_matrix = None

    def __init__(self, config):
        super().__init__()
        self.num_head = config["num_head"]
        self.head_dim = config["head_dim"]
        self.linformer_k = config["linformer_k"]
        self.seq_len = config["max_seq_len"]

        if LinformerAttention.projection_matrix is None:
            shared = nn.Parameter(torch.Tensor(self.num_head, self.linformer_k, self.seq_len))
            torch.nn.init.normal_(shared, std = 0.02)
            LinformerAttention.projection_matrix = shared
        self.E = LinformerAttention.projection_matrix

    def forward(self, Q, K, V, mask):
        # Zero out padded positions, then compress the sequence axis to k.
        K_low = torch.matmul(self.E, K * mask[:, None, :, None])
        V_low = torch.matmul(self.E, V * mask[:, None, :, None])

        scores = torch.matmul(Q, torch.transpose(K_low, -2, -1)) / math.sqrt(self.head_dim)
        attn = nn.functional.softmax(scores, dim = -1)
        return torch.matmul(attn, V_low)

    def extra_repr(self):
        return f'linformer_k={self.linformer_k}'
| 1,203 | 30.684211 | 124 | py |
SBM-Transformer | SBM-Transformer-main/code/attention_nystrom.py | import torch
import torch.nn as nn
import math
class NystromAttention(nn.Module):
def __init__(self, config):
    """Nystrom attention with `num_landmarks` landmark tokens.

    Optionally adds a depthwise conv residual on V when "conv_kernel_size"
    is present in the config.
    """
    super().__init__()

    self.head_dim = config["head_dim"]
    self.num_head = config["num_head"]
    self.num_landmarks = config["num_landmarks"]
    self.seq_len = config["max_seq_len"]

    # Bug fix: the original checked `"inv_coeff_init_option" in config` but
    # then read config["inv_init_coeff_option"] (transposed key), raising
    # KeyError whenever only the checked key was supplied. Check and read
    # the same key, defaulting to "original" as before.
    self.init_option = config.get("inv_coeff_init_option", "original")

    self.use_conv = "conv_kernel_size" in config
    if self.use_conv:
        # Depthwise (per-head) convolution along the sequence axis.
        self.conv = nn.Conv2d(
            in_channels = self.num_head, out_channels = self.num_head,
            kernel_size = (config["conv_kernel_size"], 1), padding = (config["conv_kernel_size"] // 2, 0),
            bias = False,
            groups = self.num_head)
def forward(self, Q, K, V, mask):
    """Approximate softmax attention via the Nystrom method.

    Q, K, V: (batch, num_head, seq_len, head_dim); mask: (batch, seq_len),
    1 for real tokens. Assumes seq_len % num_landmarks == 0 (reshape below).
    """
    # Split the 1/sqrt(head_dim) scaling between Q and K, and zero out
    # padded positions.
    Q = Q * mask[:, None, :, None] / math.sqrt(math.sqrt(self.head_dim))
    K = K * mask[:, None, :, None] / math.sqrt(math.sqrt(self.head_dim))

    if self.num_landmarks == self.seq_len:
        # Degenerate case: exact softmax attention with additive key mask.
        attn = torch.nn.functional.softmax(torch.matmul(Q, K.transpose(-1, -2)) - 1e9 * (1 - mask[:, None, None, :]), dim = -1)
        X = torch.matmul(attn, V)
    else:
        # Landmarks are means over contiguous segments of
        # seq_len // num_landmarks tokens.
        Q_landmarks = Q.reshape(-1, self.num_head, self.num_landmarks, self.seq_len // self.num_landmarks, self.head_dim).mean(dim = -2)
        K_landmarks = K.reshape(-1, self.num_head, self.num_landmarks, self.seq_len // self.num_landmarks, self.head_dim).mean(dim = -2)

        # Three-matrix Nystrom approximation: softmax(QK^T) ~ F A^+ B.
        kernel_1 = torch.nn.functional.softmax(torch.matmul(Q, K_landmarks.transpose(-1, -2)), dim = -1)
        kernel_2 = torch.nn.functional.softmax(torch.matmul(Q_landmarks, K_landmarks.transpose(-1, -2)), dim = -1)
        kernel_3 = torch.nn.functional.softmax(torch.matmul(Q_landmarks, K.transpose(-1, -2)) - 1e9 * (1 - mask[:, None, None, :]), dim = -1)
        X = torch.matmul(torch.matmul(kernel_1, self.iterative_inv(kernel_2)), torch.matmul(kernel_3, V))

    if self.use_conv:
        # Depthwise conv residual over the masked value sequence.
        X += self.conv(V * mask[:, None, :, None])

    return X
def iterative_inv(self, mat, n_iter = 6):
    """Approximate the Moore-Penrose pseudoinverse of `mat`.

    Uses the cubic Newton-Schulz-style iteration from the Nystromformer
    paper; `self.init_option` selects how mat^T is scaled initially.
    """
    I = torch.eye(mat.size(-1), device = mat.device)
    K = mat

    if self.init_option == "original":
        # Global scaling, as in the original released implementation.
        V = 1 / torch.max(torch.sum(K, dim = -2)) * K.transpose(-1, -2)
    else:
        # Per-(batch, head) scaling for a tighter initialization.
        V = 1 / torch.max(torch.sum(K, dim = -2), dim = -1).values[:, :, None, None] * K.transpose(-1, -2)

    for _ in range(n_iter):
        KV = torch.matmul(K, V)
        V = torch.matmul(0.25 * V, 13 * I - torch.matmul(KV, 15 * I - torch.matmul(KV, 7 * I - KV)))
    return V
def extra_repr(self):
return f'num_landmarks={self.num_landmarks}, seq_len={self.seq_len}' | 2,845 | 42.784615 | 145 | py |
SBM-Transformer | SBM-Transformer-main/code/model.py | import torch
import torch.nn as nn
import numpy as np
import math
from torch.utils.checkpoint import checkpoint
from attention import Attention
class Embeddings(nn.Module):
    """Token + learned positional embeddings, followed by dropout."""

    def __init__(self, config):
        super().__init__()
        # Embedding width must match the transformer width (no projection).
        assert config["embedding_dim"] == config["transformer_dim"]
        self.dim = config["embedding_dim"]

        self.word_embeddings = nn.Embedding(config["vocab_size"], config["embedding_dim"])
        torch.nn.init.normal_(self.word_embeddings.weight, std = 0.02)
        self.position_embeddings = nn.Embedding(config["max_seq_len"], config["embedding_dim"])
        torch.nn.init.normal_(self.position_embeddings.weight, std = 0.02)

        self.dropout = torch.nn.Dropout(p = config["dropout_prob"])

    def fixed_pos_emb(self, seq_len, device):
        # Sinusoidal (non-learned) positional embeddings; not used by
        # forward(), kept for reference.
        position = torch.arange(0, seq_len, device = device)[:, np.newaxis]
        div_term = torch.exp(torch.arange(0, self.dim, 2, device = device) * -(math.log(10000.0) / self.dim))
        return torch.stack([torch.sin(position * div_term), torch.cos(position * div_term)], -1).reshape(seq_len, -1)

    def forward(self, input_ids):
        batch_size, seq_len = input_ids.size()
        token_emb = self.word_embeddings(input_ids)
        positions = torch.arange(seq_len, dtype = torch.long, device = input_ids.device)[None, :].repeat(batch_size, 1)
        return self.dropout(token_emb + self.position_embeddings(positions))
class Transformer(nn.Module):
    """Pre-LayerNorm transformer block: MHA + MLP, each with a residual.

    For "sbm" attention types, forward additionally returns the attention
    sparsity reported by the attention layer.
    """

    def __init__(self, config):
        super().__init__()
        self.norm1 = nn.LayerNorm(config["transformer_dim"])
        self.mha = Attention(config)
        self.dropout1 = torch.nn.Dropout(p = config["dropout_prob"])
        self.norm2 = nn.LayerNorm(config["transformer_dim"])
        self.attn_type = config["attn_type"]
        self.mlpblock = nn.Sequential(
            nn.Linear(config["transformer_dim"], config["transformer_hidden_dim"]),
            nn.GELU(),
            torch.nn.Dropout(p = config["dropout_prob"]),
            nn.Linear(config["transformer_hidden_dim"], config["transformer_dim"]),
            torch.nn.Dropout(p = config["dropout_prob"])
        )

    def forward(self, X, mask):
        if self.attn_type.startswith("sbm"):
            # SBM attention returns (output, sparsity).
            out, sparsity = self.mha([self.norm1(X), mask])
            X = self.dropout1(out) + X
            X = self.mlpblock(self.norm2(X)) + X
            return X, sparsity
        else:
            X = self.dropout1(self.mha([self.norm1(X), mask])) + X
            X = self.mlpblock(self.norm2(X)) + X
            return X
class Model(nn.Module):
    """Transformer encoder stack over embedded token ids.

    With tied_weights, a single Transformer block is applied num_layers
    times; otherwise num_layers distinct blocks (transformer_0 ..
    transformer_{L-1}) are used. "sbm" attention types also return the tuple
    of per-layer sparsities.
    """

    def __init__(self, config):
        super().__init__()

        self.num_layers = config["num_layers"]
        self.tied_weights = config["tied_weights"]
        self.attn_type = config["attn_type"]

        self.embeddings = Embeddings(config)

        if self.tied_weights:
            self.transformer = Transformer(config)
        else:
            # Blocks are registered as attributes transformer_{idx}.
            for idx in range(self.num_layers):
                setattr(self, f"transformer_{idx}", Transformer(config))

        self.norm = nn.LayerNorm(config["transformer_dim"])

    def forward(self, input_ids, mask = None):
        X = self.embeddings(input_ids)

        if mask is None:
            mask = torch.ones_like(input_ids)

        if self.attn_type.startswith("sbm"):
            # Collect one sparsity entry per layer.
            all_sparsity = ()
            if self.tied_weights:
                for idx in range(self.num_layers):
                    X, sparsity = self.transformer(X, mask)
                    all_sparsity += (sparsity,)
            else:
                for idx in range(self.num_layers):
                    X, sparsity = getattr(self, f"transformer_{idx}")(X, mask)
                    all_sparsity += (sparsity,)
            # Final norm, then zero out padded positions.
            X = self.norm(X) * mask[:, :, None]
            return X, all_sparsity
        else:
            if self.tied_weights:
                for idx in range(self.num_layers):
                    X = self.transformer(X, mask)
            else:
                for idx in range(self.num_layers):
                    X = getattr(self, f"transformer_{idx}")(X, mask)
            X = self.norm(X) * mask[:, :, None]
            return X
| 4,326 | 33.070866 | 122 | py |
SBM-Transformer | SBM-Transformer-main/code/dataset.py | import torch
import torch.nn as nn
import math
from torch.utils.data.dataset import Dataset
import sys
import os
import random
import json
import pickle
import numpy as np
class LRADataset(Dataset):
    """Pickled Long Range Arena examples.

    With endless=True the dataset iterates forever, walking the examples
    sequentially and reshuffling after each pass; __len__ then reports a
    huge constant so samplers never exhaust it.
    """

    def __init__(self, file_path, endless):
        self.endless = endless
        with open(file_path, "rb") as f:
            self.examples = pickle.load(f)
        random.shuffle(self.examples)
        self.curr_idx = 0
        print(f"Loaded {file_path}... size={len(self.examples)}", flush = True)

    def __len__(self):
        return 1000000000 if self.endless else len(self.examples)

    def create_inst(self, inst):
        # Convert one raw example dict into tensors; the mask marks
        # non-padding positions (token id != 0).
        output = {"input_ids_0": torch.tensor(inst["input_ids_0"], dtype = torch.long)}
        output["mask_0"] = (output["input_ids_0"] != 0).float()
        if "input_ids_1" in inst:
            output["input_ids_1"] = torch.tensor(inst["input_ids_1"], dtype = torch.long)
            output["mask_1"] = (output["input_ids_1"] != 0).float()
        output["label"] = torch.tensor(inst["label"], dtype = torch.long)
        return output

    def __getitem__(self, i):
        if not self.endless:
            return self.create_inst(self.examples[i])
        # Endless mode ignores `i`: walk sequentially, reshuffle at the end.
        if self.curr_idx >= len(self.examples):
            random.shuffle(self.examples)
            self.curr_idx = 0
        inst = self.examples[self.curr_idx]
        self.curr_idx += 1
        return self.create_inst(inst)
| 1,513 | 29.28 | 89 | py |
SBM-Transformer | SBM-Transformer-main/code/attention_performer.py | import torch
import torch.nn as nn
import math
from performer_pytorch import FastAttention
class PerformerAttention(nn.Module):
def __init__(self, config):
    """Performer (FAVOR+) attention wrapper around performer_pytorch.

    kernel_type selects the feature map: "relu", or "exp" for the softmax
    approximation. rp_dim is the number of random features.
    """
    super().__init__()
    self.head_dim = config["head_dim"]
    self.rp_dim = config["rp_dim"]
    self.kernel_type = config["kernel_type"]
    if self.kernel_type == "relu":
        self.attn_fn = FastAttention(dim_heads = self.head_dim, nb_features = self.rp_dim, causal = False, kernel_fn = nn.ReLU())
    elif self.kernel_type == "exp":
        self.attn_fn = FastAttention(dim_heads = self.head_dim, nb_features = self.rp_dim, causal = False, kernel_fn = torch.exp)
def forward(self, Q, K, V, mask):
    # Split the 1/sqrt(head_dim) scaling between Q and K; zero out padded
    # key/value positions before the kernelized attention.
    return self.attn_fn(
        Q / math.sqrt(math.sqrt(self.head_dim)),
        K / math.sqrt(math.sqrt(self.head_dim)) * mask[:, None, :, None],
        V * mask[:, None, :, None])
def extra_repr(self):
return f'rp_dim={self.rp_dim}, kernel_type={self.kernel_type}' | 1,003 | 39.16 | 133 | py |
SBM-Transformer | SBM-Transformer-main/code/fastRG.py | import torch
import numpy as np
import time
import torch.nn.functional as F
from torch import LongTensor, Tensor
from typing import Generator, Iterable, List, Optional, Tuple
@torch.no_grad()
def batched_bincount(inp: Tensor, max_num: int):
    """Per-row bincount over the last axis of `inp`.

    Returns an int32 tensor of shape (*inp.shape[:-1], max_num) counting
    occurrences of 0..max_num-1 per row. The value `max_num` itself acts as
    a discard bucket (batched_multinomial uses it for padding draws) and is
    dropped from the result.
    """
    lead_shape = inp.shape[:-1]
    samples_per_row = inp.shape[-1]
    rows = np.prod(list(lead_shape))
    counts = torch.zeros(rows, max_num + 1, device=inp.device, dtype=torch.int)
    ones = torch.ones(rows, samples_per_row, device=inp.device, dtype=torch.int)
    counts.scatter_add_(1, inp.view(-1, samples_per_row), ones)
    # Drop the trailing dummy column before returning.
    return counts.view(*lead_shape, max_num + 1)[..., :-1].int()
###
# weights and m must have equal batch_shape
#
@torch.no_grad()
def batched_multinomial(weights: Tensor, m: Tensor, replacement: bool = False, flatten: bool = False) -> LongTensor:
    """Draw a per-row variable number of multinomial samples.

    weights: (*batch, n_categories) unnormalized probabilities.
    m: integer tensor with the same batch shape giving how many samples each
       row needs. Every row is sampled max(m) times; the surplus draws are
       dropped (flatten=True, returning a flat concatenation of the kept
       draws) or overwritten with the out-of-range id `n_categories`
       (flatten=False) so batched_bincount can discard them.
    """
    batch_shape, n_categories = weights.shape[:-1], weights.size(-1)
    num_batch = np.prod(list(batch_shape))
    num_samples = torch.max(m)
    m = m.view(-1).int()
    batch_num = len(m)

    # Per-row boolean mask: False for the first m[i] draws, True for the
    # padding draws beyond m[i].
    mask = torch.tensor([0, 1]*batch_num, device=weights.device, dtype=torch.bool)
    mask = mask.repeat_interleave(torch.stack([m, num_samples-m], dim=1).view(-1)).view(num_batch, num_samples)

    flat_samples = torch.multinomial(
        input=weights.view(-1, n_categories),
        num_samples=num_samples,
        replacement=replacement,
        generator=None,
        out=None)
    out = flat_samples
    if flatten:
        # Keep only the first m[i] draws of each row, concatenated.
        result = out[~mask]
        return result
    else:
        # Mark surplus draws with the dummy category id.
        out[mask] = n_categories
        return out
@torch.no_grad()
def fastRG(X, S, Y):
    """Sample a bipartite multigraph whose edge-count expectation is X S Y^T.

    X: (N, K), S: (K, K), Y: (M, K), all nonnegative. Returns (src, dst)
    index tensors of equal length (possibly empty); multi-edges can occur
    since the total edge count is Poisson-distributed.

    Changes vs. original: removed a leftover debug `print(Sn.shape)` and the
    dead pre-allocated zero `src`/`dst` tensors that were immediately
    overwritten by the multinomial draws.
    """
    N, K = X.shape
    M, _ = Y.shape
    device = X.device

    # Normalize columns so each becomes a distribution over nodes.
    X_sum = torch.sum(X, axis=-2, keepdim=True)  # [1, K]
    Y_sum = torch.sum(Y, axis=-2, keepdim=True)  # [1, K]
    Xn = (X / X_sum).transpose(-2,-1)  # [K, N]
    Yn = (Y / Y_sum).transpose(-2,-1)  # [K, M]

    # Fold the normalizers into S and draw the total number of edges.
    Sn = X_sum.transpose(-1,-2) * S * Y_sum  # [K, K]
    m = torch.poisson(torch.sum(Sn, (-1,-2))).int()
    if m == 0:
        return torch.tensor([]), torch.tensor([])

    # Distribute the m edges over cluster-cluster pairs...
    logits = torch.flatten(Sn)/Sn.sum()
    samples = torch.multinomial(input=logits, num_samples=m, replacement=True)
    tabUVs = torch.bincount(samples, minlength=K*K).view(K,K)
    blockDegreesU = torch.sum(tabUVs, axis=-1)  # [K,]
    blockDegreesV = torch.sum(tabUVs, axis=-2)  # [K,]

    # ...then over concrete endpoint nodes within each cluster.
    src = batched_multinomial(Xn, blockDegreesU, replacement=True, flatten=True)
    dst = batched_multinomial(Yn, blockDegreesV, replacement=True, flatten=True)

    del X_sum, Y_sum, Xn, Yn
    del Sn, m, logits, samples, tabUVs
    del blockDegreesU, blockDegreesV

    return src, dst
###
# Inputs
# - X: [B H N K] tensor
# - S: [B H K K] tensor
# - Y: [B H M K] tensor
# Outputs
# - Mask: [BxHxN BxHxM] block-diagonal sparse binary tensor
# - NOTE1: Bipartite graph Mask[b,h,:,:] has expectation X[b,h]*S[b,h]*Y[b,h].T
# - NOTE2: Similar formatting used in torch-geometric
#
@torch.no_grad()
def fastRG_batched(X, S, Y):
B, H, N, K = X.shape
_, _, M, _ = Y.shape
device = X.device
# normalize to column-stochastic
X_sum = torch.sum(X, axis=-2, keepdim=True) # [B, H, 1, K]
Y_sum = torch.sum(Y, axis=-2, keepdim=True) # [B, H, 1, K]
Xn = (X / X_sum).transpose(-2,-1)
Yn = (Y / Y_sum).transpose(-2,-1)
Xn_flat = Xn.reshape(B*H*K, N)
Yn_flat = Yn.reshape(B*H*K, M)
# gather normalization and sample number of edges
Sn = X_sum.transpose(-1,-2) * S * Y_sum # [B, H, K, K]
m = torch.poisson(torch.sum(Sn, (-1,-2))).int() # [B, H]
m_sum = m.sum()
m_flat = m.view(-1) # [B*H,]
# prepare indices
indices = torch.zeros(2, m_sum, dtype=torch.int, device=device) # [2, sum(n_edges)]
# sample number of edges for each cluster-cluster pair
logits = torch.flatten(Sn,start_dim=2)/torch.sum(Sn,(-1,-2)).unsqueeze(2)
sample = batched_multinomial(logits, m, replacement=True)
tabUVs = batched_bincount(sample, K*K).reshape(B, H, K, K)
tabUVs_flat = tabUVs.reshape(B*H*K*K)
mapping = torch.arange(0,B*H*K*K).reshape(B,H,K,K).transpose(-1,-2).reshape(B*H*K*K)
permuted = tabUVs_flat[mapping]
nnz = torch.nonzero(permuted).squeeze()
nums = permuted[nnz]
sorted_heads = tabUVs_flat.cumsum(dim=0)[(mapping[nnz]-1)] ### BOTTLENECK
sorted_heads[0] = 0
setup = sorted_heads.repeat_interleave(nums, output_size=m_sum)
begin_idxes = nums.cumsum(dim=0).roll(1)
begin_idxes[0] = 0
result = torch.arange(nums.sum(), device=device) - begin_idxes.repeat_interleave(nums, output_size=m_sum)
ofs = result + setup
blockDegreesU = torch.sum(tabUVs, axis=-1) # [B, H, K]
blockDegreesV = torch.sum(tabUVs, axis=-2) # [B, H, K]
blockDegreesU_flat = blockDegreesU.view(B*H*K)
blockDegreesV_flat = blockDegreesV.view(B*H*K)
indices[0,:] = batched_multinomial(Xn_flat, blockDegreesU_flat, replacement=True, flatten=True)
indices[1,:] = batched_multinomial(Yn_flat, blockDegreesV_flat, replacement=True, flatten=True)
indices[1,:] = indices[1,ofs]
b_flat = torch.arange(B, device=device, dtype=torch.int)[:, None].expand(B, H).reshape(B*H)
h_flat = torch.arange(H, device=device, dtype=torch.int)[None, :].expand(B, H).reshape(B*H)
bh_offset_flat_edgewise = torch.cat([torch.ones(e, device=device, dtype=torch.int) * (H * b + h) for b, h, e in zip(b_flat, h_flat, m_flat)])
indices[0].add_(N*bh_offset_flat_edgewise)
indices[1].add_(M*bh_offset_flat_edgewise)
## construct attention-mask and return
return indices[0], indices[1] | 5,974 | 34.147059 | 145 | py |
SBM-Transformer | SBM-Transformer-main/code/attention_linear.py | import torch
import torch.nn as nn
import math
class LinearAttention(nn.Module):
    """Linear (kernelized) attention with the elu(x)+1 feature map.

    Computes phi(Q) (phi(K)^T V), so cost is linear in sequence length.
    Note: no normalization term is applied to the output.
    """

    def __init__(self, config):
        super().__init__()

    def forward(self, Q, K, V, mask):
        # NOTE(review): Q.size(2)/K.size(2) is the *sequence length* of the
        # (batch, head, seq, dim) inputs, so the scaling here is by
        # seq_len**0.25 rather than head_dim**0.25 as in the other attention
        # modules in this project — confirm whether this is intentional.
        Q = (nn.functional.elu(Q) + 1) / math.sqrt(math.sqrt(Q.size(2)))
        K = (nn.functional.elu(K) + 1) * mask[:, None, :, None] / math.sqrt(math.sqrt(K.size(2)))
        V = V * mask[:, None, :, None]
        # Associativity trick: (K^T V) first keeps the cost linear in seq len.
        X = torch.matmul(Q, torch.matmul(torch.transpose(K, -2, -1), V))
        return X
| 483 | 25.888889 | 97 | py |
SBM-Transformer | SBM-Transformer-main/code/STE.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class SampleGraphSparseGraph(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
    # Sample a binary mask A ~ Bernoulli(clamp(input + 0.01, 0, 1)); the
    # +0.01 floor gives every edge a small chance of being sampled. A is
    # saved so backward can gate the straight-through gradient.
    A = torch.bernoulli(torch.clamp(input+0.01, min=0, max=1)).requires_grad_(True)
    ctx.save_for_backward(A)
    return A
def backward(ctx, grad_output):
A, = ctx.saved_tensors
return F.hardtanh(A*grad_output) | 424 | 29.357143 | 87 | py |
SBM-Transformer | SBM-Transformer-main/code/attention_reformer.py | import torch
import torch.nn as nn
from reformer_pytorch import LSHSelfAttention
class LSHAttention(LSHSelfAttention):
    """Reformer LSH self-attention, adapted to this project's (X, mask) API."""

    def __init__(self, config, query, key, value):
        # `query`/`key`/`value` projection modules are accepted for API
        # compatibility with the Attention dispatcher but are unused:
        # LSHSelfAttention builds its own projections internally.
        self.num_hash = config["num_hash"]
        self.attention_head_size = config["head_dim"]
        self.num_attention_heads = config["num_head"]
        self.seq_len = config["max_seq_len"]
        self.hidden_size = config["transformer_dim"]
        super().__init__(self.hidden_size,
            heads = self.num_attention_heads,
            n_hashes=self.num_hash,
            return_attn = False)

    def forward(self, X, mask):
        # LSHSelfAttention expects a boolean input mask.
        out = super().forward(X, input_mask = mask.bool())
        return out
| 730 | 33.809524 | 58 | py |
SBM-Transformer | SBM-Transformer-main/code/attention.py | import torch
import torch.nn as nn
import math
import json
from torch.utils.checkpoint import checkpoint
class SoftmaxAttention(nn.Module):
    """Vanilla scaled dot-product attention with additive key masking."""

    def __init__(self, config):
        super().__init__()
        self.drop_attn = torch.nn.Dropout(p = config["attention_dropout"])
        self.head_dim = config["head_dim"]

    def forward(self, Q, K, V, mask):
        # Scaled scores; padded keys are pushed down by -1e6 pre-softmax.
        scores = torch.matmul(Q, torch.transpose(K, -2, -1)) / math.sqrt(self.head_dim)
        scores = scores - 1e6 * (1 - mask[:, None, None, :])
        weights = self.drop_attn(nn.functional.softmax(scores, dim = -1))
        return torch.matmul(weights, V)
class NoneAttention(nn.Module):
    """Identity baseline: ignores Q, K and mask, passes V straight through."""

    def __init__(self, config):
        super().__init__()

    def forward(self, Q, K, V, mask):
        return V
class Attention(nn.Module):
    """Multi-head attention dispatcher.

    Projects X into per-head Q/K/V, runs the backend selected by
    config["attn_type"], and maps the concatenated heads back to the model
    dim. The attention math runs in fp32 (autocast disabled locally). "sbm"
    backends additionally return a sparsity statistic.
    """

    def __init__(self, config):
        super().__init__()

        self.grad_checkpointing = config["attention_grad_checkpointing"]

        self.dim = config["transformer_dim"]
        self.head_dim = config["head_dim"]
        self.num_head = config["num_head"]
        self.attn_type = config["attn_type"]

        self.W_q = nn.Linear(self.dim, self.num_head * self.head_dim)
        self.W_k = nn.Linear(self.dim, self.num_head * self.head_dim)
        self.W_v = nn.Linear(self.dim, self.num_head * self.head_dim)

        # Backend imports are deferred so optional third-party packages are
        # only required for the attn_type actually selected.
        if self.attn_type == "softmax":
            self.attn = SoftmaxAttention(config)
        elif self.attn_type == "none":
            self.attn = NoneAttention(config)
        elif self.attn_type.startswith("sbm"):
            from attention_sbm import SBMAttention
            self.attn = SBMAttention(config)
        elif self.attn_type.startswith("linformer"):
            from attention_linformer import LinformerAttention
            self.attn = LinformerAttention(config)
        elif self.attn_type.startswith("reformer"):
            from attention_reformer import LSHAttention
            # Reformer builds its own projections; ours are passed but unused.
            self.attn = LSHAttention(config, self.W_q, self.W_k, self.W_v)
        elif self.attn_type.startswith("nystrom"):
            from attention_nystrom import NystromAttention
            self.attn = NystromAttention(config)
        elif self.attn_type.startswith("performer"):
            from attention_performer import PerformerAttention
            self.attn = PerformerAttention(config)
        elif self.attn_type.startswith("linear"):
            from attention_linear import LinearAttention
            self.attn = LinearAttention(config)

        self.ff = nn.Linear(self.num_head * self.head_dim, self.dim)

    def forward(self, inputs):
        X, mask = inputs
        if self.attn_type.startswith("longformer") or self.attn_type.startswith("reformer"):
            # These backends consume (X, mask) directly, without a Q/K/V split
            # or head recombination.
            with torch.cuda.amp.autocast(enabled = False):
                attn_out = self.attn(X.float(), mask.float())
        elif self.attn_type.startswith("sbm"):
            Q = self.split_heads(self.W_q(X))
            K = self.split_heads(self.W_k(X))
            V = self.split_heads(self.W_v(X))
            with torch.cuda.amp.autocast(enabled = False):
                if self.grad_checkpointing:
                    attn_out, sparsity = checkpoint(self.attn, Q.float(), K.float(), V.float(), mask.float())
                else:
                    attn_out, sparsity = self.attn(Q.float(), K.float(), V.float(), mask.float())
            attn_out = self.combine_heads(attn_out)
            out = self.ff(attn_out)
            # SBM path returns early with the sparsity statistic.
            return out, sparsity
        else:
            Q = self.split_heads(self.W_q(X))
            K = self.split_heads(self.W_k(X))
            V = self.split_heads(self.W_v(X))
            with torch.cuda.amp.autocast(enabled = False):
                if self.grad_checkpointing:
                    attn_out = checkpoint(self.attn, Q.float(), K.float(), V.float(), mask.float())
                else:
                    attn_out = self.attn(Q.float(), K.float(), V.float(), mask.float())
            attn_out = self.combine_heads(attn_out)
        out = self.ff(attn_out)
        return out

    def combine_heads(self, X):
        # (b, h, n, d) -> (b, n, h*d)
        X = X.transpose(1, 2)
        X = X.reshape(X.size(0), X.size(1), self.num_head * self.head_dim)
        return X

    def split_heads(self, X):
        # (b, n, h*d) -> (b, h, n, d)
        X = X.reshape(X.size(0), X.size(1), self.num_head, self.head_dim)
        X = X.transpose(1, 2)
        return X
| 4,387 | 35.87395 | 109 | py |
SBM-Transformer | SBM-Transformer-main/code/run_tasks.py | from model_wrapper import ModelForSC, ModelForSCDual
from dataset import LRADataset
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import datetime
import time
import os
import json
import requests
import pickle
import numpy as np
import argparse
import math
import gc
import itertools
import lra_config
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--task", type = str, help = "task", dest = "task", required = True)
parser.add_argument("--model", type = str, help = "model", dest = "model", required = True)
parser.add_argument("--skip_train", type = int, help = "skip_train", dest = "skip_train", default = 0)
parser.add_argument("--num_clusters", type = int, help = "num_clusters", dest = "num_clusters", required = False, default=128)
parser.add_argument("--sbm_type", type = str, help = "sbm_type", dest = "sbm_type", required = False, default='BMS+SB')
parser.add_argument("--sparsity_weight", type = float, help = "sparsity_weight", dest = "sparsity_weight", required = False, default = 0)
args = parser.parse_args()

print(args)

attn_type = args.model
task = args.task
sw = args.sparsity_weight  # weight of the SBM sparsity regularizer in the loss
num_clusters = args.num_clusters

checkpoint_dir = "../logs/"

print(lra_config.config[task]["extra_attn_config"].keys(), flush = True)

### Get default configs
model_config = lra_config.config[task]["model"]
model_config.update(lra_config.config[task]["extra_attn_config"][attn_type])

model_config["mixed_precision"] = True
model_config["attn_type"] = attn_type
# Round the sequence length up to the next power of two ...
model_config["max_seq_len"] = int(2 ** math.ceil(math.log2(model_config["max_seq_len"])))
# ... except for the text task, which deliberately overrides it (3072 is
# not a power of two).
if task == "text":
    model_config["max_seq_len"] = 3072
model_config["num_clusters"] = args.num_clusters
model_config["sbm_type"] = args.sbm_type
model_config["sparsity_weight"] = args.sparsity_weight

training_config = lra_config.config[task]["training"]
gpu_memory_config = lra_config.config[task]["gpu_memory"]

# All visible GPUs are used through nn.DataParallel below.
device_ids = list(range(torch.cuda.device_count()))
print(f"GPU list: {device_ids}")

slack_msg = json.dumps([model_config, training_config], indent=4)
print(json.dumps([model_config, training_config], indent = 4))

# The retrieval task compares two sequences, hence the dual-input wrapper.
if task == "retrieval":
    model = ModelForSCDual(model_config)
else:
    model = ModelForSC(model_config)

print(model)
print(f"parameter_size: {[weight.size() for weight in model.parameters()]}", flush = True)
print(f"num_parameter: {np.sum([np.prod(weight.size()) for weight in model.parameters()])}", flush = True)

model = model.cuda()
model = nn.DataParallel(model, device_ids = device_ids)

# Pre-enumerated iterators over each split; step() advances them with next(),
# so exhaustion surfaces as StopIteration (used to end the test loop).
ds_iter = {
    "train":enumerate(DataLoader(LRADataset(f"../datasets/{task}.train.pickle", True), batch_size = training_config["batch_size"], drop_last = True)),
    "dev":enumerate(DataLoader(LRADataset(f"../datasets/{task}.dev.pickle", True), batch_size = training_config["batch_size"], drop_last = True)),
    "test":enumerate(DataLoader(LRADataset(f"../datasets/{task}.test.pickle", False), batch_size = training_config["batch_size"], drop_last = True)),
}

optimizer = torch.optim.AdamW(
    model.parameters(),
    lr = training_config["learning_rate"],
    betas = (0.9, 0.999), eps = 1e-6, weight_decay = training_config["weight_decay"]
)

lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
    optimizer = optimizer,
    max_lr = training_config["learning_rate"],
    pct_start = training_config["warmup"] / training_config["num_train_steps"],
    anneal_strategy = training_config["lr_decay"],
    total_steps = training_config["num_train_steps"]
)

# Gradient scaler for mixed-precision training (mixed_precision is forced
# True above, so this is always constructed in practice).
amp_scaler = torch.cuda.amp.GradScaler() if model_config["mixed_precision"] else None
def step(component, step_idx):
    """Run one training (component == "train") or evaluation step.

    Pulls the next batch from ds_iter[component], splits it into
    `accumu_steps` chunks for gradient accumulation, and appends the batch
    metrics to the module-level `summary` dict. Uses the module-level
    model / optimizer / amp_scaler / lr_scheduler / summary objects.

    Raises:
        StopIteration: when ds_iter[component] is exhausted (this is how
            the test loop below terminates).
    """
    t0 = time.time()

    optimizer.zero_grad()

    _, batch = next(ds_iter[component])
    for key in batch:
        batch[key] = batch[key].cuda()

    if component == "train":
        outputs = {}

        # Split every tensor in the batch into accumu_steps chunks so the
        # configured batch size fits into GPU memory.
        partial_inputs_list = [{} for _ in range(accumu_steps)]
        for key in batch:
            for idx, inp in enumerate(torch.chunk(batch[key], accumu_steps, dim = 0)):
                partial_inputs_list[idx][key] = inp

        for partial_inputs in partial_inputs_list:
            partial_outputs = model(**partial_inputs)
            for key in partial_outputs:
                # Mean over the (possibly multi-GPU) outputs, pre-divided by
                # accumu_steps so the accumulated sums below are averages.
                partial_outputs[key] = partial_outputs[key].mean() / accumu_steps
                if key not in outputs:
                    outputs[key] = partial_outputs[key]
                else:
                    outputs[key] += partial_outputs[key]
            loss = partial_outputs["loss"]
            if attn_type == "sbm":
                # SBM attention adds a sparsity regularizer weighted by
                # the --sparsity_weight CLI argument (sw).
                sparsity = partial_outputs["sparsity"]
                amp_scaler.scale(loss + sw*sparsity).backward()
            else:
                amp_scaler.scale(loss).backward()

        # One optimizer/scheduler update per (accumulated) batch.
        amp_scaler.step(optimizer)
        amp_scaler.update()
        lr_scheduler.step()
    else:
        # Evaluation: same chunking, but no gradients and no updates.
        with torch.no_grad():
            outputs = {}

            partial_inputs_list = [{} for _ in range(accumu_steps)]
            for key in batch:
                for idx, inp in enumerate(torch.chunk(batch[key], accumu_steps, dim = 0)):
                    partial_inputs_list[idx][key] = inp

            for partial_inputs in partial_inputs_list:
                partial_outputs = model(**partial_inputs)
                for key in partial_outputs:
                    partial_outputs[key] = partial_outputs[key].mean() / accumu_steps
                    if key not in outputs:
                        outputs[key] = partial_outputs[key]
                    else:
                        outputs[key] += partial_outputs[key]

    t1 = time.time()

    batch_size = batch[list(batch.keys())[0]].size(0)
    t_escape = t1 - t0
    learning_rate = optimizer.param_groups[0]["lr"]
    loss = outputs["loss"].data.item()
    accu = outputs["accu"].data.item()
    time_since_start = time.time() - init_t

    if attn_type == "sbm":
        sparsity = outputs["sparsity"].data.item()
        print(f"step={step_idx}, tt={time_since_start:.1f}, t={t_escape:.3f}, bs={batch_size}, lr={learning_rate:.6f}, loss={loss:.4f}, sparsity={sparsity:.4f}, accu={accu:.4f}\t\t\t\t", end = "\r", flush = True)
    else:
        print(f"step={step_idx}, tt={time_since_start:.1f}, t={t_escape:.3f}, bs={batch_size}, lr={learning_rate:.6f}, loss={loss:.4f}, accu={accu:.4f}\t\t\t\t", end = "\r", flush = True)

    # Accumulate per-step metrics; print_summary() averages and resets them.
    summary[component]["t"] += t_escape
    summary[component]["loss"].append(loss)
    summary[component]["accu"].append(accu)
    if attn_type == "sbm":
        summary[component]["sparsity"].append(sparsity)
def print_summary(summary, save_if_improved, train_step_idx, attn_type, is_test=False):
    """Average the accumulated metrics, log them, and reset the accumulators.

    Mutates `summary` in place: list-valued metrics collapse to their mean,
    `best_accu` is updated, and (unless `is_test`) the accumulators are
    cleared for the next evaluation window. When `save_if_improved` is set
    and the accuracy improved, the model weights are saved next to the log
    file. Uses the module-level model / log_f / log_f_path objects.
    """
    for metric in ("loss", "accu"):
        summary[metric] = np.mean(summary[metric])
    if attn_type == "sbm":
        summary["sparsity"] = np.mean(summary["sparsity"])

    print()
    if summary["accu"] > summary["best_accu"]:
        summary["best_accu"] = summary["accu"]
        if save_if_improved:
            best_accu = summary["best_accu"]
            torch.save({"model_state_dict": model.module.state_dict()},
                       log_f_path.replace(".log", ".model"))
            print(f"best_accu={best_accu}. Saved best model")

    # Round scalar entries for logging; skip any leftover list accumulators.
    summary_round = {"train_step_idx": train_step_idx}
    for key, value in summary.items():
        if type(value) is str:
            summary_round[key] = value
        elif type(value) is list:
            continue
        else:
            summary_round[key] = round(value, 4)

    print(summary_round, flush=True)
    log_f.write(json.dumps(summary_round, sort_keys=True) + "\n")
    log_f.flush()

    if not is_test:
        summary["t"] = 0
        summary["loss"] = []
        summary["accu"] = []
        if attn_type == "sbm":
            summary["sparsity"] = []
init_t = time.time()

# One timestamped log file per run; the best-model checkpoint shares the
# same path with a ".model" suffix (see print_summary).
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
log_f_path = os.path.join(checkpoint_dir, f"{task}_{attn_type}_{timestamp}_output.log")

# Per-split metric accumulators; SBM models additionally track sparsity.
if attn_type == "sbm":
    summary = {component:{"t":0, "loss":[], "accu":[], "sparsity":[], "best_accu":0, "component":component} for component in ["train", "dev", "test"]}
else:
    summary = {component:{"t":0, "loss":[], "accu":[], "best_accu":0, "component":component} for component in ["train", "dev", "test"]}
log_f = open(log_f_path, "a+")

# Number of gradient-accumulation chunks per batch, derived from the
# per-GPU memory budget configured for this attention type.
accumu_steps = max(training_config["batch_size"] // len(device_ids) // gpu_memory_config[attn_type], 1)
print(f"accumu_steps={accumu_steps}")
##### TRAINING SECTION ######
# BUG FIX: train_step_idx must exist even when training is skipped
# (--skip_train 1); it is passed to print_summary() for the final test
# log below and previously raised NameError in that case.
train_step_idx = -1
if args.skip_train == 0:
    try:
        model.train()
        for train_step_idx in range(training_config["num_train_steps"]):
            outputs = step("train", train_step_idx)

            # Periodic dev evaluation; print_summary(save_if_improved=True)
            # checkpoints the model whenever dev accuracy improves.
            if (train_step_idx + 1) % training_config["eval_frequency"] == 0:
                print_summary(summary["train"], False, train_step_idx, attn_type)
                model.eval()
                for dev_step_idx in range(training_config["num_eval_steps"]):
                    outputs = step("dev", dev_step_idx)
                print_summary(summary["dev"], True, train_step_idx, attn_type)
                model.train()
    except KeyboardInterrupt as e:
        print(e)

# Evaluate the best dev checkpoint on the full test split.
# NOTE(review): with --skip_train 1 the ".model" file of this fresh
# timestamped run cannot exist yet, so this load will fail -- presumably
# the intent is to point log_f_path at an existing run; verify.
checkpoint = torch.load(log_f_path.replace(".log", ".model"), map_location = "cpu")
model.module.load_state_dict(checkpoint["model_state_dict"])
model.eval()
try:
    # Iterate until the test iterator is exhausted (step() raises
    # StopIteration via next()).
    for test_step_idx in itertools.count():
        outputs = step("test", test_step_idx)
except StopIteration:
    print_summary(summary["test"], False, train_step_idx, attn_type, is_test=True)
| 9,630 | 35.900383 | 212 | py |
cymetric | cymetric-main/tests/test_tfmodels.py | """
Pytest for some tensorflow models.
Requires that `test_pointgen.py` has been run before.
"""
import pytest
import numpy as np
import os as os
#import pickle as pickle
import itertools as it
import tensorflow as tf
tfk = tf.keras
#TODO: Import all metrics and Measures and callbacks and ... then run them.
from cymetric.models.tfhelper import prepare_tf_basis
from cymetric.models.tfmodels import PhiFSModel, FreeModel
from cymetric.models.callbacks import RicciCallback, SigmaCallback, VolkCallback
from cymetric.models.metrics import SigmaLoss, KaehlerLoss, TransitionLoss, \
VolkLoss, RicciLoss, TotalLoss
class TFModel:
    """Harness that builds, trains, and sanity-checks one cymetric tf model.

    Holds fixed hyperparameters for a one-epoch smoke run on the dataset
    found in `work_dir` (produced by the point-generator tests).
    """

    def __init__(self, work_dir, tfmodel):
        # one tracked metric per loss contribution
        self.cmetrics = [m() for m in (TotalLoss, SigmaLoss, KaehlerLoss,
                                       TransitionLoss, VolkLoss, RicciLoss)]
        self.epochs = 1
        self.bSize = 64
        self.alpha = np.ones(5)
        self.norms = np.ones(5)
        self.act = 'gelu'
        self.units = 64
        self.work_dir = work_dir
        self.tfmodel = tfmodel

    def run_tf_model(self):
        """Build the model for self.work_dir, fit one epoch, check history."""
        tf_basis = self.get_basis(self.work_dir)
        dataset = np.load(os.path.join(self.work_dir, 'dataset.npz'))
        weights = dataset['y_train'][:, -2]
        kappa = 1/np.mean(weights)
        nfold = int(tf_basis['NFOLD'].numpy().real)
        nvars = len(dataset['X_train'][0])
        # the Phi model predicts a single scalar; all others an nfold x nfold matrix
        n_out = 1 if self.tfmodel is PhiFSModel else nfold**2
        nn = self.get_nn(nvars, n_out)
        callbacks = self.get_cbs(dataset)
        # TODO: add all possible arguments.
        model = self.tfmodel(nn, tf_basis, alpha=self.alpha, kappa=kappa,
                             norm=self.norms)
        model.compile(custom_metrics=self.cmetrics,
                      optimizer=tfk.optimizers.Adam())
        # A single fit call doubles as a tracing + training smoke test.
        history = model.fit(
            dataset['X_train'], dataset['y_train'], epochs=self.epochs,
            validation_data=(dataset['X_val'], dataset['y_val'], dataset['y_val'][:, -2]),
            batch_size=self.bSize, verbose=1, callbacks=callbacks,
            sample_weight=weights)
        # every callback adds one history key; each metric appears twice
        # (train and validation)
        assert len(list(history.history.keys())) == \
            len(callbacks) + 2*len(self.cmetrics)
        return history.history

    def get_nn(self, n_in, n_out):
        """Return a small dense feed-forward network with n_in -> n_out."""
        net = tfk.Sequential()
        net.add(tfk.Input(shape=(int(n_in))))
        net.add(tfk.layers.Dense(self.units, activation=self.act))
        net.add(tfk.layers.Dense(n_out))
        return net

    def get_basis(self, work_dir):
        """Load the pickled basis from work_dir and prepare it for tf."""
        raw_basis = np.load(os.path.join(work_dir, 'basis.pickle'),
                            allow_pickle=True)
        return prepare_tf_basis(raw_basis)

    def get_cbs(self, data):
        """Return the Ricci, Sigma and Volk callbacks on the validation split."""
        val_pair = (data['X_val'], data['y_val'])
        return [RicciCallback(val_pair, data['val_pullbacks']),
                SigmaCallback(val_pair),
                VolkCallback(val_pair)]
@pytest.mark.parametrize(
    "test_model, test_dir",
    list(it.product(
        [FreeModel, PhiFSModel],
        ['fermat', 'fermat_cicy', '533_cicy', '2x2_cicy'],
    ))
)
def test_tf_models(test_model, test_dir):
    """Smoke-test each model class against every prepared point-gen dir."""
    # TODO use fixtures instead.
    harness = TFModel(test_dir, test_model)
    history = harness.run_tf_model()
    # run_tf_model already asserts the history layout; just check presence
    assert history is not None
if __name__ == '__main__':
    # Not meant to be executed directly: the tests consume artifacts
    # (dataset.npz, basis.pickle) produced by the point-generator tests.
    print('Run pytest from cmd.'
          'Requires that pointgen test have been run before')
cymetric | cymetric-main/cymetric/models/tfmodels.py | """
A selection of custom tensorflow models for learning
Calabi-Yau metrics using neural networks.
"""
import tensorflow as tf
from cymetric.models.losses import sigma_loss
from cymetric.models.fubinistudy import FSModel
from cymetric.pointgen.nphelper import get_all_patch_degrees, compute_all_w_of_x, get_levicivita_tensor
import numpy as np
tfk = tf.keras
class FreeModel(FSModel):
    r"""FreeModel from which all other models inherit.

    The training and validation steps are implemented in this class. All
    other computational routines are inherited from:
    cymetric.models.fubinistudy.FSModel

    Example:
        Assume that `BASIS` and `data` have been generated with a point
        generator.

        >>> import tensorflow as tf
        >>> import numpy as np
        >>> from cymetric.models.tfmodels import FreeModel
        >>> from cymetric.models.tfhelper import prepare_tf_basis
        >>> tfk = tf.keras
        >>> data = np.load('dataset.npz')
        >>> BASIS = prepare_tf_basis(np.load('basis.pickle', allow_pickle=True))

        set up the nn and FreeModel

        >>> nfold = 3
        >>> ncoords = data['X_train'].shape[1]
        >>> nn = tfk.Sequential(
        ...     [
        ...         tfk.layers.Input(shape=(ncoords)),
        ...         tfk.layers.Dense(64, activation="gelu"),
        ...         tfk.layers.Dense(nfold**2),
        ...     ]
        ... )
        >>> model = FreeModel(nn, BASIS)

        next we can compile and train

        >>> from cymetric.models.metrics import TotalLoss
        >>> metrics = [TotalLoss()]
        >>> opt = tfk.optimizers.Adam()
        >>> model.compile(custom_metrics = metrics, optimizer = opt)
        >>> model.fit(data['X_train'], data['y_train'], epochs=1)

        For other custom metrics and callbacks to be tracked, check
        :py:mod:`cymetric.models.metrics` and
        :py:mod:`cymetric.models.callbacks`.
    """

    def __init__(self, tfmodel, BASIS, alpha=None, **kwargs):
        r"""FreeModel is a tensorflow model predicting CY metrics.

        The output is

        .. math:: g_{\text{out}} = g_{\text{NN}}

        a hermitian (nfold, nfold) tensor with each float directly predicted
        from the neural network.

        NOTE:
            * The model by default does not train against the ricci loss.

                To enable ricci training, set `self.learn_ricci = True`,
                **before** the tracing process. For validation data
                `self.learn_ricci_val = True`,
                can be modified separately.

            * The models loss contributions are

                1. sigma_loss
                2. kaehler loss
                3. transition loss
                4. ricci loss (disabled)
                5. volk loss

            * The different losses are weighted with alpha.

            * The (FB-) norms for each loss are specified with the keyword-arg

                >>> model = FreeModel(nn, BASIS, norm = [1. for _ in range(5)])

            * Set kappa to the kappa value of your training data.

                >>> kappa = np.mean(data['y_train'][:,-2])

        Args:
            tfmodel (tfk.model): the underlying neural network.
            BASIS (dict): a dictionary containing all monomials and other
                relevant information from cymetric.pointgen.pointgen.
            alpha ([5//NLOSS], float): Weighting of each loss contribution.
                Defaults to None, which corresponds to equal weights.
        """
        super(FreeModel, self).__init__(BASIS=BASIS, **kwargs)
        self.model = tfmodel
        self.NLOSS = 5
        # variable or constant or just tensor?
        if alpha is not None:
            self.alpha = [tf.Variable(a, dtype=tf.float32) for a in alpha]
        else:
            self.alpha = [tf.Variable(1., dtype=tf.float32) for _ in range(self.NLOSS)]
        # Loss toggles; these must be set before tracing (see class NOTE).
        self.learn_kaehler = tf.cast(True, dtype=tf.bool)
        self.learn_transition = tf.cast(True, dtype=tf.bool)
        self.learn_ricci = tf.cast(False, dtype=tf.bool)
        self.learn_ricci_val = tf.cast(False, dtype=tf.bool)
        self.learn_volk = tf.cast(True, dtype=tf.bool)

        self.custom_metrics = None
        # kappa is read from the point-generator basis, not from a kwarg.
        self.kappa = tf.cast(BASIS['KAPPA'], dtype=tf.float32)
        self.gclipping = float(5.0)  # global-norm gradient clipping threshold
        # add to compile?
        self.sigma_loss = sigma_loss(self.kappa, tf.cast(self.nfold, dtype=tf.float32))

    def call(self, input_tensor, training=True, j_elim=None):
        r"""Prediction of the NN.

        .. math:: g_{\text{out}} = g_{\text{NN}}

        The additional arguments are included for inheritance reasons.

        Args:
            input_tensor (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            training (bool, optional): Defaults to True.
            j_elim (tf.tensor([bSize, nHyper], tf.int64), optional):
                Coordinates(s) to be eliminated in the pullbacks.
                Not used in this model. Defaults to None.

        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex):
                Prediction at each point.
        """
        # nn prediction
        return self.to_hermitian(self.model(input_tensor, training=training))

    def compile(self, custom_metrics=None, **kwargs):
        r"""Compiles the model.

        kwargs takes any argument of regular `tf.model.compile()`

        Example:
            >>> model = FreeModel(nn, BASIS)
            >>> from cymetric.models.metrics import TotalLoss
            >>> metrics = [TotalLoss()]
            >>> opt = tfk.optimizers.Adam()
            >>> model.compile(custom_metrics = metrics, optimizer = opt)

        Args:
            custom_metrics (list, optional): List of custom metrics.
                See also :py:mod:`cymetric.models.metrics`. If None, no metrics
                are tracked during training. Defaults to None.
        """
        super(FreeModel, self).compile(**kwargs)
        self.custom_metrics = custom_metrics

    @property
    def metrics(self):
        r"""Returns the models metrics including custom metrics.

        Returns:
            list: metrics
        """
        metrics = []
        if self._is_compiled:
            if self.compiled_loss is not None:
                metrics += self.compiled_loss.metrics
            if self.compiled_metrics is not None:
                metrics += self.compiled_metrics.metrics
            if self.custom_metrics is not None:
                metrics += self.custom_metrics
        for layer in self._flatten_layers():
            metrics.extend(layer._metrics)
        return metrics

    def train_step(self, data):
        r"""Train step of a single batch in model.fit().

        NOTE:
            1. The first epoch will take additional time, due to tracing.

            2. Warnings are plentiful. Disable on your own risk with

                >>> tf.get_logger().setLevel('ERROR')

            3. The conditionals need to be set before tracing.

            4. We employ under the hood gradient clipping.

        Args:
            data (tuple): training data (x, y, sample_weight)

        Returns:
            dict: metrics
        """
        # unpack data; sample_weight is optional
        if len(data) == 3:
            x, y, sample_weight = data
        else:
            sample_weight = None
            x, y = data
        with tf.GradientTape(persistent=False) as tape:
            trainable_vars = self.model.trainable_variables
            tape.watch(trainable_vars)
            # add other loss contributions.
            y_pred = self(x)
            if self.learn_kaehler:
                cijk_loss = self.compute_kaehler_loss(x)
            else:
                # disabled losses become per-sample zeros of matching shape
                cijk_loss = tf.zeros_like(x[:, 0])
            # cijk_loss = tf.zeros(y.shape[-1], dtype=tf.float32)
            if self.learn_transition:
                t_loss = self.compute_transition_loss(x)
            else:
                t_loss = tf.zeros_like(cijk_loss)
            if self.learn_ricci:
                r_loss = self.compute_ricci_loss(x)
            else:
                r_loss = tf.zeros_like(cijk_loss)
            if self.learn_volk:
                # is scalar and not batch vector
                volk_loss = self.compute_volk_loss(x, y, y_pred)
            else:
                volk_loss = tf.zeros_like(cijk_loss)
            # last column of y holds omega (see sigma loss)
            omega = tf.expand_dims(y[:, -1], -1)
            sigma_loss_cont = self.sigma_loss(omega, y_pred)**self.n[0]
            total_loss = self.alpha[0]*sigma_loss_cont +\
                self.alpha[1]*cijk_loss +\
                self.alpha[2]*t_loss +\
                self.alpha[3]*r_loss +\
                self.alpha[4]*volk_loss
            # weight the loss.
            if sample_weight is not None:
                total_loss *= sample_weight
        # Compute gradients
        gradients = tape.gradient(total_loss, trainable_vars)
        # remove nans and gradient clipping from transition loss.
        gradients = [tf.where(tf.math.is_nan(g), 1e-8, g) for g in gradients]
        gradients, _ = tf.clip_by_global_norm(gradients, self.gclipping)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Return metrics. NOTE: This interacts badly with any regular MSE
        # compiled loss. Make it so that only custom metrics are updated?
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        if self.custom_metrics is not None:
            loss_dict = {}
            loss_dict['loss'] = total_loss
            loss_dict['sigma_loss'] = sigma_loss_cont
            loss_dict['kaehler_loss'] = cijk_loss
            loss_dict['transition_loss'] = t_loss
            loss_dict['ricci_loss'] = r_loss
            loss_dict['volk_loss'] = volk_loss
            # add other loss?
            for m in self.custom_metrics:
                m.update_state(loss_dict, sample_weight)
        return {m.name: m.result() for m in self.metrics}

    def test_step(self, data):
        r"""Same as train_step without the outer gradient tape.
        Does *not* update the NN weights.

        NOTE:
            1. Computes the exact same losses as train_step

            2. Ricci loss val can be separately enabled with

                >>> model.learn_ricci_val = True

            3. Requires additional tracing.

        Args:
            data (tuple): test_data (x,y, sample_weight)

        Returns:
            dict: metrics
        """
        # unpack data
        if len(data) == 3:
            x, y, sample_weight = data
        else:
            sample_weight = None
            x, y = data
        y_pred = self(x)
        # add loss contributions
        if self.learn_kaehler:
            cijk_loss = self.compute_kaehler_loss(x)
        else:
            cijk_loss = tf.zeros_like(x[:, 0])
        # cijk_loss = tf.zeros(y.shape[-1], dtype=tf.float32)
        if self.learn_transition:
            t_loss = self.compute_transition_loss(x)
        else:
            t_loss = tf.zeros_like(cijk_loss)
        # validation ricci loss can be enabled independently of training
        if self.learn_ricci_val or self.learn_ricci:
            r_loss = self.compute_ricci_loss(x)
        else:
            r_loss = tf.zeros_like(cijk_loss)
        if self.learn_volk:
            volk_loss = self.compute_volk_loss(x, y, y_pred)
        else:
            volk_loss = tf.zeros_like(cijk_loss)
        omega = tf.expand_dims(y[:, -1], -1)
        sigma_loss_cont = self.sigma_loss(omega, y_pred)**self.n[0]
        total_loss = self.alpha[0]*sigma_loss_cont +\
            self.alpha[1]*cijk_loss +\
            self.alpha[2]*t_loss +\
            self.alpha[3]*r_loss +\
            self.alpha[4]*volk_loss
        # weight the loss.
        if sample_weight is not None:
            total_loss *= sample_weight
        # Return metrics.
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        if self.custom_metrics is not None:
            loss_dict = {}
            loss_dict['loss'] = total_loss
            loss_dict['sigma_loss'] = sigma_loss_cont
            loss_dict['kaehler_loss'] = cijk_loss
            loss_dict['transition_loss'] = t_loss
            loss_dict['ricci_loss'] = r_loss
            loss_dict['volk_loss'] = volk_loss
            # add other loss?
            for m in self.custom_metrics:
                m.update_state(loss_dict, sample_weight)
        return {m.name: m.result() for m in self.metrics}

    @tf.function
    def to_hermitian(self, x):
        r"""Returns a hermitian tensor.

        Takes a tensor of length (-1,nfold**2) and transforms it
        into a (-1,nfold,nfold) hermitian matrix.

        Args:
            x (tensor[(-1,nfold**2), tf.float]): input tensor

        Returns:
            tensor[(-1,nfold,nfold), tf.float]: hermitian matrix
        """
        # upper triangle -> real symmetric part; lower triangle -> imaginary
        # antisymmetric part; the diagonal is counted once.
        t1 = tf.reshape(tf.complex(x, tf.zeros_like(x)),
                        (-1, self.nfold, self.nfold))
        up = tf.linalg.band_part(t1, 0, -1)
        low = tf.linalg.band_part(1j * t1, -1, 0)
        out = up + tf.transpose(up, perm=[0, 2, 1]) - \
            tf.linalg.band_part(t1, 0, 0)
        return out + low + tf.transpose(low, perm=[0, 2, 1], conjugate=True)

    @tf.function
    def compute_volk_loss(self, input_tensor, wo, pred=None):
        r"""Computes volk loss.

        NOTE:
            This is an integral over the batch. Thus batch dependent.

        .. math::

            \mathcal{L}_{\text{vol}_k} = |\int_B g_{\text{FS}} -
                \int_B g_{\text{out}}|_n

        Args:
            input_tensor (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            wo (tf.tensor([bSize, >=2], tf.float32)): label tensor whose
                first two columns are combined as wo[:, 0]/wo[:, 1]
                (presumably integration weights and omega; confirm against
                the train_step caller, which passes the full y).
            pred (tf.tensor([bSize, nfold, nfold], tf.complex64), optional):
                Prediction from `self(input_tensor)`.
                If None will be calculated. Defaults to None.

        Returns:
            tf.tensor([bSize], tf.float32): Volk loss.
        """
        if pred is None:
            pred = self(input_tensor)

        aux_weights = tf.cast(wo[:, 0] / wo[:, 1], dtype=tf.complex64)
        aux_weights = tf.repeat(tf.expand_dims(aux_weights, axis=0), repeats=[len(self.BASIS['KMODULI'])], axis=0)
        # pred = tf.repeat(tf.expand_dims(pred, axis=0), repeats=[len(self.BASIS['KMODULI'])], axis=0)
        # ks = tf.eye(len(self.BASIS['KMODULI']), dtype=tf.complex64)
        # ks = tf.repeat(tf.expand_dims(self.fubini_study_pb(input_tensor), axis=0), repeats=[len(self.BASIS['KMODULI'])], axis=0)
        # input_tensor = tf.repeat(tf.expand_dims(input_tensor, axis=0), repeats=[len(self.BASIS['KMODULI'])], axis=0)
        # print(input_tensor.shape, pred.shape, ks.shape)
        # actual_slopes = tf.vectorized_map(self._calculate_slope, [input_tensor, pred, ks])
        ks = tf.eye(len(self.BASIS['KMODULI']), dtype=tf.complex64)

        # tf.while_loop over the Kaehler moduli: one slope per basis
        # (1,1)-form; actual_slopes grows along axis 0, hence the None in
        # shape_invariants below.
        def body(input_tensor, pred, ks, actual_slopes):
            f_a = self.fubini_study_pb(input_tensor, ts=ks[len(actual_slopes)])
            res = tf.expand_dims(self._calculate_slope([pred, f_a]), axis=0)
            actual_slopes = tf.concat([actual_slopes, res], axis=0)
            return input_tensor, pred, ks, actual_slopes

        def condition(input_tensor, pred, ks, actual_slopes):
            return len(actual_slopes) < len(self.BASIS['KMODULI'])

        f_a = self.fubini_study_pb(input_tensor, ts=ks[0])
        actual_slopes = tf.expand_dims(self._calculate_slope([pred, f_a]), axis=0)
        if len(self.BASIS['KMODULI']) > 1:
            _, _, _, actual_slopes = tf.while_loop(condition, body, [input_tensor, pred, ks, actual_slopes], shape_invariants=[input_tensor.get_shape(), pred.get_shape(), ks.get_shape(), tf.TensorShape([None, actual_slopes.shape[-1]])])
        actual_slopes = tf.reduce_mean(aux_weights * actual_slopes, axis=-1)
        loss = tf.reduce_mean(tf.math.abs(actual_slopes - self.slopes)**self.n[4])

        # return tf.repeat(tf.expand_dims(loss, axis=0), repeats=[input_tensor.shape[0]], axis=0)
        # broadcast the scalar loss to batch shape for uniform weighting
        return tf.repeat(tf.expand_dims(loss, axis=0), repeats=[len(wo)], axis=0)

    def save(self, filepath, **kwargs):
        r"""Saves the underlying neural network to filepath.

        NOTE:
            Currently does not save the whole custom model.

        Args:
            filepath (str): filepath
        """
        # TODO: save graph? What about Optimizer?
        # https://www.tensorflow.org/guide/keras/save_and_serialize#custom_objects
        self.model.save(filepath=filepath, **kwargs)
class MultFSModel(FreeModel):
    r"""Elementwise-multiplicative Fubini-Study model; inherits from
    :py:class:`FreeModel`.

    Usage is identical to :py:class:`FreeModel`; only the metric Ansatz
    differs.
    """

    def __init__(self, *args, **kwargs):
        r"""MultFSModel is a tensorflow model predicting CY metrics with

        .. math:: g_{\text{out}} = g_{\text{FS}} (1 + g_{\text{NN}})

        where the product is taken elementwise; the result is a hermitian
        (nfold, nfold) tensor.
        """
        super().__init__(*args, **kwargs)

    def call(self, input_tensor, training=True, j_elim=None):
        r"""Predict the metric as the FS metric times (1 + NN correction),
        elementwise:

        .. math::

            g_{\text{out}; ij} = g_{\text{FS}; ij} (1_{ij} + g_{\text{NN}; ij})

        Args:
            input_tensor (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            training (bool, optional): Not used. Defaults to True.
            j_elim (tf.tensor([bSize, nHyper], tf.int64), optional):
                Coordinate(s) eliminated in the pullbacks; if None the
                max(dQ/dz) coordinate is used. Defaults to None.

        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex):
                Prediction at each point.
        """
        g_nn = self.to_hermitian(self.model(input_tensor, training=training))
        g_fs = self.fubini_study_pb(input_tensor, j_elim=j_elim)
        # g_fs * (1 + g_nn) = g_fs + g_fs (elementwise) g_nn
        return g_fs + tf.math.multiply(g_fs, g_nn)
class MatrixFSModel(FreeModel):
    r"""Matrix-product Fubini-Study model; inherits from
    :py:class:`FreeModel`.

    Usage is identical to :py:class:`FreeModel`; only the metric Ansatz
    differs.
    """

    def __init__(self, *args, **kwargs):
        r"""MatrixFSModel is a tensorflow model predicting CY metrics with

        .. math:: g_{\text{out}} = g_{\text{FS}} (1 + g_{\text{NN}})

        where the product is a matrix product; the result is a hermitian
        (nfold, nfold) tensor.
        """
        super().__init__(*args, **kwargs)

    def call(self, input_tensor, training=True, j_elim=None):
        r"""Predict the metric as the FS metric matrix-multiplied with
        (1 + NN correction):

        .. math::

            g_{\text{out}; ik} = g_{\text{FS}; ij} (1_{jk} + g_{\text{NN}; jk})

        Args:
            input_tensor (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            training (bool, optional): Not used. Defaults to True.
            j_elim (tf.tensor([bSize, nHyper], tf.int64), optional):
                Coordinate(s) eliminated in the pullbacks; if None the
                max(dQ/dz) coordinate is used. Defaults to None.

        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex):
                Prediction at each point.
        """
        g_nn = self.to_hermitian(self.model(input_tensor, training=training))
        g_fs = self.fubini_study_pb(input_tensor, j_elim=j_elim)
        # g_fs @ (1 + g_nn) = g_fs + g_fs @ g_nn
        return g_fs + tf.linalg.matmul(g_fs, g_nn)
class AddFSModel(FreeModel):
    r"""Additive Fubini-Study model; inherits from :py:class:`FreeModel`.

    Usage is identical to :py:class:`FreeModel`; only the metric Ansatz
    differs.
    """

    def __init__(self, *args, **kwargs):
        r"""AddFSModel is a tensorflow model predicting CY metrics with

        .. math:: g_{\text{out}} = g_{\text{FS}} + g_{\text{NN}}

        returning a hermitian (nfold, nfold) tensor.
        """
        super().__init__(*args, **kwargs)

    def call(self, input_tensor, training=True, j_elim=None):
        r"""Predict the metric as the sum of the FS metric and the NN
        correction:

        .. math:: g_{\text{out}; ij} = g_{\text{FS}; ij} + g_{\text{NN}; ij}

        Args:
            input_tensor (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            training (bool, optional): Not used. Defaults to True.
            j_elim (tf.tensor([bSize, nHyper], tf.int64), optional):
                Coordinate(s) eliminated in the pullbacks; if None the
                max(dQ/dz) coordinate is used. Defaults to None.

        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex64):
                Prediction at each point.
        """
        g_nn = self.to_hermitian(self.model(input_tensor, training=training))
        g_fs = self.fubini_study_pb(input_tensor, j_elim=j_elim)
        return g_fs + g_nn
class PhiFSModel(FreeModel):
r"""PhiFSModel inherits from :py:class:`FreeModel`.
The PhiModel learns the scalar potential correction to some Kaehler metric
to make it the Ricci-flat metric. The Kaehler metric is taken to be the
Fubini-Study metric.
Example:
Is similar to :py:class:`FreeModel`. Replace the nn accordingly.
>>> nn = tfk.Sequential(
... [
... tfk.layers.Input(shape=(ncoords)),
... tfk.layers.Dense(64, activation="gelu"),
... tfk.layers.Dense(1),
... ]
... )
>>> model = PhiFSModel(nn, BASIS)
You have to use this model if you want to remain in the same Kaehler class
specified by the Kaehler moduli.
"""
def __init__(self, *args, **kwargs):
r"""PhiFSModel is a tensorflow model predicting CY metrics.
The output of this model has the following Ansatz
.. math::
g_{\text{out}} = g_{\text{FS}} +
\partial \bar{\partial} \phi_{\text{NN}}
and returns a hermitian (nfold, nfold) tensor. The model is by
defintion Kaehler and thus this loss contribution is by default
disabled. For similar reasons the Volk loss is also disabled if
the last layer does not contain a bias. Otherwise it is required
for successful tracing.
"""
super(PhiFSModel, self).__init__(*args, **kwargs)
# automatic in Phi network
self.learn_kaehler = tf.cast(False, dtype=tf.bool)
def call(self, input_tensor, training=True, j_elim=None):
r"""Prediction of the model.
.. math::
g_{\text{out}; ij} = g_{\text{FS}; ij} + \
partial_i \bar{\partial}_j \phi_{\text{NN}}
Args:
input_tensor (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
training (bool, optional): Not used. Defaults to True.
j_elim (tf.tensor([bSize, nHyper], tf.int64), optional):
Coordinates(s) to be eliminated in the pullbacks.
If None will take max(dQ/dz). Defaults to None.
Returns:
tf.tensor([bSize, nfold, nfold], tf.complex64):
Prediction at each point.
"""
# nn prediction
with tf.GradientTape(persistent=True) as tape1:
tape1.watch(input_tensor)
with tf.GradientTape(persistent=True) as tape2:
tape2.watch(input_tensor)
# Need to disable training here, because batch norm
# and dropout mix the batches, such that batch_jacobian
# is no longer reliable.
phi = self.model(input_tensor, training=False)
d_phi = tape2.gradient(phi, input_tensor)
dd_phi = tape1.batch_jacobian(d_phi, input_tensor)
dx_dx_phi, dx_dy_phi, dy_dx_phi, dy_dy_phi = \
0.25*dd_phi[:, :self.ncoords, :self.ncoords], \
0.25*dd_phi[:, :self.ncoords, self.ncoords:], \
0.25*dd_phi[:, self.ncoords:, :self.ncoords], \
0.25*dd_phi[:, self.ncoords:, self.ncoords:]
dd_phi = tf.complex(dx_dx_phi + dy_dy_phi, dx_dy_phi - dy_dx_phi)
pbs = self.pullbacks(input_tensor, j_elim=j_elim)
dd_phi = tf.einsum('xai,xij,xbj->xab', pbs, dd_phi, tf.math.conj(pbs))
# fs metric
fs_cont = self.fubini_study_pb(input_tensor, pb=pbs, j_elim=j_elim)
# return g_fs + \del\bar\del\phi
return tf.math.add(fs_cont, dd_phi)
def compute_transition_loss(self, points):
r"""Computes transition loss at each point. In the case of the Phi model, we demand that \phi(\lambda^q_i z_i)=\phi(z_i)
Args:
points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
Returns:
tf.tensor([bSize], tf.float32): Transition loss at each point.
"""
inv_one_mask = self._get_inv_one_mask(points)
patch_indices = tf.where(~inv_one_mask)[:, 1]
patch_indices = tf.reshape(patch_indices, (-1, self.nProjective))
current_patch_mask = self._indices_to_mask(patch_indices)
fixed = self._find_max_dQ_coords(points)
cpoints = tf.complex(points[:, :self.ncoords], points[:, self.ncoords:])
if self.nhyper == 1:
other_patches = tf.gather(self.fixed_patches, fixed)
else:
combined = tf.concat((fixed, patch_indices), axis=-1)
other_patches = self._generate_patches_vec(combined)
other_patches = tf.reshape(other_patches, (-1, self.nProjective))
other_patch_mask = self._indices_to_mask(other_patches)
# NOTE: This will include same to same patch transitions
exp_points = tf.repeat(cpoints, self.nTransitions, axis=-2)
patch_points = self._get_patch_coordinates(exp_points, tf.cast(other_patch_mask, dtype=tf.bool))
real_patch_points = tf.concat((tf.math.real(patch_points), tf.math.imag(patch_points)), axis=-1)
gj = self.model(real_patch_points, training=True)
gi = tf.repeat(self.model(points), self.nTransitions, axis=0)
all_t_loss = tf.math.abs(gi-gj)
all_t_loss = tf.reshape(all_t_loss, (-1, self.nTransitions))
all_t_loss = tf.math.reduce_sum(all_t_loss**self.n[2], axis=-1)
return all_t_loss/(self.nTransitions*self.nfold**2)
def get_kahler_potential(self, points):
    r"""Computes the Kahler potential.

    For a product of projective ambient factors the potential is the sum of
    the individual Fubini-Study potentials weighted by the Kahler moduli; the
    neural-network scalar is then added as correction.

    Args:
        points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.

    Returns:
        tf.tensor([bSize], tf.float32): Kahler potential.
    """
    if self.nProjective > 1:
        # we go through each ambient space factor and create the Kahler potential.
        cpoints = tf.complex(
            points[:, :self.degrees[0]],
            points[:, self.ncoords:self.ncoords+self.degrees[0]])
        k_fs = self._fubini_study_n_potentials(cpoints, t=self.BASIS['KMODULI'][0])
        for i in range(1, self.nProjective):
            # coordinate slice [s, e) belonging to the i-th projective factor
            s = tf.reduce_sum(self.degrees[:i])
            e = s + self.degrees[i]
            cpoints = tf.complex(points[:, s:e],
                                 points[:, self.ncoords+s:self.ncoords+e])
            k_fs_tmp = self._fubini_study_n_potentials(cpoints, t=self.BASIS['KMODULI'][i])
            k_fs += k_fs_tmp
    else:
        cpoints = tf.complex(
            points[:, :self.ncoords],
            points[:, self.ncoords:2*self.ncoords])
        k_fs = self._fubini_study_n_potentials(cpoints, t=self.BASIS['KMODULI'][0])
    # add the learned scalar correction phi_NN
    k_fs += tf.reshape(self.model(points), [-1])
    return k_fs
class ToricModel(FreeModel):
    r"""ToricModel is the base class of toric CYs and inherits from
    :py:class:`FreeModel`.

    Example:
        Is similar to :py:class:`FreeModel` but requires additional toric_data.
        This one can be generated with :py:mod:`cymetric.sage.sagelib`.

        >>> #generate toric_data with sage_lib
        >>> import pickle
        >>> toric_data = pickle.load('toric_data.pickle')
        >>> model = ToricModel(nn, BASIS, toric_data=toric_data)

    ToricModel does **not** train the underlying neural network. Instead, it
    always predicts a generalization of the kaehler metric for toric CYs.
    """
    def __init__(self, *args, **kwargs):
        r"""ToricModel is the equivalent to
        :py:class:~`cymetric.models.fubinistudy.FSModel`.

        It will not learn the Ricci-flat metric, but can be used as a baseline
        to compare the neural network against.

        NOTE:
            1. Requires nevertheless a nn in its (kw)args.
            2. Requires `toric_data = toric_data` in its kwargs.
        """
        if 'toric_data' in kwargs.keys():
            self.toric_data = kwargs['toric_data']
            del kwargs['toric_data']
        self.nfold = self.toric_data['dim_cy']
        # exponents of the sections generating each Kahler cone direction
        self.sections = [tf.cast(m, dtype=tf.complex64) for m in self.toric_data['exps_sections']]
        self.patch_masks = np.array(self.toric_data['patch_masks'], dtype=bool)
        self.glsm_charges = np.array(self.toric_data["glsm_charges"])
        self.nPatches = len(self.patch_masks)
        self.nProjective = len(self.toric_data["glsm_charges"])
        super(ToricModel, self).__init__(*args, **kwargs)
        self.kmoduli = self.BASIS['KMODULI']
        self.lc = tf.convert_to_tensor(get_levicivita_tensor(self.nfold), dtype=tf.complex64)
        self.slopes = self._target_slopes()

    def call(self, input_tensor, training=True, j_elim=None):
        r"""Computes the equivalent of the pullbacked
        Fubini-Study metric at each point in input_tensor.

        .. math:: J = t^\alpha J_\alpha

        Args:
            input_tensor (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            training (bool, optional): Defaults to True.
            j_elim (tf.tensor([bSize, nHyper], tf.int64), optional):
                Coordinates(s) to be eliminated in the pullbacks.
                If None will take max(dQ/dz). Defaults to None.

        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex):
                Prediction at each point.
        """
        # FS prediction; the nn is deliberately not used (baseline model)
        return self.fubini_study_pb(input_tensor, j_elim=j_elim)

    def fubini_study_pb(self, points, pb=None, j_elim=None, ts=None):
        r"""Computes the pullbacked Fubini-Study metric.

        NOTE:
            The pb argument overwrites j_elim.

        .. math::

            g_{ij} = \frac{1}{\pi} J_i^a \bar{J}_j^b \partial_a
                \bar{\partial}_b \ln |\vec{z}|^2

        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            pb (tf.tensor([bSize, nfold, ncoords], tf.float32)):
                Pullback at each point. Overwrite j_elim. Defaults to None.
            j_elim (tf.tensor([bSize], tf.int64)): index to be eliminated.
                Coordinates(s) to be eliminated in the pullbacks.
                If None will take max(dQ/dz). Defaults to None.
            ts (tf.tensor([len(kmoduli)], tf.complex64)):
                Kahler parameters. Defaults to the ones specified at time of point generation

        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex64):
                FS-metric at each point.
        """
        if ts is None:
            ts = self.BASIS['KMODULI']
        # NOTE: Cannot use super since for toric models we have only one toric space, but more than one Kahler modulus
        pullbacks = self.pullbacks(points, j_elim=j_elim) if pb is None else pb
        cpoints = tf.complex(points[:, :self.ncoords], points[:, self.ncoords:])
        # sum the ambient-space contributions J = t^alpha J_alpha
        Js = self._fubini_study_n_metrics(cpoints, n=0, t=ts[0])
        if len(self.kmoduli) != 1:
            for i in range(1, len(self.kmoduli)):
                Js += self._fubini_study_n_metrics(cpoints, n=i, t=ts[i])
        gFSpb = tf.einsum('xai,xij,xbj->xab', pullbacks, Js, tf.math.conj(pullbacks))
        return gFSpb

    @tf.function
    def _fubini_study_n_metrics(self, points, n=None, t=tf.complex(1., 0.)):
        r"""Computes the Fubini-Study equivalent on the ambient space for each
        Kaehler modulus.

        .. math:: g_\alpha = \partial_i \bar\partial_j \ln \rho_\alpha

        Args:
            points (tf.tensor([bSize, ncoords], tf.complex64)): Points.
            n (int, optional): n^th Kahler potential term. Defaults to None.
            t (tf.complex, optional): Volume factor. Defaults to 1+0j.

        Returns:
            tf.tensor([bSize, ncoords, ncoords], tf.complex64):
                Metric contribution at each point for t_n.
        """
        alpha = 0 if n is None else n
        degrees = self.sections[alpha]
        # monomials of the sections evaluated at each point
        ms = tf.math.pow(points[:, tf.newaxis, :], degrees[tf.newaxis, :, :])
        ms = tf.math.reduce_prod(ms, axis=int(-1))
        mss = ms * tf.math.conj(ms)
        # kappa_alpha = sum_a |s_a|^2 is the Kahler-potential argument
        kappa_alphas = tf.reduce_sum(mss, int(-1))
        zizj = points[:, :, tf.newaxis] * tf.math.conj(points[:, tf.newaxis, :])
        J_alphas = float(1.) / zizj
        J_alphas = tf.einsum('x,xab->xab', float(1.) / (kappa_alphas**int(2)), J_alphas)
        coeffs = tf.einsum('xa,xb,ai,aj->xij', mss, mss, degrees, degrees) - tf.einsum('xa,xb,ai,bj->xij', mss, mss, degrees, degrees)
        return J_alphas * coeffs * t/tf.constant(np.pi, dtype=tf.complex64)

    def _generate_helpers(self):
        """Additional helper functions."""
        self.nTransitions = int(np.max(np.sum(~self.patch_masks, axis=-2)))
        self.fixed_patches = self._generate_all_patches()
        patch_degrees = get_all_patch_degrees(self.glsm_charges, self.patch_masks)
        w_of_x, del_w_of_x, del_w_of_z = compute_all_w_of_x(patch_degrees, self.patch_masks)
        self.patch_degrees = tf.cast(patch_degrees, dtype=tf.complex64)
        self.transition_coefficients = tf.cast(w_of_x, dtype=tf.complex64)
        self.transition_degrees = tf.cast(del_w_of_z, dtype=tf.complex64)
        self.patch_masks = tf.cast(self.patch_masks, dtype=tf.bool)
        # Not needed; cause transition loss is different
        # self.degrees = None <- also only needed for rescaling and patches in FS
        self.proj_matrix = None
        self._proj_indices = None
        return None

    def _generate_all_patches(self):
        """Torics only have on hypersurface, thus we can generate all patches"""
        # fixed patches will be of shape (ncoords, npatches, nTransitions)
        fixed_patches = np.repeat(np.arange(self.nPatches), self.nTransitions)
        fixed_patches = np.tile(fixed_patches, self.ncoords)
        fixed_patches = fixed_patches.reshape(
            (self.ncoords, self.nPatches, self.nTransitions))
        for i in range(self.ncoords):
            # keep each coordinate fixed and add all patches, where its zero
            all_patches = ~self.patch_masks[:, i]
            all_indices = np.where(all_patches)[0]
            # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin `int` is the documented replacement and is what the
            # old alias resolved to.
            fixed_patches[i, all_indices, 0:len(all_indices)] = all_indices * \
                np.ones((len(all_indices), len(all_indices)), dtype=int)
        return tf.cast(fixed_patches, dtype=tf.int64)

    @tf.function
    def _get_patch_coordinates(self, points, patch_index):
        r"""Goes to a patch specified by patch_index which contains the patch
        index for self.patch_degrees and return the coordinates in this patch.
        """
        # NOTE: this is different than for regular FS models it takes the patch index as argument, not a mask
        degrees = tf.gather(self.patch_degrees, patch_index[:, 0])
        scaled_points = points[:, tf.newaxis, :]
        scaled_points = tf.math.pow(scaled_points, degrees)
        return tf.reduce_prod(scaled_points, axis=-1)

    @tf.function
    def _mask_to_patch_index(self, mask):
        """Computes the patch index in self.patch_mask of a given patch mask."""
        # NOTE: this computes the patch index, not the indices of the patch coordinates.
        mask_to_index = tf.math.equal(mask[:, tf.newaxis, :], self.patch_masks)
        mask_to_index = tf.reduce_all(mask_to_index, axis=-1)
        indices = tf.where(mask_to_index)
        return indices[:, 1:]

    @tf.function
    def compute_transition_loss(self, points):
        r"""Computes transition loss at each point.

        This function is essentially the same as for `FSModel`. It only differs
        in the patch selection. TODO: Unify this approach?

        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float)): Points.

        Returns:
            tf.tensor([bSize], tf.complex): transition loss at each point.
        """
        inv_one_mask = self._get_inv_one_mask(points)
        current_patch_mask = ~inv_one_mask
        current_patch_index = self._mask_to_patch_index(current_patch_mask)
        cpoints = tf.complex(points[:, :self.ncoords],
                             points[:, self.ncoords:])
        fixed = self._find_max_dQ_coords(points)
        other_patches = tf.gather_nd(
            self.fixed_patches,
            tf.concat([fixed, current_patch_index], axis=-1))
        other_patch_mask = tf.gather(self.patch_masks, other_patches)
        other_patch_mask = tf.reshape(other_patch_mask, (-1, self.ncoords))
        # NOTE: This will include same to same patch transitions
        exp_points = tf.repeat(cpoints, self.nTransitions, axis=-2)
        patch_points = self._get_patch_coordinates(exp_points, tf.reshape(other_patches, (-1, 1)))
        fixed = tf.reshape(tf.tile(fixed, [1, self.nTransitions]), (-1, self.nhyper))
        real_points = tf.concat((tf.math.real(patch_points), tf.math.imag(patch_points)), axis=-1)
        # metric prediction in the other patch vs. current patch
        gj = self(real_points, training=True, j_elim=fixed)
        gi = tf.repeat(self(points), self.nTransitions, axis=0)
        current_patch_mask = tf.repeat(
            current_patch_mask, self.nTransitions, axis=0)
        Tij = self.get_transition_matrix(
            patch_points, other_patch_mask, current_patch_mask, fixed)
        all_t_loss = tf.math.abs(self.transition_loss_matrices(gj, gi, Tij))
        all_t_loss = tf.math.reduce_sum(all_t_loss, axis=[1, 2])
        # This should now be nTransitions
        all_t_loss = tf.reshape(all_t_loss, (-1, self.nTransitions))
        all_t_loss = tf.math.reduce_sum(all_t_loss, axis=-1)
        return all_t_loss/(self.nTransitions*self.nfold**2)

    @tf.function
    def get_transition_matrix(self, points, i_mask, j_mask, fixed):
        r"""Computes transition matrix between patch i and j
        for each point in points where fixed is the coordinate,
        which is being eliminated.

        See also: :py:meth:`cymetric.models.FSModel.get_transition_matrix`.
        This function is more simplified than the original one as we
        compute a basis for all :math:`\partial w_i / \partial z_j` before hand.

        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            i_mask (tf.tensor([bSize, ncoords], tf.bool)): Mask of pi-indices.
            j_mask (tf.tensor([bSize, ncoords], tf.bool)): Mask of pi-indices.
            fixed (tf.tensor([bSize, 1], tf.int64)): Elimination indices.

        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex64): T_ij on the CY.
        """
        # identity transition for same-patch pairs; only compute for different patches
        same_patch = tf.where(tf.math.reduce_all(i_mask == j_mask, axis=-1))
        diff_patch = tf.where(~tf.math.reduce_all(i_mask == j_mask, axis=-1))
        same_patch = same_patch[:, 0]
        diff_patch = diff_patch[:, 0]
        n_p = tf.math.reduce_sum(tf.ones_like(fixed[:, 0]))
        n_p_red = tf.math.reduce_sum(tf.ones_like(diff_patch))
        # reduce non trivial
        i_mask_red = tf.gather(i_mask, diff_patch)
        j_mask_red = tf.gather(j_mask, diff_patch)
        fixed_red = tf.gather(fixed, diff_patch)
        points_red = tf.gather(points, diff_patch)
        # recompute patch indices
        i_patch_indices = self._mask_to_patch_index(i_mask_red)
        j_patch_indices = self._mask_to_patch_index(j_mask_red)
        # fill tij
        tij_indices = tf.concat([fixed_red, i_patch_indices, j_patch_indices], axis=-1)
        tij_degrees = tf.gather_nd(self.transition_degrees, tij_indices)
        tij_coeff = tf.gather_nd(self.transition_coefficients, tij_indices)
        tij_red = tf.math.pow(points_red[:, tf.newaxis, tf.newaxis, :], tij_degrees)
        tij_red = tf.multiply(tij_coeff, tf.reduce_prod(tij_red, axis=-1))
        tij_red = tf.transpose(tij_red, perm=[0, 2, 1])
        # fill tij
        tij_eye = tf.eye(self.nfold, batch_shape=[n_p-n_p_red], dtype=tf.complex64)
        tij_all = tf.zeros((n_p, self.nfold, self.nfold), dtype=tf.complex64)
        tij_all = tf.tensor_scatter_nd_update(tij_all, tf.reshape(diff_patch, (-1, 1)), tij_red)
        tij_all = tf.tensor_scatter_nd_update(tij_all, tf.reshape(same_patch, (-1, 1)), tij_eye)
        return tij_all
class PhiFSModelToric(ToricModel):
    r"""PhiFSModelToric inherits from :py:class:`ToricModel`.

    The PhiModel learns the scalar potential correction to some Kaehler metric
    to make it the Ricci-flat metric. The Kaehler metric is taken to be a toric
    equivalent of the Fubini-Study metric. See also :py:class:`PhiFSModel`.

    Example:
        Is similar to :py:class:`FreeModel`. Replace the nn accordingly.

        >>> nn = tfk.Sequential(
        ...     [
        ...         tfk.layers.Input(shape=(ncoords)),
        ...         tfk.layers.Dense(64, activation="gelu"),
        ...         tfk.layers.Dense(1, use_bias=False),
        ...     ]
        ... )
        >>> model = PhiFSModelToric(nn, BASIS, toric_data = toric_data)

    You have to use this model if you want to remain in the same Kaehler class
    specified by the Kaehler moduli.
    """
    def __init__(self, *args, **kwargs):
        r"""PhiFSModelToric is a tensorflow model predicting CY metrics.

        The output of this model has the following Ansatz

        .. math::

            g_{\text{out}} = g_{\text{FS'}} +
                \partial \bar{\partial} \phi_{\text{NN}}

        and returns a hermitian (nfold, nfold) tensor. The model is by
        defintion Kaehler and thus this loss contribution is by default
        disabled.
        """
        super(PhiFSModelToric, self).__init__(*args, **kwargs)
        # the ansatz is Kaehler by construction, so no Kaehler loss is needed
        self.learn_kaehler = tf.cast(False, dtype=tf.bool)

    def call(self, input_tensor, training=True, j_elim=None):
        r"""Prediction of the model.

        .. math::

            g_{\text{out}; ij} = g_{\text{FS'}; ij} +
                \partial_i \bar{\partial}_j \phi_{\text{NN}}

        Args:
            input_tensor (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            training (bool, optional): Not used. Defaults to True.
            j_elim (tf.tensor([bSize, nHyper], tf.int64), optional):
                Coordinates(s) to be eliminated in the pullbacks.
                If None will take max(dQ/dz). Defaults to None.

        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex64):
                Prediction at each point.
        """
        # nn prediction: the complex Hessian del delbar phi is assembled from
        # the real Hessian of phi w.r.t. the real and imaginary parts below.
        with tf.GradientTape(persistent=True) as tape1:
            tape1.watch(input_tensor)
            with tf.GradientTape(persistent=True) as tape2:
                tape2.watch(input_tensor)
                # see comment at other Phi model why training disabled.
                phi = self.model(input_tensor, training=False)
            d_phi = tape2.gradient(phi, input_tensor)
        dd_phi = tape1.batch_jacobian(d_phi, input_tensor)
        # split the real Hessian into its (x,x), (x,y), (y,x), (y,y) quadrants;
        # the 0.25 implements the Wirtinger derivative d/dz = (d/dx - i d/dy)/2
        dx_dx_phi, dx_dy_phi, dy_dx_phi, dy_dy_phi = \
            0.25*dd_phi[:, :self.ncoords, :self.ncoords], \
            0.25*dd_phi[:, :self.ncoords, self.ncoords:], \
            0.25*dd_phi[:, self.ncoords:, :self.ncoords], \
            0.25*dd_phi[:, self.ncoords:, self.ncoords:]
        dd_phi = tf.complex(dx_dx_phi + dy_dy_phi, dx_dy_phi - dy_dx_phi)
        pbs = self.pullbacks(input_tensor, j_elim=j_elim)
        # pull the ambient Hessian back onto the CY
        dd_phi = tf.einsum('xai,xij,xbj->xab', pbs, dd_phi, tf.math.conj(pbs))
        # fs metric
        fs_cont = self.fubini_study_pb(input_tensor, pb=pbs, j_elim=j_elim)
        # return g_fs + \del\bar\del\phi
        return tf.math.add(fs_cont, dd_phi)

    def compute_transition_loss(self, points, num_random_scalings=10):
        r"""Computes transition loss at each point. In the case of the Phi model, we demand that \phi(\lambda^q_i z_i)=\phi(z_i)

        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            num_random_scalings (int): If None, uses scalings for each patch to set one coordinate to one.
                If a number, uses this many random scalings for \lambda

        Returns:
            tf.tensor([bSize], tf.float32): Transition loss at each point.
        """
        if num_random_scalings is None:
            # fall back to the deterministic patch-based transitions
            return super(PhiFSModelToric, self).compute_transition_loss(points)
        cpoints = tf.complex(points[:, :self.ncoords], points[:, self.ncoords:])
        num_pns = self.glsm_charges.shape[0]
        # we scale the lambdas_rand to have abs value in [0.1, 0.9]
        scale_factor_rand = tf.cast(tf.random.uniform(minval=0.1, maxval=0.9, shape=(num_random_scalings, num_pns), dtype=tf.float32), dtype=tf.complex64)
        scale_factor_rand = tf.repeat(tf.expand_dims(scale_factor_rand, -1), repeats=self.ncoords, axis=-1)
        # real and imaginary part of random lambdas (we draw a different one for each ambient P^n)
        lambdas_rand = tf.random.uniform(minval=-1, maxval=1, shape=(num_random_scalings, num_pns, 2), dtype=tf.float32)
        lambdas_rand = tf.complex(lambdas_rand[:,:,0], lambdas_rand[:,:,1])
        lambdas_rand = tf.repeat(tf.expand_dims(lambdas_rand, -1), repeats=self.ncoords, axis=-1)
        # normalize to unit modulus, then scale into [0.1, 0.9]
        lambdas_rand = scale_factor_rand * lambdas_rand/(lambdas_rand * tf.math.conj(lambdas_rand))**(.5)  # rescale \lambdas
        # raise each lambda_a to its GLSM charge q^a_i and take the product
        # over the ambient factors to get the scaling of each coordinate
        lambdas_rand = lambdas_rand ** self.glsm_charges
        lambdas_rand = tf.reduce_prod(lambdas_rand, 1)
        scaled_points = tf.einsum('xi,ai->xai', cpoints, lambdas_rand)
        scaled_points = tf.reshape(scaled_points, (-1, self.ncoords))  # this has now shape (batch_size*num_random_scalings, ncoords)
        real_patch_points = tf.concat((tf.math.real(scaled_points), tf.math.imag(scaled_points)), axis=-1)
        gj = self.model(real_patch_points, training=True)
        gi = tf.repeat(self.model(points), num_random_scalings, axis=0)
        all_t_loss = tf.math.abs(gi-gj)
        all_t_loss = tf.reshape(all_t_loss, (-1, num_random_scalings))
        all_t_loss = tf.math.reduce_sum(all_t_loss**self.n[2], axis=-1)
        return all_t_loss / num_random_scalings

    @tf.function
    def _fubini_study_n_potentials(self, points, n=None, t=tf.complex(1., 0.)):
        r"""Computes the Fubini-Study Kahler potential on the ambient space for
        the n-th Kaehler modulus.

        .. math:: K_\alpha = \frac{t}{\pi} \ln \rho_\alpha

        Args:
            points (tf.tensor([bSize, ncoords], tf.complex64)): Points.
            n (int, optional): n^th Kahler potential term. Defaults to None.
            t (tf.complex, optional): Volume factor. Defaults to 1+0j.

        Returns:
            tf.tensor([bSize], tf.float32):
                Potential contribution at each point for t_n.
        """
        alpha = 0 if n is None else n
        degrees = self.sections[alpha]
        ms = tf.math.pow(points[:, tf.newaxis, :], degrees[tf.newaxis, :, :])
        ms = tf.math.reduce_prod(ms, axis=int(-1))
        mss = ms * tf.math.conj(ms)
        # rho_alpha = sum_a |s_a|^2 over the sections of this cone direction
        kappa_alphas = tf.reduce_sum(mss, int(-1))
        # NOTE(review): the cast to float32 discards any imaginary part of t
        # and of log(kappa); kappa is real-positive by construction.
        return tf.cast(t/np.pi, dtype=tf.float32) * tf.cast(tf.math.log(kappa_alphas), tf.float32)

    def get_kahler_potential(self, points):
        r"""Returns toric equivalent of the FS Kahler potential for each point,
        plus the learned scalar correction.

        .. math::

            K = t^\alpha K_\alpha + \phi_{\text{NN}} \quad \text{ with: }
            K_\alpha = \frac{t_\alpha}{\pi} \ln \rho_\alpha

        :math:`\rho_\alpha` is a basis of sections.

        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.

        Returns:
            tf.tensor([bSize], tf.float32): Kahler potential at each point.
        """
        cpoints = tf.cast(tf.complex(points[:, :self.ncoords], points[:, self.ncoords:]), dtype=tf.complex64)
        k_fs = self._fubini_study_n_potentials(cpoints, t=self.kmoduli[0])
        if len(self.kmoduli) != 1:
            for i in range(1, len(self.kmoduli)):
                k_fs += self._fubini_study_n_potentials(cpoints, i, t=self.kmoduli[i])
        # add the learned scalar correction phi_NN
        k_fs += tf.reshape(self.model(points), [-1])
        return k_fs
class MatrixFSModelToric(ToricModel):
    r"""MatrixFSModelToric inherits from :py:class:`ToricModel`.

    The neural network predicts a matrix-valued correction that multiplies the
    toric Fubini-Study metric from the right.

    See also: :py:class:`MatrixFSModel` and :py:class:`FreeModel`
    """
    def __init__(self, *args, **kwargs):
        r"""MatrixFSModelToric is a tensorflow model predicting CY metrics.

        The output of this model has the following Ansatz

        .. math:: g_{\text{out}} = g_{\text{FS'}} (1 + g_{\text{NN}})

        with matrix multiplication and returns a hermitian (nfold, nfold)
        tensor.
        """
        super(MatrixFSModelToric, self).__init__(*args, **kwargs)

    def call(self, input_tensor, training=True, j_elim=None):
        r"""Prediction of the model.

        .. math::

            g_{\text{out}; ik} = g_{\text{FS}; ij} (1_{jk} + g_{\text{NN}; jk})

        Args:
            input_tensor (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            training (bool, optional): Not used. Defaults to True.
            j_elim (tf.tensor([bSize, nHyper], tf.int64), optional):
                Coordinates(s) to be eliminated in the pullbacks.
                If None will take max(dQ/dz). Defaults to None.

        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex):
                Prediction at each point.
        """
        # toric FS baseline and hermitian nn correction
        g_fs = self.fubini_study_pb(input_tensor, j_elim=j_elim)
        correction = self.to_hermitian(self.model(input_tensor, training=training))
        # g_fs * (1 + correction) = g_fs + g_fs @ correction
        return g_fs + tf.linalg.matmul(g_fs, correction)
| 50,954 | 41.783375 | 236 | py |
cymetric | cymetric-main/cymetric/models/callbacks.py | """
A collection of tensorflow callbacks.
"""
import tensorflow as tf
import numpy as np
from cymetric.models.measures import ricci_measure, sigma_measure, \
kaehler_measure_loss, transition_measure_loss, ricci_scalar_fn
# Shorthand for the Keras namespace used throughout this module.
tfk = tf.keras
# Graph-compiled versions of the measure functions; wrapping them once at
# module level avoids re-tracing inside the callbacks below.
sigma_measure_tf = tf.function(func=sigma_measure)
kaehler_measure_tf = tf.function(func=kaehler_measure_loss)
transition_measure_tf = tf.function(func=transition_measure_loss)
ricci_measure_tf = tf.function(func=ricci_measure)
ricci_scalar_tf = tf.function(func=ricci_scalar_fn)
class AlphaCallback(tfk.callbacks.Callback):
    """Callback that allows to manipulate the alpha factors."""

    def __init__(self, scheduler):
        """A callback that manipulates the alpha factors.

        Args:
            scheduler (function): A function that returns a list of five
                tf.Variables and takes (int, dict, self.model.alpha) as args.
        """
        super(AlphaCallback, self).__init__()
        self.manipulater = scheduler

    def on_epoch_end(self, epoch, logs=None):
        r"""Manipulates alpha values according to function `scheduler`.

        Args:
            epoch (int): epoch
            logs (dict, optional): history.history. Defaults to None.
        """
        # delegate the update rule entirely to the user-supplied scheduler
        new_alpha = self.manipulater(epoch, logs, self.model.alpha)
        self.model.alpha = new_alpha
class KaehlerCallback(tfk.callbacks.Callback):
    """Callback that tracks the weighted Kaehler measure."""

    def __init__(self, validation_data, nth=1, bSize=1000, initial=False):
        r"""A callback which computes the kaehler measure for
        the validation data after every epoch end.

        See also: :py:func:`cymetric.models.measures.kaehler_measure_loss`.

        Args:
            validation_data (tuple(X_val, y_val)): Validation data.
            nth (int, optional): Run every n-th epoch. Defaults to 1.
            bSize (int, optional): Batch size. Defaults to 1000.
            initial (bool, optional): If True does one iteration before training.
                Defaults to False.
        """
        super(KaehlerCallback, self).__init__()
        self.X_val, self.y_val = validation_data
        self.X_val = tf.cast(self.X_val, tf.float32)
        self.y_val = tf.cast(self.y_val, tf.float32)
        # last two columns of y_val hold the integration weights and omega
        self.weights = tf.cast(self.y_val[:, -2], tf.float32)
        self.omega = tf.cast(self.y_val[:, -1], tf.float32)
        self.nth = nth
        self.bSize = bSize
        self.initial = initial

    def on_epoch_end(self, epoch, logs=None):
        r"""Computes kaehler measure.

        Args:
            epoch (int): epoch
            logs (dict, optional): history.history. Defaults to None.
        """
        # might have to batch this
        if epoch % self.nth == 0:
            n_p = len(self.X_val)
            # kaehler loss measure already takes the mean
            kaehler_losses = []
            # NOTE(review): if n_p < bSize the loop body never runs and
            # np.mean([]) below yields nan — assumes n_p >= bSize; verify.
            for i in range(int(n_p/self.bSize)):
                s = i*self.bSize
                if i != int(n_p/self.bSize)-1:
                    e = (i+1)*self.bSize
                    kaehler_losses += [kaehler_measure_tf(
                        self.model, self.X_val[s:e])]
                else:
                    # last batch also absorbs the remainder points up to n_p
                    e = n_p
                    kaehler_losses += [kaehler_measure_tf(
                        self.model, self.X_val[s:e])]
                    # rescale last entry to give correct weight for mean
                    kaehler_losses[-1] *= (e-s)/self.bSize
            cb_res = np.mean(kaehler_losses).tolist()
            logs['kaehler_val'] = cb_res
            # scientific notation for small values, fixed point otherwise
            if cb_res <= 1e-3:
                print(' - Kaehler measure val: {:.4e}'.format(cb_res))
            else:
                print(' - Kaehler measure val: {:.4f}'.format(cb_res))

    def on_train_begin(self, logs=None):
        r"""Compute Kaehler measure before training as baseline.

        Args:
            logs (dict, optional): History. Defaults to None.
        """
        if self.initial:
            self.on_epoch_end(-1, logs=logs)
class RicciCallback(tfk.callbacks.Callback):
    """Callback that tracks the Ricci measure."""

    def __init__(self, validation_data, pullbacks, verbose=0,
                 bSize=1000, nth=1, hlevel=0, initial=False):
        r"""A callback which computes the ricci measure for
        the validation data after every epoch end.

        See also: :py:func:`cymetric.models.measures.ricci_measure`,
        :py:func:`cymetric.models.measures.ricci_scalar_fn`.

        .. math::

            ||R|| \equiv \frac{\text{Vol}_K^{\frac{1}{\text{nfold}}}}{\text{Vol}_{\text{CY}}}
                \int_X d\text{Vol}_K |R|

        Args:
            validation_data (tuple(X_val, y_val)): validation data
            pullbacks (tensor[(n_p, nfold, n_coord)]): pullback tensors
            verbose (int, optional): verbosity if >0 prints some info.
                Defaults to 0.
            bSize (int, optional): Batch size. Defaults to 1000.
            nth (int, optional): Run every n-th epoch. Defaults to 1.
            hlevel (int, optional): if > 0 adds increasingly more statistics.
                Defaults to 0.
            initial (bool, optional): If True does one iteration before training.
                Defaults to False.
        """
        super(RicciCallback, self).__init__()
        self.X_val, self.y_val = validation_data
        self.X_val = tf.cast(self.X_val, tf.float32)
        self.y_val = tf.cast(self.y_val, tf.float32)
        # last two columns of y_val hold the integration weights and omega
        self.weights = tf.cast(self.y_val[:, -2], tf.float32)
        self.vol_cy = tf.math.reduce_mean(self.weights, axis=-1)
        self.omega = tf.cast(self.y_val[:, -1], tf.float32)
        self.pullbacks = tf.cast(pullbacks, tf.complex64)
        self.verbose = verbose
        self.hlevel = hlevel
        self.nth = nth
        self.bSize = bSize
        self.initial = initial

    def on_epoch_end(self, epoch, logs=None):
        r"""Computes ricci measure.

        Args:
            epoch (int): epoch
            logs (dict, optional): history.history. Defaults to None.
        """
        if epoch % self.nth == 0:
            n_p = len(self.X_val)
            nfold = tf.cast(self.model.nfold, dtype=tf.float32)
            ricci_scalars = np.zeros(n_p)
            dets = np.zeros(n_p)
            # batched evaluation; the last batch absorbs the remainder points
            # NOTE(review): assumes n_p >= bSize, otherwise the loop never runs.
            for i in range(int(n_p/self.bSize)):
                s = i*self.bSize
                e = (i+1)*self.bSize if i != int(n_p/self.bSize)-1 else n_p
                ricci_scalars[s:e], dets[s:e] = ricci_scalar_tf(self.model,
                                                                self.X_val[s:e],
                                                                pullbacks=self.pullbacks[s:e],
                                                                verbose=self.verbose,
                                                                rdet=True)
            ricci_scalars = tf.math.abs(ricci_scalars)
            det_over_omega = dets / self.omega
            ricci_scalars = tf.cast(ricci_scalars, dtype=tf.float32)
            det_over_omega = tf.cast(det_over_omega, dtype=tf.float32)
            # Monte-Carlo estimate of Vol_K and of the weighted |R| integral
            vol_k = tf.math.reduce_mean(det_over_omega * self.weights, axis=-1)
            ricci = (vol_k**(1/nfold) / self.vol_cy) * tf.math.reduce_mean(
                det_over_omega * ricci_scalars * self.weights, axis=-1)
            cb_res = ricci.numpy().tolist()
            logs['ricci_val'] = cb_res
            # optional extra statistics, gated by hlevel
            if self.hlevel > 0:
                logs['ricci_val_mean'] = float(np.mean(ricci_scalars))
            if self.hlevel > 1:
                logs['ricci_val_median'] = float(np.median(ricci_scalars))
                logs['ricci_val_var'] = float(np.var(ricci_scalars))
                logs['ricci_val_std'] = float(np.std(ricci_scalars))
            if self.hlevel > 2:
                # fraction of points with negative metric determinant
                logs['ricci_val_dets'] = float(np.sum(dets < 0)/len(dets))
            if cb_res <= 1e-3:
                print(' - Ricci measure val: {:.4e}'.format(cb_res))
            else:
                print(' - Ricci measure val: {:.4f}'.format(cb_res))

    def on_train_begin(self, logs=None):
        r"""Compute Ricci measure before training as baseline.

        Args:
            logs (dict, optional): History. Defaults to None.
        """
        if self.initial:
            self.on_epoch_end(-1, logs=logs)
class SigmaCallback(tfk.callbacks.Callback):
    """Callback that tracks the sigma measure."""

    def __init__(self, validation_data, initial=False):
        r"""A callback which computes the sigma measure for
        the validation data after every epoch end.

        See also: :py:func:`cymetric.models.measures.sigma_measure`.

        .. math::

            \sigma_k \equiv \frac{1}{\text{Vol}_{\text{CY}}}
                \int_X d\text{Vol}_{\text{CY}} |1 -
                \frac{\det(g)/\text{Vol}_K}{\Omega \wedge \bar\Omega / \text{CY}}|

        Args:
            validation_data (tuple(X_val, y_val)): validation data
            initial (bool, optional): If True does one iteration before training.
                Defaults to False.
        """
        super(SigmaCallback, self).__init__()
        X_val, y_val = validation_data
        self.X_val = tf.cast(X_val, tf.float32)
        self.y_val = tf.cast(y_val, tf.float32)
        self.initial = initial

    def on_epoch_end(self, epoch, logs=None):
        r"""Computes sigma measure.

        Args:
            epoch (int): epoch
            logs (dict, optional): history.history. Defaults to None.
        """
        cb_res = sigma_measure_tf(self.model, self.X_val, self.y_val).numpy().tolist()
        logs['sigma_val'] = cb_res
        # scientific notation for small values, fixed point otherwise
        fmt = '{:.4e}' if cb_res <= 1e-3 else '{:.4f}'
        print((' - Sigma measure val: ' + fmt).format(cb_res))

    def on_train_begin(self, logs=None):
        r"""Compute sigma measure before training as baseline.

        Args:
            logs (dict, optional): History. Defaults to None.
        """
        if self.initial:
            self.on_epoch_end(-1, logs=logs)
class TransitionCallback(tfk.callbacks.Callback):
    """Callback that tracks the transition loss weighted over the CY."""

    def __init__(self, validation_data, initial=False):
        r"""A callback which computes the transition measure for
        the validation data after every epoch end.

        Args:
            validation_data (tuple(X_val, y_val)): validation data
            initial (bool, optional): If True does one iteration before training.
                Defaults to False.
        """
        super(TransitionCallback, self).__init__()
        X_val, y_val = validation_data
        self.X_val = tf.cast(X_val, tf.float32)
        self.y_val = tf.cast(y_val, tf.float32)
        self.initial = initial

    def on_epoch_end(self, epoch, logs=None):
        r"""Computes transition measure.

        Args:
            epoch (int): epoch
            logs (dict, optional): history.history. Defaults to None.
        """
        cb_res = transition_measure_tf(self.model, self.X_val).numpy().tolist()
        logs['transition_val'] = cb_res
        # scientific notation for small values, fixed point otherwise
        fmt = '{:.4e}' if cb_res <= 1e-3 else '{:.4f}'
        print((' - Transition measure val: ' + fmt).format(cb_res))

    def on_train_begin(self, logs=None):
        r"""Compute transition measure before training as baseline.

        Args:
            logs (dict, optional): History. Defaults to None.
        """
        if self.initial:
            self.on_epoch_end(-1, logs=logs)
class VolkCallback(tfk.callbacks.Callback):
    r"""Callback that computes the volume from the metric."""

    def __init__(self, validation_data, nfold=3, initial=False):
        r"""A callback which computes Volk of the validation data
        after every epoch end.

        .. math::

            \text{Vol}_K = \int_X \omega^3

        Args:
            validation_data (tuple(X_val, y_val)): validation data
            nfold (int, optional): degree of CY. Defaults to 3.
            initial (bool, optional): If True does one iteration before training.
                Defaults to False.
        """
        super(VolkCallback, self).__init__()
        self.X_val, self.y_val = validation_data
        self.X_val = tf.cast(self.X_val, tf.float32)
        self.y_val = tf.cast(self.y_val, tf.float32)
        # last two columns of y_val hold the integration weights and omega
        self.weights = tf.cast(self.y_val[:, -2], dtype=tf.float32)
        self.omega = tf.cast(self.y_val[:, -1], dtype=tf.float32)
        self.nfold = tf.cast(nfold, dtype=tf.float32)
        # NOTE: Check that convention is consistent with rest of code.
        self.factor = float(1.)
        self.initial = initial

    def on_epoch_end(self, epoch, logs=None):
        r"""Tracks Volk during the training process.

        Args:
            epoch (int): epoch
            logs (dict, optional): history.history. Defaults to None.
        """
        prediction = self.model(self.X_val)
        volk = self.compute_volk(prediction, self.weights, self.omega, self.factor)
        cb_res = volk.numpy().tolist()
        logs['volk_val'] = cb_res
        # scientific notation for small values, fixed point otherwise
        if cb_res <= 1e-3:
            print(' - Volk val: {:.4e}'.format(cb_res))
        else:
            print(' - Volk val: {:.4f}'.format(cb_res))

    def on_train_begin(self, logs=None):
        r"""Compute Volk loss before training as baseline.

        Args:
            logs (dict, optional): History. Defaults to None.
        """
        if self.initial:
            self.on_epoch_end(-1, logs=logs)

    @tf.function
    def compute_volk(self, pred, weights, omega, factor):
        r"""Vol k integrated over all points.

        .. math::

            \text{Vol}_K = \int_X \omega^3
                = \frac{1}{N} \sum_p \frac{\det(g)}{\Omega \wedge \bar\Omega} w

        Note:
            This is different than the Volk-loss.

        Args:
            pred (tf.tensor([n_p, nfold, nfold], tf.complex)):
                Metric prediction.
            weights (tf.tensor([n_p], tf.float)): Integration weights.
            omega (tf.tensor([n_p], tf.float)):
                :math:`\Omega \wedge \bar\Omega`.
            factor (tf.float): Additional prefactors due to conventions.

        Returns:
            tf.tensor([n_p], tf.float): Vol k.
        """
        # Monte-Carlo estimate: mean of det(g)/omega weighted by the point weights
        det = tf.math.real(tf.linalg.det(pred)) * factor
        volk_pred = tf.math.reduce_mean(det * weights / omega, axis=-1)
        return volk_pred
| 14,605 | 38.158177 | 94 | py |
cymetric | cymetric-main/cymetric/models/fubinistudy.py | """
Pullbacked fubini study metric implemented as a tfk.model.
"""
import tensorflow as tf
import itertools as it
from cymetric.pointgen.nphelper import generate_monomials, get_levicivita_tensor
import numpy as np
tfk = tf.keras
class FSModel(tfk.Model):
    r"""FSModel implements all underlying tensorflow routines for pullbacks
    and computing various loss contributions.
    It is *not* intended for actual training and does not have an explicit
    training step included. It should be used to write your own custom models
    for training of CICYs. Toric hypersurfaces require some extra routines,
    which are implemented here: `cymetric.models.tfmodels.ToricModel`
    """
    def __init__(self, BASIS, norm=None):
        r"""A tensorflow implementation of the pulled back Fubini-Study metric.
        Args:
            BASIS (dict): a dictionary containing all monomials and other
                relevant information from e.g.
                `cymetric.pointgen.pointgen.PointGenerator`
            norm ([5//NLOSS], optional): degree of norm for various losses.
                Defaults to 1 for all but Kaehler norm (2).
        """
        super(FSModel, self).__init__()
        self.BASIS = BASIS
        self.ncoords = len(self.BASIS['DQDZB0'])
        self.nProjective = len(self.BASIS['AMBIENT'])
        self.nfold = int(tf.math.real(self.BASIS['NFOLD']))
        if norm is None:
            self.n = [tf.cast(1., dtype=tf.float32) for _ in range(5)]
            # Default: we want to punish violation of kählerity stronger
            self.n[1] = tf.cast(2., dtype=tf.float32)
        else:
            self.n = [tf.cast(n, dtype=tf.float32) for n in norm]
        # projective vars
        self.degrees = tf.cast(tf.ones_like(self.BASIS['AMBIENT']) + self.BASIS['AMBIENT'], dtype=tf.int32)
        self.pi = tf.constant(tf.cast(np.pi, dtype=tf.complex64))
        self.nhyper = int(tf.cast(BASIS['NHYPER'], dtype=tf.int64))
        self._generate_helpers()
    def _generate_helpers(self):
        r"""Bunch of helper functions to run during initialization"""
        self.lc = tf.convert_to_tensor(get_levicivita_tensor(self.nfold), dtype=tf.complex64)
        self.proj_matrix = self._generate_proj_matrix()
        self.nTransitions = self._patch_transitions()
        if self.nhyper == 1:
            self.fixed_patches = self._generate_all_patches()
        self._proj_indices = self._generate_proj_indices()
        self.slopes = self._target_slopes()
    def _generate_proj_matrix(self):
        r"""TensorFlow does not allow for nice slicing. Here we create
        `proj_matrix` which stores information about the ambient spaces, so that
        we can slice via matrix products. See usage in: `self.fubini_study_pb`.
        """
        proj_matrix = {}
        for i in range(self.nProjective):
            matrix = np.zeros((self.degrees[i], self.ncoords),
                              dtype=np.complex64)
            s = np.sum(self.degrees[:i])
            e = np.sum(self.degrees[:i+1])
            matrix[:, s:e] = np.eye(self.degrees[i], dtype=np.complex64)
            proj_matrix[str(i)] = tf.cast(matrix, dtype=tf.complex64)
        return proj_matrix
    def _generate_proj_indices(self):
        r"""Makes a tensor with corresponding projective index for each variable
        from the ambient space.
        """
        flat_list = []
        for i, p in enumerate(self.degrees):
            for _ in range(p):
                flat_list += [i]
        return tf.cast(flat_list, dtype=tf.int64)
    def _generate_all_patches(self):
        r"""We generate all possible patches for CICYs. Note for CICYs with
        more than one hypersurface patches are generated on spot.
        """
        fixed_patches = []
        for i in range(self.ncoords):
            all_patches = np.array(
                list(it.product(*[[j for j in range(sum(self.degrees[:k]), sum(self.degrees[:k+1])) if j != i] for k in range(len(self.degrees))], repeat=1)))
            if len(all_patches) == self.nTransitions:
                fixed_patches += [all_patches]
            else:
                # need to pad if there are less than nTransitions.
                all_patches = np.tile(all_patches, (int(self.nTransitions/len(all_patches)) + 1, 1))
                fixed_patches += [all_patches[0:self.nTransitions]]
        fixed_patches = np.array(fixed_patches)
        return tf.cast(fixed_patches, dtype=tf.int64)
    def _patch_transitions(self):
        r"""Computes the maximum number of patch transitions with same fixed
        variables. This is often not the same number for all patches. In case
        there are less transitions we padd with same to same patches."""
        nTransitions = 0
        for t in generate_monomials(self.nProjective, self.nhyper):
            tmp_deg = [d-t[j] for j, d in enumerate(self.degrees)]
            n = tf.math.reduce_prod(tmp_deg)
            if n > nTransitions:
                nTransitions = n
        # if tf.int None vs unknown shape issue in for loop
        # over counting by one which is same to same transition
        return int(nTransitions)
    def _target_slopes(self):
        r"""Computes the slopes of the line bundles O_X(e_i) (one unit flux per
        Kahler modulus) at the Kahler point given by BASIS['KMODULI'].
        Returns:
            tf.tensor([len(KMODULI)], tf.complex64): one slope per modulus.
        Raises:
            NotImplementedError: for nfold > 5.
        """
        ks = tf.eye(len(self.BASIS['KMODULI']), dtype=tf.complex64)
        if self.nfold == 1:
            slope = tf.einsum('a, xa->x', self.BASIS['INTNUMS'], ks)
        elif self.nfold == 2:
            slope = tf.einsum('ab, a, xb->x', self.BASIS['INTNUMS'], self.BASIS['KMODULI'], ks)
        elif self.nfold == 3:
            slope = tf.einsum('abc, a, b, xc->x', self.BASIS['INTNUMS'], self.BASIS['KMODULI'], self.BASIS['KMODULI'], ks)
        elif self.nfold == 4:
            slope = tf.einsum('abcd, a, b, c, xd->x', self.BASIS['INTNUMS'], self.BASIS['KMODULI'], self.BASIS['KMODULI'], self.BASIS['KMODULI'], ks)
        elif self.nfold == 5:
            # FIX: the intersection-number tensor of a 5-fold carries five
            # indices; the old string 'abcd, ...' dropped one index and used
            # an undeclared label 'e', so tf.einsum would reject it.
            slope = tf.einsum('abcde, a, b, c, d, xe->x', self.BASIS['INTNUMS'], self.BASIS['KMODULI'], self.BASIS['KMODULI'], self.BASIS['KMODULI'], self.BASIS['KMODULI'], ks)
        else:
            # FIX: the old branch referenced the nonexistent `self.logger` and
            # an undefined `input_tensor`; fail loudly and clearly instead.
            raise NotImplementedError(
                'Only implemented for nfold <= 5. Run the tensor contraction yourself :).')
        return slope
    @tf.function
    def _calculate_slope(self, args):
        r"""Computes the slopes \mu(F_i) = \int J \wedge J \wegde F_i at the point in Kahler moduli space t_a = 1 for all a
        and for F_i = O_X(0, 0,... , 1, 0, ..., 0), i.e. the flux integers are k_i^a = \delta_{i,a}"""
        pred, f_a = args[0], args[1]
        if self.nfold == 1:
            slope = tf.einsum('xab->x',
                              f_a)
        elif self.nfold == 2:
            slope = tf.einsum('xab,xcd,ac,bd->x',
                              pred, f_a, self.lc, self.lc)
        elif self.nfold == 3:
            slope = tf.einsum('xab,xcd,xef,ace,bdf->x',
                              pred, pred, f_a, self.lc, self.lc)
        elif self.nfold == 4:
            slope = tf.einsum('xab,xcd,xef,xgh,aceg,bdfh->x',
                              pred, pred, pred, f_a, self.lc, self.lc)
        elif self.nfold == 5:
            slope = tf.einsum('xab,xcd,xef,xgh,xij,acegi,bdfhj->x',
                              pred, pred, pred, pred, f_a, self.lc, self.lc)
        else:
            # FIX: the old branch referenced the nonexistent `self.logger` and
            # an undefined `input_tensor`. The nfold check happens at trace
            # time, so raising here is safe inside @tf.function.
            raise NotImplementedError(
                'Only implemented for nfold <= 5. Run the tensor contraction yourself :).')
        slope = tf.cast(1./tf.exp(tf.math.lgamma(tf.cast(self.BASIS['NFOLD'], dtype=tf.float32) + 1)), dtype=tf.complex64) * slope
        return slope
    def call(self, input_tensor, training=True, j_elim=None):
        r"""Call method. Computes the pullbacked
        Fubini-Study metric at each point in input_tensor.
        Args:
            input_tensor (tf.tensor([bSize, 2*ncoords], tf.float)): Points.
            training (bool, optional): Switch between training and eval mode. Not used at the moment
            j_elim (tf.array([bSize], tf.int64)): index to be eliminated.
                Coordinates(s) to be eliminated in the pullbacks.
                If None will take max(dQ/dz). Defaults to None.
        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex):
                Pullbacked FS-metric at each point.
        """
        return self.fubini_study_pb(input_tensor, j_elim=j_elim)
    @tf.function
    def compute_kaehler_loss(self, x):
        r"""Computes Kähler loss.
        .. math::
            \cal{L}_{\text{dJ}} = \sum_{ijk} ||Re(c_{ijk})||_n +
                ||Im(c_{ijk})||_n \\
            \text{with: } c_{ijk} = g_{i\bar{j},k} - g_{k\bar{j},i}
        Args:
            x (tf.tensor([bSize, 2*ncoords], tf.float)): Points.
        Returns:
            tf.tensor([bSize, 1], tf.float): \sum_ijk abs(cijk)**n
        """
        with tf.GradientTape(persistent=True) as t1:
            t1.watch(x)
            # set training to False for batch_jacobian to work
            y_pred = self(x, training=False)
            pb = self.pullbacks(x)
            gij_re, gij_im = tf.math.real(y_pred), tf.math.imag(y_pred)
        gijk_re = tf.cast(t1.batch_jacobian(gij_re, x), dtype=tf.complex64)
        gijk_im = tf.cast(t1.batch_jacobian(gij_im, x), dtype=tf.complex64)
        # Assemble the holomorphic derivative from real/imaginary parts of
        # the Jacobian wrt the 2*ncoords real coordinates.
        cijk = 0.5*(gijk_re[:, :, :, :self.ncoords] +
                    gijk_im[:, :, :, self.ncoords:] +
                    1.j*gijk_im[:, :, :, :self.ncoords] -
                    1.j*gijk_re[:, :, :, self.ncoords:])
        cijk_pb = tf.einsum('xija,xka->xijk', cijk, pb)
        # Antisymmetrize in the first/last pullback index: c_ijk - c_kji.
        cijk_pb = cijk_pb - tf.transpose(cijk_pb, [0, 3, 2, 1])
        cijk_loss = tf.math.reduce_sum(tf.abs(cijk_pb)**self.n[1], [1, 2, 3])
        return cijk_loss
    @tf.function
    def fubini_study_pb(self, points, pb=None, j_elim=None, ts=None):
        r"""Computes the pullbacked Fubini-Study metric.
        NOTE:
            The pb argument overwrites j_elim.
        .. math::
            g_{ij} = \frac{1}{\pi} J_i^a \bar{J}_j^b \partial_a
                \bar{\partial}_b \ln |\vec{z}|^2
        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            pb (tf.tensor([bSize, nfold, ncoords], tf.float32)):
                Pullback at each point. Overwrite j_elim. Defaults to None.
            j_elim (tf.tensor([bSize], tf.int64)): index to be eliminated.
                Coordinates(s) to be eliminated in the pullbacks.
                If None will take max(dQ/dz). Defaults to None.
            ts (tf.tensor([len(kmoduli)], tf.complex64)):
                Kahler parameters. Defaults to the ones specified at time of point generation
        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex64):
                FS-metric at each point.
        """
        if ts is None:
            ts = self.BASIS['KMODULI']
        # TODO: Naming conventions here and in pointgen are different.
        if self.nProjective > 1:
            # we go through each ambient space factor and create fs.
            cpoints = tf.complex(
                points[:, :self.degrees[0]],
                points[:, self.ncoords:self.ncoords+self.degrees[0]])
            fs = self._fubini_study_n_metrics(cpoints, n=self.degrees[0], t=ts[0])
            fs = tf.einsum('xij,ia,bj->xab', fs, self.proj_matrix['0'], tf.transpose(self.proj_matrix['0']))
            for i in range(1, self.nProjective):
                s = tf.reduce_sum(self.degrees[:i])
                e = s + self.degrees[i]
                cpoints = tf.complex(points[:, s:e],
                                     points[:, self.ncoords+s:self.ncoords+e])
                fs_tmp = self._fubini_study_n_metrics(
                    cpoints, n=self.degrees[i], t=ts[i])
                fs_tmp = tf.einsum('xij,ia,bj->xab',
                                   fs_tmp, self.proj_matrix[str(i)],
                                   tf.transpose(self.proj_matrix[str(i)]))
                fs += fs_tmp
        else:
            cpoints = tf.complex(
                points[:, :self.ncoords],
                points[:, self.ncoords:2*self.ncoords])
            fs = self._fubini_study_n_metrics(cpoints,
                                              t=ts[0])
        if pb is None:
            pb = self.pullbacks(points, j_elim=j_elim)
        fs_pb = tf.einsum('xai,xij,xbj->xab', pb, fs, tf.math.conj(pb))
        return fs_pb
    @tf.function
    def _find_max_dQ_coords(self, points):
        r"""Finds in each hypersurface the coordinates for which |dQ/dzj|
        is largest.
        NOTE:
            If a coordinate is the largest for more than one hypersurface, it
            will only be selected for the first and subsequently the second
            largest will be taken, etc..
        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
        Returns:
            tf.tensor([bSize, nhyper], tf.int64): max(dQ/dz) index per hyper.
        """
        # creates coordinate mask with patch coordinates
        cpoints = tf.complex(points[:, :self.ncoords], points[:, self.ncoords:])
        available_mask = tf.cast(self._get_inv_one_mask(points), dtype=tf.complex64)
        indices = []
        for i in range(self.nhyper):
            dQdz = self._compute_dQdz(cpoints, i)
            if i == 0:
                indices = tf.argmax(tf.math.abs(dQdz*available_mask), axis=-1)
                indices = tf.reshape(indices, (-1, 1))
            else:
                max_dq = tf.argmax(tf.math.abs(dQdz*available_mask), axis=-1)
                indices = tf.concat([indices, tf.reshape(max_dq, (-1, 1))], axis=-1)
            # Remove the chosen coordinate so the next hypersurface cannot
            # pick it again.
            available_mask -= tf.one_hot(
                indices[:, i], self.ncoords, dtype=tf.complex64)
        return indices
    @tf.function
    def pullbacks(self, points, j_elim=None):
        r"""Computes the pullback tensor at each point.
        NOTE:
            Scatter-nd uses a while loop when creating the graph.
        .. math::
            J^i_a = \frac{dz_i}{dx_a}
        where x_a are the nfold good coordinates after eliminating j_elim.
        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            j_elim (tf.tensor([bSize, nHyper], tf.int64), optional):
                Coordinates(s) to be eliminated in the pullbacks.
                If None will take max(dQ/dz). Defaults to None.
        Returns:
            tf.tensor([bSize, nfold, ncoords], tf.complex64): Pullback at each
                point.
        """
        inv_one_mask = self._get_inv_one_mask(points)
        cpoints = tf.complex(points[:, :self.ncoords],
                             points[:, self.ncoords:])
        if j_elim is None:
            dQdz_indices = self._find_max_dQ_coords(points)
        else:
            dQdz_indices = j_elim
        full_mask = tf.cast(inv_one_mask, dtype=tf.float32)
        for i in range(self.nhyper):
            dQdz_mask = -1.*tf.one_hot(dQdz_indices[:, i], self.ncoords)
            full_mask = tf.math.add(full_mask, dQdz_mask)
        n_p = tf.cast(tf.reduce_sum(tf.ones_like(full_mask[:, 0])), dtype=tf.int64)
        full_mask = tf.cast(full_mask, dtype=tf.bool)
        x_z_indices = tf.where(full_mask)
        good_indices = x_z_indices[:, 1:2]
        pullbacks = tf.zeros((n_p, self.nfold, self.ncoords),
                             dtype=tf.complex64)
        y_indices = tf.repeat(
            tf.expand_dims(tf.cast(tf.range(self.nfold), dtype=tf.int64), 0),
            n_p, axis=0)
        y_indices = tf.reshape(y_indices, (-1, 1))
        diag_indices = tf.concat((x_z_indices[:, 0:1], y_indices, good_indices),
                                 axis=-1)
        # dz_i/dx_a = 1 for the good coordinates themselves.
        pullbacks = tf.tensor_scatter_nd_update(
            pullbacks, diag_indices,
            tf.ones(self.nfold*n_p, dtype=tf.complex64)
        )
        fixed_indices = tf.reshape(dQdz_indices, (-1, 1))
        for i in range(self.nhyper):
            # compute p_i\alpha eq (5.24)
            pia_polys = tf.gather_nd(self.BASIS['DQDZB'+str(i)], good_indices)
            pia_factors = tf.gather_nd(self.BASIS['DQDZF'+str(i)], good_indices)
            pia = tf.expand_dims(tf.repeat(cpoints, self.nfold, axis=0), 1)
            pia = tf.math.pow(pia, pia_polys)
            pia = tf.reduce_prod(pia, axis=-1)
            pia = tf.reduce_sum(tf.multiply(pia_factors, pia), axis=-1)
            pia = tf.reshape(pia, (-1, 1, self.nfold))
            if i == 0:
                dz_hyper = pia
            else:
                dz_hyper = tf.concat((dz_hyper, pia), axis=1)
            # compute p_ifixed
            pif_polys = tf.gather_nd(self.BASIS['DQDZB'+str(i)], fixed_indices)
            pif_factors = tf.gather_nd(self.BASIS['DQDZF'+str(i)],
                                       fixed_indices)
            pif = tf.expand_dims(tf.repeat(cpoints, self.nhyper, axis=0), 1)
            pif = tf.math.pow(pif, pif_polys)
            pif = tf.reduce_prod(pif, axis=-1)
            pif = tf.reduce_sum(tf.multiply(pif_factors, pif), axis=-1)
            pif = tf.reshape(pif, (-1, 1, self.nhyper))
            if i == 0:
                B = pif
            else:
                B = tf.concat((B, pif), axis=1)
        # Solve for dz_fixed/dx_a via the implicit function theorem.
        all_dzdz = tf.einsum('xij,xjk->xki', tf.linalg.inv(B), tf.complex(-1., 0.) * dz_hyper)
        # fill at the right position
        for i in range(self.nhyper):
            fixed_indices = tf.reshape(
                tf.repeat(dQdz_indices[:, i], self.nfold), (-1, 1))
            zjzi_indices = tf.concat(
                (x_z_indices[:, 0:1], y_indices, fixed_indices), axis=-1)
            zjzi_values = tf.reshape(all_dzdz[:,:,i], [self.nfold*n_p])
            pullbacks = tf.tensor_scatter_nd_update(
                pullbacks, zjzi_indices, zjzi_values)
        return pullbacks
    @tf.function
    def _get_inv_one_mask(self, points):
        r"""Computes mask with True when z_i != 1+0.j."""
        cpoints = tf.complex(points[:, :self.ncoords], points[:, self.ncoords:])
        return tf.math.logical_not(tf.experimental.numpy.isclose(cpoints, 1.))
        # one_mask = tf.math.logical_or(
        #     tf.math.less(points[:, 0:self.ncoords], self.epsilon_low),
        #     tf.math.greater(points[:, 0:self.ncoords], self.epsilon_high))
        # zero_mask = tf.math.greater(
        #     tf.math.abs(points[:, self.ncoords:]), 1.-self.epsilon_low)
        # inv_mask = tf.math.logical_and(
        #     tf.math.logical_not(one_mask), tf.math.logical_not(zero_mask))
        # return tf.math.logical_not(inv_mask)
    @tf.function
    def _indices_to_mask(self, indices):
        r"""Takes indices ([bSize,nTrue], int) and creates a faux coordinates
        mask. NOTE: the output is *not* of boolean type.
        """
        mask = tf.one_hot(indices, depth=self.ncoords)
        mask = tf.math.reduce_sum(mask, axis=1)
        return mask
    @tf.function
    def _generate_patches(self, args):
        r"""Generates possible patch transitions for the patches sepcified in
        args. Note it uses tf.split which won't allow for tf.vectorized_map,
        because of different signature during graph building.
        """
        # TODO: Clean up all the tf.int64; some are needed because tf mixes its default int types for range and indexing
        fixed = args[0:self.nhyper]
        original = args[self.nhyper:]
        inv_fixed_mask = ~tf.cast(tf.reduce_sum(
            tf.one_hot(fixed, self.ncoords), axis=0), tf.bool)
        fixed_proj = tf.one_hot(
            tf.gather(self._proj_indices, fixed),
            self.nProjective, dtype=tf.int64)
        fixed_proj = tf.reduce_sum(fixed_proj, axis=0)
        splits = tf.cast(self.degrees, dtype=tf.int64) - fixed_proj
        all_coords = tf.boolean_mask(
            tf.cast(tf.range(self.ncoords), dtype=tf.int64),
            inv_fixed_mask)
        products = tf.split(all_coords, splits)
        all_patches = tf.stack(tf.meshgrid(*products, indexing='ij'), axis=-1)
        all_patches = tf.reshape(all_patches, (-1, self.nProjective))
        npatches = tf.reduce_sum(tf.ones_like(all_patches[:, 0]))
        if npatches != self.nTransitions:
            # Pad with same-to-same transitions so every point contributes
            # exactly nTransitions rows.
            same = tf.tile(original, [self.nTransitions-npatches])
            same = tf.reshape(same, (-1, self.nProjective))
            same = tf.cast(same, dtype=tf.int64)
            return tf.concat([all_patches, same], axis=0)
        return all_patches
    @tf.function
    def _generate_patches_vec(self, combined):
        # NOTE: vectorized_map makes issues cause `_generate_patches`
        # has a different signature depending on its input.
        # The problem arises when using split which changes shape/dimension
        # for different input.
        # Thus after initial tracing the shapes might not fit anymore.
        # However given that it transforms to while loop
        # anyway, we can also just use map_fn without any performance gains.
        # return tf.vectorized_map(self._generate_patches, combined)
        return tf.map_fn(self._generate_patches, combined)
    @tf.function
    def _fubini_study_n_potentials(self, points, t=tf.complex(1., 0.)):
        r"""Computes the Fubini-Study Kahler potential on a single projective
        ambient space factor specified by n.
        Args:
            points (tf.tensor([bSize, ncoords], tf.complex64)): Coordinates of
                the n-th projective space.
            t (tf.complex, optional): Volume factor. Defaults to 1+0j.
        Returns:
            tf.tensor([bsize], tf.float32):
                FS-metric in the ambient space coordinates.
        """
        point_square = tf.math.reduce_sum(tf.math.abs(points)**2, axis=-1)
        return tf.cast(t/self.pi, tf.float32) * tf.cast(tf.math.log(point_square), tf.float32)
    @tf.function
    def _fubini_study_n_metrics(self, points, n=None, t=tf.complex(1., 0.)):
        r"""Computes the Fubini-Study metric on a single projective
        ambient space factor specified by n.
        Args:
            points (tf.tensor([bSize, ncoords], tf.complex64)): Coordinates of
                the n-th projective space.
            n (int, optional): Degree of P**n. Defaults to None(=self.ncoords).
            t (tf.complex, optional): Volume factor. Defaults to 1+0j.
        Returns:
            tf.tensor([bsize, ncoords, ncoords], tf.complex64):
                FS-metric in the ambient space coordinates.
        """
        if n is None:
            n = self.ncoords
        point_square = tf.math.reduce_sum(tf.math.abs(points)**2, axis=-1)
        point_square = tf.cast(point_square, dtype=tf.complex64)
        point_diag = tf.einsum('x,ij->xij', point_square,
                               tf.cast(tf.eye(n), dtype=tf.complex64))
        outer = tf.einsum('xi,xj->xij', tf.math.conj(points), points)
        outer = tf.cast(outer, dtype=tf.complex64)
        gFS = tf.einsum('xij,x->xij', (point_diag - outer), point_square**-2)
        return gFS*t/self.pi
    @tf.function
    def _find_good_coord_mask(self, points):
        r"""Creates coordinate mask with x_a = True.
        NOTE:
            Legacy code. Currently not used anywhere. Remove?
        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
        Returns:
            tf.tensor([bSize, nfold, ncoords], tf.bool): Good coord mask.
        """
        # creates coordinate mask with patch coordinates
        inv_one_mask = self._get_inv_one_mask(points)
        cpoints = tf.complex(points[:, :self.ncoords],
                             points[:, self.ncoords:])
        dQdz = self._compute_dQdz(cpoints)
        dQdz = dQdz*tf.cast(inv_one_mask, dtype=tf.complex64)
        indices = tf.argmax(tf.math.abs(dQdz), axis=-1)
        dQdz_mask = -1.*tf.one_hot(indices, self.ncoords)
        full_mask = tf.math.add(
            tf.cast(inv_one_mask, dtype=tf.float32), dQdz_mask)
        return tf.cast(full_mask, dtype=tf.bool)
    @tf.function
    def _compute_dQdz(self, points, k):
        r"""Computes dQdz at each point.
        Args:
            points (tf.tensor([bSize, ncoords], tf.complex)):
                vector of coordinates
            k (int): k-th hypersurface
        Returns:
            tf.tensor([bSize, ncoords], tf.complex): dQdz at each point.
        """
        p_exp = tf.expand_dims(tf.expand_dims(points, 1), 1)
        dQdz = tf.math.pow(p_exp, self.BASIS['DQDZB'+str(k)])
        dQdz = tf.math.reduce_prod(dQdz, axis=-1)
        dQdz = tf.math.multiply(self.BASIS['DQDZF'+str(k)], dQdz)
        dQdz = tf.reduce_sum(dQdz, axis=-1)
        return dQdz
    @tf.function
    def _get_patch_coordinates(self, points, patch_mask):
        r"""Transforms the coordinates, such that they are in the patch
        given in patch_mask.
        """
        norm = tf.boolean_mask(points, patch_mask)
        norm = tf.reshape(norm, (-1, self.nProjective))
        # TODO: think about how to avoid loop and concat.
        full_norm = 1.
        for i in range(self.nProjective):
            degrees = tf.ones(self.degrees[i], dtype=tf.complex64)
            tmp_norm = tf.einsum('i,x->xi', degrees, norm[:, i])
            if i == 0:
                full_norm = tmp_norm
            else:
                full_norm = tf.concat((full_norm, tmp_norm), axis=-1)
        return points / full_norm
    @tf.function
    def compute_transition_loss(self, points):
        r"""Computes transition loss at each point.
        .. math::
            \mathcal{L} = \frac{1}{d} \sum_{k,j}
                ||g^k - T_{jk} \cdot g^j T^\dagger_{jk}||_n
        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
        Returns:
            tf.tensor([bSize], tf.float32): Transition loss at each point.
        """
        inv_one_mask = self._get_inv_one_mask(points)
        patch_indices = tf.where(~inv_one_mask)[:, 1]
        patch_indices = tf.reshape(patch_indices, (-1, self.nProjective))
        current_patch_mask = self._indices_to_mask(patch_indices)
        cpoints = tf.complex(points[:, :self.ncoords],
                             points[:, self.ncoords:])
        fixed = self._find_max_dQ_coords(points)
        if self.nhyper == 1:
            other_patches = tf.gather(self.fixed_patches, fixed)
        else:
            combined = tf.concat((fixed, patch_indices), axis=-1)
            other_patches = self._generate_patches_vec(combined)
        other_patches = tf.reshape(other_patches, (-1, self.nProjective))
        other_patch_mask = self._indices_to_mask(other_patches)
        # NOTE: This will include same to same patch transitions
        exp_points = tf.repeat(cpoints, self.nTransitions, axis=-2)
        patch_points = self._get_patch_coordinates(
            exp_points,
            tf.cast(other_patch_mask, dtype=tf.bool))
        fixed = tf.reshape(
            tf.tile(fixed, [1, self.nTransitions]), (-1, self.nhyper))
        real_patch_points = tf.concat(
            (tf.math.real(patch_points), tf.math.imag(patch_points)),
            axis=-1)
        gj = self(real_patch_points, training=True, j_elim=fixed)
        # NOTE: We will compute this twice.
        # TODO: disentangle this to save one computation?
        gi = tf.repeat(self(points), self.nTransitions, axis=0)
        current_patch_mask = tf.repeat(
            current_patch_mask, self.nTransitions, axis=0)
        Tij = self.get_transition_matrix(
            patch_points, other_patch_mask, current_patch_mask, fixed)
        all_t_loss = tf.math.abs(self.transition_loss_matrices(gj, gi, Tij))
        all_t_loss = tf.math.reduce_sum(all_t_loss**self.n[2], axis=[1, 2])
        # This should now be nTransitions
        all_t_loss = tf.reshape(all_t_loss, (-1, self.nTransitions))
        all_t_loss = tf.math.reduce_sum(all_t_loss, axis=-1)
        return all_t_loss/(self.nTransitions*self.nfold**2)
    @tf.function
    def get_transition_matrix(self, points, i_mask, j_mask, fixed):
        r"""Computes transition matrix between patch i and j
        for each point in points where fixed is the coordinate,
        which is being eliminated.
        Example (by hand):
            Consider the bicubic with:
            .. math::
                P_1^2 [a_0 : a_1 : a_2] \text{ and } P_2^2 [b_0 : b_1 : b_2].
            Assume we eliminate :math:`b_2` and keep it fixed. Then we
            consider two patches.
            Patch 1 where :math:`a_0 = b_0 = 1` with new coordinates
            :math:`(x_1, x_2, x_3) = (a_1/a_0, a_2/a_0, b_1/b_0)`
            Patch 2 where :math:`a_1=b_1=1` with new coordinates
            :math:`(w_1, w_2, w_3) = (a_0/a_1, a_2/a_1, b_0/b_1)`
            such that we can reexpress w in terms of x:
            :math:`w_1(x)=1/x_1,\; w_2(x)=x_2/x_1,\; w_3(x)=1/x_3`
            from which follows:
            .. math::
                T_{11} &= \frac{\partial w_1}{\partial x_1} =
                    -1/x_1^2 = -a_0^2/a_1^2 \\
                T_{12} &= \frac{\partial w_2}{\partial x_1} =
                    -x_2/x_1^2 = -a_2 a_0/a_1^2 \\
                T_{13} &= \frac{\partial w_3}{\partial x_1} = 0 \\
                T_{21} &= \frac{\partial w_1}{\partial x_2} = 0 \\
                & \dots
        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float32)): Points.
            i_mask (tf.tensor([bSize, ncoords], tf.bool)): Mask of pi-indices.
            j_mask (tf.tensor([bSize, ncoords], tf.bool)): Mask of pi-indices.
            fixed (tf.tensor([bSize, 1], tf.int64)): Elimination indices.
        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex64): T_ij on the CY.
        """
        same_patch = tf.where(tf.math.reduce_all(i_mask == j_mask, axis=-1))
        diff_patch = tf.where(~tf.math.reduce_all(i_mask == j_mask, axis=-1))
        same_patch = same_patch[:, 0]
        diff_patch = diff_patch[:, 0]
        n_p = tf.math.reduce_sum(tf.ones_like(fixed[:, 0]))
        n_p_red = tf.math.reduce_sum(tf.ones_like(diff_patch))
        # reduce non trivial
        i_mask_red = tf.gather(i_mask, diff_patch)
        j_mask_red = tf.gather(j_mask, diff_patch)
        fixed_red = tf.gather(fixed, diff_patch)
        points_red = tf.gather(points, diff_patch)
        p2 = tf.reshape(tf.where(j_mask_red)[:, 1], (-1, self.nProjective))
        # g1
        g1_mask = tf.reduce_sum(tf.one_hot(fixed_red, self.ncoords), axis=-2)
        g1_mask = g1_mask + i_mask_red
        g1_mask = ~tf.cast(g1_mask, dtype=tf.bool)
        g1_i = tf.where(g1_mask)
        g1_i = tf.reshape(g1_i[:, 1], (-1, self.nfold))
        # g2
        g2_mask = tf.reduce_sum(tf.one_hot(fixed_red, self.ncoords), axis=-2)
        g2_mask = g2_mask + j_mask_red
        g2_mask = ~tf.cast(g2_mask, dtype=tf.bool)
        g2_i = tf.where(g2_mask)
        g2_i = tf.reshape(g2_i[:, 1], (-1, self.nfold))
        # find proj indices
        proj_indices = tf.reshape(
            tf.tile(self._proj_indices, [n_p_red]),
            (-1, self.ncoords))
        g1_proj = tf.boolean_mask(proj_indices, g1_mask)
        g1_proj = tf.reshape(g1_proj, (-1, self.nfold))
        ratios = tf.reshape(
            tf.boolean_mask(points_red, i_mask_red) / tf.boolean_mask(points_red, j_mask_red),
            (-1, self.nProjective))
        tij_red = tf.zeros((n_p_red, self.nfold, self.nfold),
                           dtype=tf.complex64)
        # fill the mixed ratio elements
        for j in range(self.nProjective):
            t_pos = tf.einsum('xi,xj->xij',
                              tf.cast(g1_i == p2[:, j:j+1], dtype=tf.int32),
                              tf.cast(g1_proj == j, dtype=tf.int32))
            t_indices = tf.where(tf.cast(t_pos, dtype=tf.bool))
            num_indices = tf.gather_nd(
                g2_i, tf.concat((t_indices[:, 0:1], t_indices[:, 2:3]), axis=-1))
            num_indices = tf.concat(
                (t_indices[:, 0:1], tf.reshape(num_indices, (-1, 1))), axis=-1)
            num_tpos = tf.gather_nd(points_red, num_indices)
            ratio_indices = num_indices[:, 0]  # match the x-axis indices
            ratio_tpos = tf.gather(ratios[:, j], ratio_indices)
            denom_indices = p2[:, j:j+1]
            denom_indices = tf.concat(
                (tf.reshape(tf.range(n_p_red), (-1, 1)), denom_indices), axis=-1)
            denom_tpos = tf.gather_nd(points_red, denom_indices)
            denom_tpos = tf.gather(denom_tpos, ratio_indices)
            t_values = -1.*num_tpos*ratio_tpos/denom_tpos
            # update tij
            tij_red = tf.tensor_scatter_nd_update(
                tij_red, t_indices, t_values)
        # fill the single ratio elements
        c_pos = tf.where(tf.reshape(g1_i, (-1, 1, self.nfold)) == tf.reshape(g2_i, (-1, self.nfold, 1)))
        c_indices = tf.gather_nd(g1_proj, c_pos[:, 0:2])
        c_indices = tf.concat(
            (c_pos[:, 0:1], tf.reshape(c_indices, (-1, 1))), axis=-1)
        c_values = tf.gather_nd(ratios, c_indices)
        # need to switch cols, either here or before
        c_pos = tf.concat((c_pos[:, 0:1], c_pos[:, 2:3], c_pos[:, 1:2]), axis=-1)
        tij_red = tf.tensor_scatter_nd_update(tij_red, c_pos, c_values)
        # fill tij
        tij_eye = tf.eye(self.nfold, batch_shape=[n_p-n_p_red], dtype=tf.complex64)
        tij_all = tf.zeros((n_p, self.nfold, self.nfold), dtype=tf.complex64)
        tij_all = tf.tensor_scatter_nd_update(
            tij_all, tf.reshape(diff_patch, (-1, 1)), tij_red)
        tij_all = tf.tensor_scatter_nd_update(
            tij_all, tf.reshape(same_patch, (-1, 1)), tij_eye)
        return tij_all
    @tf.function
    def transition_loss_matrices(self, gj, gi, Tij):
        r"""Computes transition loss matrix between metric
        in patches i and j with transition matrix Tij.
        Args:
            gj (tf.tensor([bSize, nfold, nfold], tf.complex64)):
                Metric in patch j.
            gi (tf.tensor([bSize, nfold, nfold], tf.complex64)):
                Metric in patch i.
            Tij (tf.tensor([bSize, nfold, nfold], tf.complex64)):
                Transition matrix from patch i to patch j.
        Returns:
            tf.tensor([bSize, nfold, nfold], tf.complex64):
                .. math::`g_j - T^{ij} g_i T^{ij,\dagger}`
        """
        return gj - tf.einsum('xij,xjk,xkl->xil', Tij, gi,
                              tf.transpose(Tij, perm=[0, 2, 1], conjugate=True))
    @tf.function
    def compute_ricci_scalar(self, points, pb=None):
        r"""Computes the Ricci scalar for each point.
        .. math::
            R = g^{ij} J_i^a \bar{J}_j^b \partial_a \bar{\partial}_b
                \log \det g
        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float)): Points.
            pb (tf.tensor([bSize, nfold, ncoords], tf.float), optional):
                Pullback tensor at each point. Defaults to None.
        Returns:
            tf.tensor([bSize], tf.float): R|_p.
        """
        x_vars = points
        # take derivatives
        with tf.GradientTape(persistent=True) as tape1:
            tape1.watch(x_vars)
            with tf.GradientTape(persistent=True) as tape2:
                tape2.watch(x_vars)
                # training = false for batch_jacobian
                prediction = self(x_vars, training=False)
                det = tf.math.real(tf.linalg.det(prediction))
                # * factorial / (2**nfold)
                log = tf.math.log(det)
            di_dg = tape2.gradient(log, x_vars)
        didj_dg = tf.cast(tape1.batch_jacobian(di_dg, x_vars),
                          dtype=tf.complex64)
        # add derivatives together to complex tensor
        ricci_ij = didj_dg[:, 0:self.ncoords, 0:self.ncoords]
        ricci_ij += 1j*didj_dg[:, 0:self.ncoords, self.ncoords:]
        ricci_ij -= 1j*didj_dg[:, self.ncoords:, 0:self.ncoords]
        ricci_ij += didj_dg[:, self.ncoords:, self.ncoords:]
        ricci_ij *= 0.25
        pred_inv = tf.linalg.inv(prediction)
        if pb is None:
            pullbacks = self.pullbacks(points)
        else:
            pullbacks = pb
        ricci_scalar = tf.einsum('xba,xai,xij,xbj->x', pred_inv, pullbacks,
                                 ricci_ij, tf.math.conj(pullbacks))
        ricci_scalar = tf.math.real(ricci_scalar)
        return ricci_scalar
    @tf.function
    def compute_ricci_loss(self, points, pb=None):
        r"""Computes the absolute value of the Ricci scalar for each point. Since negative
        Ricci scalars are bad, we take a loss of \|1-e^-ricci\|^p. This will exponentially
        punish negative Ricci scalars, and it vanishes for Ricci scalar 0
        .. seealso:: method :py:meth:`.compute_ricci_scalar`.
        Args:
            points (tf.tensor([bSize, 2*ncoords], tf.float)): Points.
            pb (tf.tensor([bSize, nfold, ncoords], tf.float), optional):
                Pullback tensor at each point. Defaults to None.
        Returns:
            tf.tensor([bSize], tf.float): \|R\|_n.
        """
        ricci_scalar = self.compute_ricci_scalar(points, pb)
        return tf.math.abs(1-tf.math.exp(-ricci_scalar))
| 37,282 | 44.027778 | 175 | py |
cymetric | cymetric-main/cymetric/models/tfhelper.py | """
A collection of various helper functions.
"""
import tensorflow as tf
def prepare_tf_basis(basis, dtype=tf.complex64):
    r"""Casts each entry in Basis to dtype.
    Args:
        basis (dict): dictionary containing geometric information
        dtype (_type_, optional): type to cast to. Defaults to tf.complex64.
    Returns:
        dict: with tensors rather than ndarrays
    """
    # Build a fresh dict so the caller's original basis stays untouched.
    return {key: tf.cast(value, dtype=dtype) for key, value in basis.items()}
def train_model(fsmodel, data, optimizer=None, epochs=50, batch_sizes=(64, 10000),
                verbose=1, custom_metrics=None, callbacks=None, sw=False):
    r"""Training loop for fixing the Kähler class. It consists of two
    optimisation steps.
    1. With a small batch size and volk loss disabled.
    2. With only MA and volk loss enabled and a large batchsize such that
        the MC integral is a reasonable approximation and we don't lose
        the MA progress from the first step.
    Args:
        fsmodel (cymetric.models.tfmodels): Any of the custom metric models.
        data (dict): numpy dictionary with keys 'X_train' and 'y_train'.
        optimizer (tfk.optimiser, optional): Any tf optimizer. Defaults to None.
            If None Adam is used with default hyperparameters.
        epochs (int, optional): # of training epochs. Every training sample will
            be iterated over twice per Epoch. Defaults to 50.
        batch_sizes (list, optional): batch sizes. Defaults to (64, 10000).
        verbose (int, optional): If > 0 prints epochs. Defaults to 1.
        custom_metrics (list, optional): List of tf metrics. Defaults to [].
        callbacks (list, optional): List of tf callbacks. Defaults to [].
        sw (bool, optional): If True, use integration weights as sample weights.
            Defaults to False.
    Returns:
        model, training_history
    """
    # FIX: avoid mutable default arguments ([] shared across calls);
    # tuple/None defaults are behaviorally identical for this body.
    if custom_metrics is None:
        custom_metrics = []
    if callbacks is None:
        callbacks = []
    training_history = {}
    hist1 = {}
    # hist1['opt'] = ['opt1' for _ in range(epochs)]
    hist2 = {}
    # hist2['opt'] = ['opt2' for _ in range(epochs)]
    # Remember the user-configured loss switches so phase 1 can restore them
    # each epoch after phase 2 disabled everything but volk.
    learn_kaehler = fsmodel.learn_kaehler
    learn_transition = fsmodel.learn_transition
    learn_ricci = fsmodel.learn_ricci
    learn_ricci_val = fsmodel.learn_ricci_val
    if sw:
        # second-to-last column of y_train holds the integration weights
        sample_weights = data['y_train'][:, -2]
    else:
        sample_weights = None
    if optimizer is None:
        optimizer = tf.keras.optimizers.Adam()
    for epoch in range(epochs):
        # --- phase 1: small batches, volk loss disabled ---
        batch_size = batch_sizes[0]
        fsmodel.learn_kaehler = learn_kaehler
        fsmodel.learn_transition = learn_transition
        fsmodel.learn_ricci = learn_ricci
        fsmodel.learn_ricci_val = learn_ricci_val
        fsmodel.learn_volk = tf.cast(False, dtype=tf.bool)
        fsmodel.compile(custom_metrics=custom_metrics, optimizer=optimizer)
        if verbose > 0:
            print("\nEpoch {:2d}/{:d}".format(epoch + 1, epochs))
        history = fsmodel.fit(
            data['X_train'], data['y_train'],
            epochs=1, batch_size=batch_size, verbose=verbose,
            callbacks=None, sample_weight=sample_weights
        )
        for k in history.history.keys():
            if k not in hist1.keys():
                hist1[k] = history.history[k]
            else:
                hist1[k] += history.history[k]
        # --- phase 2: large batches, only volk loss enabled ---
        batch_size = min(batch_sizes[1], len(data['X_train']))
        fsmodel.learn_kaehler = tf.cast(False, dtype=tf.bool)
        fsmodel.learn_transition = tf.cast(False, dtype=tf.bool)
        fsmodel.learn_ricci = tf.cast(False, dtype=tf.bool)
        fsmodel.learn_ricci_val = tf.cast(False, dtype=tf.bool)
        fsmodel.learn_volk = tf.cast(True, dtype=tf.bool)
        fsmodel.compile(custom_metrics=custom_metrics, optimizer=optimizer)
        history = fsmodel.fit(
            data['X_train'], data['y_train'],
            epochs=1, batch_size=batch_size, verbose=verbose,
            callbacks=callbacks, sample_weight=sample_weights
        )
        for k in history.history.keys():
            if k not in hist2.keys():
                hist2[k] = history.history[k]
            else:
                hist2[k] += history.history[k]
    # training_history['epochs'] = list(range(epochs)) + list(range(epochs))
    # for k in hist1.keys():
    #     training_history[k] = hist1[k] + hist2[k]
    # NOTE(review): for keys present in both phases, phase-2 values shadow
    # phase-1 values here — presumably intentional (see the commented-out
    # alternative above), but worth confirming.
    for k in set(list(hist1.keys()) + list(hist2.keys())):
        training_history[k] = hist2[k] if k in hist2 else hist1[k]
    training_history['epochs'] = list(range(epochs))
    return fsmodel, training_history
| 4,556 | 40.807339 | 82 | py |
cymetric | cymetric-main/cymetric/models/metrics.py | """
A bunch of custom metrics for the custom model.
Need to be declared separately otherwise .fit
throws an error, since they only take the loss values
and not y_pred, y_true as arguments.
"""
import tensorflow as tf
tfk = tf.keras
class SigmaLoss(tfk.metrics.Metric):
    r"""Keeps a running mean of the sigma loss over all batches seen."""

    def __init__(self, name='sigma_loss', **kwargs):
        r"""
        Args:
            name (str, optional): metric name. Defaults to 'sigma_loss'.
        """
        super().__init__(name=name, **kwargs)
        # running mean of the loss and number of batches folded in so far
        self.sigma_loss = self.add_weight(name='sl', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')

    def update_state(self, values, sample_weight=None):
        r"""Folds one batch of loss values into the running mean.

        Args:
            values (dict): dictionary of custom losses; the batch losses
                are read from values['sigma_loss'].
            sample_weight: per-sample weights. Defaults to None.
        """
        batch_loss = values['sigma_loss']
        if sample_weight is not None:
            batch_loss = tf.multiply(
                batch_loss, tf.cast(sample_weight, self.dtype))
        # incremental mean: m_{n+1} = m_n + (x_{n+1} - m_n)/(n+1)
        delta = (tf.reduce_mean(batch_loss, axis=-1) -
                 self.sigma_loss)/(self.count+1)
        self.sigma_loss.assign_add(delta)
        self.count.assign_add(1)

    def result(self):
        return self.sigma_loss

    def reset_state(self):
        self.count.assign(0)
        self.sigma_loss.assign(0)
class KaehlerLoss(tfk.metrics.Metric):
    r"""Keeps a running mean of the Kaehler loss over all batches seen."""

    def __init__(self, name='kaehler_loss', **kwargs):
        r"""
        Args:
            name (str, optional): metric name. Defaults to 'kaehler_loss'.
        """
        super().__init__(name=name, **kwargs)
        # running mean of the loss and number of batches folded in so far
        self.kaehler_loss = self.add_weight(name='kl', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')

    def update_state(self, values, sample_weight=None):
        r"""Folds one batch of loss values into the running mean.

        Args:
            values (dict): dictionary of custom losses; the batch losses
                are read from values['kaehler_loss'].
            sample_weight: per-sample weights. Defaults to None.
        """
        batch_loss = values['kaehler_loss']
        if sample_weight is not None:
            batch_loss = tf.multiply(
                batch_loss, tf.cast(sample_weight, self.dtype))
        # incremental mean: m_{n+1} = m_n + (x_{n+1} - m_n)/(n+1)
        delta = (tf.reduce_mean(batch_loss, axis=-1) -
                 self.kaehler_loss)/(self.count+1)
        self.kaehler_loss.assign_add(delta)
        self.count.assign_add(1)

    def result(self):
        return self.kaehler_loss

    def reset_state(self):
        self.count.assign(0)
        self.kaehler_loss.assign(0)
class TransitionLoss(tfk.metrics.Metric):
    r"""Keeps a running mean of the transition loss over all batches seen."""

    def __init__(self, name='transition_loss', **kwargs):
        r"""
        Args:
            name (str, optional): metric name. Defaults to 'transition_loss'.
        """
        super().__init__(name=name, **kwargs)
        # running mean of the loss and number of batches folded in so far
        self.transition_loss = self.add_weight(name='tl', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')

    def update_state(self, values, sample_weight=None):
        r"""Folds one batch of loss values into the running mean.

        Args:
            values (dict): dictionary of custom losses; the batch losses
                are read from values['transition_loss'].
            sample_weight: per-sample weights. Defaults to None.
        """
        batch_loss = values['transition_loss']
        if sample_weight is not None:
            batch_loss = tf.multiply(
                batch_loss, tf.cast(sample_weight, self.dtype))
        # incremental mean: m_{n+1} = m_n + (x_{n+1} - m_n)/(n+1)
        delta = (tf.reduce_mean(batch_loss, axis=-1) -
                 self.transition_loss)/(self.count+1)
        self.transition_loss.assign_add(delta)
        self.count.assign_add(1)

    def result(self):
        return self.transition_loss

    def reset_state(self):
        self.count.assign(0)
        self.transition_loss.assign(0)
class RicciLoss(tfk.metrics.Metric):
    r"""Keeps a running mean of the Ricci loss over all batches seen."""

    def __init__(self, name='ricci_loss', **kwargs):
        r"""
        Args:
            name (str, optional): metric name. Defaults to 'ricci_loss'.
        """
        super().__init__(name=name, **kwargs)
        # running mean of the loss and number of batches folded in so far
        self.ricci_loss = self.add_weight(name='rl', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')

    def update_state(self, values, sample_weight=None):
        r"""Folds one batch of loss values into the running mean.

        Args:
            values (dict): dictionary of custom losses; the batch losses
                are read from values['ricci_loss'].
            sample_weight: per-sample weights. Defaults to None.
        """
        batch_loss = values['ricci_loss']
        if sample_weight is not None:
            batch_loss = tf.multiply(
                batch_loss, tf.cast(sample_weight, self.dtype))
        # incremental mean: m_{n+1} = m_n + (x_{n+1} - m_n)/(n+1)
        delta = (tf.reduce_mean(batch_loss, axis=-1) -
                 self.ricci_loss)/(self.count+1)
        self.ricci_loss.assign_add(delta)
        self.count.assign_add(1)

    def result(self):
        return self.ricci_loss

    def reset_state(self):
        self.count.assign(0)
        self.ricci_loss.assign(0)
class VolkLoss(tfk.metrics.Metric):
    r"""Keeps a running mean of the volume-k loss over all batches seen."""

    def __init__(self, name='volk_loss', **kwargs):
        r"""
        Args:
            name (str, optional): metric name. Defaults to 'volk_loss'.
        """
        super().__init__(name=name, **kwargs)
        # running mean of the loss and number of batches folded in so far
        self.volk_loss = self.add_weight(name='vk', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')

    def update_state(self, values, sample_weight=None):
        r"""Folds one batch of loss values into the running mean.

        Args:
            values (dict): dictionary of custom losses; the batch losses
                are read from values['volk_loss'].
            sample_weight: per-sample weights. Defaults to None.
        """
        batch_loss = values['volk_loss']
        if sample_weight is not None:
            batch_loss = tf.multiply(
                batch_loss, tf.cast(sample_weight, self.dtype))
        # incremental mean: m_{n+1} = m_n + (x_{n+1} - m_n)/(n+1)
        delta = (tf.reduce_mean(batch_loss, axis=-1) -
                 self.volk_loss)/(self.count+1)
        self.volk_loss.assign_add(delta)
        self.count.assign_add(1)

    def result(self):
        return self.volk_loss

    def reset_state(self):
        self.count.assign(0)
        self.volk_loss.assign(0)
class TotalLoss(tfk.metrics.Metric):
    r"""Keeps a running mean of the total (combined) loss."""

    def __init__(self, name='loss', **kwargs):
        r"""
        Args:
            name (str, optional): metric name. Defaults to 'loss'.
        """
        super().__init__(name=name, **kwargs)
        # running mean of the loss and number of batches folded in so far
        self.total_loss = self.add_weight(name='tl', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')

    def update_state(self, values, sample_weight=None):
        r"""Folds one batch of total-loss values into the running mean.

        Args:
            values (dict): dictionary of custom losses; the batch losses
                are read from values['loss'].
            sample_weight: ignored here -- the total loss has already been
                rescaled by the sample weights upstream.
        """
        batch_loss = values['loss']
        # incremental mean: m_{n+1} = m_n + (x_{n+1} - m_n)/(n+1)
        delta = (tf.reduce_mean(batch_loss, axis=-1) -
                 self.total_loss)/(self.count+1)
        self.total_loss.assign_add(delta)
        self.count.assign_add(1)

    def result(self):
        return self.total_loss

    def reset_state(self):
        self.count.assign(0)
        self.total_loss.assign(0)
| 6,649 | 32.585859 | 89 | py |
cymetric | cymetric-main/cymetric/potential/donaldson.py | """
Implementation of the Donaldson algorithm using numpy.
"""
import numpy as np
from joblib import Parallel, delayed
from cymetric.pointgen.nphelper import generate_monomials
from cymetric.models.fubinistudy import FSModel
import os as os
from scipy.special import factorial
import sympy as sp
from sympy.geometry.util import idiff
import logging
import sys as sys
import tensorflow as tf
tfk = tf.keras
logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s')
logger = logging.getLogger('Donaldson')
class Donaldson:
    r"""Donaldson's algorithm for the numerically balanced metric on a CY
    hypersurface, plus sigma- and Ricci-error measures evaluated on
    Monte-Carlo point samples drawn from a point generator.
    """

    def __init__(self, pointgen, k=None, pw=[], log=3):
        r"""
        Args:
            pointgen: point generator of the CY under consideration.
            k (int, optional): degree of the line bundle. If given, the
                monomial basis is generated and hbalanced is computed
                immediately. Defaults to None.
            pw (list, optional): precomputed point weights used for the
                initial hbalanced computation. Defaults to [].
            log (int, optional): verbosity; 1 - DEBUG, 2 - INFO,
                else WARNING. Defaults to 3.
        """
        self.pointgen = pointgen
        if log == 1:
            level = logging.DEBUG
        elif log == 2:
            level = logging.INFO
        else:
            level = logging.WARNING
        logger.setLevel(level=level)
        if k is not None:
            self._init_monomials(k)
            self.hbalanced = self.compute_hbalanced(k, pw)
        else:
            self.k = [0]
            self.sections = None
            self.jacobians = None
            self.hessians = None
        # lazily generated by ricci_measure when needed
        self.dzdzdz_basis = None

    def __call__(self, points):
        r"""Returns the pulled back metric at each point."""
        return self.g_pull_backs(points)

    def _init_monomials(self, k):
        # same degree k in every projective ambient space factor
        self.k = [k for _ in range(len(self.pointgen.ambient))]
        self._generate_sections(self.k)
        self._generate_jacobians()
        self._generate_hessians()

    def set_hbalanced(self, hb, k):
        r"""Sets an externally computed hbalanced matrix of degree k."""
        self.hbalanced = hb
        self._init_monomials(k)

    def compute_hbalanced(self, k, point_weights=[], max_iterations=10,
                          n_proc=-1, n_chunks=100):
        r"""Donaldson algorithm to compute hbalanced.

        Args:
            k (int): degree k of the line bundle
            point_weights (list, optional): point weights. Defaults to [].
            max_iterations (int, optional): # of inverses being taken. Defaults to 10.
            n_proc (int, optional): # of cores being used. Defaults to -1.
            n_chunks (int, optional): # chunks in toperator. Defaults to 100.

        Returns:
            ndarray[np.complex128]: hbalanced matrix
        """
        # TODO: Make it so we can use mixed degrees for products?
        if self.jacobians is None or k != np.mean(self.k):
            self._init_monomials(k)
        if len(point_weights) == 0:
            # generate number of points which we can chunk nicely later
            n_pw = int(self._needed_points(self.nsections)/n_chunks-1)*n_chunks
            logger.info('Generating {} point weights.'.format(n_pw))
            point_weights = self.pointgen.generate_point_weights(n_pw)
            logger.info('Point weights generated.')
        else:
            # keep the largest multiple of n_chunks that fits, so the
            # reshape into n_chunks equal pieces below cannot fail
            n_pw = (len(point_weights) // n_chunks) * n_chunks
            point_weights = point_weights[:n_pw]
        if n_pw < self._needed_points(self.nsections):
            logger.warning('Too little point weights {} < {} \
                (needed for numerical stability).'.format(
                n_pw, self._needed_points(self.nsections)))
        volume_cy = (1/n_pw) * np.sum(point_weights['weight'])
        h_balanced_new = self._initial_hbalanced(self.nsections)
        logger.info(
            'Applying T-operator for {} iterations'.format(max_iterations))
        # apply t_operator
        for i in range(max_iterations):
            # TODO: vectorize this
            top = np.sum(Parallel(n_jobs=n_proc)(delayed(self.t_operator_vec)(chunks, h_balanced_new)
                                                 for chunks in point_weights.reshape((n_chunks, -1))), axis=0)
            h_balanced = (self.nsections / (n_pw * volume_cy)) * top
            h_balanced = np.linalg.inv(h_balanced)
            h_balanced = np.transpose(h_balanced)
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('{}-th iteration with relative change {}.'.format(i,
                             np.sum(np.abs(h_balanced-h_balanced_new))/np.sum(np.abs(h_balanced))))
                logger.debug('isHermitian: {}. isDense: {}.'.format(
                    np.allclose(h_balanced, np.conjugate(h_balanced.T)),
                    np.sum(np.abs(h_balanced))/(self.nsections**2)))
                # builtin complex instead of np.complex (removed in numpy>=1.24)
                logger.debug('is invertible: {}.'.format(np.all(np.isclose(np.einsum('ij,jk', h_balanced,
                             np.linalg.inv(h_balanced)), np.eye(self.nsections, dtype=complex), atol=1e-10))))
            h_balanced_new = np.copy(h_balanced)
        return h_balanced_new

    def _initial_hbalanced(self, nsections, permut=True, nattempts=10, atol=1e-12):
        r"""Initial guess for hbalanced: identity plus a small random
        hermitian perturbation (if permut), retrying until invertible."""
        # use permutation around diagonal
        if not permut:
            return np.eye(nsections, dtype=np.complex128)
        # 10 attempts? don't want to get stuck in infinite loop
        for _ in range(nattempts):
            h = np.random.randn(nsections, nsections) + \
                1j*np.random.randn(nsections, nsections)
            h = np.triu(h, 1) + np.conjugate(np.triu(h, 1)).T + \
                np.eye(nsections)*np.random.randn(nsections)
            # check if invertible
            h = np.eye(len(h), dtype=np.complex128)+0.1*h
            if np.all(np.isclose(np.einsum('ij,jk', h, np.linalg.inv(h)),
                                 np.eye(nsections, dtype=complex), atol=atol)):
                return h.astype(np.complex128)
        logger.warning('Unable to find initial \
            invertible matrix in {} attempts. \
            Returning Identity.'.format(nattempts))
        return np.eye(nsections, dtype=np.complex128)

    def _needed_points(self, nsections):
        # 2.22 in 1910.08605
        return 10 * nsections**2 + 50000

    def _generate_sections(self, k):
        # TODO: This function is a mess. numpyze it.
        self.sections = None
        ambient_polys = [0 for i in range(len(k))]
        for i in range(len(k)):
            # create all monomials of degree k in ambient space factors
            ambient_polys[i] = list(generate_monomials(
                self.pointgen.degrees[i], k[i]))
        # create all combinations for product of projective spaces
        monomial_basis = [x for x in ambient_polys[0]]
        for i in range(1, len(k)):
            lenB = len(monomial_basis)
            monomial_basis = monomial_basis*len(ambient_polys[i])
            for l in range(len(ambient_polys[i])):
                for j in range(lenB):
                    monomial_basis[l*lenB+j] = monomial_basis[l *
                                                              lenB+j]+ambient_polys[i][l]
        sections = np.array(monomial_basis, dtype=np.int32)
        # reduce sections; pick (arbitrary) first monomial in point gen
        reduced = np.unique(
            np.where(sections - self.pointgen.monomials[0] < -0.1)[0])
        self.sections = sections[reduced]
        self.nsections = len(self.sections)
        # Sanity check; M is (py)CICY object
        # if self.nsections != np.round(self.M.line_co_euler(k)):
        #    logger.warning('Reduced basis {} is not fully reduced {}.'.format(
        #        self.nsections, self.M.line_co_euler(k)))

    # vectorized
    def g_pull_backs(self, points, h=None):
        r"""Pulled back metric at every point; h defaults to hbalanced."""
        if h is None:
            h = self.hbalanced
        pbs = self.pointgen.pullbacks(points)
        g_kaehler = self.kaehler_metrics(h, points)
        return np.einsum('xai,xij,xbj->xab', pbs, g_kaehler, np.conjugate(pbs))

    # at single point
    def g_pull_back(self, h, point):
        r"""Pulled back metric at a single point."""
        jac = self.pointgen.pullback_tensor(point)
        g_kaehler = self.kaehler_metric(h, point)
        return np.einsum('ai,ij,bj', jac, g_kaehler, np.conjugate(jac))

    def kaehler_metrics(self, h, points):
        r"""Kaehler metric of the line bundle at every point (vectorized)."""
        s_ps = self.eval_sections_vec(points)
        partial_sps = self.eval_jacobians_vec(points)
        k_0 = np.real(1 / np.einsum('ij,xi,xj->x',
                                    h, s_ps, np.conjugate(s_ps)))
        k_1 = np.einsum('ab,xai,xb->xi', h, partial_sps, np.conjugate(s_ps))
        k_1_bar = np.conjugate(k_1)
        k_2 = np.einsum('ab,xai,xbj->xij', h, partial_sps,
                        np.conjugate(partial_sps))
        return (np.einsum('x,xij->xij', k_0, k_2) -
                np.einsum('x,xi,xj->xij', k_0 ** 2, k_1, k_1_bar))/(np.mean(self.k) * np.pi)

    def load_hbalanced(self, fname, k):
        r"""Loads a plain hbalanced matrix and rebuilds the basis for k."""
        self.hbalanced = np.load(fname, allow_pickle=True)
        self._init_monomials(k)

    def save_dzdzdz_basis(self, dirname):
        r"""Saves the second implicit-derivative basis to dirname."""
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        fname = os.path.join(dirname, 'dbasis.npz')
        np.savez_compressed(fname,
                            DZDZDZB=self.dzdzdz_basis,
                            DZDZDZF=self.dzdzdz_factor,
                            )

    def load_dzdzdz_basis(self, fname):
        r"""Loads the second implicit-derivative basis from fname."""
        dbasis = np.load(fname, allow_pickle=True)
        self.dzdzdz_basis = dbasis['DZDZDZB']
        self.dzdzdz_factor = dbasis['DZDZDZF']

    def save_hbalanced(self, dirname):
        r"""Saves hbalanced together with its section basis to dirname."""
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        fname = os.path.join(
            dirname, 'k'+str(int(np.mean(self.k)))+'hbalanced.npz')
        np.savez_compressed(fname,
                            hb=self.hbalanced,
                            k=self.k,
                            sections=self.sections,
                            jacobians=self.jacobians,
                            j_factors=self.j_factors
                            )

    def load_hbalanced_dict(self, fname):
        r"""Loads hbalanced plus its section basis from a save_hbalanced file."""
        df = np.load(fname, allow_pickle=True)
        self.hbalanced = df['hb']
        self.k = df['k']
        self.sections = df['sections']
        self.jacobians = df['jacobians']
        self.j_factors = df['j_factors']

    def eval_sections_vec(self, points):
        r"""All sections evaluated at all points."""
        return np.multiply.reduce(np.power(np.expand_dims(points, 1),
                                           self.sections), axis=-1)

    def eval_sections(self, point):
        r"""All sections evaluated at a single point."""
        return np.multiply.reduce(np.power(point, self.sections), axis=-1)

    def t_operator_vec(self, point_weights, hn):
        r"""T-operator contribution of a chunk of point weights."""
        s_ps = self.eval_sections_vec(point_weights['point'])
        inner_products = np.real(
            np.einsum('ij, xi, xj -> x', hn, s_ps, np.conjugate(s_ps)))
        all_products = np.einsum('xij,x -> ij', np.einsum('xi,xj->xij', s_ps, np.conjugate(s_ps)),
                                 point_weights['weight'] / inner_products)
        #t_op = np.add.reduce(all_products)
        return all_products

    def t_operator_single(self, pw, hn):
        r"""T-operator contribution of a single point weight."""
        s_p = self.eval_sections(pw['point'])
        inner_product = np.real(
            np.einsum('ij, i, j', hn, s_p, np.conjugate(s_p)))
        return np.einsum('i,j', s_p, np.conjugate(s_p))*pw['weight'] / inner_product

    def eval_jacobians(self, point):
        r"""Derivatives of all sections at a single point."""
        return np.multiply.reduce(np.power(point,
                                           self.jacobians), axis=-1) * self.j_factors[0]

    def eval_jacobians_vec(self, points):
        r"""Derivatives of all sections at all points."""
        return np.multiply.reduce(np.power(np.expand_dims(np.expand_dims(points, 1), 1),
                                           self.jacobians), axis=-1) * self.j_factors

    def eval_hessians(self, point):
        r"""Second derivatives of all sections at a single point."""
        return np.multiply.reduce(np.power(point,
                                           self.hessians), axis=-1) * self.h_factors[0]

    def eval_hessians_vec(self, points):
        r"""Second derivatives of all sections at all points."""
        # points must broadcast against the (nsections, nc, nc, nc) hessian
        # exponents, so three axes are inserted (was only one -- fixed).
        return np.multiply.reduce(np.power(
            np.expand_dims(np.expand_dims(np.expand_dims(points, 1), 1), 1),
            self.hessians), axis=-1) * self.h_factors

    def _generate_jacobians(self):
        # check which ones are good
        # builtin int instead of np.int (removed in numpy>=1.24)
        self.jacobians = np.expand_dims(self.sections, 1) - \
            np.eye(self.pointgen.n_coords, dtype=int)
        self.j_factors = np.expand_dims(self.jacobians.diagonal(0, 1, 2)+1, 0)
        mask = np.any(self.jacobians < 0, axis=-1)
        self.jacobians[mask] = np.zeros(
            (np.sum(mask), self.pointgen.n_coords), dtype=int)
        self.j_factors[0, mask] = np.zeros(np.sum(mask), dtype=int)
        # TODO: remove zeros for faster performance?
        # but then we can no longer vectorize with numpy

    def _generate_hessians(self):
        # builtin int instead of np.int (removed in numpy>=1.24)
        self.hessians = np.expand_dims(self.jacobians, 2) - \
            np.eye(self.pointgen.n_coords, dtype=int)
        self.h_factors = (self.hessians.diagonal(0, 2, 3)+1) * \
            np.expand_dims(self.j_factors[0], axis=-1)
        self.h_factors = np.expand_dims(self.h_factors, 0)
        mask = np.any(self.hessians < 0, axis=-1)
        self.hessians[mask] = np.zeros(
            (np.sum(mask), self.pointgen.n_coords), dtype=int)
        self.h_factors[0, mask] = np.zeros(np.sum(mask), dtype=int)
        # TODO: remove zeros for faster performance?
        # but then we can no longer vectorize with numpy

    def sigma_measure(self, h=None, k=None, point_weights=[]):
        r"""Monte-Carlo sigma measure of the metric given by h (or hbalanced)."""
        if k is not None:
            self._init_monomials(k)
        if h is None:
            h = self.hbalanced
        # confirm that k and hbalanced match
        assert len(self.sections) == len(
            h), "dimensions of k and h don't match"
        if len(point_weights) == 0:
            n_t = 10000
            point_weights = self.pointgen.generate_point_weights(
                n_t, omega=True)
        else:
            n_t = len(point_weights)
            if n_t < 10000:
                logger.warning('It is recommended to use 10000 points.')
        logger.info('Computing sigma measure for {} points.'.format(n_t))
        volume_cy = np.mean(point_weights['weight'])
        omega_wedge_omega = np.real(
            point_weights['omega'] * np.conj(point_weights['omega']))
        det = np.linalg.det(self(point_weights['point']))
        det = np.real(det)*factorial(self.pointgen.nfold) / \
            (2**self.pointgen.nfold)
        vol_k = np.mean(det*point_weights['weight']/omega_wedge_omega)
        ratio = volume_cy/np.real(vol_k)
        logger.info(
            'CY-volume: {}, K-vol: {}, ratio: {}.'.format(volume_cy, vol_k, ratio))
        sigma_integrand = np.abs(
            np.ones(n_t) - ratio * det/omega_wedge_omega) * point_weights['weight']
        sigma = np.mean(sigma_integrand) / volume_cy
        logger.info('Sigma measure: {}.'.format(sigma))
        return sigma

    def ricci_measure(self, h=None, k=None, point_weights=[]):
        r"""Monte-Carlo Ricci measure of the metric given by h (or hbalanced)."""
        # maybe make a prepare function? to not repeat this everytime
        # have to generate sections and such
        if k is not None:
            self._init_monomials(k)
        if h is None:
            h = self.hbalanced
        if self.dzdzdz_basis is None:
            self._generate_dzdzdz_basis()
        assert len(self.sections) == len(
            h), "dimensions of k and h don't match"
        if len(point_weights) == 0:
            n_t = 10000
            point_weights = self.pointgen.generate_point_weights(n_t)
        else:
            n_t = len(point_weights)
            if n_t < 10000:
                logger.warning('It is recommended to use 10000 points.')
        logger.info('Computing Ricci measure for {} points.'.format(n_t))
        volume_cy = np.mean(point_weights['weight'])
        omega = np.array([self.pointgen.Omega(p)
                          for p in point_weights['point']])
        omega_wedge_omega = np.real(omega * np.conj(omega))
        det = np.linalg.det(self(point_weights['point']))
        det = np.real(det)*factorial(self.pointgen.nfold) / \
            (2**self.pointgen.nfold)
        vol_k = np.mean(det*point_weights['weight']/omega_wedge_omega)
        ratio = volume_cy/np.real(vol_k)
        logger.info(
            'CY-volume: {}, K-vol: {}, ratio: {}.'.format(volume_cy, vol_k, ratio))
        ricci = np.array(Parallel(n_jobs=-1, backend='multiprocessing',
                                  batch_size=500)(delayed(self.ricci_trace)(h, p) for p in point_weights['point']))
        ricci_measure = (vol_k**(1/self.pointgen.nfold)) * np.mean(np.abs(ricci) *
                                                                   point_weights['weight'] * det/omega_wedge_omega)/volume_cy
        logger.info('Ricci measure: {}. Mean abs(R): {}.'.format(ricci_measure,
                                                                 np.mean(np.abs(ricci))))
        return ricci_measure

    # TODO: Rewrite in terms of proper basis.
    def _generate_dzdzdz_basis(self):
        # take one more implicit derivative
        self.dzdzdz_basis = [[[0 for _ in range(self.pointgen.n_coords)]
                              for _ in range(self.pointgen.n_coords)] for _ in range(self.pointgen.n_coords)]
        self.dzdzdz_factor = [[[0 for _ in range(self.pointgen.n_coords)]
                               for _ in range(self.pointgen.n_coords)] for _ in range(self.pointgen.n_coords)]
        self.iiderivatives = [[Parallel(n_jobs=-1, backend='multiprocessing')
                               (delayed(self.second_idiff)(i, j, k)
                                for i in range(self.pointgen.n_coords))
                               for j in range(self.pointgen.n_coords)]
                              for k in range(self.pointgen.n_coords)]
        for k in range(self.pointgen.n_coords):
            for j in range(self.pointgen.n_coords):
                for i in range(self.pointgen.n_coords):
                    if i != j and i != k:
                        self.dzdzdz_basis[k][j][i], self.dzdzdz_factor[k][j][i] = self.pointgen._frac_to_monomials(
                            self.iiderivatives[k][j][i])

    def second_idiff(self, i, j, k):
        r"""Second implicit derivative of coordinate i wrt j and k."""
        return self._take_2nd_implicit_deriv(self.pointgen.poly, self.pointgen.x[i],
                                             self.pointgen.x[j], self.pointgen.x[k]) if i != j and i != k else 0

    def _take_2nd_implicit_deriv(self, eq, z1, z2, z3):
        # check if z2 and z3 are the same
        if z2 == z3:
            return idiff(eq, z1, z2, n=2)
        # solve manually
        dep = {z1}
        f = {s: sp.Function(s.name)(z2, z3) for s in eq.free_symbols
             if s != z2 and s != z3 and s in dep}
        dz1dz2 = sp.Function(z1.name)(z2, z3).diff(z2)
        dz1dz3 = sp.Function(z1.name)(z2, z3).diff(z3)
        dzij = sp.Function(z1.name)(z2, z3).diff(z2).diff(z3)
        eq = eq.subs(f)
        derivs = {}
        d2 = sp.solve(eq.diff(z2), dz1dz2)[0]
        d3 = sp.solve(eq.diff(z3), dz1dz3)[0]
        derivs[dz1dz2] = d2
        derivs[dz1dz3] = d3
        zij = sp.solve(eq.diff(z2).diff(z3), dzij)[0].subs(derivs)
        return zij.subs([(v, k) for k, v in f.items()])

    def compute_dzdzdz(self, point, zj, zi, zk):
        # compute dzj/(dzk dzi)
        numerator = np.sum(self.dzdzdz_factor[zk][zi][zj][0] *
                           np.multiply.reduce(np.power(point, self.dzdzdz_basis[zk][zi][zj][0]), axis=-1))
        denominator = np.sum(self.dzdzdz_factor[zk][zi][zj][1] *
                             np.multiply.reduce(np.power(point, self.dzdzdz_basis[zk][zi][zj][1]), axis=-1))
        return numerator/denominator

    def ricci_trace(self, h, point):
        # take trace wrt to pullback
        g = self.g_pull_back(h, point)
        g_inv = np.linalg.inv(g)
        ricci_tensor = self.ricci_tensor(h, point)
        return np.einsum('ba, ab', g_inv, ricci_tensor).real

    # TODO: Vectorize this
    def ricci_tensor(self, h, point):
        # We compute B. 75
        kaehler_terms = self.kaehler_terms(h, point)
        # a bit of unnecessary computation here
        gt = self.kaehler_metric(h, point)
        di_gt = self.partiali_g(kaehler_terms)
        dij_gt = self.partialij_g(kaehler_terms)
        J = self.pointgen.pullback_tensor(point)
        diJ = self.partial_pullback_tensor(point)
        # we need B.76
        di_g = np.einsum('iaj,jk,bk -> iab', diJ, gt, np.conjugate(J)) + \
            np.einsum('aj,ijk,bk -> iab', J, di_gt, np.conjugate(J))
        # we need B.77
        dij_g = np.einsum('ial,ljk,bk -> ijab', diJ, np.conjugate(di_gt), np.conjugate(J)) + \
            np.einsum('al,ijlk,bk -> ijab', J, dij_gt, np.conjugate(J)) + \
            np.einsum('ial,lk,jbk -> ijab', diJ, gt, np.conjugate(diJ)) + \
            np.einsum('al,ikl,jbk -> ijab', J, di_gt, np.conjugate(diJ))
        ginv = np.linalg.inv(self.g_pull_back(h, point))
        # B.75, take trace wrt to g or
        # take regular trace?
        ricci_tensor = np.einsum('ba,ijab -> ij', np.eye(3), (-1 * np.einsum('ab,ibc,cd,jed -> ijae', ginv, di_g, ginv, np.conjugate(di_g)) +
                                                              np.einsum('ab,ijbc -> ijac', ginv, dij_g)))
        # B.72 - we return
        ricci_tensor = np.einsum(
            'ai,bj,ij -> ab', J, np.conjugate(J), ricci_tensor)
        return ricci_tensor

    def kaehler_metric(self, h, point):
        r"""Kaehler metric of the line bundle at a single point."""
        # save the results? so we do not need to recompute everything for each point?
        s_p = self.eval_sections(point)
        partial_sp = self.eval_jacobians(point)
        k_0 = np.real(1 / np.einsum('ij,i,j', h, s_p, np.conjugate(s_p)))
        k_1 = np.einsum('ab,ai,b', h, partial_sp, np.conjugate(s_p))
        k_1_bar = np.conjugate(k_1)
        k_2 = np.einsum('ab,ai,bj', h, partial_sp, np.conjugate(partial_sp))
        return (k_0 * k_2 - (k_0 ** 2) * np.einsum('i,j', k_1, k_1_bar))/(np.mean(self.k) * np.pi)

    def kaehler_terms(self, h, point):
        # compute B.67-B.70, B.79-B.80, B.82-B.83
        s_p = self.eval_sections(point)
        partial_sp = self.eval_jacobians(point)
        double_partial_sp = self.eval_hessians(point)
        k_00 = np.real(1 / np.einsum('ij,i,j', h, s_p, np.conjugate(s_p)))
        k_10 = np.einsum('ab,ai,b', h, partial_sp, np.conjugate(s_p))
        k_20 = np.einsum('ab,aij,b', h, double_partial_sp, np.conjugate(s_p))
        k_11 = np.einsum('ab,ai,bj', h, partial_sp, np.conjugate(partial_sp))
        k_21 = np.einsum('ab,aik,bl -> ikl', h,
                         double_partial_sp, np.conjugate(partial_sp))
        k_22 = np.einsum('ab,aik,bjl -> ijkl', h,
                         double_partial_sp, np.conjugate(double_partial_sp))
        return [k_00, k_10, k_20, k_11, k_21, k_22]

    def partial_pullback_tensor(self, point):
        # J_aj^i = dz_i/(dx_a dz_j)
        zi = self.pointgen._find_max_dQ_coord(point)
        diag_i = np.where(self.pointgen._find_good_coordinate_mask(point))[0]
        J_aji = np.zeros((self.pointgen.n_coords, self.pointgen.nfold,
                          self.pointgen.n_coords), dtype=np.complex128)
        for j in range(self.pointgen.n_coords):
            for a in range(self.pointgen.nfold):
                if j != zi:
                    J_aji[j][a][zi] = self.compute_dzdzdz(
                        point, zi, diag_i[a], j)
        return J_aji

    def partiali_g(self, k):
        # B.78
        # g_ikl
        di_g = - k[0]**2 * (np.einsum('i,kl -> ikl', k[1], k[3]) +
                            np.einsum('k, il -> ikl', k[1], k[3]) +
                            np.einsum('l, ik -> ikl', np.conjugate(k[1]), k[2])) + \
            k[0] * k[4] + 2 * k[0]**3 * \
            np.einsum('i,k,l -> ikl', k[1], k[1], np.conjugate(k[1]))
        return di_g / (np.pi * np.mean(self.k))

    def partialij_g(self, k):
        # B.81
        # g_ijkl
        dij_g = k[0] * k[5] - k[0]**2 * (np.einsum('ij,kl -> ijkl', k[3], k[3]) +
                                         np.einsum('ik,jl -> ijkl', k[2], np.conjugate(k[2])) +
                                         np.einsum('kj,il -> ijkl', k[3], k[3]) +
                                         # the next four become hermitian together
                                         np.einsum('j,ikl -> ijkl', np.conjugate(k[1]), k[4]) +
                                         np.einsum('l,ikj -> ijkl', np.conjugate(k[1]), k[4]) +
                                         np.einsum('i,ljk -> ijkl', k[1], np.conjugate(k[4])) +
                                         np.einsum('k,jli -> ijkl',
                                                   k[1], np.conjugate(k[4]))
                                         ) + \
            2 * k[0]**3 * (np.einsum('i,j,kl -> ijkl', k[1], np.conjugate(k[1]), k[3]) +
                           np.einsum('ij,k,l -> ijkl', k[3], k[1], np.conjugate(k[1])) +
                           # same for next four
                           np.einsum('j,k,il -> ijkl', np.conjugate(k[1]), k[1], k[3]) +
                           np.einsum('i,kj,l -> ijkl', k[1], k[3], np.conjugate(k[1])) +
                           np.einsum('i,k,jl -> ijkl', k[1], k[1], np.conjugate(k[2])) +
                           np.einsum(
                               'j,ik,l -> ijkl', np.conjugate(k[1]), k[2], np.conjugate(k[1]))
                           ) - \
            6 * k[0]**4 * np.einsum('i,j,k,l -> ijkl', k[1],
                                    np.conjugate(k[1]), k[1], np.conjugate(k[1]))
        return dij_g / (np.pi * np.mean(self.k))
class HbalancedModel(FSModel):
    r"""HbalancedModel inherits :py:class:`cymetric.fsmodel.FSModel`.
    Computes the metric with tensorflow gradienttapes from a hbalanced metric.
    Require hbalanced tensor from Donaldson algorithm.
    NOTE:
        - This one has not been tested extensively. Use with caution.
        - If one were to implement a training step, one could learn h_b directly.
    """
    def __init__(self, hb, k, sections, jacobians, j_factors,
                 BASIS, **kwargs):
        r"""
        Args:
            hb (ndarray): hbalanced matrix from the Donaldson algorithm.
            k: degree of the line bundle.
            sections (ndarray): monomial exponents of the sections.
            jacobians (ndarray): monomial exponents of the section derivatives.
            j_factors (ndarray): prefactors of the section derivatives.
            BASIS (dict): basis dictionary, passed on to FSModel.
        """
        super(HbalancedModel, self).__init__(
            BASIS=BASIS, **kwargs)
        # cast everything once to complex64 so all tf graphs below are typed
        self.hbalanced = tf.cast(hb, dtype=tf.complex64)
        self.sections = tf.cast(sections, dtype=tf.complex64)
        self.jacobians = tf.cast(jacobians, dtype=tf.complex64)
        self.j_factors = tf.cast(j_factors, dtype=tf.complex64)
        self.k = tf.cast(k, dtype=tf.complex64)

    def call(self, input_tensor, training=True):
        # the model prediction is the pulled back metric at each point
        return self.g_pull_backs(input_tensor)

    @tf.function
    def g_pull_backs(self, points):
        r"""Pulls the Kaehler metric back onto the CY at every point."""
        # points arrive as concatenated real and imaginary parts
        cpoints = tf.complex(points[:, :self.ncoords],
                             points[:, self.ncoords:])
        pbs = self.pullbacks(points)
        g_kaehler = self.kaehler_metrics(cpoints)
        return tf.einsum('xai,xij,xbj->xab', pbs, g_kaehler, tf.math.conj(pbs))

    @tf.function
    def kaehler_metrics(self, points):
        r"""Kaehler metric from the hbalanced hermitian metric on the
        space of degree-k sections, evaluated at every point."""
        s_ps = self.eval_sections_vec(points)
        partial_sps = self.eval_jacobians_vec(points)
        # k_0 = 1/(s^a hbalanced_{a bbar} sbar^bbar)
        k_0 = 1. / tf.einsum('ij,xi,xj->x', self.hbalanced,
                             s_ps, tf.math.conj(s_ps))
        k_1 = tf.einsum('ab,xai,xb->xi', self.hbalanced,
                        partial_sps, tf.math.conj(s_ps))
        k_1_bar = tf.math.conj(k_1)
        k_2 = tf.einsum('ab,xai,xbj->xij', self.hbalanced,
                        partial_sps, tf.math.conj(partial_sps))
        k_02 = tf.einsum('x, xij -> xij', k_0, k_2)
        k_011 = tf.einsum('x,xij->xij', tf.square(k_0),
                          tf.einsum('xi,xj->xij', k_1, k_1_bar))
        # g = (k_0 k_2 - k_0^2 k_1 k_1bar)/(k pi)
        return (k_02 - k_011) / (self.k * self.pi)

    @tf.function
    def eval_jacobians_vec(self, points):
        # derivatives of all sections evaluated at all points
        return tf.math.reduce_prod(tf.math.pow(tf.expand_dims(
            tf.expand_dims(points, 1), 1),
            self.jacobians), axis=-1) * self.j_factors

    @tf.function
    def eval_sections_vec(self, points):
        # all monomial sections evaluated at all points
        return tf.math.reduce_prod(tf.math.pow(tf.expand_dims(points, 1),
                                               self.sections), axis=-1)

    def compute_ricci_measure(self, points, y, bSize=1000, verbose=1):
        # simple optimizable hack to compute batched
        # Ricci measure
        weights = y[:, -2]
        omegas = y[:, -1]
        ricci_scalars = tf.zeros_like(points[:, 0])
        # full batches of bSize points first ...
        for i in range(len(points)//bSize):
            tmp_ricci = self.compute_ricci_scalar(
                points[i*bSize:(i+1)*bSize]
            )
            tmp_ricci = tf.math.abs(tmp_ricci)
            ricci_scalars = tf.tensor_scatter_nd_update(
                ricci_scalars,
                tf.reshape(tf.range(i*bSize, (i+1)*bSize), [-1, 1]),
                tmp_ricci
            )
        # ... then the (possibly empty) remainder
        tmp_ricci = self.compute_ricci_scalar(
            points[(len(points)//bSize)*bSize:]
        )
        tmp_ricci = tf.math.abs(tmp_ricci)
        ricci_scalars = tf.tensor_scatter_nd_update(
            ricci_scalars,
            tf.reshape(tf.range((len(points)//bSize)
                                * bSize, len(points)), [-1, 1]),
            tmp_ricci
        )
        det = tf.math.real(tf.linalg.det(self(points)))
        nfold = tf.cast(self.nfold, dtype=tf.float32)
        # nfold! via the log-gamma function
        factorial = tf.exp(tf.math.lgamma(nfold+1))
        det = det * factorial / (2**nfold)
        det_over_omega = det / omegas
        volume_cy = tf.math.reduce_mean(weights, axis=-1)
        vol_k = tf.math.reduce_mean(det_over_omega * weights, axis=-1)
        if verbose:
            tf.print('Mean abs(R) = ',
                     tf.math.reduce_mean(tf.abs(ricci_scalars)),
                     output_stream=sys.stdout)
        ricci_measure = (vol_k**(1/nfold) / volume_cy) * \
            tf.math.reduce_mean(
                det_over_omega * ricci_scalars * weights, axis=-1)
        return ricci_measure
| 29,386 | 44.702955 | 149 | py |
cymetric | cymetric-main/cymetric/wolfram/mathematicalib.py | import numpy as np
import sys
import os
import re
import logging
import pickle
logging.basicConfig(stream=sys.stdout)
mcy_logger = logging.getLogger('mathematica')
from cymetric.pointgen.pointgen_mathematica import PointGeneratorMathematica, ToricPointGeneratorMathematica
from cymetric.pointgen.nphelper import prepare_dataset, prepare_basis_pickle
import tensorflow as tf
import tensorflow.keras as tfk
tf.get_logger().setLevel('ERROR')
from cymetric.models.tfmodels import PhiFSModel, MultFSModel, FreeModel, MatrixFSModel, AddFSModel, PhiFSModelToric, MatrixFSModelToric
from cymetric.models.tfhelper import prepare_tf_basis, train_model
from cymetric.models.callbacks import SigmaCallback, KaehlerCallback, TransitionCallback, RicciCallback, VolkCallback, AlphaCallback
from cymetric.models.metrics import SigmaLoss, KaehlerLoss, TransitionLoss, RicciLoss, VolkLoss
from wolframclient.language import wl
from wolframclient.serializers import export as wlexport
from wolframclient.deserializers import WXFConsumer, binary_deserialize, WXFConsumerNumpy
Complex = np.complex64
class wlConsumer(WXFConsumer):
    r"""WXF consumer mapping Wolfram Complex and NumericArray heads onto
    native Python/numpy objects during deserialization."""

    def build_function(self, head, args, **kwargs):
        # Wolfram Complex[re, im] -> built-in complex
        if head == wl.Complex and len(args) == 2:
            return complex(*args)
        # Wolfram NumericArray -> numpy array of its data
        if head == wl.NumericArray:
            return np.array(*args[0])
        # anything else: defer to the default deserialization
        return super().build_function(head, args, **kwargs)
def point_vec_to_complex(p):
    r"""Converts real 2n-vectors [Re(z), Im(z)] into complex n-vectors.

    Args:
        p: list/array of points of even length 2n (real parts first).

    Returns:
        ndarray: complex points of length n; shape (1, 0) for empty input.
    """
    if len(p) == 0:
        return np.array([[]])
    arr = np.array(p)
    half = len(arr[0]) // 2
    return arr[:, :half] + 1.j * arr[:, half:]
def to_numpy_arrays(my_args):
    r"""Converts the values of a Mathematica argument dict into
    numpy-friendly Python objects.

    Lists/tuples become numpy arrays; Wolfram NumericArrays are
    round-tripped through WXF and deserialized with wlConsumer;
    everything else is kept as-is.
    """
    converted = {}
    numeric_array_type = type(wl.NumericArray([0]))
    for key, value in my_args.items():
        if isinstance(value, (list, tuple)):
            converted[key] = np.array(value)
        elif type(value) == numeric_array_type:
            converted[key] = binary_deserialize(
                wlexport(value, target_format='wxf'), consumer=wlConsumer())
        else:
            converted[key] = value
    # NOTE(review): eval on externally supplied input -- assumed to be a
    # trusted logging-level expression such as 'logging.DEBUG'; verify.
    converted['logger_level'] = eval(converted['logger_level'])
    return converted
def generate_points(my_args):
    r"""Generates a point-weight dataset for the CY hypersurface specified
    by the Mathematica arguments and stores it, together with the pickled
    point generator and the basis information, in args['Dir'].

    Args:
        my_args (dict): arguments coming from the Mathematica frontend.

    Raises:
        ValueError: if the monomials or their coefficients are missing.
    """
    global mcy_logger
    args = to_numpy_arrays(my_args)
    mcy_logger.setLevel(args['logger_level'])
    mcy_logger.debug("Using output directory {}".format(os.path.abspath(args['Dir'])))

    # print ambient space
    amb_str = ""
    for d in args['ambient_dims']:
        amb_str += "P^{} x ".format(d)
    amb_str = amb_str[:-2]
    mcy_logger.debug("Ambient space: {}".format(amb_str))
    mcy_logger.debug("Kahler moduli: {}".format(args['KahlerModuli']))

    # raw strings for the regex patterns (avoids invalid escape warnings)
    args_str = re.sub(r'\],\n', '], ', str(args))
    args_str = re.sub(r' +', ' ', args_str)
    mcy_logger.debug(args_str)

    # need to specify monomials and their coefficients
    if args['monomials'] == [] or args['coeffs'] == []:
        raise ValueError("You need to specify both the monomials and their coefficients")

    point_gen = PointGeneratorMathematica([np.array(x) for x in args['monomials']], [np.array(x) for x in args['coeffs']], args['KahlerModuli'], args['ambient_dims'], precision=args['Precision'], point_file_path=args['point_file_path'], selected_t=args['selected_t'])

    # save point generator to pickle
    mcy_logger.info("Saving point generator to {:}".format(os.path.join(os.path.abspath(args['Dir']), "point_gen.pickle")))
    with open(os.path.join(os.path.abspath(args['Dir']), "point_gen.pickle"), 'wb') as hnd:
        pickle.dump(point_gen, hnd)

    kappa = prepare_dataset(point_gen, args['num_pts'], args['Dir'], normalize_to_vol_j=True)
    mcy_logger.info("Computing derivatives of J_FS, Omega, ...")
    prepare_basis_pickle(point_gen, args['Dir'], kappa)
    mcy_logger.debug("done")
def generate_points_toric(my_args):
    r"""Generates a point-weight dataset for a toric CY from the toric data
    pickled in args['Dir'] and stores it, together with the pickled point
    generator and the basis information, in the same directory.

    Args:
        my_args (dict): arguments coming from the Mathematica frontend.
    """
    global mcy_logger
    args = to_numpy_arrays(my_args)
    mcy_logger.setLevel(args['logger_level'])
    mcy_logger.debug("Using output directory {}".format(os.path.abspath(args['Dir'])))

    # raw strings for the regex patterns (avoids invalid escape warnings)
    args_str = re.sub(r'\], \n', '], ', str(args))
    args_str = re.sub(r' +', ' ', args_str)
    mcy_logger.debug(args_str)

    with open(os.path.join(args['Dir'], 'toric_data.pickle'), 'rb') as f:
        toric_data = pickle.load(f)
    for key in toric_data:
        mcy_logger.debug(key)
        mcy_logger.debug(toric_data[key])

    point_gen = ToricPointGeneratorMathematica(toric_data, precision=args['Precision'], verbose=args['Verbose'], point_file_path=args['point_file_path'])

    # save point generator to pickle
    mcy_logger.info("Saving point generator to {:}".format(os.path.join(os.path.abspath(args['Dir']), "point_gen.pickle")))
    with open(os.path.join(os.path.abspath(args['Dir']), "point_gen.pickle"), 'wb') as hnd:
        pickle.dump(point_gen, hnd)

    kappa = prepare_dataset(point_gen, args['num_pts'], args['Dir'], normalize_to_vol_j=True)
    mcy_logger.info("Computing derivatives of J_FS, Omega, ...")
    prepare_basis_pickle(point_gen, args['Dir'], kappa)
    mcy_logger.debug("done")
def train_NN(my_args):
    """Build and train the metric neural network from a prepared dataset.

    Reads `dataset.npz` and `basis.pickle` from `args['Dir']`, assembles a
    feed-forward Keras network according to the requested model class,
    wraps it in the matching FS-model, trains it, and saves the trained
    Keras model to `args['Dir']/model`.

    Args:
        my_args: mapping of configuration options (converted via
            `to_numpy_arrays`); keys used below include 'Dir', 'Model',
            'HiddenLayers', 'ActivationFunctions', 'Alphas', 'LearningRate',
            'Epochs', 'BatchSizes', 'EvaluateModel', 'PrintLosses',
            'PrintMeasures', 'DisableGPU', 'toric_data_path', 'logger_level'.

    Returns:
        dict: the training history, or an empty dict if 'Model' is unknown.
    """
    global mcy_logger
    args = to_numpy_arrays(my_args)
    mcy_logger.setLevel(args['logger_level'])
    mcy_logger.debug(args)
    # get info of generated points
    data = np.load(os.path.join(args['Dir'], 'dataset.npz'))
    # NOTE(review): the inner open() handles here and below are never closed
    # explicitly; consider context managers. Also `kappa` is unused in this
    # function.
    BASIS = prepare_tf_basis(pickle.load(open(os.path.join(args['Dir'], 'basis.pickle'), 'rb')))
    kappa = BASIS['KAPPA'].numpy()
    # load toric data if exists/needed
    toric_data = None
    if args['Model'] == 'PhiFSToric':
        if os.path.exists(args['toric_data_path']):
            toric_data = pickle.load(open(args['toric_data_path'], 'rb'))
        else:
            mcy_logger.error("Model set to {}, but {} with toric data not found.".format(args['Model'], args['toric_data_path']))
    # force GPU disable if argument is set:
    if args["DisableGPU"]:
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    # check whether Keras is running on GPU or CPU:
    tf_devices = "GPU"
    if len(tf.config.list_physical_devices('GPU')) == 0:
        tf_devices = "CPU"
    mcy_logger.debug("Using {} for computation.".format(tf_devices))
    # extract architecture for NN
    nfold = tf.cast(BASIS['NFOLD'], dtype=tf.float32).numpy()
    n_in = data['X_train'].shape[1]
    n_hiddens, acts = args["HiddenLayers"], args["ActivationFunctions"]
    # The generic models predict a full (nfold x nfold) metric; the Phi models
    # predict a single scalar correction to the FS Kahler potential.
    n_out = nfold**2
    if args['Model'] == 'PhiFS' or args['Model'] == 'PhiFSToric':
        args['PrintLosses'][1] = False # Kahler loss is automatically 0
        args['PrintMeasures'][1] = False # Kahler loss is automatically 0
        n_out = 1
    # callbacks
    if args['EvaluateModel']:
        scb = SigmaCallback((data['X_val'], data['y_val']))
        kcb = KaehlerCallback((data['X_val'], data['y_val']))
        tcb = TransitionCallback((data['X_val'], data['y_val']))
        rcb = RicciCallback((data['X_val'], data['y_val']), data['val_pullbacks'])
        volkck = VolkCallback((data['X_val'], data['y_val']))
        cb_list = [scb, kcb, tcb, rcb, volkck]
        # keep only the callbacks the user asked for (positional flags)
        cb_list = [x for x, y in zip(cb_list, args['PrintMeasures']) if y]
    else:
        cb_list = []
    # metrics
    args['PrintLosses'][3] = False # Ricci loss not computed at the moment
    cmetrics = [SigmaLoss(), KaehlerLoss(), TransitionLoss(), RicciLoss(), VolkLoss()]
    cmetrics = [x for x, y in zip(cmetrics, args['PrintLosses']) if y]
    # build model
    if args['Model'] == 'PhiFS' or args['Model'] == 'PhiFSToric':
        model = tf.keras.Sequential()
        model.add(tfk.Input(shape=(n_in,)))
        for n_hidden, act in zip(n_hiddens, acts):
            model.add(tfk.layers.Dense(n_hidden, activation=act))
        # no bias on the last layer: an additive constant in phi is a gauge
        # freedom of the Kahler potential
        model.add(tfk.layers.Dense(n_out, use_bias=False))
        # # reproduces the FS Kahler potential for the bicubic
        # import math
        # def reorder_input(x):
        #     x1 = x[:,0:x.shape[-1]//4]
        #     x2 = x[:,x.shape[-1]//4:2*x.shape[-1]//4]
        #     x3 = x[:,2*x.shape[-1]//4:3*x.shape[-1]//4]
        #     x4 = x[:,3*x.shape[-1]//4:]
        #     return tf.keras.layers.concatenate([x1,x3], axis=1), tf.keras.layers.concatenate([x2,x4], axis=1)
        #
        # inp1 = tf.keras.layers.Input(shape=(12,))
        # in1, in2 = tf.keras.layers.Lambda(reorder_input)(inp1)
        # x1 = tf.keras.layers.dot([in1, in1], axes=-1)
        # x2 = tf.keras.layers.dot([in2, in2], axes=-1)
        # for n_hidden, act in zip(n_hiddens, acts):
        #     x1 = tf.keras.layers.Dense(n_hidden, activation=act)(x1)
        #     x2 = tf.keras.layers.Dense(n_hidden, activation=act)(x2)
        # x1 = tfk.layers.Dense(n_out, use_bias=False, activation='sigmoid')(x1)
        # x2 = tfk.layers.Dense(n_out, use_bias=False, activation='sigmoid')(x2)
        # x1 = tf.math.log(x1)
        # x2 = tf.math.log(x2)
        # x = tf.keras.layers.add([0.1/math.pi * x1, 0.1/math.pi * x2])
        # x = tfk.layers.Dense(n_out)(0.0000000001*x)
        #
        # model = tf.keras.models.Model(inputs=[inp1], outputs=x)
    else:
        model = tf.keras.Sequential()
        model.add(tfk.Input(shape=(n_in,)))
        for n_hidden, act in zip(n_hiddens, acts):
            model.add(tfk.layers.Dense(n_hidden, activation=act))
        model.add(tfk.layers.Dense(n_out))
    mcy_logger.debug("Using model {}".format(args['Model']))
    # wrap the raw network in the model class implementing the metric ansatz
    if args['Model'] == 'PhiFS':
        fsmodel = PhiFSModel(model, BASIS, alpha=args['Alphas'])
    elif args['Model'] == 'PhiFSToric':
        fsmodel = PhiFSModelToric(model, BASIS, alpha=args['Alphas'], toric_data=toric_data)
    elif args['Model'] == 'MultFS':
        fsmodel = MultFSModel(model, BASIS, alpha=args['Alphas'])
    elif args['Model'] == 'MatrixMultFS':
        fsmodel = MatrixFSModel(model, BASIS, alpha=args['Alphas'])
    elif args['Model'] == 'MatrixMultFSToric':
        fsmodel = MatrixFSModelToric(model, BASIS, alpha=args['Alphas'], toric_data=toric_data)
    elif args['Model'] == 'AddFS':
        fsmodel = AddFSModel(model, BASIS, alpha=args['Alphas'])
    elif args['Model'] == 'Free':
        fsmodel = FreeModel(model, BASIS, alpha=args['Alphas'])
    else:
        mcy_logger.error("{} is not a recognized option for a model".format(args['Model']))
        return {}
    optimizer = tfk.optimizers.Adam(learning_rate=args['LearningRate'])
    model.summary(print_fn=mcy_logger.debug)
    # train model
    fsmodel, training_history = train_model(fsmodel, data, optimizer=optimizer, epochs=args['Epochs'], batch_sizes=args['BatchSizes'], verbose=2, custom_metrics=cmetrics, callbacks=cb_list)
    # save trained model
    fsmodel.model.save(os.path.join(args['Dir'], 'model'))
    return training_history
def get_g(my_args):
    """Evaluate the trained metric network at the given points.

    Args:
        my_args: mapping with a 'points' entry (real coordinates fed straight
            to the network) plus configuration options ('Dir', 'Model',
            'toric_data_path', 'logger_level').

    Returns:
        numpy.ndarray: the predicted metrics at the input points, or an empty
        list if 'Model' is not recognized.
    """
    global mcy_logger
    my_args = dict(my_args)
    pts = my_args['points']
    del my_args['points']
    # parse arguments
    args = to_numpy_arrays(my_args)
    mcy_logger.setLevel(args['logger_level'])
    mcy_logger.debug(args)
    # load toric data if exists/needed
    toric_data = None
    if args['Model'] == 'PhiFSToric':
        if os.path.exists(args['toric_data_path']):
            # context manager so the file handle is not leaked
            with open(args['toric_data_path'], 'rb') as f:
                toric_data = pickle.load(f)
        else:
            mcy_logger.error("Model set to {}, but {} with toric data not found.".format(args['Model'], args['toric_data_path']))
    with open(os.path.join(args['Dir'], 'basis.pickle'), 'rb') as f:
        BASIS = prepare_tf_basis(pickle.load(f))
    pts = tf.convert_to_tensor(pts, dtype=tf.float32)
    model = tfk.models.load_model(os.path.join(args['Dir'], 'model'))
    # wrap the raw network in the model class matching how it was trained
    if args['Model'] == 'PhiFS':
        fsmodel = PhiFSModel(model, BASIS)
    elif args['Model'] == 'PhiFSToric':
        fsmodel = PhiFSModelToric(model, BASIS, toric_data=toric_data)
    elif args['Model'] == 'MultFS':
        fsmodel = MultFSModel(model, BASIS)
    elif args['Model'] == 'MatrixMultFS':
        fsmodel = MatrixFSModel(model, BASIS)
    elif args['Model'] == 'MatrixMultFSToric':
        fsmodel = MatrixFSModelToric(model, BASIS, toric_data=toric_data)
    elif args['Model'] == 'AddFS':
        fsmodel = AddFSModel(model, BASIS)
    elif args['Model'] == 'Free':
        fsmodel = FreeModel(model, BASIS)
    else:
        mcy_logger.error("{} is not a recognized option for a model".format(args['Model']))
        return []
    gs = fsmodel(pts)
    return gs.numpy()
def get_g_fs(my_args):
    """Compute the pulled-back Fubini-Study metric at the given points.

    Args:
        my_args: mapping with a 'points' entry (real point vectors, converted
            to complex coordinates) plus 'Dir', 'ts' and 'logger_level'.
            If 'ts' is empty, the Kahler moduli stored in the pickled point
            generator are used.

    Returns:
        numpy.ndarray: FS metric pulled back to the CY, shape (n_pts, nfold, nfold).
    """
    global mcy_logger
    my_args = dict(my_args)
    pts = np.array(point_vec_to_complex(my_args['points']), dtype=np.complex128)
    del my_args['points']
    # parse arguments
    args = to_numpy_arrays(my_args)
    mcy_logger.setLevel(args['logger_level'])
    mcy_logger.debug(args)
    with open(os.path.join(os.path.abspath(args['Dir']), "point_gen.pickle"), 'rb') as hnd:
        point_gen = pickle.load(hnd)
    pbs = point_gen.pullbacks(pts)
    # `args['ts']` is a numpy array after to_numpy_arrays, so test by length:
    # `args['ts'] != []` would be an ambiguous elementwise ndarray comparison.
    ts = args['ts'] if len(args['ts']) != 0 else point_gen.kmoduli
    fs = point_gen.fubini_study_metrics(pts, vol_js=ts)
    # g_ab = pb_a^i g_ij conj(pb_b^j)
    fs_pbs = np.einsum('xai,xij,xbj->xab', pbs, fs, np.conj(pbs))
    return fs_pbs
def get_kahler_potential(my_args):
    """Evaluate the learned Kahler potential at the given points.

    Only the Phi-type models carry a well-defined Kahler potential; any other
    'Model' value logs an error and returns an empty list.

    Args:
        my_args: mapping with a 'points' entry plus 'Dir', 'Model',
            'toric_data_path' and 'logger_level'.

    Returns:
        numpy.ndarray: Kahler potential values, or [] for unsupported models.
    """
    global mcy_logger
    my_args = dict(my_args)
    pts = my_args['points']
    del my_args['points']
    # parse arguments
    args = to_numpy_arrays(my_args)
    mcy_logger.setLevel(args['logger_level'])
    mcy_logger.debug(args)
    # load toric data if exists/needed
    toric_data = None
    if args['Model'] == 'PhiFSToric':
        if os.path.exists(args['toric_data_path']):
            # context manager so the file handle is not leaked
            with open(args['toric_data_path'], 'rb') as f:
                toric_data = pickle.load(f)
        else:
            mcy_logger.error("Model set to {}, but {} with toric data not found.".format(args['Model'], args['toric_data_path']))
    with open(os.path.join(args['Dir'], 'basis.pickle'), 'rb') as f:
        BASIS = prepare_tf_basis(pickle.load(f))
    pts = tf.convert_to_tensor(pts, dtype=tf.float32)
    model = tfk.models.load_model(os.path.join(args['Dir'], 'model'))
    if args['Model'] == 'PhiFS':
        fsmodel = PhiFSModel(model, BASIS)
    elif args['Model'] == 'PhiFSToric':
        fsmodel = PhiFSModelToric(model, BASIS, toric_data=toric_data)
    else:
        mcy_logger.error("Calculating the Kahler potential for model {} is not supported".format(args['Model']))
        return []
    ks = fsmodel.get_kahler_potential(pts)
    return ks.numpy()
def get_weights(my_args):
    """Return the integration weights of the given points.

    Uses the point generator pickled in `args['Dir']` and normalizes the
    weights to the volume determined by the Kahler form J.
    """
    global mcy_logger
    params = dict(my_args)
    points = point_vec_to_complex(params.pop('points'))
    # parse the remaining arguments
    args = to_numpy_arrays(params)
    mcy_logger.setLevel(args['logger_level'])
    mcy_logger.debug(args)
    pickle_path = os.path.join(os.path.abspath(args['Dir']), "point_gen.pickle")
    with open(pickle_path, 'rb') as fp:
        point_gen = pickle.load(fp)
    return point_gen.point_weight(points, normalize_to_vol_j=True)
def get_omegas(my_args):
    """Return |Omega|^2 of the holomorphic volume form at the given points.

    The result is Omega * conj(Omega) as a (real-valued) complex array, using
    the point generator pickled in `args['Dir']`.
    """
    global mcy_logger
    params = dict(my_args)
    points = point_vec_to_complex(params.pop('points'))
    # parse the remaining arguments
    args = to_numpy_arrays(params)
    mcy_logger.setLevel(args['logger_level'])
    mcy_logger.debug(args)
    pickle_path = os.path.join(os.path.abspath(args['Dir']), "point_gen.pickle")
    with open(pickle_path, 'rb') as fp:
        point_gen = pickle.load(fp)
    omega = point_gen.holomorphic_volume_form(points)
    return omega * np.conj(omega)
def get_pullbacks(my_args):
    """Return the pullback matrices from ambient to CY coordinates.

    Delegates to the point generator pickled in `args['Dir']`.
    """
    global mcy_logger
    params = dict(my_args)
    points = point_vec_to_complex(params.pop('points'))
    # parse the remaining arguments
    args = to_numpy_arrays(params)
    mcy_logger.setLevel(args['logger_level'])
    mcy_logger.debug(args)
    pickle_path = os.path.join(os.path.abspath(args['Dir']), "point_gen.pickle")
    with open(pickle_path, 'rb') as fp:
        point_gen = pickle.load(fp)
    return point_gen.pullbacks(points)
| 15,916 | 38.7925 | 267 | py |
cymetric | cymetric-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
#sys.path.insert(0, os.path.abspath(os.path.join('.', 'source', 'cymetric')))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath(os.path.join('..', '..')))
# -- Project information -----------------------------------------------------
# Project identity shown in the rendered documentation.
project = 'cymetric'
copyright = '2021, Fabian Ruehle and Robin Schneider'
author = 'Fabian Ruehle and Robin Schneider'
# The full version, including alpha/beta/rc tags
# (read from the top-level VERSION file so docs and package stay in sync)
with open(os.path.join("..", "..", "VERSION"), "r") as fh:
    VERSION = fh.read()
release = VERSION
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.napoleon', 'sphinx.ext.autodoc', 'sphinx.ext.mathjax',
    'myst_parser', 'sphinx.ext.todo', 'sphinx_rtd_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# more configuration options
add_module_names = False        # show "func" instead of "pkg.mod.func"
add_function_parentheses = True
autoclass_content = "both"      # merge class and __init__ docstrings
html_theme_options = {
    'navigation_depth': 4,
}
#https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_sidebars
html_sidebars = {
    '**': ['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'],
    'using/windows': ['windowssidebar.html', 'searchbox.html'],
}
PnP-CASSI | PnP-CASSI-main/dvp_linear_inv_cassi.py | import time
import math
import numpy as np
from skimage.restoration import (denoise_tv_chambolle, denoise_bilateral,
denoise_wavelet, estimate_sigma)
from skimage.measure import (compare_psnr, compare_ssim)
from utils import (A, At, psnr, shift, shift_back,calculate_ssim,TV_denoiser)
# import skimage.metrics.peak_signal_noise_ratio as psnr
# import skimage.metrics.structural_similarity as ssim
from hsi import HSI_SDeCNN as net
import torch
from bm3d import bm3d_deblurring, BM3DProfile, gaussian_kernel
import scipy.io as sio
def gap_denoise(y, Phi, A, At, _lambda=1, accelerate=True,
                denoiser='tv', iter_max=50, noise_estimate=True, sigma=None,
                tv_weight=0.1, tv_iter_max=5, multichannel=True, x0=None,
                X_orig=None, model=None, show_iqa=True):
    '''
    Generalized alternating projection (GAP)[2]-based denoising regularization
    for snapshot compressive imaging (SCI).

    Parameters
    ----------
    y : two-dimensional (2D) ndarray of ints, uints or floats
        Input single measurement of the snapshot compressive imager (SCI).
    Phi : three-dimensional (3D) ndarray of ints, uints or floats
        Input sensing matrix of SCI with the third dimension as the
        time-variant, spectral-variant, volume-variant, or angular-variant
        masks, where each mask has the same pixel resolution as the snapshot
        measurement.
    A : function
        Forward model of SCI, where multiple encoded frames are collapsed into
        a single measurement.
    At : function
        Transpose of the forward model.
    _lambda : float, optional
        Regularization factor balancing the data term and the prior term,
        where larger `_lambda` imposes more constraints on the prior term.
    accelerate : boolean, optional
        Enable acceleration in GAP (accumulated residual update).
    denoiser : {'tv', 'hsicnn', 'bm3d'}, optional
        Denoiser used as the regularization imposing on the prior term of the
        reconstruction.
    iter_max : int or list of int, optional
        Maximum number of iterations (one entry per noise level in `sigma`).
    noise_estimate : boolean, optional
        Enable noise estimation in the denoiser (only affects log output here).
    sigma : scalar or list
        Input noise standard deviation(s) for the denoiser, on a [0, 255]
        scale regardless of the scale of the measurement and masks.
    tv_weight : float, optional
        Weight in total variation (TV) denoising.
    tv_iter_max : int, optional
        Maximum TV denoising iterations per GAP step.
    multichannel : bool, optional
        Passed through to the TV denoiser.
    x0 : 3D ndarray, optional
        Start point (initialized value) for the iteration; defaults to At(y).
    X_orig : 3D ndarray, optional
        Ground truth, used only for PSNR/SSIM reporting.
    model : unused here; the deep denoiser is loaded from a fixed checkpoint.
    show_iqa : bool, optional
        Print PSNR/SSIM per iteration when X_orig is given.

    Returns
    -------
    x : 3D ndarray
        Reconstructed 3D scene captured by the SCI system (in shifted form).
    psnr_all : list of per-iteration PSNR values (empty if X_orig is None).

    References
    ----------
    .. [1] X. Liao, H. Li, and L. Carin, "Generalized Alternating Projection
           for Weighted-L21 Minimization with Applications to Model-Based
           Compressive Sensing," SIAM Journal on Imaging Sciences,
           vol. 7, no. 2, pp. 797-823, 2014.
    .. [2] X. Yuan, "Generalized alternating projection based total variation
           minimization for compressive sensing," in IEEE International
           Conference on Image Processing (ICIP), 2016, pp. 2539-2543.
    .. [3] Y. Liu, X. Yuan, J. Suo, D. Brady, and Q. Dai, "Rank Minimization
           for Snapshot Compressive Imaging," IEEE Transactions on Pattern
           Analysis and Machine Intelligence, doi:10.1109/TPAMI.2018.2873587,
           2018.
    '''
    # [0] initialization
    if x0 is None:
        print(At)
        x0 = At(y, Phi) # default start point (initialized value)
    # allow a scalar sigma / iter_max; pair each noise level with a budget
    if not isinstance(sigma, list):
        sigma = [sigma]
    if not isinstance(iter_max, list):
        iter_max = [iter_max] * len(sigma)
    y1 = np.zeros_like(y)
    # guard against division by zero where no mask covers a pixel
    Phi_sum = np.sum(Phi,2)
    Phi_sum[Phi_sum==0]=1
    # [1] start iteration for reconstruction
    x = x0 # initialization
    psnr_all = []
    ssim_all=[]
    k = 0
    # NOTE(review): device index 'cuda:1' and the checkpoint path are
    # hard-coded — confirm they match the deployment machine.
    device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
    model = net()
    model.load_state_dict(torch.load(r'./check_points/deep_denoiser.pth'))
    model.eval()
    # freeze the denoiser — it is used for inference only
    for q, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    for idx, nsig in enumerate(sigma): # iterate all noise levels
        for it in range(iter_max[idx]):
            #print('max1_{0}_{1}:'.format(idx,it),np.max(x))
            yb = A(x,Phi)
            if accelerate: # accelerated version of GAP
                # accumulate the measurement residual (AGAP of [2])
                y1 = y1 + (y-yb)
                x = x + _lambda*(At((y1-yb)/Phi_sum,Phi)) # GAP_acc
            else:
                x = x + _lambda*(At((y-yb)/Phi_sum,Phi)) # GAP
            # undo the CASSI dispersion shift before spatial denoising
            x = shift_back(x,step=1)
            # switch denoiser
            if denoiser.lower() == 'tv': # total variation (TV) denoising
                x = denoise_tv_chambolle(x, nsig / 255, n_iter_max=tv_iter_max, multichannel=multichannel)
                #x= TV_denoiser(x, tv_weight, n_iter_max=tv_iter_max)
            elif denoiser.lower() == 'hsicnn':
                # per-band noise levels fed to the CNN (low/mid/high bands)
                l_ch=10
                m_ch=10
                h_ch=10
                # NOTE(review): this schedule of k-ranges (apply the CNN only
                # every 4th block of iterations past k>=83) appears empirically
                # tuned; outside these ranges TV denoising is used instead.
                if (k>123 and k<=125 ) or (k>=119 and k<=121) or (k>=115 and k<=117) or (k>=111 and k<=113) or (k>=107 and k<=109) or (k>=103 and k<=105) or (k>=99 and k<=101) or (k>=95 and k<=97) or (k>=91 and k<=93) or (k>=87 and k<=89) or (k>=83 and k<=85):
                    tem = None
                    # denoise each of the 31 spectral bands with a 7-band
                    # window around it; edge bands are padded by replication
                    for i in range(31):
                        net_input = None
                        if i < 3:
                            ori_nsig = nsig
                            if i==0:
                                net_input = np.dstack((x[:, :, i], x[:, :, i], x[:, :, i], x[:, :, i:i + 4]))
                            elif i==1:
                                net_input = np.dstack((x[:, :, i-1], x[:, :, i-1], x[:, :, i-1], x[:, :, i:i + 4]))
                            elif i==2:
                                net_input = np.dstack((x[:, :, i-2], x[:, :, i-2], x[:, :, i-1], x[:, :, i:i + 4]))
                            net_input = torch.from_numpy(np.ascontiguousarray(net_input)).permute(2,0,1).float().unsqueeze(0)
                            net_input = net_input.to(device)
                            Nsigma = torch.full((1, 1, 1, 1), l_ch / 255.).type_as(net_input)
                            output = model(net_input, Nsigma)
                            output = output.data.squeeze().cpu().numpy()
                            if k<0:  # dead branch (k is never negative) — leftover experiment
                                output = denoise_tv_chambolle(x[:, :, i], nsig / 255, n_iter_max=tv_iter_max,multichannel=False)
                            nsig = ori_nsig
                            if i == 0:
                                tem = output
                            else:
                                tem = np.dstack((tem, output))
                        elif i > 27:
                            ori_nsig=nsig
                            if k>=45:
                                nsig/=1  # no-op; presumably a tuning leftover
                            if i==28:
                                net_input = np.dstack((x[:, :, i - 3:i + 1], x[:, :, i+1], x[:, :, i+2], x[:, :, i+2]))
                            elif i==29:
                                net_input = np.dstack((x[:, :, i - 3:i + 1], x[:, :, i+1], x[:, :, i+1], x[:, :, i+1]))
                            elif i==30:
                                net_input = np.dstack((x[:, :, i - 3:i + 1], x[:, :, i], x[:, :, i], x[:, :, i]))
                            net_input = torch.from_numpy(np.ascontiguousarray(net_input)).permute(2, 0,1).float().unsqueeze(0)
                            net_input = net_input.to(device)
                            Nsigma = torch.full((1, 1, 1, 1), m_ch / 255.).type_as(net_input)
                            output = model(net_input, Nsigma)
                            output = output.data.squeeze().cpu().numpy()
                            if k<0:  # dead branch (k is never negative)
                                output = denoise_tv_chambolle(x[:, :, i], 10 / 255, n_iter_max=tv_iter_max,multichannel=False)
                            tem = np.dstack((tem, output))
                            nsig=ori_nsig
                        else:
                            ori_nsig = nsig
                            net_input = x[:, :, i - 3:i + 4]
                            net_input = torch.from_numpy(np.ascontiguousarray(net_input)).permute(2, 0,1).float().unsqueeze(0)
                            net_input = net_input.to(device)
                            Nsigma = torch.full((1, 1, 1, 1), h_ch / 255.).type_as(net_input)
                            output = model(net_input, Nsigma)
                            output = output.data.squeeze().cpu().numpy()
                            tem = np.dstack((tem, output))
                            nsig = ori_nsig
                    #x = np.clip(tem,0,1)
                    x=tem
                else:
                    # outside the CNN schedule: fall back to TV denoising
                    x = denoise_tv_chambolle(x, nsig / 255, n_iter_max=tv_iter_max, multichannel=multichannel)
                    #x = TV_denoiser(x, tv_weight, n_iter_max=tv_iter_max)
            elif denoiser.lower() =='bm3d':
                sigma = nsig/255
                # build a normalized 15x15 radially-decaying kernel for the
                # BM3D deblurring prior
                v = np.zeros((15, 15))
                for x1 in range(-7, 8, 1):
                    for x2 in range(-7, 8, 1):
                        v[x1 + 7, x2 + 7] = 1 / (x1 ** 2 + x2 ** 2 + 1)
                v = v / np.sum(v)
                for i in range(28):
                    x[:,:,i]= bm3d_deblurring(np.atleast_3d(x[:,:,i]), sigma, v)
            else:
                raise ValueError('Unsupported denoiser {}!'.format(denoiser))
            # [optional] calculate image quality assessment, i.e., PSNR for
            # every five iterations
            if show_iqa and X_orig is not None:
                ssim_all.append(calculate_ssim(X_orig, x))
                psnr_all.append(psnr(X_orig, x))
                if (k+1)%1 == 0:
                    if not noise_estimate and nsig is not None:
                        if nsig < 1:
                            print(' GAP-{0} iteration {1: 3d}, sigma {2: 3g}/255, '
                                  'PSNR {3:2.2f} dB.'.format(denoiser.upper(),
                                  k+1, nsig*255, psnr_all[k]),
                                  'SSIM:{}'.format(ssim_all[k]))
                        else:
                            print(' GAP-{0} iteration {1: 3d}, sigma {2: 3g}, '
                                  'PSNR {3:2.2f} dB.'.format(denoiser.upper(),
                                  k+1, nsig, psnr_all[k]),
                                  'SSIM:{}'.format(ssim_all[k]))
                    else:
                        print(' GAP-{0} iteration {1: 3d}, '
                              'PSNR {2:2.2f} dB.'.format(denoiser.upper(),
                              k+1, psnr_all[k]),
                              'SSIM:{}'.format(ssim_all[k]))
            # re-apply the dispersion shift for the next data-fidelity step
            x = shift(x,step=1)
            if k==123:
                # NOTE(review): hard stop at iteration 124 regardless of
                # iter_max — presumably tuned for the 125-iteration setup.
                break
            k = k+1
    return x, psnr_all
def admm_denoise(y, Phi, A, At, _lambda=1, gamma=0.01,
                 denoiser='tv', iter_max=50, noise_estimate=True, sigma=None,
                 tv_weight=0.1, tv_iter_max=5, multichannel=True, x0=None,
                 X_orig=None, show_iqa=True):
    '''
    Alternating direction method of multipliers (ADMM)[1]-based denoising
    regularization for snapshot compressive imaging (SCI).

    Parameters
    ----------
    y : two-dimensional (2D) ndarray of ints, uints or floats
        Input single measurement of the snapshot compressive imager (SCI).
    Phi : three-dimensional (3D) ndarray of ints, uints or floats
        Input sensing matrix of SCI with the third dimension as the
        time-variant, spectral-variant, volume-variant, or angular-variant
        masks, where each mask has the same pixel resolution as the snapshot
        measurement.
    A : function
        Forward model of SCI, where multiple encoded frames are collapsed into
        a single measurement.
    At : function
        Transpose of the forward model.
    gamma : float, optional
        Parameter in the ADMM projection, where noisier measurements require
        greater gamma.
    denoiser : {'tv', 'hsicnn'}, optional
        Denoiser used as the regularization imposing on the prior term of the
        reconstruction.
    _lambda : float, optional
        Regularization factor balancing the data term and the prior term,
        where larger `_lambda` imposes more constraints on the prior term.
    iter_max : int or list of int, optional
        Maximum number of iterations (one entry per noise level in `sigma`).
    noise_estimate : boolean, optional
        Enable noise estimation in the denoiser (only affects log output here).
    sigma : scalar or list
        Input noise standard deviation(s) for the denoiser, on a [0, 255]
        scale regardless of the scale of the measurement and masks.
    tv_weight : float, optional
        Weight in total variation (TV) denoising.
    tv_iter_max : int, optional
        Maximum TV denoising iterations per ADMM step.
    multichannel : bool, optional
        Passed through to the TV denoiser.
    x0 : 3D ndarray, optional
        Start point (initialized value) for the iteration; defaults to At(y).
    X_orig : 3D ndarray, optional
        Ground truth, used only for PSNR/SSIM reporting.
    show_iqa : bool, optional
        Print PSNR/SSIM per iteration when X_orig is given.

    Returns
    -------
    theta : 3D ndarray
        Reconstructed 3D scene captured by the SCI system (in shifted form).
    psnr_all, ssim_all : lists of per-iteration quality metrics.

    References
    ----------
    .. [1] S. Boyd, N. Parikh, E. Chu, B. Peleato, and J. Eckstein,
           "Distributed Optimization and Statistical Learning via the
           Alternating Direction Method of Multipliers," Foundations and
           Trends in Machine Learning, vol. 3, no. 1, pp. 1-122, 2011.
    .. [2] X. Yuan, "Generalized alternating projection based total variation
           minimization for compressive sensing," in IEEE International
           Conference on Image Processing (ICIP), 2016, pp. 2539-2543.
    .. [3] Y. Liu, X. Yuan, J. Suo, D. Brady, and Q. Dai, "Rank Minimization
           for Snapshot Compressive Imaging," IEEE Transactions on Pattern
           Analysis and Machine Intelligence, doi:10.1109/TPAMI.2018.2873587,
           2018.
    '''
    # [0] initialization
    if x0 is None:
        x0 = At(y,Phi) # default start point (initialized value)
    # allow a scalar sigma / iter_max; pair each noise level with a budget
    if not isinstance(sigma, list):
        sigma = [sigma]
    if not isinstance(iter_max, list):
        iter_max = [iter_max] * len(sigma)
    # [1] start iteration for reconstruction
    x = x0 # initialization
    theta = x0
    # guard against division by zero where no mask covers a pixel
    Phi_sum = np.sum(Phi,2)
    Phi_sum[Phi_sum==0]=1
    b = np.zeros_like(x0)
    psnr_all = []
    ssim_all=[]
    k = 0
    # NOTE(review): device index 'cuda:1' and the absolute checkpoint path
    # below are machine-specific — should match gap_denoise's relative path
    # './check_points/deep_denoiser.pth'.
    device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
    model = net()
    model.load_state_dict(
        torch.load(r'/home/dgl/zhengsiming/self_train/check_points/deep_denoiser.pth'))
    model.eval()
    # freeze the denoiser — it is used for inference only
    for q, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    for idx, nsig in enumerate(sigma): # iterate all noise levels
        for it in range(iter_max[idx]):
            # Euclidean projection
            yb = A(theta+b,Phi)
            x = (theta+b) + _lambda*(At((y-yb)/(Phi_sum+gamma),Phi)) # ADMM
            # undo the CASSI dispersion shift before spatial denoising
            x1 = shift_back(x-b,step=2)
            #x1=x-b
            # switch denoiser
            if denoiser.lower() == 'tv': # total variation (TV) denoising
                #theta = denoise_tv_chambolle(x1, nsig/255, n_iter_max=tv_iter_max, multichannel=multichannel)
                theta = TV_denoiser(x1, tv_weight, n_iter_max=tv_iter_max)
            elif denoiser.lower() == 'hsicnn':
                # NOTE(review): the CNN only kicks in from iteration 90
                # onwards; before that TV denoising is used — presumably an
                # empirically-tuned warm-up.
                if k>=89:
                    tem = None
                    # denoise each of the 28 spectral bands with a 7-band
                    # window around it; edge bands are padded by replication
                    for i in range(28):
                        net_input = None
                        if i < 3:
                            if i==0:
                                net_input = np.dstack((x1[:, :, i], x1[:, :, i], x1[:, :, i], x1[:, :, i:i + 4]))
                            elif i==1:
                                net_input = np.dstack((x1[:, :, i-1], x1[:, :, i-1], x1[:, :, i-1], x1[:, :, i:i + 4]))
                            elif i==2:
                                net_input = np.dstack((x1[:, :, i-2], x1[:, :, i-2], x1[:, :, i-1], x1[:, :, i:i + 4]))
                            net_input = torch.from_numpy(np.ascontiguousarray(net_input)).permute(2, 0,1).float().unsqueeze(0)
                            net_input = net_input.to(device)
                            Nsigma = torch.full((1, 1, 1, 1), 10 / 255.).type_as(net_input)
                            output = model(net_input, Nsigma)
                            output = output.data.squeeze().float().cpu().numpy()
                            if i == 0:
                                tem = output
                            else:
                                tem = np.dstack((tem, output))
                        elif i > 24:
                            if i == 25:
                                net_input = np.dstack((x1[:, :, i - 3:i + 1], x1[:, :, i + 1], x1[:, :, i + 2], x1[:, :, i + 2]))
                            elif i == 26:
                                net_input = np.dstack((x1[:, :, i - 3:i + 1], x1[:, :, i + 1], x1[:, :, i + 1], x1[:, :, i + 1]))
                            elif i == 27:
                                net_input = np.dstack((x1[:, :, i - 3:i + 1], x1[:, :, i], x1[:, :, i], x1[:, :, i]))
                            net_input = torch.from_numpy(np.ascontiguousarray(net_input)).permute(2, 0, 1).float().unsqueeze(0)
                            net_input = net_input.to(device)
                            Nsigma = torch.full((1, 1, 1, 1),10 / 255.).type_as(net_input)
                            output = model(net_input, Nsigma)
                            output = output.data.squeeze().float().cpu().numpy()
                            tem = np.dstack((tem, output))
                        else:
                            net_input = x1[:, :, i - 3:i + 4]
                            net_input = torch.from_numpy(np.ascontiguousarray(net_input)).permute(2, 0,1).float().unsqueeze(0)
                            net_input = net_input.to(device)
                            Nsigma = torch.full((1, 1, 1, 1), 10 / 255.).type_as(net_input)
                            output = model(net_input, Nsigma)
                            output = output.data.squeeze().float().cpu().numpy()
                            tem = np.dstack((tem, output))
                    theta = tem
                else:
                    #print('theta:', np.max(theta))
                    theta = denoise_tv_chambolle(x1, tv_weight, n_iter_max=tv_iter_max, multichannel=multichannel)
            else:
                raise ValueError('Unsupported denoiser {}!'.format(denoiser))
            # [optional] calculate image quality assessment, i.e., PSNR for
            # every five iterations
            if show_iqa and X_orig is not None:
                psnr_all.append(psnr(X_orig, theta))
                ssim_all.append(calculate_ssim(X_orig,theta))
                if (k+1)%1 == 0:
                    if not noise_estimate and nsig is not None:
                        if nsig < 1:
                            print(' ADMM-{0} iteration {1: 3d}, sigma {2: 3g}/255, '
                                  'PSNR {3:2.2f} dB.'.format(denoiser.upper(),
                                  k+1, nsig*255, psnr_all[k]),
                                  'SSIM:{}'.format(ssim_all[k]))
                        else:
                            print(' ADMM-{0} iteration {1: 3d}, sigma {2: 3g}, '
                                  'PSNR {3:2.2f} dB.'.format(denoiser.upper(),
                                  k+1, nsig, psnr_all[k]),
                                  'SSIM:{}'.format(ssim_all[k]))
                    else:
                        print(' ADMM-{0} iteration {1: 3d}, '
                              'PSNR {2: 2.2f} dB.'.format(denoiser.upper(),
                              k+1, psnr_all[k]),
                              'SSIM:{}'.format(ssim_all[k]))
            # re-apply the dispersion shift for the next data-fidelity step
            theta = shift(theta,step=2)
            b = b - (x-theta) # update residual
            k = k+1
    return theta, psnr_all,ssim_all
def GAP_TV_rec(y, Phi, A, At, Phi_sum, maxiter, step_size, weight, row, col, ColT, X_ori):
    """Accelerated GAP reconstruction with a TV prior.

    Alternates a data-fidelity projection (with accumulated measurement
    residual) and Chambolle TV denoising; prints PSNR vs. `X_ori` every
    five sweeps.
    """
    residual_acc = np.zeros((row, col))
    start = time.time()
    estimate = At(y, Phi)
    for iteration in range(maxiter):
        forward = A(estimate, Phi)
        # accumulate the measurement residual (accelerated GAP)
        residual_acc = residual_acc + (y - forward)
        estimate = estimate + step_size * At((residual_acc - forward) / Phi_sum, Phi)
        estimate = denoise_tv_chambolle(estimate, weight, n_iter_max=30, multichannel=True)
        if (iteration + 1) % 5 == 0:
            now = time.time()
            print("GAP-TV: Iteration %3d, PSNR = %2.2f dB,"
                  " time = %3.1fs."
                  % (iteration + 1, psnr(estimate, X_ori), now - start))
    return estimate
def ADMM_TV_rec(y, Phi, A, At, Phi_sum, maxiter, step_size, weight, row, col, ColT, eta, X_ori):
    """ADMM reconstruction with a TV prior.

    Alternates a penalized data-fidelity step, Chambolle TV denoising and a
    dual (residual) update, annealing both the TV weight and the penalty
    `eta` each sweep; prints PSNR vs. `X_ori` every five sweeps.
    """
    start = time.time()
    theta = At(y, Phi)
    v = theta
    b = np.zeros((row, col, ColT))
    for iteration in range(maxiter):
        measurement = A(theta + b, Phi)
        # penalized Euclidean projection onto the data term
        v = (theta + b) + step_size * At((y - measurement) / (Phi_sum + eta), Phi)
        theta = denoise_tv_chambolle(v - b, weight, n_iter_max=30, multichannel=True)
        b = b - (v - theta)  # dual update
        # anneal denoiser strength and penalty parameter
        weight = 0.999 * weight
        eta = 0.998 * eta
        if (iteration + 1) % 5 == 0:
            now = time.time()
            print("ADMM-TV: Iteration %3d, PSNR = %2.2f dB,"
                  " time = %3.1fs."
                  % (iteration + 1, psnr(v, X_ori), now - start))
    return v
| 22,862 | 48.702174 | 261 | py |
PnP-CASSI | PnP-CASSI-main/hsi.py | import numpy as np
import torch.nn as nn
import torch
import torch.utils.data as data
from torch.optim import Adam
from torch.optim import lr_scheduler
from collections import OrderedDict
import os
import math
from torch.utils.data import DataLoader
import random
from torch.optim import Adam
def pixel_unshuffle(input, upscale_factor):
    """Rearrange a (B, C, H*r, W*r) tensor into (B, C*r*r, H, W).

    Inverse of pixel shuffle: each r x r spatial neighborhood is folded
    into the channel dimension (channel order C, then row offset, then
    column offset — matching ``torch.nn.functional.pixel_unshuffle``).

    Args:
        input: 4-D tensor whose spatial sizes are divisible by
            ``upscale_factor``.
        upscale_factor: spatial downscale factor ``r``.

    Returns:
        Tensor of shape (B, C * r**2, H // r, W // r).
    """
    batch, chans, full_h, full_w = input.size()
    reduced_h = full_h // upscale_factor
    reduced_w = full_w // upscale_factor
    # Split each spatial axis into a (reduced, factor) pair.
    expanded = input.contiguous().view(
        batch, chans, reduced_h, upscale_factor, reduced_w, upscale_factor)
    # Bring both factor axes next to the channel axis, then merge them in.
    folded = expanded.permute(0, 1, 3, 5, 2, 4).contiguous()
    return folded.view(batch, chans * upscale_factor ** 2, reduced_h, reduced_w)
class PixelUnShuffle(nn.Module):
    """Module wrapper around :func:`pixel_unshuffle` with a fixed factor."""

    def __init__(self, upscale_factor):
        super(PixelUnShuffle, self).__init__()
        # spatial downscale factor applied on every forward pass
        self.upscale_factor = upscale_factor

    def forward(self, input):
        factor = self.upscale_factor
        return pixel_unshuffle(input, factor)
def sequential(*args):
    """Flatten the given modules into a single ``nn.Sequential``.

    Any ``nn.Sequential`` argument is unwrapped into its children so the
    result is flat. A single argument is returned unchanged (no wrapping);
    ``OrderedDict`` input is rejected.
    """
    if len(args) == 1:
        if isinstance(args[0], OrderedDict):
            raise NotImplementedError('sequential does not support OrderedDict input.')
        # nothing to combine — hand back the lone module as-is
        return args[0]
    flattened = []
    for candidate in args:
        if isinstance(candidate, nn.Sequential):
            flattened.extend(candidate.children())
        elif isinstance(candidate, nn.Module):
            flattened.append(candidate)
    return nn.Sequential(*flattened)
def conv_rule(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True, if_relu=True):
    """Conv2d optionally followed by an in-place ReLU.

    The layer list is flattened through :func:`sequential`, so with
    ``if_relu=False`` the bare ``nn.Conv2d`` is returned directly.
    """
    layers = [nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                        kernel_size=kernel_size, stride=stride,
                        padding=padding, bias=bias)]
    if if_relu:
        layers.append(nn.ReLU(inplace=True))
    return sequential(*layers)
class HSI_SDeCNN(nn.Module):
    # Spatial-spectral deep denoiser: a 7-band input window plus a broadcast
    # noise-level map are processed at half spatial resolution
    # (pixel-unshuffle), run through nb conv+ReLU layers, and shuffled back
    # up to a single denoised band.
    # NOTE(review): the attribute names (m_down / model / m_up) and the exact
    # layer ordering determine the state_dict keys of the released
    # checkpoint — do not rename or reorder.
    def __init__(self, in_nc=7, out_nc=1, nc=128, nb=15):
        # in_nc: input spectral bands; out_nc: output bands;
        # nc: hidden channels; nb: total conv layers (head + body + tail).
        super(HSI_SDeCNN,self).__init__()
        sf=2
        self.m_down=PixelUnShuffle(upscale_factor=sf)
        # +1 channel for the noise-level map concatenated in forward()
        m_head=conv_rule(in_nc*sf*sf+1,nc)
        m_body=[conv_rule(nc,nc) for _ in range(nb-2)]
        # tail has no ReLU and emits sf*sf channels for the pixel shuffle
        m_tail=conv_rule(nc,out_nc*sf*sf,if_relu=False)
        self.model = sequential(m_head, *m_body, m_tail)
        self.m_up = nn.PixelShuffle(upscale_factor=sf)
    def forward(self, x,sigma):
        # x: (B, in_nc, H, W) noisy bands; sigma: (1, 1, 1, 1) noise level,
        # broadcast to a full-resolution map below.
        h, w = x.size()[-2:]
        # replication-pad so H and W are even (required by the unshuffle) ...
        paddingBottom = int(np.ceil(h / 2) * 2 - h)
        paddingRight = int(np.ceil(w / 2) * 2 - w)
        x = torch.nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x)
        x = self.m_down(x)
        # tile the scalar noise level into a per-pixel conditioning map
        m = sigma.repeat(1, 1, x.size()[-2], x.size()[-1])
        x = torch.cat((x, m), 1)
        x = self.model(x)
        x = self.m_up(x)
        # ... then crop back to the original spatial size
        x = x[..., :h, :w]
        return x
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/train_momentum.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
from datetime import date
from functools import partial
import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from transformers import (AdamW, AutoConfig, AutoTokenizer,
get_linear_schedule_with_warmup)
from mdr.retrieval.config import train_args
from mdr.retrieval.criterions import (mhop_eval, mhop_loss)
from mdr.retrieval.data.mhop_dataset import MhopDataset, mhop_collate
from mdr.retrieval.models.mhop_retriever import RobertaMomentumRetriever
from mdr.retrieval.utils.utils import AverageMeter, move_to_cuda
from mdr.retrieval.data.fever_dataset import FeverDataset
def main():
    """Train/evaluate the momentum (MoCo-style) multi-hop retriever.

    Builds a run directory from the hyperparameters, sets up (optionally
    fp16 / distributed) training, and checkpoints the query and key
    encoders whenever validation MRR improves.
    """
    args = train_args()
    if args.fp16:
        import apex
        apex.amp.register_half_function(torch, 'einsum')

    def unwrap(m):
        # DataParallel/DistributedDataParallel expose the real model as
        # `.module`; plain single-GPU models do not have that attribute.
        return m.module if hasattr(m, "module") else m

    date_curr = date.today().strftime("%m-%d-%Y")
    model_name = f"{args.prefix}-seed{args.seed}-bsz{args.train_batch_size}-fp16{args.fp16}-lr{args.learning_rate}-decay{args.weight_decay}-warm{args.warmup_ratio}-valbsz{args.predict_batch_size}-m{args.m}-k{args.k}-t{args.temperature}"
    args.output_dir = os.path.join(args.output_dir, date_curr, model_name)
    tb_logger = SummaryWriter(os.path.join(args.output_dir.replace("logs","tflogs")))
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        print(
            f"output directory {args.output_dir} already exists and is not empty.")
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO,
                        handlers=[logging.FileHandler(os.path.join(args.output_dir, "log.txt")),
                                  logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    logger.info(args)
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device %s n_gpu %d distributed training %r",
                device, n_gpu, bool(args.local_rank != -1))
    if args.accumulate_gradients < 1:
        raise ValueError("Invalid accumulate_gradients parameter: {}, should be >= 1".format(
            args.accumulate_gradients))
    # Per-forward batch size after splitting across accumulation steps.
    args.train_batch_size = int(
        args.train_batch_size / args.accumulate_gradients)
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    bert_config = AutoConfig.from_pretrained(args.model_name)
    model = RobertaMomentumRetriever(bert_config, args)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    collate_fc = partial(mhop_collate, pad_id=tokenizer.pad_token_id)
    if args.do_train and args.max_c_len > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (args.max_c_len, bert_config.max_position_embeddings))
    # FEVER and HotpotQA use different dataset readers; pick by file name.
    if "fever" in args.predict_file:
        eval_dataset = FeverDataset(
            tokenizer, args.predict_file, args.max_q_len, args.max_q_sp_len, args.max_c_len)
    else:
        eval_dataset = MhopDataset(
            tokenizer, args.predict_file, args.max_q_len, args.max_q_sp_len, args.max_c_len)
    eval_dataloader = DataLoader(
        eval_dataset, batch_size=args.predict_batch_size, collate_fn=collate_fc, pin_memory=True, num_workers=args.num_workers)
    logger.info(f"Num of dev batches: {len(eval_dataloader)}")
    model.to(device)
    print(f"number of trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")
    if args.do_train:
        # Exclude bias/LayerNorm weights from weight decay.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = Adam(optimizer_parameters,
                         lr=args.learning_rate, eps=args.adam_epsilon)
        if args.fp16:
            from apex import amp
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=args.fp16_opt_level)
    else:
        if args.fp16:
            from apex import amp
            model = amp.initialize(model, opt_level=args.fp16_opt_level)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    if args.do_train:
        global_step = 0  # gradient update step
        batch_step = 0  # forward batch count
        best_mrr = 0
        train_loss_meter = AverageMeter()
        model.train()
        if "fever" in args.train_file:
            train_dataset = FeverDataset(tokenizer, args.train_file, args.max_q_len, args.max_q_sp_len, args.max_c_len, train=True)
        else:
            train_dataset = MhopDataset(tokenizer, args.train_file, args.max_q_len, args.max_q_sp_len, args.max_c_len, train=True)
        train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size, pin_memory=True, collate_fn=collate_fc, num_workers=args.num_workers, shuffle=True)
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        warmup_steps = t_total * args.warmup_ratio
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
        )
        logger.info('Start training....')
        for epoch in range(int(args.num_train_epochs)):
            for batch in tqdm(train_dataloader):
                batch_step += 1
                batch = move_to_cuda(batch)
                loss = mhop_loss(model, batch, args)
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                train_loss_meter.update(loss.item())
                if (batch_step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    global_step += 1
                    tb_logger.add_scalar('batch_train_loss',
                                         loss.item(), global_step)
                    tb_logger.add_scalar('smoothed_train_loss',
                                         train_loss_meter.avg, global_step)
                    if args.eval_period != -1 and global_step % args.eval_period == 0:
                        mrrs = predict(args, model, eval_dataloader,
                                       device, logger)
                        mrr = mrrs["mrr_avg"]
                        logger.info("Step %d Train loss %.2f MRR %.2f on epoch=%d" % (global_step, train_loss_meter.avg, mrr*100, epoch))
                        if best_mrr < mrr:
                            logger.info("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" %
                                        (best_mrr*100, mrr*100, epoch))
                            torch.save(unwrap(model).encoder_q.state_dict(), os.path.join(
                                args.output_dir, f"checkpoint_q_best.pt"))
                            # BUG FIX: this file previously received
                            # encoder_q's weights as well; the key-encoder
                            # checkpoint must come from encoder_k.
                            torch.save(unwrap(model).encoder_k.state_dict(), os.path.join(
                                args.output_dir, f"checkpoint_k_best.pt"))
                            model = model.to(device)
                            best_mrr = mrr
            # End-of-epoch evaluation.
            mrrs = predict(args, model, eval_dataloader, device, logger)
            mrr = mrrs["mrr_avg"]
            logger.info("Step %d Train loss %.2f MRR-AVG %.2f on epoch=%d" % (
                global_step, train_loss_meter.avg, mrr*100, epoch))
            for k, v in mrrs.items():
                tb_logger.add_scalar(k, v*100, epoch)
            if best_mrr < mrr:
                logger.info("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" % (best_mrr*100, mrr*100, epoch))
                torch.save(unwrap(model).encoder_q.state_dict(), os.path.join(
                    args.output_dir, f"checkpoint_q_best.pt"))
                # BUG FIX: save encoder_k (not encoder_q) as the key checkpoint.
                torch.save(unwrap(model).encoder_k.state_dict(), os.path.join(
                    args.output_dir, f"checkpoint_k_best.pt"))
                best_mrr = mrr
        logger.info("Training finished!")
    elif args.do_predict:
        acc = predict(args, model, eval_dataloader, device, logger)
        logger.info(f"test performance {acc}")
def predict(args, model, eval_dataloader, device, logger):
    """Evaluate multi-hop retrieval: mean reciprocal rank per hop.

    Returns a dict with MRR for hop 1, hop 2 and their average; restores
    the model to training mode before returning.
    """
    model.eval()
    # Per-example reciprocal ranks, collected across the whole eval set.
    rrs_1 = []
    rrs_2 = []
    with torch.no_grad():
        for batch in tqdm(eval_dataloader):
            outputs = model(move_to_cuda(batch))
            eval_results = mhop_eval(outputs, args)
            rrs_1.extend(eval_results["rrs_1"])
            rrs_2.extend(eval_results["rrs_2"])
    mrr_1 = np.mean(rrs_1)
    mrr_2 = np.mean(rrs_2)
    logger.info(f"evaluated {len(rrs_1)} examples...")
    logger.info(f'MRR-1: {mrr_1}')
    logger.info(f'MRR-2: {mrr_2}')
    # Hand the model back in training mode for the next epoch.
    model.train()
    return {"mrr_1": mrr_1, "mrr_2": mrr_2, "mrr_avg": (mrr_1 + mrr_2) / 2}
if __name__ == "__main__":
main()
| 10,671 | 44.220339 | 236 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/encode_corpus.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Description: encode text corpus into a store of dense vectors.
Usage (adjust the batch size according to your GPU memory):
CUDA_VISIBLE_DEVICES=0,1,2,3 python scripts/encode_corpus.py \
--do_predict \
--predict_batch_size 1000 \
--model_name roberta-base \
--predict_file ${CORPUS_PATH} \
--init_checkpoint ${MODEL_CHECKPOINT} \
--embed_save_path ${SAVE_PATH} \
--fp16 \
--max_c_len 300 \
--num_workers 20
"""
import collections
import logging
import json
import os
import random
from tqdm import tqdm
import numpy as np
import torch
from transformers import AutoConfig, AutoTokenizer
from torch.utils.data import DataLoader
from mdr.retrieval.data.encode_datasets import EmDataset, em_collate
from mdr.retrieval.models.retriever import CtxEncoder, RobertaCtxEncoder
from mdr.retrieval.config import encode_args
from mdr.retrieval.utils.utils import move_to_cuda, load_saved
def main():
    """Encode a corpus (or query file) into dense vectors and save as .npy."""
    args = encode_args()
    if args.fp16:
        import apex
        # Register einsum for fp16 before the model is built.
        apex.amp.register_half_function(torch, 'einsum')
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        # Distributed mode: one process per GPU.
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    if not args.predict_file:
        raise ValueError(
            "If `do_predict` is True, then `predict_file` must be specified.")
    bert_config = AutoConfig.from_pretrained(args.model_name)
    # Pick the encoder class matching the pretrained backbone family.
    if "roberta" in args.model_name:
        model = RobertaCtxEncoder(bert_config, args)
    else:
        model = CtxEncoder(bert_config, args)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    eval_dataset = EmDataset(
        tokenizer, args.predict_file, args.max_q_len, args.max_c_len, args.is_query_embed, args.embed_save_path)
    eval_dataloader = DataLoader(
        eval_dataset, batch_size=args.predict_batch_size, collate_fn=em_collate, pin_memory=True, num_workers=args.num_workers)
    # Encoding without a trained checkpoint makes no sense.
    assert args.init_checkpoint != ""
    model = load_saved(model, args.init_checkpoint, exact=False)
    model.to(device)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        # amp.initialize must run after model.to(device).
        model = amp.initialize(model, opt_level=args.fp16_opt_level)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    embeds = predict(model, eval_dataloader)
    print(embeds.size())
    # Saved as a (num_items, hidden) float array for later faiss indexing.
    np.save(args.embed_save_path, embeds.cpu().numpy())
def predict(model, eval_dataloader):
    """Encode every batch from `eval_dataloader` and return one CPU tensor.

    Returns the concatenated 'embed' outputs, shape (num_items, hidden).
    Models are switched to eval mode for encoding and back to train mode
    before returning.
    """
    # NOTE(review): the list branch looks like half-finished ensemble
    # support — a plain list is not callable, so `model(batch_to_feed)`
    # below only works for a single module. Kept for compatibility.
    is_ensemble = isinstance(model, list)
    if is_ensemble:
        for m in model:
            m.eval()
    else:
        model.eval()
    embed_array = []
    for batch in tqdm(eval_dataloader):
        batch_to_feed = move_to_cuda(batch)
        with torch.no_grad():
            results = model(batch_to_feed)
            embed = results['embed'].cpu()
            embed_array.append(embed)
    embed_array = torch.cat(embed_array)
    # BUG FIX: `model.train()` raised AttributeError when `model` was a
    # list; restore train mode symmetrically with the eval switch above.
    if is_ensemble:
        for m in model:
            m.train()
    else:
        model.train()
    return embed_array
if __name__ == "__main__":
main()
| 3,707 | 30.423729 | 127 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/end2end.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Efficient end2end QA with HNSW index
taskset --cpu-list 0-15 python end2end.py ../data/hotpot/hotpot_qas_val.json
"""
# Deduplicated (argparse/json/logging were imported twice) and grouped
# stdlib / third-party / local per PEP 8.
import argparse
import collections
import json
import logging
import time
from functools import partial

import faiss
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoConfig, AutoTokenizer

# BUG FIX: package prefixes were inconsistent within this file
# (`retrieval.`/`qa.` next to `mdr.qa.`) and `from .train_qa` is a relative
# import that fails when this runs as a script; use the `mdr.` package paths
# the sibling scripts use, and the plain `train_qa` module as in demo.py.
from mdr.retrieval.models.mhop_retriever import RobertaRetriever
from mdr.retrieval.utils.utils import load_saved
from mdr.qa.qa_model import QAModel
from mdr.qa.qa_dataset import qa_collate, QAEvalDataset
from mdr.qa.hotpot_evaluate_v1 import f1_score, exact_match_score
from mdr.qa.utils import set_global_logging_level
from train_qa import eval_final
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if (logger.hasHandlers()):
logger.handlers.clear()
console = logging.StreamHandler()
logger.addHandler(console)
set_global_logging_level(logging.ERROR, ["transformers", "nlp", "torch", "tensorflow", "tensorboard", "wandb"])
def convert_hnsw_query(query_vectors):
    """Append a zero float32 column to each query vector.

    The HNSW index stores vectors with one extra auxiliary dimension, so
    queries must be padded to the same width before `index.search`.
    """
    zero_col = np.zeros((len(query_vectors), 1), dtype='float32')
    return np.hstack((query_vectors, zero_col))
if __name__ == '__main__':
    # CLI: positional query file plus retriever/reader checkpoints, index
    # paths and search hyperparameters.
    parser = argparse.ArgumentParser()
    parser.add_argument('raw_data', type=str, default=None)
    parser.add_argument('--indexpath', type=str, default="retrieval/index/wiki_index_hnsw_roberta")
    parser.add_argument('--corpus_dict', type=str, default='retrieval/index/hotpotQA_corpus_dict.json')
    parser.add_argument('--retriever_path', type=str, default="retrieval/logs/08-16-2020/roberta_momentum_freeze_k-seed16-bsz150-fp16True-lr1e-05-decay0.0-warm0-valbsz3000-m0.999-k76800/checkpoint_q_best.pt")
    parser.add_argument('--reader_path', type=str, default="qa/logs/08-10-2020/electra_val_top30-epoch7-lr5e-05-seed42-rdrop0-qadrop0-decay0-qpergpu2-aggstep8-clip2-evalper250-evalbsize1024-negnum5-warmup0.1-adamTrue-spweight0.025/checkpoint_best.pt")
    # topk is used for both hops, yielding topk*topk candidate chains.
    parser.add_argument('--topk', type=int, default=1, help="topk paths")
    parser.add_argument('--num-workers', type=int, default=10)
    parser.add_argument('--max-q-len', type=int, default=70)
    parser.add_argument('--max-q-sp-len', type=int, default=350)
    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument("--max-ans-len", default=35, type=int)
    parser.add_argument("--save-prediction", default="", type=str)
    parser.add_argument("--model-name", type=str, default="")
    parser.add_argument("--sp-pred", action="store_true", help="whether to predict sentence sp")
    parser.add_argument("--sp-weight", default=0, type=float, help="weight of the sp loss")
    # parser.add_argument('--hnsw', action="store_true")
    args = parser.parse_args()
logger.info("Loading trained models...")
retrieval_config = AutoConfig.from_pretrained('roberta-base')
retrieval_tokenizer = AutoTokenizer.from_pretrained('roberta-base')
args.model_name = "roberta-base"
retriever = RobertaRetriever(retrieval_config, args)
retriever = load_saved(retriever, args.retriever_path)
retriever.eval()
qa_config = AutoConfig.from_pretrained('google/electra-large-discriminator')
qa_tokenizer = AutoTokenizer.from_pretrained('google/electra-large-discriminator')
args.model_name = "google/electra-large-discriminator"
reader = QAModel(qa_config, args)
reader = load_saved(reader, args.reader_path, False)
reader.eval()
logger.info("Loading index...")
index = faiss.read_index(args.indexpath)
logger.info(f"Loading corpus...")
id2doc = json.load(open(args.corpus_dict))
logger.info(f"Corpus size {len(id2doc)}")
logger.info("Loading queries...")
qas_items = [json.loads(_) for _ in open(args.raw_data).readlines()[:5]]
questions = [_["question"][:-1] if _["question"].endswith("?") else _["question"] for _ in qas_items]
id2gold_ans = {_["_id"]: _["answer"][0] for _ in qas_items}
start = time.time()
logger.info("Retrieving...")
retrieval_results = []
encode_times = []
search_times = []
with torch.no_grad():
for b_start in tqdm(range(0, len(questions), args.batch_size)):
# 1-hop retrieval
batch_q = questions[b_start:b_start + args.batch_size]
batch_qas = qas_items[b_start:b_start + args.batch_size]
batch_q_encodes = retrieval_tokenizer.batch_encode_plus(batch_q, max_length=args.max_q_len, pad_to_max_length=True, return_tensors="pt")
q_embeds = retriever.encode_q(batch_q_encodes["input_ids"], batch_q_encodes["attention_mask"], batch_q_encodes.get("token_type_ids", None))
q_embeds_numpy = q_embeds.numpy()
q_embeds_numpy = convert_hnsw_query(q_embeds_numpy)
scores_1, docid_1 = index.search(q_embeds_numpy, args.topk)
# construct 2hop queries
bsize = len(batch_q)
query_pairs = []
for b_idx in range(bsize):
for _, doc_id in enumerate(docid_1[b_idx]):
doc = id2doc[str(doc_id)]["text"]
if doc.strip() == "":
# roberta tokenizer does not accept empty string as segment B
doc = id2doc[str(doc_id)]["title"]
scores_1[b_idx][_] = float("-inf")
query_pairs.append((batch_q[b_idx], doc))
# 2-hop retrieval
s1 = time.time()
batch_q_sp_encodes = retrieval_tokenizer.batch_encode_plus(query_pairs, max_length=args.max_q_sp_len, pad_to_max_length=True, return_tensors="pt")
q_sp_embeds = retriever.encode_q(batch_q_sp_encodes["input_ids"], batch_q_sp_encodes["attention_mask"], batch_q_sp_encodes.get("token_type_ids", None))
encode_times.append(time.time() - s1)
s2 = time.time()
q_sp_embeds = q_sp_embeds.numpy()
q_sp_embeds = convert_hnsw_query(q_sp_embeds)
scores_2, docid_2 = index.search(q_sp_embeds, args.topk)
search_times.append(time.time() - s2)
# aggregate chain scores
scores_2 = scores_2.reshape(bsize, args.topk, args.topk)
docid_2 = docid_2.reshape(bsize, args.topk, args.topk)
path_scores = - (np.expand_dims(scores_1, axis=2) + scores_2)
for idx in range(bsize):
search_scores = path_scores[idx]
ranked_pairs = np.vstack(np.unravel_index(np.argsort(search_scores.ravel())[::-1], (args.topk, args.topk))).transpose()
chains = []
for _ in range(args.topk):
path_ids = ranked_pairs[_]
doc1_id = str(docid_1[idx, path_ids[0]])
doc2_id = str(docid_2[idx, path_ids[0], path_ids[1]])
chains.append([id2doc[doc1_id], id2doc[doc2_id]])
retrieval_results.append({
"_id": batch_qas[idx]["_id"],
"question": batch_qas[idx]["question"],
"candidate_chains": chains
})
logger.info("Reading...")
collate_fc = partial(qa_collate, pad_id=qa_tokenizer.pad_token_id)
qa_eval_dataset = QAEvalDataset(qa_tokenizer, retrieval_results, max_seq_len=512, max_q_len=64)
qa_eval_dataloader = DataLoader(qa_eval_dataset, batch_size=args.topk, collate_fn=collate_fc, pin_memory=True, num_workers=0)
qa_results = eval_final(args, reader, qa_eval_dataloader, gpu=False)
print(f"Finishing evaluation in {time.time() - start}s")
ems = [exact_match_score(qa_results["answer"][k], id2gold_ans[k]) for k in qa_results["answer"].keys()]
f1s = [f1_score(qa_results["answer"][k], id2gold_ans[k]) for k in qa_results["answer"].keys()]
logger.info(f"Answer EM {np.mean(ems)}, F1 {np.mean(f1s)}")
| 8,165 | 44.116022 | 251 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/train_mhop.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Description: train a multi-hop dense retrieval from pretrained BERT/RoBERTa encoder
Usage:
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python scripts/train_mhop.py \
--do_train \
--prefix ${RUN_ID} \
--predict_batch_size 3000 \
--model_name roberta-base \
--train_batch_size 150 \
--learning_rate 2e-5 \
--fp16 \
--train_file ${TRAIN_DATA_PATH} \
--predict_file ${DEV_DATA_PATH} \
--seed 16 \
--eval-period -1 \
--max_c_len 300 \
--max_q_len 70 \
--max_q_sp_len 350 \
--shared-encoder \
--warmup-ratio 0.1
"""
import logging
import os
import random
from datetime import date
from functools import partial
import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from transformers import (AdamW, AutoConfig, AutoTokenizer,
get_linear_schedule_with_warmup)
from mdr.retrieval.config import train_args
from mdr.retrieval.criterions import (mhop_eval, mhop_loss)
from mdr.retrieval.data.mhop_dataset import MhopDataset, mhop_collate
from mdr.retrieval.models.mhop_retriever import RobertaRetriever
from mdr.retrieval.utils.utils import AverageMeter, move_to_cuda, load_saved
def main():
    """Train/evaluate the shared-encoder multi-hop dense retriever.

    Sets up (optionally fp16 / distributed) training and checkpoints the
    full model whenever validation MRR improves; also keeps a
    `checkpoint_last.pt` per epoch.
    """
    args = train_args()
    if args.fp16:
        import apex
        # Register einsum for fp16 before building the model.
        apex.amp.register_half_function(torch, 'einsum')
    date_curr = date.today().strftime("%m-%d-%Y")
    model_name = f"{args.prefix}-seed{args.seed}-bsz{args.train_batch_size}-fp16{args.fp16}-lr{args.learning_rate}-decay{args.weight_decay}-warm{args.warmup_ratio}-valbsz{args.predict_batch_size}-shared{args.shared_encoder}-multi{args.multi_vector}-scheme{args.scheme}"
    args.output_dir = os.path.join(args.output_dir, date_curr, model_name)
    tb_logger = SummaryWriter(os.path.join(args.output_dir.replace("logs","tflogs")))
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        print(
            f"output directory {args.output_dir} already exists and is not empty.")
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO,
                        handlers=[logging.FileHandler(os.path.join(args.output_dir, "log.txt")),
                                  logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    logger.info(args)
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device %s n_gpu %d distributed training %r",
                device, n_gpu, bool(args.local_rank != -1))
    if args.accumulate_gradients < 1:
        raise ValueError("Invalid accumulate_gradients parameter: {}, should be >= 1".format(
            args.accumulate_gradients))
    # Per-forward batch size after splitting across accumulation steps.
    args.train_batch_size = int(
        args.train_batch_size / args.accumulate_gradients)
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    bert_config = AutoConfig.from_pretrained(args.model_name)
    model = RobertaRetriever(bert_config, args)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    collate_fc = partial(mhop_collate, pad_id=tokenizer.pad_token_id)
    if args.do_train and args.max_c_len > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (args.max_c_len, bert_config.max_position_embeddings))
    eval_dataset = MhopDataset(
        tokenizer, args.predict_file, args.max_q_len, args.max_q_sp_len, args.max_c_len)
    eval_dataloader = DataLoader(
        eval_dataset, batch_size=args.predict_batch_size, collate_fn=collate_fc, pin_memory=True, num_workers=args.num_workers)
    logger.info(f"Num of dev batches: {len(eval_dataloader)}")
    # Optionally warm-start from a previous checkpoint.
    if args.init_checkpoint != "":
        model = load_saved(model, args.init_checkpoint)
    model.to(device)
    print(f"number of trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")
    if args.do_train:
        # Exclude bias/LayerNorm weights from weight decay.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = Adam(optimizer_parameters,
                         lr=args.learning_rate, eps=args.adam_epsilon)
        if args.fp16:
            from apex import amp
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=args.fp16_opt_level)
    else:
        if args.fp16:
            from apex import amp
            model = amp.initialize(model, opt_level=args.fp16_opt_level)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    if args.do_train:
        global_step = 0 # gradient update step
        batch_step = 0 # forward batch count
        best_mrr = 0
        train_loss_meter = AverageMeter()
        model.train()
        train_dataset = MhopDataset(tokenizer, args.train_file, args.max_q_len, args.max_q_sp_len, args.max_c_len, train=True)
        train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size, pin_memory=True, collate_fn=collate_fc, num_workers=args.num_workers, shuffle=True)
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        warmup_steps = t_total * args.warmup_ratio
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
        )
        logger.info('Start training....')
        for epoch in range(int(args.num_train_epochs)):
            for batch in tqdm(train_dataloader):
                batch_step += 1
                batch = move_to_cuda(batch)
                loss = mhop_loss(model, batch, args)
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                train_loss_meter.update(loss.item())
                if (batch_step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    global_step += 1
                    tb_logger.add_scalar('batch_train_loss',
                                         loss.item(), global_step)
                    tb_logger.add_scalar('smoothed_train_loss',
                                         train_loss_meter.avg, global_step)
                    # Optional mid-epoch evaluation.
                    if args.eval_period != -1 and global_step % args.eval_period == 0:
                        mrrs = predict(args, model, eval_dataloader,
                                       device, logger)
                        mrr = mrrs["mrr_avg"]
                        logger.info("Step %d Train loss %.2f MRR %.2f on epoch=%d" % (global_step, train_loss_meter.avg, mrr*100, epoch))
                        if best_mrr < mrr:
                            logger.info("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" %
                                        (best_mrr*100, mrr*100, epoch))
                            torch.save(model.state_dict(), os.path.join(
                                args.output_dir, f"checkpoint_best.pt"))
                            model = model.to(device)
                            best_mrr = mrr
            # End-of-epoch evaluation and checkpointing.
            mrrs = predict(args, model, eval_dataloader, device, logger)
            mrr = mrrs["mrr_avg"]
            logger.info("Step %d Train loss %.2f MRR-AVG %.2f on epoch=%d" % (
                global_step, train_loss_meter.avg, mrr*100, epoch))
            for k, v in mrrs.items():
                tb_logger.add_scalar(k, v*100, epoch)
            # Always keep the most recent epoch's weights too.
            torch.save(model.state_dict(), os.path.join(
                args.output_dir, f"checkpoint_last.pt"))
            if best_mrr < mrr:
                logger.info("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" % (best_mrr*100, mrr*100, epoch))
                torch.save(model.state_dict(), os.path.join(
                    args.output_dir, f"checkpoint_best.pt"))
                best_mrr = mrr
        logger.info("Training finished!")
    elif args.do_predict:
        acc = predict(args, model, eval_dataloader, device, logger)
        logger.info(f"test performance {acc}")
def predict(args, model, eval_dataloader, device, logger):
    """Evaluate multi-hop retrieval on the dev loader.

    Computes the mean reciprocal rank of the gold passage for hop 1 and
    hop 2 and returns both plus their average; the model is restored to
    training mode before returning.
    """
    model.eval()
    rrs_1, rrs_2 = [], []  # per-example reciprocal ranks for each hop
    for batch in tqdm(eval_dataloader):
        with torch.no_grad():
            eval_results = mhop_eval(model(move_to_cuda(batch)), args)
        rrs_1 += eval_results["rrs_1"]
        rrs_2 += eval_results["rrs_2"]
    mrr_1 = np.mean(rrs_1)
    mrr_2 = np.mean(rrs_2)
    logger.info(f"evaluated {len(rrs_1)} examples...")
    logger.info(f'MRR-1: {mrr_1}')
    logger.info(f'MRR-2: {mrr_2}')
    model.train()
    return {"mrr_1": mrr_1, "mrr_2": mrr_2, "mrr_avg": (mrr_1 + mrr_2) / 2}
if __name__ == "__main__":
main()
| 10,731 | 41.086275 | 269 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/demo.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import streamlit as st
import torch
import os
import numpy as np
from apex import amp
import faiss
import json
import argparse
from functools import partial
from transformers import AutoConfig, AutoTokenizer
from torch.utils.data import DataLoader
from mdr.retrieval.models.mhop_retriever import RobertaRetriever
from mdr.retrieval.utils.basic_tokenizer import SimpleTokenizer
from mdr.retrieval.utils.utils import load_saved, move_to_cuda
from mdr.qa.qa_model import QAModel
from mdr.qa.qa_dataset import qa_collate, QAEvalDataset
from train_qa import eval_final
@st.cache(allow_output_mutation=True)
def init_retrieval(args):
    """Load retriever model, flat-IP faiss index and corpus (cached by streamlit)."""
    print("Initializing retrieval module...")
    bert_config = AutoConfig.from_pretrained(args.model_name)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    retriever = RobertaRetriever(bert_config, args)
    retriever = load_saved(retriever, args.model_path, exact=False)
    cuda = torch.device('cuda')
    retriever.to(cuda)
    # amp.initialize must run after the model is on GPU.
    retriever = amp.initialize(retriever, opt_level='O1')
    retriever.eval()
    print("Loading index...")
    # Exact inner-product search over 768-dim passage embeddings.
    index = faiss.IndexFlatIP(768)
    xb = np.load(args.indexpath).astype('float32')
    index.add(xb)
    # Optionally move the index onto a GPU.
    if args.index_gpu != -1:
        res = faiss.StandardGpuResources()
        index = faiss.index_cpu_to_gpu(res, args.index_gpu, index)
    print("Loading documents...")
    id2doc = json.load(open(args.corpus_dict))
    print("Index ready...")
    return retriever, index, id2doc, tokenizer
@st.cache(allow_output_mutation=True)
def init_reader(args):
    """Load the ELECTRA reading-comprehension model and tokenizer.

    Temporarily swaps `args.model_name` to the reader backbone while
    constructing QAModel, then restores it for the retriever. Cached by
    streamlit across reruns.
    """
    qa_config = AutoConfig.from_pretrained('google/electra-large-discriminator')
    qa_tokenizer = AutoTokenizer.from_pretrained('google/electra-large-discriminator')
    saved_model_name = args.model_name
    args.model_name = "google/electra-large-discriminator"
    reader = load_saved(QAModel(qa_config, args), args.reader_path, False)
    reader.to(torch.device('cuda'))
    reader = amp.initialize(reader, opt_level='O1')
    reader.eval()
    # Restore the retriever's backbone name before returning.
    args.model_name = saved_model_name
    return reader, qa_tokenizer
st.markdown(
    "# Multi-hop Open-domain QA with [MDR](https://github.com/facebookresearch/multihop_dense_retrieval)")
# CLI flags for index/corpus/model paths and search hyperparameters.
parser = argparse.ArgumentParser()
parser.add_argument('--indexpath', type=str,
                    default='data/hotpot_index/wiki_index.npy')
parser.add_argument('--corpus_dict', type=str,
                    default='data/hotpot_index/wiki_id2doc.json')
parser.add_argument('--model_path', type=str, default='models/q_encoder.pt')
parser.add_argument('--topk', type=int, default=20, help="topk paths")
parser.add_argument('--max-q-len', type=int, default=70)
parser.add_argument('--max-c-len', type=int, default=300)
parser.add_argument('--max-q-sp-len', type=int, default=350)
parser.add_argument('--model-name', type=str, default='roberta-base')
parser.add_argument('--reader_path', type=str, default="models/qa_electra.pt")
parser.add_argument("--sp-pred", action="store_true",
                    help="whether to predict sentence sp")
parser.add_argument("--sp-weight", default=0, type=float,
                    help="weight of the sp loss")
parser.add_argument("--max-ans-len", default=30, type=int)
parser.add_argument("--save-prediction", default="", type=str)
parser.add_argument("--index-gpu", default=-1, type=int)
args = parser.parse_args()
# Heavy model/index loads are cached across streamlit reruns.
reader, qa_tokenizer = init_reader(args)
retriever, index, id2doc, retriever_tokenizer = init_retrieval(args)
st.markdown("*Trick: Due to the case sensitive tokenization we used during training, try to use capitalized entity names in your question, e.g., type United States instead of united states.*")
query = st.text_input('Enter your question')
if query:
    # Training data dropped trailing question marks, so do the same here.
    query = query[:-1] if query.endswith("?") else query
    with torch.no_grad():
        print("Retrieving")
        # 1-hop: encode the bare question and search the flat IP index.
        q_encodes = retriever_tokenizer.batch_encode_plus(
            [query], max_length=args.max_q_len, pad_to_max_length=True, return_tensors="pt")
        q_encodes = move_to_cuda(dict(q_encodes))
        q_embeds = retriever.encode_q(
            q_encodes["input_ids"], q_encodes["attention_mask"], q_encodes.get("token_type_ids", None)).cpu().numpy()
        scores_1, docid_1 = index.search(q_embeds, args.topk)
query_pairs = [] # for 2nd hop
for _, doc_id in enumerate(docid_1[0]):
doc = id2doc[str(doc_id)]["text"]
if doc.strip() == "":
# roberta tokenizer does not accept empty string as segment B
doc = id2doc[str(doc_id)]["title"]
scores_1[b_idx][_] = float("-inf")
query_pairs.append((query, doc))
        # 2-hop: encode (question, hop-1 passage) pairs and search again.
        q_sp_encodes = retriever_tokenizer.batch_encode_plus(
            query_pairs, max_length=args.max_q_sp_len, pad_to_max_length=True, return_tensors="pt")
        q_sp_encodes = move_to_cuda(dict(q_sp_encodes))
        q_sp_embeds = retriever.encode_q(
            q_sp_encodes["input_ids"], q_sp_encodes["attention_mask"],q_sp_encodes.get("token_type_ids", None)).cpu().numpy()
        scores_2, docid_2 = index.search(q_sp_embeds, args.topk)
        scores_2 = scores_2.reshape(1, args.topk, args.topk)
        docid_2 = docid_2.reshape(1, args.topk, args.topk)
        # Chain score = hop-1 IP score + hop-2 IP score (larger is better).
        path_scores = np.expand_dims(scores_1, axis=2) + scores_2
        search_scores = path_scores[0]
        # Rank all topk*topk (hop1, hop2) pairs by total path score.
        ranked_pairs = np.vstack(np.unravel_index(np.argsort(search_scores.ravel())[::-1], (args.topk, args.topk))).transpose()
        chains = []
        topk_docs = {}
        for _ in range(args.topk):
            path_ids = ranked_pairs[_]
            doc1_id = str(docid_1[0, path_ids[0]])
            doc2_id = str(docid_2[0, path_ids[0], path_ids[1]])
            chains.append([id2doc[doc1_id], id2doc[doc2_id]])
            topk_docs[id2doc[doc1_id]['title']] = id2doc[doc1_id]['text']
            topk_docs[id2doc[doc2_id]['title']] = id2doc[doc2_id]['text']
    # Hand the candidate chains to the ELECTRA reader.
    reader_input = [{
        "_id": 0,
        "question": query,
        "candidate_chains": chains
    }]
    print(f"Reading {len(chains)} chains...")
    collate_fc = partial(qa_collate, pad_id=qa_tokenizer.pad_token_id)
    qa_eval_dataset = QAEvalDataset(
        qa_tokenizer, reader_input, max_seq_len=512, max_q_len=64)
    qa_eval_dataloader = DataLoader(
        qa_eval_dataset, batch_size=args.topk, collate_fn=collate_fc, pin_memory=True, num_workers=0)
    qa_results = eval_final(args, reader, qa_eval_dataloader, gpu=True)
    answer_pred = qa_results['answer'][0]
    sp_pred = qa_results['sp'][0]
    titles_pred = qa_results['titles'][0]
    # Render the answer and the two supporting passages, with the answer
    # span bolded inside each passage.
    st.markdown(f'**Answer**: {answer_pred}')
    st.markdown(f'**Supporting passages**:')
    st.markdown(f'> **{titles_pred[0]}**: {topk_docs[titles_pred[0]].replace(answer_pred, "**" + answer_pred + "**")}')
    st.markdown(
        f'> **{titles_pred[1]}**: {topk_docs[titles_pred[1]].replace(answer_pred, "**" + answer_pred + "**")}')
    # st.write(qa_results)
| 7,262 | 40.741379 | 192 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/train_qa.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
import json
import logging
import os
import random
from datetime import date
from functools import partial
import numpy as np
from numpy.core.defchararray import encode
import torch
from torch import sparse_coo_tensor
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.optim import Adam
from tqdm import tqdm
from transformers import (AdamW, AutoConfig, AutoTokenizer,
get_linear_schedule_with_warmup)
from mdr.qa.config import train_args
from mdr.qa.qa_dataset import QADataset, qa_collate, MhopSampler
from mdr.qa.qa_model import QAModel
from mdr.qa.utils import AverageMeter, move_to_cuda, get_final_text
from mdr.qa.hotpot_evaluate_v1 import f1_score, exact_match_score, update_sp
def load_saved(model, path):
    """Load a checkpoint into ``model`` in place and return it.

    Checkpoints written from ``nn.DataParallel`` / DDP-wrapped models prefix
    every parameter key with ``"module."``; that prefix is stripped so the
    keys match a bare (unwrapped) model.

    Args:
        model: the module whose parameters will be populated.
        path: filesystem path (or file-like object) accepted by ``torch.load``.

    Returns:
        The same ``model`` instance, with the loaded state dict applied.
    """
    state_dict = torch.load(path)
    # Renamed from ``filter`` to avoid shadowing the builtin of the same name.
    def strip_prefix(name):
        return name[7:] if name.startswith('module.') else name
    state_dict = {strip_prefix(k): v for (k, v) in state_dict.items()}
    model.load_state_dict(state_dict)
    return model
def main():
    """Entry point: train and/or evaluate the multi-hop QA reader, driven by CLI args."""
    args = train_args()
    if args.fp16:
        import apex
        apex.amp.register_half_function(torch, 'einsum')
    date_curr = date.today().strftime("%m-%d-%Y")
    # The run name encodes the main hyperparameters so output dirs are self-describing.
    model_name = f"{args.prefix}-seed{args.seed}-bsz{args.train_batch_size}-fp16{args.fp16}-lr{args.learning_rate}-decay{args.weight_decay}-neg{args.neg_num}-sn{args.shared_norm}-adam{args.use_adam}-warm{args.warmup_ratio}-sp{args.sp_weight}"
    args.output_dir = os.path.join(args.output_dir, date_curr, model_name)
    tb_logger = SummaryWriter(os.path.join(args.output_dir.replace("logs","tflogs")))

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        # Warn only; training proceeds and may overwrite files in the directory.
        print(
            f"output directory {args.output_dir} already exists and is not empty.")
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)

    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO,
                        handlers=[logging.FileHandler(os.path.join(args.output_dir, "log.txt")),
                                  logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    logger.info(args)

    # Device / distributed setup: local_rank == -1 means single-node (Data)Parallel.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device %s n_gpu %d distributed training %r",
                device, n_gpu, bool(args.local_rank != -1))

    if args.shared_norm:
        # chains of each question are on the same gpu
        assert (args.train_batch_size // n_gpu) == args.neg_num + 1

    # Seed everything before model construction for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # define model
    if args.model_name == "spanbert":
        # NOTE(review): hard-coded cluster path for SpanBERT weights — confirm availability.
        bert_config = AutoConfig.from_pretrained("/private/home/span-bert")
        tokenizer = AutoTokenizer.from_pretrained('bert-large-cased')
    else:
        bert_config = AutoConfig.from_pretrained(args.model_name)
        tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    model = QAModel(bert_config, args)

    collate_fc = partial(qa_collate, pad_id=tokenizer.pad_token_id)

    eval_dataset = QADataset(tokenizer, args.predict_file, args.max_seq_len, args.max_q_len)
    eval_dataloader = DataLoader(eval_dataset, batch_size=args.predict_batch_size, collate_fn=collate_fc, pin_memory=True, num_workers=args.num_workers)
    logger.info(f"Num of dev batches: {len(eval_dataloader)}")

    if args.init_checkpoint != "":
        logger.info(f"Loading model from {args.init_checkpoint}")
        model = load_saved(model, args.init_checkpoint)

    model.to(device)
    print(f"number of trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")

    if args.do_train:
        # Standard transformer fine-tuning: no weight decay on biases / LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        if args.use_adam:
            optimizer = Adam(optimizer_parameters,
                             lr=args.learning_rate, eps=args.adam_epsilon)
        else:
            optimizer = AdamW(optimizer_parameters,
                              lr=args.learning_rate, eps=args.adam_epsilon)

        if args.fp16:
            from apex import amp
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=args.fp16_opt_level)
    else:
        if args.fp16:
            from apex import amp
            model = amp.initialize(model, opt_level=args.fp16_opt_level)

    # Wrap for multi-GPU AFTER amp initialization, as apex requires.
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if args.do_train:
        global_step = 0  # gradient update step
        batch_step = 0  # forward batch count
        best_em = 0
        train_loss_meter = AverageMeter()
        model.train()
        train_dataset = QADataset(tokenizer, args.train_file, args.max_seq_len, args.max_q_len, train=True)
        # MhopSampler groups each question with its negatives in the same batch.
        train_sampler = MhopSampler(train_dataset, num_neg=args.neg_num)
        train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size, pin_memory=True, collate_fn=collate_fc, num_workers=args.num_workers, sampler=train_sampler)

        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        warmup_steps = t_total * args.warmup_ratio
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
        )

        logger.info('Start training....')
        for epoch in range(int(args.num_train_epochs)):
            for batch in tqdm(train_dataloader):
                batch_step += 1
                batch_inputs = move_to_cuda(batch["net_inputs"])
                loss = model(batch_inputs)
                if n_gpu > 1:
                    loss = loss.mean()
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                train_loss_meter.update(loss.item())

                # Step the optimizer only every gradient_accumulation_steps batches.
                if (batch_step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    global_step += 1

                    # logger.info(f"current batch loss: {loss.item()}")
                    tb_logger.add_scalar('batch_train_loss',
                                         loss.item(), global_step)
                    tb_logger.add_scalar('smoothed_train_loss',
                                         train_loss_meter.avg, global_step)

                    # Optional mid-epoch evaluation / best-checkpoint saving.
                    if args.eval_period != -1 and global_step % args.eval_period == 0:
                        metrics = predict(args, model, eval_dataloader, logger)
                        em = metrics["em"]
                        logger.info("Step %d Train loss %.2f em %.2f on epoch=%d" % (global_step, train_loss_meter.avg, em*100, epoch))
                        if best_em < em:
                            logger.info("Saving model with best em %.2f -> em %.2f on step=%d" %
                                        (best_em*100, em*100, global_step))
                            torch.save(model.state_dict(), os.path.join(
                                args.output_dir, f"checkpoint_best.pt"))
                            model = model.to(device)
                            best_em = em

            # End-of-epoch evaluation mirrors the mid-epoch one above.
            metrics = predict(args, model, eval_dataloader, logger)
            em = metrics["em"]
            logger.info("Step %d Train loss %.2f em %.2f" % (
                global_step, train_loss_meter.avg, em*100))
            tb_logger.add_scalar('dev_em', em*100, global_step)
            if best_em < em:
                logger.info("Saving model with best em %.2f -> em %.2f on epoch=%d" % (best_em*100, em*100, epoch))
                torch.save(model.state_dict(), os.path.join(
                    args.output_dir, f"checkpoint_best.pt"))
                best_em = em

        logger.info("Training finished!")

    elif args.do_predict:
        metrics = predict(args, model, eval_dataloader, logger, fixed_thresh=0.8)
        logger.info(f"test performance {metrics}")
    elif args.do_test:
        eval_final(args, model, eval_dataloader, weight=0.8)
def predict(args, model, eval_dataloader, logger, fixed_thresh=None):
    """Evaluate the reader on ``eval_dataloader`` and return the best metrics.

    For every question the model scores several candidate passage chains.  The
    final answer is chosen by ranking candidates with a convex combination
    ``lambda_ * rank_score + (1 - lambda_) * span_score``; unless
    ``fixed_thresh`` is given, ``lambda_`` is swept over [0, 1] in 0.1 steps
    and the best setting (by joint F1 when ``args.sp_pred``, else answer F1)
    is reported.

    Args:
        args: parsed training/eval arguments (uses ``sp_pred``, ``max_ans_len``,
            ``save_prediction``).
        model: the QA reader model (already on device, possibly DataParallel).
        eval_dataloader: yields batches from ``QADataset``/``qa_collate``.
        logger: logger for progress / metric reporting.
        fixed_thresh: if set, evaluate only at this single lambda value.

    Returns:
        dict with keys ``em``, ``f1``, ``joint_em``, ``joint_f1``, ``sp_em``,
        ``sp_f1`` for the best lambda.  Leaves the model in train mode.
    """
    model.eval()
    id2result = collections.defaultdict(list)
    id2answer = collections.defaultdict(list)
    id2gold = {}
    id2goldsp = {}
    for batch in tqdm(eval_dataloader):
        batch_to_feed = move_to_cuda(batch["net_inputs"])
        batch_qids = batch["qids"]
        batch_labels = batch["net_inputs"]["label"].view(-1).tolist()
        with torch.no_grad():
            outputs = model(batch_to_feed)
            scores = outputs["rank_score"]
            scores = scores.view(-1).tolist()
            if args.sp_pred:
                # Mask padded sentence slots (offset 0) before the sigmoid.
                sp_scores = outputs["sp_score"]
                sp_scores = sp_scores.float().masked_fill(batch_to_feed["sent_offsets"].eq(0), float("-inf")).type_as(sp_scores)
                batch_sp_scores = sp_scores.sigmoid()
            # ans_type_predicted = torch.argmax(outputs["ans_type_logits"], dim=1).view(-1).tolist()
            outs = [outputs["start_logits"], outputs["end_logits"]]
        for qid, label, score in zip(batch_qids, batch_labels, scores):
            id2result[qid].append((label, score))

        # answer prediction: score every (start, end) pair, then mask spans
        # that are reversed or longer than args.max_ans_len.
        span_scores = outs[0][:, :, None] + outs[1][:, None]
        max_seq_len = span_scores.size(1)
        span_mask = np.tril(np.triu(np.ones((max_seq_len, max_seq_len)), 0), args.max_ans_len)
        span_mask = span_scores.data.new(max_seq_len, max_seq_len).copy_(torch.from_numpy(span_mask))
        span_scores_masked = span_scores.float().masked_fill((1 - span_mask[None].expand_as(span_scores)).bool(), -1e10).type_as(span_scores)
        start_position = span_scores_masked.max(dim=2)[0].max(dim=1)[1]
        end_position = span_scores_masked.max(dim=2)[1].gather(
            1, start_position.unsqueeze(1)).squeeze(1)
        answer_scores = span_scores_masked.max(dim=2)[0].max(dim=1)[0].tolist()
        # Shift positions back from the full sequence into document coordinates.
        para_offset = batch['para_offsets']
        start_position_ = list(
            np.array(start_position.tolist()) - np.array(para_offset))
        end_position_ = list(
            np.array(end_position.tolist()) - np.array(para_offset))
        for idx, qid in enumerate(batch_qids):
            id2gold[qid] = batch["gold_answer"][idx]
            id2goldsp[qid] = batch["sp_gold"][idx]
            rank_score = scores[idx]
            start = start_position_[idx]
            end = end_position_[idx]
            span_score = answer_scores[idx]

            # Map wordpiece span back to the original (whitespace) tokens.
            tok_to_orig_index = batch['tok_to_orig_index'][idx]
            doc_tokens = batch['doc_tokens'][idx]
            wp_tokens = batch['wp_tokens'][idx]
            orig_doc_start = tok_to_orig_index[start]
            orig_doc_end = tok_to_orig_index[end]
            orig_tokens = doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_tokens = wp_tokens[start:end+1]
            tok_text = " ".join(tok_tokens)
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            pred_str = get_final_text(tok_text, orig_text, do_lower_case=True, verbose_logging=False)

            # get the sp sentences
            pred_sp = []
            if args.sp_pred:
                sp_score = batch_sp_scores[idx].tolist()
                passages = batch["passages"][idx]
                for passage, sent_offset in zip(passages, [0, len(passages[0]["sents"])]):
                    # BUGFIX: this inner loop previously reused ``idx`` and
                    # clobbered the outer example index; use a distinct name.
                    for sent_idx, _ in enumerate(passage["sents"]):
                        try:
                            if sp_score[sent_idx + sent_offset] >= 0.5:
                                pred_sp.append([passage["title"], sent_idx])
                        except IndexError:
                            # sentence index exceeds the truncated max length;
                            # best-effort: skip it (was a bare ``except``).
                            continue
            id2answer[qid].append({
                "pred_str": pred_str.strip(),
                "rank_score": rank_score,
                "span_score": span_score,
                "pred_sp": pred_sp
            })

    # Chain-ranking accuracy: is the top-scored chain a positive one?
    acc = []
    for qid, res in id2result.items():
        res.sort(key=lambda x: x[1], reverse=True)
        acc.append(res[0][0] == 1)
    logger.info(f"evaluated {len(id2result)} questions...")
    logger.info(f'chain ranking em: {np.mean(acc)}')

    best_em, best_f1, best_joint_em, best_joint_f1, best_sp_em, best_sp_f1 = 0, 0, 0, 0, 0, 0
    best_res = None
    if fixed_thresh:
        lambdas = [fixed_thresh]
    else:
        # selecting threshhold on the dev data
        lambdas = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
    for lambda_ in lambdas:
        ems, f1s, sp_ems, sp_f1s, joint_ems, joint_f1s = [], [], [], [], [], []
        results = collections.defaultdict(dict)
        for qid, res in id2result.items():
            ans_res = id2answer[qid]
            ans_res.sort(key=lambda x: lambda_ * x["rank_score"] + (1 - lambda_) * x["span_score"], reverse=True)
            top_pred = ans_res[0]["pred_str"]
            top_pred_sp = ans_res[0]["pred_sp"]
            results["answer"][qid] = top_pred
            results["sp"][qid] = top_pred_sp
            ems.append(exact_match_score(top_pred, id2gold[qid][0]))
            f1, prec, recall = f1_score(top_pred, id2gold[qid][0])
            f1s.append(f1)
            if args.sp_pred:
                metrics = {'sp_em': 0, 'sp_f1': 0, 'sp_prec': 0, 'sp_recall': 0}
                update_sp(metrics, top_pred_sp, id2goldsp[qid])
                sp_ems.append(metrics['sp_em'])
                sp_f1s.append(metrics['sp_f1'])

                # joint metrics (HotpotQA-style: answer x supporting facts)
                joint_prec = prec * metrics["sp_prec"]
                joint_recall = recall * metrics["sp_recall"]
                if joint_prec + joint_recall > 0:
                    joint_f1 = 2 * joint_prec * joint_recall / (joint_prec + joint_recall)
                else:
                    joint_f1 = 0.
                joint_em = ems[-1] * sp_ems[-1]
                joint_ems.append(joint_em)
                joint_f1s.append(joint_f1)

        if args.sp_pred:
            if best_joint_f1 < np.mean(joint_f1s):
                best_joint_f1 = np.mean(joint_f1s)
                best_joint_em = np.mean(joint_ems)
                best_sp_f1 = np.mean(sp_f1s)
                best_sp_em = np.mean(sp_ems)
                best_f1 = np.mean(f1s)
                best_em = np.mean(ems)
                best_res = results
        else:
            if best_f1 < np.mean(f1s):
                best_f1 = np.mean(f1s)
                best_em = np.mean(ems)

        logger.info(f".......Using combination factor {lambda_}......")
        logger.info(f'answer em: {np.mean(ems)}, count: {len(ems)}')
        logger.info(f'answer f1: {np.mean(f1s)}, count: {len(f1s)}')
        logger.info(f'sp em: {np.mean(sp_ems)}, count: {len(sp_ems)}')
        logger.info(f'sp f1: {np.mean(sp_f1s)}, count: {len(sp_f1s)}')
        logger.info(f'joint em: {np.mean(joint_ems)}, count: {len(joint_ems)}')
        logger.info(f'joint f1: {np.mean(joint_f1s)}, count: {len(joint_f1s)}')
    logger.info(f"Best joint F1 from combination {best_f1}")

    if args.save_prediction != "":
        json.dump(best_res, open(f"{args.save_prediction}", "w"))

    model.train()
    return {"em": best_em, "f1": best_f1, "joint_em": best_joint_em, "joint_f1": best_joint_f1, "sp_em": best_sp_em, "sp_f1": best_sp_f1}
import time
def eval_final(args, model, eval_dataloader, weight=0.8, gpu=True):
    """Run the reader for final submission (no gold labels required).

    Candidate chains are ranked with a fixed convex combination
    ``weight * rank_score + (1 - weight) * span_score``; the top chain per
    question supplies the answer span, supporting sentences, and chain titles.

    Args:
        args: parsed arguments (uses ``sp_pred``, ``max_ans_len``,
            ``save_prediction``).
        model: the QA reader model.
        eval_dataloader: yields batches from ``QAEvalDataset``.
        weight: lambda mixing rank score and span score (tuned on dev).
        gpu: move batches to CUDA when True.

    Returns:
        dict of dicts keyed by question id: ``answer``, ``sp``, ``titles``.
    """
    model.eval()
    id2answer = collections.defaultdict(list)
    encode_times = []  # per-batch forward latency (collected, not reported here)
    for batch in tqdm(eval_dataloader):
        batch_to_feed = move_to_cuda(batch["net_inputs"]) if gpu else batch["net_inputs"]
        batch_qids = batch["qids"]
        with torch.no_grad():
            start = time.time()
            outputs = model(batch_to_feed)
            encode_times.append(time.time() - start)
            scores = outputs["rank_score"]
            scores = scores.view(-1).tolist()
            if args.sp_pred:
                # Mask padded sentence slots (offset 0) before the sigmoid.
                sp_scores = outputs["sp_score"]
                sp_scores = sp_scores.float().masked_fill(batch_to_feed["sent_offsets"].eq(0), float("-inf")).type_as(sp_scores)
                batch_sp_scores = sp_scores.sigmoid()
            # ans_type_predicted = torch.argmax(outputs["ans_type_logits"], dim=1).view(-1).tolist()
            outs = [outputs["start_logits"], outputs["end_logits"]]

        # answer prediction: score every (start, end) pair, then mask spans
        # that are reversed or longer than args.max_ans_len.
        span_scores = outs[0][:, :, None] + outs[1][:, None]
        max_seq_len = span_scores.size(1)
        span_mask = np.tril(np.triu(np.ones((max_seq_len, max_seq_len)), 0), args.max_ans_len)
        span_mask = span_scores.data.new(max_seq_len, max_seq_len).copy_(torch.from_numpy(span_mask))
        span_scores_masked = span_scores.float().masked_fill((1 - span_mask[None].expand_as(span_scores)).bool(), -1e10).type_as(span_scores)
        start_position = span_scores_masked.max(dim=2)[0].max(dim=1)[1]
        end_position = span_scores_masked.max(dim=2)[1].gather(
            1, start_position.unsqueeze(1)).squeeze(1)
        answer_scores = span_scores_masked.max(dim=2)[0].max(dim=1)[0].tolist()
        # Shift positions back from the full sequence into document coordinates.
        para_offset = batch['para_offsets']
        start_position_ = list(
            np.array(start_position.tolist()) - np.array(para_offset))
        end_position_ = list(
            np.array(end_position.tolist()) - np.array(para_offset))
        for idx, qid in enumerate(batch_qids):
            rank_score = scores[idx]
            start = start_position_[idx]
            end = end_position_[idx]
            span_score = answer_scores[idx]

            # Map wordpiece span back to the original (whitespace) tokens.
            tok_to_orig_index = batch['tok_to_orig_index'][idx]
            doc_tokens = batch['doc_tokens'][idx]
            wp_tokens = batch['wp_tokens'][idx]
            orig_doc_start = tok_to_orig_index[start]
            orig_doc_end = tok_to_orig_index[end]
            orig_tokens = doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_tokens = wp_tokens[start:end+1]
            tok_text = " ".join(tok_tokens)
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            pred_str = get_final_text(tok_text, orig_text, do_lower_case=True, verbose_logging=False)

            chain_titles = [_["title"] for _ in batch["passages"][idx]]

            # get the sp sentences
            pred_sp = []
            if args.sp_pred:
                sp_score = batch_sp_scores[idx].tolist()
                passages = batch["passages"][idx]
                for passage, sent_offset in zip(passages, [0, len(passages[0]["sents"])]):
                    # BUGFIX: this inner loop previously reused ``idx`` and
                    # clobbered the outer example index; use a distinct name.
                    for sent_idx, _ in enumerate(passage["sents"]):
                        try:
                            if sp_score[sent_idx + sent_offset] > 0.5:
                                pred_sp.append([passage["title"], sent_idx])
                        except IndexError:
                            # sentence index exceeds the truncated max length;
                            # best-effort: skip it (was a bare ``except``).
                            continue
            id2answer[qid].append({
                "pred_str": pred_str.strip(),
                "rank_score": rank_score,
                "span_score": span_score,
                "pred_sp": pred_sp,
                "chain_titles": chain_titles
            })

    # Pick the top chain per question with the fixed mixing weight.
    lambda_ = weight
    results = collections.defaultdict(dict)
    for qid in id2answer.keys():
        ans_res = id2answer[qid]
        ans_res.sort(key=lambda x: lambda_ * x["rank_score"] + (1 - lambda_) * x["span_score"], reverse=True)
        top_pred = ans_res[0]["pred_str"]
        top_pred_sp = ans_res[0]["pred_sp"]
        results["answer"][qid] = top_pred
        results["sp"][qid] = top_pred_sp
        results["titles"][qid] = ans_res[0]["chain_titles"]

    if args.save_prediction != "":
        json.dump(results, open(f"{args.save_prediction}", "w"))
    return results
# Script entry point: run training / prediction according to CLI args.
if __name__ == "__main__":
    main()
| 21,946 | 44.251546 | 242 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/eval/eval_mhop_retrieval.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluating trained retrieval model.
Usage:
python eval_mhop_retrieval.py ${EVAL_DATA} ${CORPUS_VECTOR_PATH} ${CORPUS_DICT} ${MODEL_CHECKPOINT} \
--batch-size 50 \
--beam-size-1 20 \
--beam-size-2 5 \
--topk 20 \
--shared-encoder \
--gpu \
--save-path ${PATH_TO_SAVE_RETRIEVAL}
"""
import argparse
import collections
import json
import logging
from os import path
import time
import faiss
import numpy as np
import torch
from tqdm import tqdm
from transformers import AutoConfig, AutoTokenizer
from mdr.retrieval.models.mhop_retriever import RobertaRetriever
from mdr.retrieval.utils.basic_tokenizer import SimpleTokenizer
from mdr.retrieval.utils.utils import (load_saved, move_to_cuda, para_has_answer)
# Configure the root logger once per process: drop any inherited handlers
# so messages are not duplicated, then log everything to the console.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if (logger.hasHandlers()):
    logger.handlers.clear()
console = logging.StreamHandler()
logger.addHandler(console)
def convert_hnsw_query(query_vectors):
    """Pad query vectors with one zero-valued auxiliary dimension.

    The HNSW index stores corpus vectors augmented with an extra coordinate
    (inner-product -> L2 reduction); queries get a zero in that slot so the
    L2 distance ranking matches the original inner-product ranking.

    Args:
        query_vectors: 2-D float32 array of shape (num_queries, dim).

    Returns:
        float32 array of shape (num_queries, dim + 1).
    """
    zero_column = np.zeros((len(query_vectors), 1), dtype='float32')
    return np.hstack((query_vectors, zero_column))
# Script body: build (or load) a FAISS index over corpus embeddings, then run
# two-hop beam-search retrieval for every question and report recall metrics.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('raw_data', type=str, default=None)
    parser.add_argument('indexpath', type=str, default=None)
    parser.add_argument('corpus_dict', type=str, default=None)
    parser.add_argument('model_path', type=str, default=None)
    parser.add_argument('--topk', type=int, default=2, help="topk paths")
    parser.add_argument('--num-workers', type=int, default=10)
    parser.add_argument('--max-q-len', type=int, default=70)
    parser.add_argument('--max-c-len', type=int, default=300)
    parser.add_argument('--max-q-sp-len', type=int, default=350)
    parser.add_argument('--batch-size', type=int, default=100)
    parser.add_argument('--beam-size', type=int, default=5)
    parser.add_argument('--model-name', type=str, default='roberta-base')
    parser.add_argument('--gpu', action="store_true")
    parser.add_argument('--save-index', action="store_true")
    parser.add_argument('--only-eval-ans', action="store_true")
    parser.add_argument('--shared-encoder', action="store_true")
    parser.add_argument("--save-path", type=str, default="")
    parser.add_argument("--stop-drop", default=0, type=float)
    parser.add_argument('--hnsw', action="store_true")
    args = parser.parse_args()

    logger.info("Loading data...")
    ds_items = [json.loads(_) for _ in open(args.raw_data).readlines()]

    # filter
    if args.only_eval_ans:
        ds_items = [_ for _ in ds_items if _["answer"][0] not in ["yes", "no"]]

    logger.info("Loading trained model...")
    bert_config = AutoConfig.from_pretrained(args.model_name)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    model = RobertaRetriever(bert_config, args)
    model = load_saved(model, args.model_path, exact=False)
    simple_tokenizer = SimpleTokenizer()

    cuda = torch.device('cuda')
    model.to(cuda)
    from apex import amp
    model = amp.initialize(model, opt_level='O1')
    model.eval()

    logger.info("Building index...")
    d = 768  # embedding dimensionality of the retriever encoder
    xb = np.load(args.indexpath).astype('float32')

    if args.hnsw:
        # NOTE(review): existence is checked under "data/hotpot_index/" but the
        # index is read from "index/" — likely a path inconsistency; confirm.
        if path.exists("data/hotpot_index/wiki_index_hnsw.index"):
            index = faiss.read_index("index/wiki_index_hnsw.index")
        else:
            index = faiss.IndexHNSWFlat(d + 1, 512)
            index.hnsw.efSearch = 128
            index.hnsw.efConstruction = 200
            # phi = max squared L2 norm over the corpus, used for the
            # standard inner-product -> L2 distance transformation.
            phi = 0
            for i, vector in enumerate(xb):
                norms = (vector ** 2).sum()
                phi = max(phi, norms)
            logger.info('HNSWF DotProduct -> L2 space phi={}'.format(phi))

            data = xb
            buffer_size = 50000
            n = len(data)
            print(n)
            # Augment each corpus vector with sqrt(phi - |v|^2) and add in chunks.
            for i in tqdm(range(0, n, buffer_size)):
                vectors = [np.reshape(t, (1, -1)) for t in data[i:i + buffer_size]]
                norms = [(doc_vector ** 2).sum() for doc_vector in vectors]
                aux_dims = [np.sqrt(phi - norm) for norm in norms]
                hnsw_vectors = [np.hstack((doc_vector, aux_dims[idx].reshape(-1, 1))) for idx, doc_vector in enumerate(vectors)]
                hnsw_vectors = np.concatenate(hnsw_vectors, axis=0)
                index.add(hnsw_vectors)
    else:
        # Exact inner-product search.
        index = faiss.IndexFlatIP(d)
        index.add(xb)
        if args.gpu:
            res = faiss.StandardGpuResources()
            # NOTE(review): GPU device id 6 is hard-coded — confirm intended.
            index = faiss.index_cpu_to_gpu(res, 6, index)

    if args.save_index:
        faiss.write_index(index, "data/hotpot_index/wiki_index_hnsw_roberta")

    logger.info(f"Loading corpus...")
    id2doc = json.load(open(args.corpus_dict))
    if isinstance(id2doc["0"], list):
        # Legacy corpus format stores [title, text] lists; normalize to dicts.
        id2doc = {k: {"title":v[0], "text": v[1]} for k, v in id2doc.items()}
    # title2text = {v[0]:v[1] for v in id2doc.values()}
    logger.info(f"Corpus size {len(id2doc)}")

    logger.info("Encoding questions and searching")
    questions = [_["question"][:-1] if _["question"].endswith("?") else _["question"] for _ in ds_items]
    metrics = []
    retrieval_outputs = []
    for b_start in tqdm(range(0, len(questions), args.batch_size)):
        with torch.no_grad():
            batch_q = questions[b_start:b_start + args.batch_size]
            batch_ann = ds_items[b_start:b_start + args.batch_size]
            bsize = len(batch_q)

            # Hop 1: retrieve beam_size passages per question.
            batch_q_encodes = tokenizer.batch_encode_plus(batch_q, max_length=args.max_q_len, pad_to_max_length=True, return_tensors="pt")
            batch_q_encodes = move_to_cuda(dict(batch_q_encodes))
            q_embeds = model.encode_q(batch_q_encodes["input_ids"], batch_q_encodes["attention_mask"], batch_q_encodes.get("token_type_ids", None))
            q_embeds_numpy = q_embeds.cpu().contiguous().numpy()
            if args.hnsw:
                q_embeds_numpy = convert_hnsw_query(q_embeds_numpy)
            D, I = index.search(q_embeds_numpy, args.beam_size)

            # 2hop search
            query_pairs = []
            for b_idx in range(bsize):
                for _, doc_id in enumerate(I[b_idx]):
                    doc = id2doc[str(doc_id)]["text"]
                    if "roberta" in args.model_name and doc.strip() == "":
                        # Empty passages break RoBERTa encoding; fall back to
                        # the title and effectively discard the hop-1 score.
                        # doc = "fadeaxsaa" * 100
                        doc = id2doc[str(doc_id)]["title"]
                        D[b_idx][_] = float("-inf")
                    query_pairs.append((batch_q[b_idx], doc))

            batch_q_sp_encodes = tokenizer.batch_encode_plus(query_pairs, max_length=args.max_q_sp_len, pad_to_max_length=True, return_tensors="pt")
            batch_q_sp_encodes = move_to_cuda(dict(batch_q_sp_encodes))
            s1 = time.time()
            q_sp_embeds = model.encode_q(batch_q_sp_encodes["input_ids"], batch_q_sp_encodes["attention_mask"], batch_q_sp_encodes.get("token_type_ids", None))
            # print("Encoding time:", time.time() - s1)
            q_sp_embeds = q_sp_embeds.contiguous().cpu().numpy()
            s2 = time.time()
            if args.hnsw:
                q_sp_embeds = convert_hnsw_query(q_sp_embeds)
            D_, I_ = index.search(q_sp_embeds, args.beam_size)

            D_ = D_.reshape(bsize, args.beam_size, args.beam_size)
            I_ = I_.reshape(bsize, args.beam_size, args.beam_size)

            # aggregate path scores
            path_scores = np.expand_dims(D, axis=2) + D_
            if args.hnsw:
                # HNSW returns L2 distances (smaller is better); negate so the
                # descending sort below still ranks best paths first.
                path_scores = - path_scores

            for idx in range(bsize):
                # Rank all beam_size x beam_size two-hop paths by total score.
                search_scores = path_scores[idx]
                ranked_pairs = np.vstack(np.unravel_index(np.argsort(search_scores.ravel())[::-1],
                                                          (args.beam_size, args.beam_size))).transpose()
                retrieved_titles = []
                hop1_titles = []
                paths, path_titles = [], []
                for _ in range(args.topk):
                    path_ids = ranked_pairs[_]
                    hop_1_id = I[idx, path_ids[0]]
                    hop_2_id = I_[idx, path_ids[0], path_ids[1]]
                    retrieved_titles.append(id2doc[str(hop_1_id)]["title"])
                    retrieved_titles.append(id2doc[str(hop_2_id)]["title"])

                    paths.append([str(hop_1_id), str(hop_2_id)])
                    path_titles.append([id2doc[str(hop_1_id)]["title"], id2doc[str(hop_2_id)]["title"]])
                    hop1_titles.append(id2doc[str(hop_1_id)]["title"])

                if args.only_eval_ans:
                    # Answer-recall mode: does any retrieved passage contain the answer?
                    gold_answers = batch_ann[idx]["answer"]
                    concat_p = "yes no "
                    for p in paths:
                        concat_p += " ".join([id2doc[doc_id]["title"] + " " + id2doc[doc_id]["text"] for doc_id in p])
                    metrics.append({
                        "question": batch_ann[idx]["question"],
                        "ans_recall": int(para_has_answer(gold_answers, concat_p, simple_tokenizer)),
                        "type": batch_ann[idx].get("type", "single")
                    })
                else:
                    # Supporting-passage mode: compare against the two gold sp titles.
                    sp = batch_ann[idx]["sp"]
                    assert len(set(sp)) == 2
                    type_ = batch_ann[idx]["type"]
                    question = batch_ann[idx]["question"]
                    p_recall, p_em = 0, 0
                    sp_covered = [sp_title in retrieved_titles for sp_title in sp]
                    if np.sum(sp_covered) > 0:
                        p_recall = 1
                    if np.sum(sp_covered) == len(sp_covered):
                        p_em = 1
                    path_covered = [int(set(p) == set(sp)) for p in path_titles]
                    path_covered = np.sum(path_covered) > 0
                    recall_1 = 0
                    covered_1 = [sp_title in hop1_titles for sp_title in sp]
                    if np.sum(covered_1) > 0: recall_1 = 1
                    metrics.append({
                        "question": question,
                        "p_recall": p_recall,
                        "p_em": p_em,
                        "type": type_,
                        'recall_1': recall_1,
                        'path_covered': int(path_covered)
                    })

                    # saving when there's no annotations
                    candidaite_chains = []
                    for path in paths:
                        candidaite_chains.append([id2doc[path[0]], id2doc[path[1]]])

                    retrieval_outputs.append({
                        "_id": batch_ann[idx]["_id"],
                        "question": batch_ann[idx]["question"],
                        "candidate_chains": candidaite_chains,
                        # "sp": sp_chain,
                        # "answer": gold_answers,
                        # "type": type_,
                        # "coverd_k": covered_k
                    })

    if args.save_path != "":
        with open(args.save_path, "w") as out:
            for l in retrieval_outputs:
                out.write(json.dumps(l) + "\n")

    logger.info(f"Evaluating {len(metrics)} samples...")
    type2items = collections.defaultdict(list)
    for item in metrics:
        type2items[item["type"]].append(item)
    if args.only_eval_ans:
        logger.info(f'Ans Recall: {np.mean([m["ans_recall"] for m in metrics])}')
        for t in type2items.keys():
            logger.info(f"{t} Questions num: {len(type2items[t])}")
            logger.info(f'Ans Recall: {np.mean([m["ans_recall"] for m in type2items[t]])}')
    else:
        logger.info(f'\tAvg PR: {np.mean([m["p_recall"] for m in metrics])}')
        logger.info(f'\tAvg P-EM: {np.mean([m["p_em"] for m in metrics])}')
        logger.info(f'\tAvg 1-Recall: {np.mean([m["recall_1"] for m in metrics])}')
        logger.info(f'\tPath Recall: {np.mean([m["path_covered"] for m in metrics])}')
        for t in type2items.keys():
            logger.info(f"{t} Questions num: {len(type2items[t])}")
            logger.info(f'\tAvg PR: {np.mean([m["p_recall"] for m in type2items[t]])}')
            logger.info(f'\tAvg P-EM: {np.mean([m["p_em"] for m in type2items[t]])}')
            logger.info(f'\tAvg 1-Recall: {np.mean([m["recall_1"] for m in type2items[t]])}')
            logger.info(f'\tPath Recall: {np.mean([m["path_covered"] for m in type2items[t]])}')
| 12,631 | 43.322807 | 159 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/eval/eval_mhop_fever.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
python eval_mhop_fever.py /private/home/xwhan/data/fever/retrieval/dev_multi_evidence.txt index/fever.npy index/fever_corpus_id2doc.json logs/08-27-2020/fever-seed16-bsz96-fp16True-lr2e-05-decay0.0-warm0.1-valbsz3000-sharedTrue/checkpoint_best.pt --batch-size 100 --beam-size-1 2 --beam-size-2 10 --topk 20 --shared-encoder --model-name roberta-base --gpu --save-path dense_fever_b2_10_k20.json
# unified retrieval
python eval_mhop_fever.py /private/home/xwhan/data/fever/retrieval/dev.txt index/fever_unified.npy index/fever_corpus_id2doc.json logs/08-30-2020/fever_unified_roberta-seed16-bsz96-fp16True-lr2e-05-decay0.0-adamTrue/checkpoint_best.pt --batch-size 100 --beam-size-1 1 --beam-size-2 20 --topk 20 --shared-encoder --model-name roberta-base --gpu --save-path dense_all_b1_k10_unified.json
python eval_mhop_fever.py /private/home/xwhan/data/fever/retrieval/dev_multi_evidence.txt index/fever_unified.npy index/fever_corpus_id2doc.json logs/08-30-2020/fever_unified_roberta-seed16-bsz96-fp16True-lr2e-05-decay0.0-adamTrue/checkpoint_best.pt --batch-size 100 --beam-size-1 1 --beam-size-2 20 --topk 20 --shared-encoder --model-name roberta-base --gpu --save-path dense_multi_b1_k10_unified.json
# fix parenthesis
python eval_mhop_fever.py /private/home/xwhan/data/fever/retrieval/multi_dev.txt index/fever_.npy index/fever_corpus_id2doc.json logs/08-27-2020/fever_-seed16-bsz96-fp16True-lr2e-05-decay0.0-warm0.1-valbsz3000-sharedTrue/checkpoint_best.pt --batch-size 100 --beam-size-1 1 --beam-size-2 20 --topk 20 --shared-encoder --model-name roberta-base --gpu --save-path dense_fever_b1_k20_fix_brc.json
"""
import argparse
import json
import logging
from os import path
import faiss
import numpy as np
import torch
from tqdm import tqdm
from transformers import AutoConfig, AutoTokenizer
from models.mhop_retriever import RobertaRetriever
from models.unified_retriever import UnifiedRetriever
from utils.basic_tokenizer import SimpleTokenizer
from utils.utils import (load_saved, move_to_cuda, para_has_answer)
# Configure the root logger once per process: drop any inherited handlers
# so messages are not duplicated, then log everything to the console.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if (logger.hasHandlers()):
    logger.handlers.clear()
console = logging.StreamHandler()
logger.addHandler(console)
# Script entry point: two-hop dense retrieval evaluation for FEVER claims.
# Pipeline: encode claims -> first-hop MIPS search over the passage index ->
# build (claim, hop-1 doc) pairs -> second-hop search -> rank doc chains by
# the sum of the two inner-product scores and keep the top-k chains.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Positional inputs: claims (jsonl), dense passage embeddings (.npy),
    # id -> doc mapping (json), and the retriever checkpoint to evaluate.
    parser.add_argument('raw_data', type=str, default=None)
    parser.add_argument('indexpath', type=str, default=None)
    parser.add_argument('corpus_dict', type=str, default=None)
    parser.add_argument('model_path', type=str, default=None)
    parser.add_argument('--topk', type=int, default=2, help="topk paths")
    parser.add_argument('--num-workers', type=int, default=10)
    parser.add_argument('--max-q-len', type=int, default=45)
    parser.add_argument('--max-c-len', type=int, default=350)
    parser.add_argument('--max-q-sp-len', type=int, default=400)
    parser.add_argument('--batch-size', type=int, default=100)
    # beam-size-1 / beam-size-2: fan-out of the first / second retrieval hop
    parser.add_argument('--beam-size-1', type=int, default=5)
    parser.add_argument('--beam-size-2', type=int, default=5)
    parser.add_argument('--model-name', type=str, default='bert-base-uncased')
    parser.add_argument('--gpu', action="store_true")
    parser.add_argument('--shared-encoder', action="store_true")
    parser.add_argument("--save-path", type=str, default="")
    parser.add_argument("--stop-drop", default=0, type=float)
    args = parser.parse_args()

    logger.info("Loading data...")
    ds_items = [json.loads(_) for _ in open(args.raw_data).readlines()]

    # Exact (brute-force) inner-product index over 768-d passage embeddings.
    logger.info("Building index...")
    d = 768
    xb = np.load(args.indexpath).astype('float32')
    print(xb.shape)
    index = faiss.IndexFlatIP(d)
    index.add(xb)
    if args.gpu:
        # NOTE(review): the index is pinned to GPU device 1 — confirm device id.
        res = faiss.StandardGpuResources()
        index = faiss.index_cpu_to_gpu(res, 1, index)

    logger.info(f"Loading corpus...")
    # id2doc values are indexed as [0]=title, [1]=text, [2]=intro flag below;
    # presumably (title, text, is_intro) tuples — TODO confirm against corpus file.
    id2doc = json.load(open(args.corpus_dict))
    title2doc = {item[0]:item[1] for item in id2doc.values()}
    logger.info(f"Corpus size {len(id2doc)}")

    logger.info("Loading trained model...")
    bert_config = AutoConfig.from_pretrained(args.model_name)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    model = RobertaRetriever(bert_config, args)
    # model = UnifiedRetriever(bert_config, args)
    model = load_saved(model, args.model_path, exact=False)
    simple_tokenizer = SimpleTokenizer()
    cuda = torch.device('cuda')
    model.to(cuda)
    from apex import amp
    model = amp.initialize(model, opt_level='O1')
    model.eval()

    logger.info("Encoding claims and searching")
    questions = [_["claim"] for _ in ds_items]
    # NOTE(review): `metrics` is initialized but never filled in this block.
    metrics = []
    retrieval_outputs = []
    for b_start in tqdm(range(0, len(questions), args.batch_size)):
        with torch.no_grad():
            batch_q = questions[b_start:b_start + args.batch_size]
            batch_ann = ds_items[b_start:b_start + args.batch_size]
            bsize = len(batch_q)
            batch_q_encodes = tokenizer.batch_encode_plus(batch_q, max_length=args.max_q_len, pad_to_max_length=True, return_tensors="pt")
            batch_q_encodes = move_to_cuda(dict(batch_q_encodes))
            q_embeds = model.encode_q(batch_q_encodes["input_ids"], batch_q_encodes["attention_mask"], batch_q_encodes.get("token_type_ids", None))
            q_embeds_numpy = q_embeds.cpu().contiguous().numpy()
            # First hop: D = scores, I = doc ids, each (bsize, beam_size_1).
            D, I = index.search(q_embeds_numpy, args.beam_size_1)
            # 2hop search
            query_pairs = []
            for b_idx in range(bsize):
                for _, doc_id in enumerate(I[b_idx]):
                    doc = id2doc[str(doc_id)][1]
                    if "roberta" in args.model_name and doc.strip() == "":
                        # Empty passage text: fall back to the title and kill
                        # this hop-1 candidate's score so no chain through it
                        # can rank highly.
                        # doc = "fadeaxsaa" * 100
                        doc = id2doc[str(doc_id)][0]
                        D[b_idx][_] = float("-inf")
                    query_pairs.append((batch_q[b_idx], doc))
            batch_q_sp_encodes = tokenizer.batch_encode_plus(query_pairs, max_length=args.max_q_sp_len, pad_to_max_length=True, return_tensors="pt")
            batch_q_sp_encodes = move_to_cuda(dict(batch_q_sp_encodes))
            q_sp_embeds = model.encode_q(batch_q_sp_encodes["input_ids"], batch_q_sp_encodes["attention_mask"], batch_q_sp_encodes.get("token_type_ids", None))
            q_sp_embeds = q_sp_embeds.contiguous().cpu().numpy()
            # search_start = time.time()
            D_, I_ = index.search(q_sp_embeds, args.beam_size_2)
            # logger.info(f"MIPS searching: {time.time() - search_start}")
            D_ = D_.reshape(bsize, args.beam_size_1, args.beam_size_2)
            I_ = I_.reshape(bsize, args.beam_size_1, args.beam_size_2)
            # aggregate path scores
            # chain score = hop-1 score (broadcast) + hop-2 score
            path_scores = np.expand_dims(D, axis=2) + D_
            # path_scores = D_
            # eval
            for idx in range(bsize):
                search_scores = path_scores[idx]
                # Flatten the (beam1, beam2) score grid, sort descending, and
                # recover (hop1, hop2) beam indices for each rank.
                ranked_pairs = np.vstack(np.unravel_index(np.argsort(search_scores.ravel())[::-1], (args.beam_size_1, args.beam_size_2))).transpose()
                retrieved_titles = []
                hop1_titles = []
                paths, path_titles = [], []
                paths_both_are_intro = []
                for _ in range(args.topk):
                    path_ids = ranked_pairs[_]
                    hop_1_id = I[idx, path_ids[0]]
                    hop_2_id = I_[idx, path_ids[0], path_ids[1]]
                    retrieved_titles.append(id2doc[str(hop_1_id)][0])
                    retrieved_titles.append(id2doc[str(hop_2_id)][0])
                    paths.append([str(hop_1_id), str(hop_2_id)])
                    path_titles.append([id2doc[str(hop_1_id)][0], id2doc[str(hop_2_id)][0]])
                    paths_both_are_intro.append(id2doc[str(hop_1_id)][2] and id2doc[str(hop_2_id)][2])
                    hop1_titles.append(id2doc[str(hop_1_id)][0])
                # saving when there's no annotations
                if args.save_path != "":
                    candidaite_chains = []
                    for path in path_titles:
                        candidaite_chains.append([(path[0], title2doc[path[0]]), (path[1], title2doc[path[1]])])
                    retrieval_outputs.append({
                        "id": batch_ann[idx]["id"],
                        "claim": batch_ann[idx]["claim"],
                        "candidate_chains": candidaite_chains,
                    })

    # Persist one jsonl record per claim with its top-k candidate chains.
    if args.save_path != "":
        with open(f"/private/home/xwhan/data/fever/retrieval/{args.save_path}", "w") as out:
            for l in retrieval_outputs:
                out.write(json.dumps(l) + "\n")
| 8,812 | 49.073864 | 402 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/eval/eval_retrieval.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Single-hop retrieval evaluation
## Use the unified model (trained with both hotpotQA and NQ)
python eval_retrieval.py /private/home/xwhan/data/nq-dpr/nq-val-simplified.txt index/psg100_unified.npy index/psgs_w100_id2doc.json logs/07-24-2020/unified_continue-seed16-bsz150-fp16True-lr1e-05-decay0.0/checkpoint_best.pt --batch-size 1000 --shared-encoder --model-name bert-base-uncased --unified --save-pred nq-val-filtered-top50.txt --topk 50
# DPR shared-encoder baseline bsz256
python eval_retrieval.py /private/home/xwhan/data/nq-dpr/nq-test-qas.txt index/psg100_dpr_shared_baseline.npy index/psgs_w100_id2doc.json logs/08-23-2020/nq_dpr_shared-seed16-bsz256-fp16True-lr2e-05-decay0.0-warm0.1-bert-base-uncased/checkpoint_best.pt --batch-size 1000 --model-name bert-base-uncased --shared-encoder --max-q-len 50 --save-pred nq-test-dpr-shared-b256-res.txt
# shared encoder on merged corpus
python eval_retrieval.py /private/home/xwhan/data/nq-dpr/nq-test-qas.txt index/merged_all_single_only.npy index/merged_all_id2doc.json logs/08-23-2020/nq_dpr_shared-seed16-bsz256-fp16True-lr2e-05-decay0.0-warm0.1-bert-base-uncased/checkpoint_best.pt --batch-size 1000 --model-name bert-base-uncased --shared-encoder --max-q-len 50
# to get negatives from DPR shared baseline
python eval_retrieval.py /private/home/xwhan/data/nq-dpr/nq-val-simplified.txt index/psg100_dpr_shared_baseline.npy index/psgs_w100_id2doc.json logs/08-25-2020/wq_mhop_1_shared_dpr_neg_from_scratch-seed16-bsz150-fp16True-lr2e-05-decay0.0-warm0.1-bert-base-uncased/checkpoint_best.pt --batch-size 1000 --model-name bert-base-uncased --shared-encoder --save-pred nq-val-shared-dpr-top100.txt --topk 100
python eval_retrieval.py /private/home/xwhan/data/WebQ/WebQuestions-test.txt index/psg100_mhop_wq_1_from_baseline.npy index/psgs_w100_id2doc.json logs/08-26-2020/wq_mhop_1_shared_dpr_neg_from_scratch-seed16-bsz150-fp16True-lr2e-05-decay0.0-warm0.1-bert-base-uncased/checkpoint_best.pt --batch-size 1000 --model-name bert-base-uncased --shared-encoder --save-pred wq-test-res-type1.txt
python eval_retrieval.py /private/home/xwhan/data/nq-dpr/nq-test-qas.txt index/merged_all.npy index/merged_all_id2doc.json logs/07-24-2020/unified_continue-seed16-bsz150-fp16True-lr1e-05-decay0.0/checkpoint_best.pt --batch-size 1000 --shared-encoder --model-name bert-base-uncased --unified
"""
import numpy as np
import json
import faiss
import argparse
import logging
import torch
from tqdm import tqdm
from multiprocessing import Pool as ProcessPool
from multiprocessing.util import Finalize
from functools import partial
from collections import defaultdict
from utils.utils import load_saved, move_to_cuda, para_has_answer
from utils.basic_tokenizer import SimpleTokenizer
from transformers import AutoConfig, AutoTokenizer
from models.retriever import BertRetrieverSingle, RobertaRetrieverSingle
from models.unified_retriever import UnifiedRetriever, BertNQRetriever
# Module-level logging: log INFO+ to stderr via the root logger, replacing any
# handlers installed by previously imported libraries.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if (logger.hasHandlers()):
    logger.handlers.clear()
console = logging.StreamHandler()
logger.addHandler(console)

# Per-worker tokenizer handle; populated by `init()` inside each ProcessPool
# worker (multiprocessing-safe alternative to sharing one tokenizer object).
PROCESS_TOK = None
def init():
    """ProcessPool initializer: build one SimpleTokenizer per worker process.

    The tokenizer is kept in the module-global PROCESS_TOK and its shutdown
    is registered to run when the worker exits.
    """
    global PROCESS_TOK
    PROCESS_TOK = SimpleTokenizer()
    Finalize(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)
def get_score(answer_doc, topk=20):
    """Search through all the top docs to see if they have the answer.

    Args:
        answer_doc: (question, answer, docs) triple, where ``docs`` is the
            ranked list of retrieved {"title": ..., "text": ...} dicts and
            ``answer`` is the list of acceptable answer strings.
        topk: unused; kept so existing ``partial(get_score, topk=...)``
            call sites remain valid.

    Returns:
        Dict of answer-recall indicators (1/0) at cutoffs 5/10/20/50/100,
        plus the full per-document hit list under "covered".
    """
    # Fix: dropped the unused local `top5doc_covered` and the unnecessary
    # `global PROCESS_TOK` statement (the global is only read, never assigned).
    _question, answer, docs = answer_doc
    # 1 if the doc (title + text) contains any gold answer string, else 0.
    # PROCESS_TOK is the per-worker tokenizer created by init().
    topkpara_covered = [
        int(para_has_answer(answer, p["title"] + " " + p["text"], PROCESS_TOK))
        for p in docs
    ]
    return {
        "5": int(any(topkpara_covered[:5])),
        "10": int(any(topkpara_covered[:10])),
        "20": int(any(topkpara_covered[:20])),
        "50": int(any(topkpara_covered[:50])),
        "100": int(any(topkpara_covered[:100])),
        "covered": topkpara_covered
    }
def add_marker_q(tokenizer, q):
    """Tokenize question ``q`` and prepend the reserved '[unused0]' marker token."""
    return ['[unused0]'] + tokenizer.tokenize(q)
# Script entry point: single-hop retrieval evaluation over open-domain QA
# pairs. Encodes questions, searches a flat inner-product index, and reports
# answer recall@{5,10,20,50,100} computed in parallel worker processes.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('raw_data', type=str, default=None)
    parser.add_argument('indexpath', type=str, default=None)
    parser.add_argument('corpus_dict', type=str, default=None)
    parser.add_argument('model_path', type=str, default=None)
    parser.add_argument('--batch-size', type=int, default=100)
    parser.add_argument('--topk', type=int, default=100)
    parser.add_argument('--max-q-len', type=int, default=100)
    parser.add_argument('--num-workers', type=int, default=10)
    parser.add_argument('--shared-encoder', action="store_true")
    parser.add_argument('--model-name', type=str, default='bert-base-uncased')
    parser.add_argument("--stop-drop", default=0, type=float)
    parser.add_argument("--gpu", action="store_true")
    parser.add_argument("--save-pred", default="", type=str)
    parser.add_argument("--unified", action="store_true", help="test with unified trained model")
    args = parser.parse_args()

    logger.info(f"Loading questions")
    qas = [json.loads(line) for line in open(args.raw_data).readlines()]
    # Strip a trailing question mark; the retriever was presumably trained on
    # questions without it — TODO confirm against training preprocessing.
    questions = [_["question"][:-1] if _["question"].endswith("?") else _["question"] for _ in qas]
    answers = [item["answer"] for item in qas]

    logger.info(f"Loading index")
    d = 768
    xb = np.load(args.indexpath).astype('float32')
    index = faiss.IndexFlatIP(d)
    index.add(xb)
    if args.gpu:
        # NOTE(review): index is pinned to GPU device 1 — confirm device id.
        res = faiss.StandardGpuResources()
        index = faiss.index_cpu_to_gpu(res, 1, index)
        # logger.info(f"Building GPU index")
        # co = faiss.GpuMultipleClonerOptions()
        # co.useFloat16 = True
        # co.shards = True
        # index = faiss.index_cpu_to_gpus_list(index, co, [1,2,3,4,5,6,7])
        # index.add(xb)

    logger.info("Loading trained model...")
    bert_config = AutoConfig.from_pretrained(args.model_name)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    if args.unified:
        model = UnifiedRetriever(bert_config, args)
    elif "roberta" in args.model_name:
        model = RobertaRetrieverSingle(bert_config, args)
    else:
        model = BertRetrieverSingle(bert_config, args)
    model = load_saved(model, args.model_path, exact=False)
    cuda = torch.device('cuda')
    model.to(cuda)
    from apex import amp
    model = amp.initialize(model, opt_level='O1')
    model.eval()

    logger.info(f"Loading corpus")
    id2doc = json.load(open(args.corpus_dict))
    logger.info(f"Corpus size {len(id2doc)}")

    retrieved_results = []
    retrieved_docids = []
    for b_start in tqdm(range(0, len(questions), args.batch_size)):
        with torch.no_grad():
            batch_q = questions[b_start:b_start + args.batch_size]
            # NOTE(review): `batch_ans` is unused in this loop.
            batch_ans = answers[b_start:b_start + args.batch_size]
            # test retrieval model with marker
            # batch_q_toks = [add_marker_q(tokenizer, q) for q in batch_q]
            # batch_q_encodes = tokenizer.batch_encode_plus(batch_q_toks, max_length=args.max_q_len, pad_to_max_length=True, return_tensors="pt", is_pretokenized=True)
            # NOTE(review): `is_pretokenized=True` is passed although `batch_q`
            # holds raw strings (the pre-tokenized variant above is commented
            # out); transformers may then mis-handle the input — confirm this
            # flag should not be dropped here.
            batch_q_encodes = tokenizer.batch_encode_plus(batch_q, max_length=args.max_q_len, pad_to_max_length=True, return_tensors="pt", is_pretokenized=True)
            batch_q_encodes = move_to_cuda(dict(batch_q_encodes))
            q_embeds = model.encode_q(batch_q_encodes["input_ids"], batch_q_encodes["attention_mask"], batch_q_encodes.get("token_type_ids", None))
            q_embeds_numpy = q_embeds.cpu().contiguous().numpy()
            D, I = index.search(q_embeds_numpy, args.topk)
            for b_idx in range(len(batch_q)):
                topk_docs = [{"title": id2doc[str(doc_id)][0],"text": id2doc[str(doc_id)][1]} for doc_id in I[b_idx]]
                retrieved_results.append(topk_docs)
                retrieved_docids.append([str(doc_id) for doc_id in I[b_idx]])

    # Score answer coverage in parallel; each worker builds its own tokenizer
    # via `init` and reads it from the module-global PROCESS_TOK.
    answers_docs = list(zip(questions, answers, retrieved_results))
    processes = ProcessPool(
        processes=args.num_workers,
        initializer=init
    )
    get_score_partial = partial(
        get_score, topk=args.topk)
    results = processes.map(get_score_partial, answers_docs)

    if args.save_pred != "":
        to_save = []
        for inputs, metrics, topk_ids in zip(answers_docs, results, retrieved_docids):
            q, ans, topk_doc = inputs
            topk_covered = metrics["covered"]
            assert len(topk_doc) == len(topk_covered)
            assert len(topk_doc) == len(topk_ids)
            to_save.append({
                "question": q,
                "ans": ans,
                "topk": list(zip(topk_doc, topk_covered)),
                "topkdocs": topk_doc,
                "metrics": metrics,
                "topk_ids": topk_ids
            })
        print(f"Saving {len(to_save)} instances...")
        with open("/private/home/xwhan/data/nq-dpr/results/" + args.save_pred, "w") as g:
            for l in to_save:
                g.write(json.dumps(l) + "\n")

    # Aggregate per-question metrics and print mean recall per cutoff.
    aggregate = defaultdict(list)
    for r in results:
        for k, v in r.items():
            aggregate[k].append(v)
    for k in aggregate:
        # NOTE(review): this rebinds (shadows) `results`; harmless since the
        # original list is no longer needed. The loop also prints the
        # "covered" key, whose np.mean is a per-rank average rather than a
        # single recall number — presumably cosmetic; confirm.
        results = aggregate[k]
        print('Top {} Recall for {} QA pairs: {} ...'.format(
            k, len(results), np.mean(results)))
| 9,540 | 44.650718 | 401 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/scripts/eval/eval_single_fever.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
python eval_single_fever.py /private/home/xwhan/data/fever/retrieval/dev_single_evidence.txt index/fever_single.npy index/fever_corpus_id2doc.json logs/08-30-2020/fever_single-seed16-bsz256-fp16True-lr2e-05-decay0.0-warm0-bert-base-uncased/checkpoint_best.pt --batch-size 1000 --shared-encoder --model-name bert-base-uncased --gpu --save-path dense_dev_single_k10.json --topk 10
python eval_single_fever.py /private/home/xwhan/data/fever/retrieval/dev_single_evidence.txt index/fever_unified.npy index/fever_corpus_id2doc.json logs/08-30-2020/fever_unified_roberta-seed16-bsz96-fp16True-lr2e-05-decay0.0-adamTrue/checkpoint_best.pt --batch-size 1000 --shared-encoder --model-name roberta-base --gpu --save-path dense_unified_dev_single_k10.json --topk 10
"""
import argparse
import json
import logging
import faiss
import numpy as np
import torch
from tqdm import tqdm
from transformers import AutoConfig, AutoTokenizer
from models.retriever import BertRetrieverSingle
from models.unified_retriever import UnifiedRetriever
from utils.basic_tokenizer import SimpleTokenizer
from utils.utils import (load_saved, move_to_cuda)
# Module-level logging: log INFO+ to stderr via the root logger, replacing any
# handlers installed by previously imported libraries.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if (logger.hasHandlers()):
    logger.handlers.clear()
console = logging.StreamHandler()
logger.addHandler(console)
# Script entry point: single-hop dense retrieval for FEVER claims. Encodes
# each claim, searches the passage index once, and optionally dumps the top-k
# retrieved documents per claim as jsonl.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('raw_data', type=str, default=None)
    parser.add_argument('indexpath', type=str, default=None)
    parser.add_argument('corpus_dict', type=str, default=None)
    parser.add_argument('model_path', type=str, default=None)
    parser.add_argument('--topk', type=int, default=2, help="topk paths")
    parser.add_argument('--num-workers', type=int, default=10)
    parser.add_argument('--max-q-len', type=int, default=45)
    parser.add_argument('--batch-size', type=int, default=100)
    parser.add_argument('--model-name', type=str, default='bert-base-uncased')
    parser.add_argument('--gpu', action="store_true")
    parser.add_argument('--shared-encoder', action="store_true")
    parser.add_argument("--save-path", type=str, default="")
    parser.add_argument("--stop-drop", default=0, type=float)
    args = parser.parse_args()

    logger.info("Loading data...")
    ds_items = [json.loads(_) for _ in open(args.raw_data).readlines()]

    # Exact inner-product index over 768-d passage embeddings.
    logger.info("Building index...")
    d = 768
    xb = np.load(args.indexpath).astype('float32')
    print(xb.shape)
    index = faiss.IndexFlatIP(d)
    index.add(xb)
    if args.gpu:
        # NOTE(review): index is pinned to GPU device 1 — confirm device id.
        res = faiss.StandardGpuResources()
        index = faiss.index_cpu_to_gpu(res, 1, index)

    logger.info(f"Loading corpus...")
    id2doc = json.load(open(args.corpus_dict))
    # NOTE(review): `title2doc` is built but never used below.
    title2doc = {item[0]:item[1] for item in id2doc.values()}
    logger.info(f"Corpus size {len(id2doc)}")

    logger.info("Loading trained model...")
    bert_config = AutoConfig.from_pretrained(args.model_name)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    # model = BertRetrieverSingle(bert_config, args)
    model = UnifiedRetriever(bert_config, args)
    model = load_saved(model, args.model_path, exact=False)
    simple_tokenizer = SimpleTokenizer()
    cuda = torch.device('cuda')
    model.to(cuda)
    from apex import amp
    model = amp.initialize(model, opt_level='O1')
    model.eval()

    logger.info("Encoding claims and searching")
    questions = [_["claim"] for _ in ds_items]
    # NOTE(review): `metrics` is initialized but never filled in this block.
    metrics = []
    retrieval_outputs = []
    for b_start in tqdm(range(0, len(questions), args.batch_size)):
        with torch.no_grad():
            batch_q = questions[b_start:b_start + args.batch_size]
            batch_ann = ds_items[b_start:b_start + args.batch_size]
            bsize = len(batch_q)
            batch_q_encodes = tokenizer.batch_encode_plus(batch_q, max_length=args.max_q_len, pad_to_max_length=True, return_tensors="pt")
            batch_q_encodes = move_to_cuda(dict(batch_q_encodes))
            q_embeds = model.encode_q(batch_q_encodes["input_ids"], batch_q_encodes["attention_mask"], batch_q_encodes.get("token_type_ids", None))
            q_embeds_numpy = q_embeds.cpu().contiguous().numpy()
            D, I = index.search(q_embeds_numpy, args.topk)
            for b_idx in range(bsize):
                topk_docs = []
                for _, doc_id in enumerate(I[b_idx]):
                    doc = id2doc[str(doc_id)]
                    topk_docs.append({"title": doc[0], "text": doc[1]})
                # saving when there's no annotations
                if args.save_path != "":
                    # NOTE(review): `candidaite_chains` (sic) is dead code here,
                    # likely left over from the multi-hop variant of this script.
                    candidaite_chains = []
                    retrieval_outputs.append({
                        "id": batch_ann[b_idx]["id"],
                        "claim": batch_ann[b_idx]["claim"],
                        "topk": topk_docs,
                    })

    # Persist one jsonl record per claim with its top-k retrieved docs.
    if args.save_path != "":
        with open(f"/private/home/xwhan/data/fever/retrieval/{args.save_path}", "w") as out:
            for l in retrieval_outputs:
                out.write(json.dumps(l) + "\n")
| 5,252 | 41.707317 | 378 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/qa/train_ranker.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
import json
import logging
import os
import random
from datetime import date
from functools import partial
import copy
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.optim import Adam
from tqdm import tqdm
from transformers import (AdamW, AutoConfig, AutoTokenizer,
get_linear_schedule_with_warmup)
from config import train_args
from reranking_datasets import RankingDataset, rank_collate
from reranking_model import RankModel
from utils import AverageMeter, convert_to_half, move_to_cuda
def load_saved(model, path):
    """Load a checkpoint's state dict into ``model`` and return it.

    Strips the ``module.`` prefix that ``torch.nn.DataParallel`` /
    ``DistributedDataParallel`` add to parameter names, so checkpoints saved
    from wrapped models load into unwrapped ones.

    Args:
        model: module whose parameters are populated in place.
        path: path to a file produced by ``torch.save(model.state_dict(), ...)``.

    Returns:
        The same ``model`` instance.
    """
    try:
        state_dict = torch.load(path)
    except Exception:
        # Checkpoint serialized with CUDA tensors on a CPU-only machine:
        # retry with explicit CPU mapping (consistent with mdr utils.load_saved).
        state_dict = torch.load(path, map_location=torch.device('cpu'))
    state_dict = {(k[7:] if k.startswith('module.') else k): v
                  for k, v in state_dict.items()}
    model.load_state_dict(state_dict)
    return model
def main():
    """Train (or evaluate) the passage reranking model.

    Reads all hyperparameters from ``train_args()``; creates a dated output
    directory, sets up logging/TensorBoard, builds model + dataloaders, and
    runs the train loop with periodic dev evaluation, keeping both the most
    recent and the best (by dev accuracy) checkpoints.
    """
    args = train_args()
    if args.fp16:
        # register einsum for apex half-precision patching before model build
        import apex
        apex.amp.register_half_function(torch, 'einsum')
    # Output layout: <output_dir>/<MM-DD-YYYY>/<run-name-from-hparams>/
    date_curr = date.today().strftime("%m-%d-%Y")
    model_name = f"{args.prefix}-seed{args.seed}-bsz{args.train_batch_size}-fp16{args.fp16}-lr{args.learning_rate}-decay{args.weight_decay}"
    args.output_dir = os.path.join(args.output_dir, date_curr, model_name)
    tb_logger = SummaryWriter(os.path.join(args.output_dir.replace("logs","tflogs")))
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        print(
            f"output directory {args.output_dir} already exists and is not empty.")
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO,
                        handlers=[logging.FileHandler(os.path.join(args.output_dir, "log.txt")),
                                  logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    logger.info(args)

    # Device setup: single-node (possibly multi-GPU DataParallel) vs.
    # one-process-per-GPU distributed training selected by --local_rank.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device %s n_gpu %d distributed training %r",
                device, n_gpu, bool(args.local_rank != -1))

    # NOTE(review): `args.accumulate_gradients` is validated here and used to
    # rescale the batch size, but the train loop below reads
    # `args.gradient_accumulation_steps` — confirm the two settings are kept
    # consistent in `train_args()`.
    if args.accumulate_gradients < 1:
        raise ValueError("Invalid accumulate_gradients parameter: {}, should be >= 1".format(
            args.accumulate_gradients))
    args.train_batch_size = int(
        args.train_batch_size / args.accumulate_gradients)

    # Seed every RNG for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    bert_config = AutoConfig.from_pretrained(args.model_name)
    model = RankModel(bert_config, args)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    collate_fc = partial(rank_collate, pad_id=tokenizer.pad_token_id)
    if args.do_train and args.max_seq_len > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (args.max_seq_len, bert_config.max_position_embeddings))

    eval_dataset = RankingDataset(
        tokenizer, args.predict_file, args.max_seq_len, args.max_q_len)
    eval_dataloader = DataLoader(
        eval_dataset, batch_size=args.predict_batch_size, collate_fn=collate_fc, pin_memory=True, num_workers=args.num_workers)
    logger.info(f"Num of dev batches: {len(eval_dataloader)}")

    if args.init_checkpoint != "":
        model = load_saved(model, args.init_checkpoint)
    model.to(device)
    print(f"number of trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")

    if args.do_train:
        # No weight decay for biases and LayerNorm weights (standard BERT recipe).
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = AdamW(optimizer_parameters,
                          lr=args.learning_rate, eps=args.adam_epsilon)
        if args.fp16:
            from apex import amp
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=args.fp16_opt_level)
    else:
        if args.fp16:
            from apex import amp
            model = amp.initialize(model, opt_level=args.fp16_opt_level)

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if args.do_train:
        global_step = 0 # gradient update step
        batch_step = 0 # forward batch count
        best_acc = 0
        train_loss_meter = AverageMeter()
        model.train()
        train_dataset = RankingDataset(tokenizer, args.train_file, args.max_seq_len, args.max_q_len, train=True)
        train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size, pin_memory=True, collate_fn=collate_fc, num_workers=args.num_workers, shuffle=True)

        logger.info('Start training....')
        for epoch in range(int(args.num_train_epochs)):
            for batch in tqdm(train_dataloader):
                batch_step += 1
                batch_inputs = move_to_cuda(batch["net_inputs"])
                loss = model(batch_inputs)
                if n_gpu > 1:
                    loss = loss.mean()
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                train_loss_meter.update(loss.item())
                tb_logger.add_scalar('batch_train_loss',
                                     loss.item(), global_step)
                tb_logger.add_scalar('smoothed_train_loss',
                                     train_loss_meter.avg, global_step)

                # Step the optimizer only every `gradient_accumulation_steps`
                # forward passes; clip grads first (amp master params in fp16).
                if (batch_step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), args.max_grad_norm)
                    optimizer.step() # We have accumulated enough gradients
                    model.zero_grad()
                    global_step += 1

                    # Mid-epoch evaluation every `eval_period` updates.
                    if args.eval_period != -1 and global_step % args.eval_period == 0:
                        acc = predict(args, model, eval_dataloader,
                                      device, logger)
                        logger.info("Step %d Train loss %.2f acc %.2f on epoch=%d" % (global_step, train_loss_meter.avg, acc*100, epoch))
                        # save most recent model
                        torch.save(model.state_dict(), os.path.join(
                            args.output_dir, f"checkpoint_last.pt"))
                        if best_acc < acc:
                            logger.info("Saving model with best acc %.2f -> acc %.2f on epoch=%d" %
                                        (best_acc*100, acc*100, epoch))
                            torch.save(model.state_dict(), os.path.join(
                                args.output_dir, f"checkpoint_best.pt"))
                            # NOTE(review): this .to(device) looks redundant —
                            # torch.save does not move the model off-device.
                            model = model.to(device)
                            best_acc = acc

            # End-of-epoch evaluation and checkpointing.
            acc = predict(args, model, eval_dataloader, device, logger)
            logger.info("Step %d Train loss %.2f acc %.2f on epoch=%d" % (
                global_step, train_loss_meter.avg, acc*100, epoch))
            tb_logger.add_scalar('dev_acc', acc*100, epoch)
            torch.save(model.state_dict(), os.path.join(args.output_dir, f"checkpoint_last.pt"))
            if best_acc < acc:
                logger.info("Saving model with best acc %.2f -> acc %.2f on epoch=%d" % (best_acc*100, acc*100, epoch))
                torch.save(model.state_dict(), os.path.join(
                    args.output_dir, f"checkpoint_best.pt"))
                best_acc = acc

        logger.info("Training finished!")

    elif args.do_predict:
        acc = predict(args, model, eval_dataloader, device, logger)
        logger.info(f"test performance {acc}")
def predict(args, model, eval_dataloader, device, logger):
    """Evaluate the reranker: top-1 accuracy of ranking a gold passage first.

    Scores every (question, passage) candidate in ``eval_dataloader``, groups
    the scores by question id, and counts a question as correct when its
    highest-scoring candidate carries label 1. Restores ``model.train()``
    mode before returning.

    Args:
        args, device: unused; kept so existing call sites remain valid.
        model: reranking model mapping a batch of net inputs to one score per example.
        eval_dataloader: yields batches with "net_inputs" (including a "label"
            tensor) and the parallel "qids" list.
        logger: destination for the summary statistics.

    Returns:
        Mean top-1 accuracy over all evaluated questions (numpy float).
    """
    # Fix: dropped the unused local `top_pred = {}`.
    model.eval()
    id2result = collections.defaultdict(list)
    for batch in tqdm(eval_dataloader):
        batch_to_feed = move_to_cuda(batch["net_inputs"])
        batch_qids = batch["qids"]
        batch_labels = batch["net_inputs"]["label"].view(-1).tolist()
        with torch.no_grad():
            scores = model(batch_to_feed)
            scores = scores.view(-1).tolist()
        for qid, label, score in zip(batch_qids, batch_labels, scores):
            id2result[qid].append((label, score))

    acc = []
    for qid, res in id2result.items():
        # best-scoring candidate first; correct iff it is a positive
        res.sort(key=lambda x: x[1], reverse=True)
        acc.append(res[0][0] == 1)
    logger.info(f"evaluated {len(id2result)} questions...")
    logger.info(f'acc: {np.mean(acc)}')

    model.train()
    return np.mean(acc)
# Allow running this training script directly from the command line.
if __name__ == "__main__":
    main()
| 10,232 | 41.995798 | 170 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/qa/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sqlite3
import unicodedata
import collections
import logging
import re
def set_global_logging_level(level=logging.ERROR, prefices=("",)):
    """
    Override logging levels of different modules based on their name as a prefix.
    It needs to be invoked after the modules have been loaded so that their loggers have been initialized.

    Args:
        - level: desired level. e.g. logging.INFO. Optional. Default is logging.ERROR
        - prefices: sequence of one or more str prefices to match (e.g. ["transformers", "torch"]). Optional.
          Default is `("",)` to match all active loggers.
          The match is a case-sensitive `module_name.startswith(prefix)`
    """
    # Fix: the default was a mutable list literal ([""]); a tuple avoids the
    # shared-mutable-default pitfall while accepting the same call patterns.
    prefix_re = re.compile(r'^(?:' + "|".join(prefices) + r')')
    for name in logging.root.manager.loggerDict:
        if prefix_re.match(name):
            logging.getLogger(name).setLevel(level)
def load_saved(model, path, exact=True):
    """Load a checkpoint's state dict into ``model`` and return it.

    Strips the ``module.`` prefix added by ``torch.nn.DataParallel`` /
    ``DistributedDataParallel`` wrappers.

    Args:
        model: module whose parameters are populated in place.
        path: path to a file produced by ``torch.save(model.state_dict(), ...)``.
        exact: when True, every checkpoint key must match the model; when
            False, checkpoint keys absent from ``model.state_dict()`` are
            silently dropped (partial loading).

    Returns:
        The same ``model`` instance.
    """
    try:
        state_dict = torch.load(path)
    except Exception:
        # Fix: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). The fallback handles checkpoints
        # serialized with CUDA tensors loaded on a CPU-only machine.
        state_dict = torch.load(path, map_location=torch.device('cpu'))

    def filter(x): return x[7:] if x.startswith('module.') else x
    if exact:
        state_dict = {filter(k): v for (k, v) in state_dict.items()}
    else:
        state_dict = {filter(k): v for (
            k, v) in state_dict.items() if filter(k) in model.state_dict()}
    model.load_state_dict(state_dict)
    return model
def move_to_cuda(sample):
    """Recursively transfer every tensor inside ``sample`` to the default CUDA device.

    Dicts and lists are traversed; any other leaf value is returned unchanged.
    An empty container short-circuits to ``{}``.
    """
    if len(sample) == 0:
        return {}

    def _recurse(obj):
        if torch.is_tensor(obj):
            return obj.cuda()
        if isinstance(obj, dict):
            return {key: _recurse(value) for key, value in obj.items()}
        if isinstance(obj, list):
            return [_recurse(item) for item in obj]
        return obj

    return _recurse(sample)
def convert_to_half(sample):
    """Recursively cast every single-precision CPU tensor in ``sample`` to fp16.

    Only tensors whose ``type()`` is exactly "torch.FloatTensor" are halved;
    everything else (other dtypes, non-tensor leaves) passes through untouched.
    Dicts and lists are traversed; an empty container short-circuits to ``{}``.
    """
    if len(sample) == 0:
        return {}

    def _halve(obj):
        if torch.is_tensor(obj) and obj.type() == "torch.FloatTensor":
            return obj.half()
        if isinstance(obj, dict):
            return {key: _halve(value) for key, value in obj.items()}
        if isinstance(obj, list):
            return [_halve(item) for item in obj]
        return obj

    return _halve(sample)
class AverageMeter(object):
    """Tracks the latest value together with a running sum, count, and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def normalize(text):
    """Canonicalize ``text`` to Unicode NFD so differently-encoded strings compare equal."""
    return unicodedata.normalize('NFD', text)
def para_has_answer(answer, para, tokenizer):
    """Return True iff any gold answer string occurs as a contiguous token span of ``para``.

    Both paragraph and answers are NFD-normalized, tokenized, and compared on
    their uncased word forms.
    """
    para_tokens = tokenizer.tokenize(normalize(para))
    para_words = para_tokens.words(uncased=True)
    assert len(para_words) == len(para_tokens)
    for gold in answer:
        gold_words = tokenizer.tokenize(normalize(gold)).words(uncased=True)
        span = len(gold_words)
        for start in range(0, len(para_words) - span + 1):
            if para_words[start: start + span] == gold_words:
                return True
    return False
def match_answer_span(p, answer, tokenizer, match="string"):
    """Locate gold-answer occurrences inside passage ``p`` (already normalized).

    With ``match == 'string'`` returns the distinct surface forms of every
    token span of ``p`` whose uncased words equal a tokenized gold answer.
    With ``match == 'regex'`` treats ``answer[0]`` as a regex pattern and
    delegates to ``regex_match``.
    """
    if match == 'string':
        p_tokens = tokenizer.tokenize(p)
        p_words = p_tokens.words(uncased=True)
        found = set()
        for gold in answer:
            gold_words = tokenizer.tokenize(normalize(gold)).words(uncased=True)
            span = len(gold_words)
            for start in range(0, len(p_words) - span + 1):
                if p_words[start: start + span] == gold_words:
                    found.add(p_tokens.slice(start, start + span).untokenize())
        return list(found)
    elif match == 'regex':
        # Answer is a regex
        return regex_match(p, normalize(answer[0]))
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    # str.split() with no separator already collapses runs of whitespace,
    # discards leading/trailing whitespace, and yields [] for blank input.
    return text.split()
def find_ans_span_with_char_offsets(detected_ans, char_to_word_offset, doc_tokens, all_doc_tokens, orig_to_tok_index, tokenizer):
    """Map character-level answer spans to sub-token spans.

    Args:
        detected_ans: dict with "text" (answer string) and "char_spans"
            (list of (start, end) character offsets; end is inclusive).
        char_to_word_offset: char position -> whitespace-word index.
        doc_tokens: whitespace-separated words of the passage.
        all_doc_tokens: sub-word tokens of the passage.
        orig_to_tok_index: word index -> index of its first sub-token.
        tokenizer: sub-word tokenizer used to tighten span boundaries.

    Returns:
        List of (start, end) sub-token spans, one per char span.
    """
    # could return multiple spans for an answer string
    ans_text = detected_ans["text"]
    char_spans = detected_ans["char_spans"]
    ans_subtok_spans = []
    for char_start, char_end in char_spans:
        tok_start = char_to_word_offset[char_start]
        # char_end points to the last char of the answer, not one after
        tok_end = char_to_word_offset[char_end]
        # word span -> sub-token span: end is the last sub-token of the last
        # word, i.e. one before the next word's first sub-token (or EOS).
        sub_tok_start = orig_to_tok_index[tok_start]
        if tok_end < len(doc_tokens) - 1:
            sub_tok_end = orig_to_tok_index[tok_end + 1] - 1
        else:
            sub_tok_end = len(all_doc_tokens) - 1

        # Sanity check: the covered word span must contain the answer text.
        actual_text = " ".join(doc_tokens[tok_start:(tok_end + 1)])
        cleaned_answer_text = " ".join(whitespace_tokenize(ans_text))
        if actual_text.find(cleaned_answer_text) == -1:
            print("Could not find answer: '{}' vs. '{}'".format(
                actual_text, cleaned_answer_text))

        (sub_tok_start, sub_tok_end) = _improve_answer_span(
            all_doc_tokens, sub_tok_start, sub_tok_end, tokenizer, ans_text)
        ans_subtok_spans.append((sub_tok_start, sub_tok_end))
    return ans_subtok_spans
import six
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    Args:
        text: a ``str`` (returned unchanged) or utf-8 encoded ``bytes``
            (decoded, with undecodable byte sequences dropped).

    Raises:
        ValueError: for any other input type.
    """
    # Fix: the original branched on six.PY2/six.PY3, but this module is
    # Python-3 only (it uses f-strings elsewhere), so the dead Python-2 branch
    # and the `six` dependency are removed; Python-3 behavior is unchanged.
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    raise ValueError("Unsupported string type: %s" % (type(text)))
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        cleaned = self._clean_text(convert_to_unicode(text))
        split_tokens = []
        for tok in whitespace_tokenize(cleaned):
            if self.do_lower_case:
                tok = self._run_strip_accents(tok.lower())
            split_tokens.extend(self._run_split_on_punc(tok))
        # re-join and re-split to normalize whitespace
        return whitespace_tokenize(" ".join(split_tokens))

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        normalized = unicodedata.normalize("NFD", text)
        # drop combining marks ("Mn") left behind by NFD decomposition
        return "".join(c for c in normalized if unicodedata.category(c) != "Mn")

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        pieces = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                # every punctuation char becomes its own piece
                pieces.append([ch])
                word_open = False
            else:
                if not word_open:
                    pieces.append([])
                    word_open = True
                pieces[-1].append(ch)
        return ["".join(p) for p in pieces]

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        kept = []
        for ch in text:
            code = ord(ch)
            # drop NUL, the unicode replacement char and control characters
            if code == 0 or code == 0xfffd or _is_control(ch):
                continue
            kept.append(" " if _is_whitespace(ch) else ch)
        return "".join(kept)
def get_final_text(pred_text, orig_text, do_lower_case=False, verbose_logging=True):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: answer text as recovered from the tokenized document.
        orig_text: original (untokenized) document text spanning the answer.
        do_lower_case: whether the tokenizer lower-cases; must match the model.
        verbose_logging: if True, print diagnostics when alignment fails.

    Returns:
        The best-effort answer span from `orig_text`; falls back to the
        whole `orig_text` whenever the alignment heuristic fails.
    """
    def _strip_spaces(text):
        # Drop spaces, remembering each kept char's original index.
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)

    tok_text = " ".join(tokenizer.tokenize(orig_text))

    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            print(
                "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1

    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)

    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            # BUG FIX: the format string used to be passed to print()
            # logging-style ("...", a, b), printing the raw tuple pieces
            # instead of the interpolated message.
            print("Length not equal after stripping spaces: '%s' vs '%s'"
                  % (orig_ns_text, tok_ns_text))
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
        tok_s_to_ns_map[tok_index] = i

    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]

    if orig_start_position is None:
        if verbose_logging:
            print("Couldn't map start position")
        return orig_text

    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]

    if orig_end_position is None:
        if verbose_logging:
            print("Couldn't map end position")
        return orig_text

    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
| 13,636 | 33.350126 | 129 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/qa/config.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from ast import parse
from typing import NamedTuple
from torch.nn import parallel
class ClusterConfig(NamedTuple):
    """Process-group settings forwarded to torch.distributed.init_process_group."""
    dist_backend: str  # distributed backend name (passed as `backend`)
    dist_url: str  # rendezvous URL (passed as `init_method`, e.g. "file://...")
def common_args():
    """Build the argument parser shared by the train / predict entry points.

    Returns:
        An ``argparse.ArgumentParser`` pre-populated with data, model and
        inference options (training-only flags are added by ``train_args``).
    """
    p = argparse.ArgumentParser()

    # data files and run mode
    p.add_argument("--train_file", type=str,
                   default="../data/nq-with-neg-train.txt")
    p.add_argument("--predict_file", type=str,
                   default="../data/nq-with-neg-dev.txt")
    p.add_argument("--num_workers", default=10, type=int)
    p.add_argument("--do_train", default=False,
                   action='store_true', help="Whether to run training.")
    p.add_argument("--do_predict", default=False,
                   action='store_true', help="Whether to run eval on the dev set.")
    p.add_argument("--do_test", default=False, action="store_true",
                   help="for final test submission")

    # model and sequence lengths
    p.add_argument("--model_name", default="bert-base-uncased", type=str)
    p.add_argument("--init_checkpoint", type=str,
                   help="Initial checkpoint (usually from a pre-trained BERT model).",
                   default="")
    p.add_argument("--max_seq_len", default=512, type=int,
                   help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                   "longer than this will be truncated, and sequences shorter than this will be padded.")
    p.add_argument("--max_q_len", default=64, type=int)
    p.add_argument("--max_ans_len", default=35, type=int)

    # mixed precision and devices
    p.add_argument('--fp16', action='store_true')
    p.add_argument('--fp16_opt_level', type=str, default='O1',
                   help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                   "See details at https://nvidia.github.io/apex/amp.html")
    p.add_argument("--no_cuda", default=False, action='store_true',
                   help="Whether not to use CUDA when available")
    p.add_argument("--local_rank", type=int, default=-1,
                   help="local_rank for distributed training on gpus")

    # prediction output
    p.add_argument("--predict_batch_size", default=256,
                   type=int, help="Total batch size for predictions.")
    p.add_argument("--save-prediction", default="", type=str)
    p.add_argument("--sp-pred", action="store_true",
                   help="whether to predict sentence sp")
    return p
def train_args():
    """Extend the common parser with optimization flags and parse sys.argv.

    Returns:
        The parsed ``argparse.Namespace`` for a training run.
    """
    p = common_args()

    # optimization / training schedule
    p.add_argument('--prefix', type=str, default="eval")
    p.add_argument("--weight_decay", default=0.0, type=float,
                   help="Weight decay if we apply some.")
    p.add_argument("--output_dir", default="./logs", type=str,
                   help="The output directory where the model checkpoints will be written.")
    p.add_argument("--train_batch_size", default=128,
                   type=int, help="Total batch size for training.")
    p.add_argument("--num_q_per_gpu", default=1)
    p.add_argument("--learning_rate", default=1e-5,
                   type=float, help="The initial learning rate for Adam.")
    p.add_argument("--num_train_epochs", default=5, type=float,
                   help="Total number of training epochs to perform.")
    p.add_argument('--seed', type=int, default=3,
                   help="random seed for initialization")
    p.add_argument('--gradient_accumulation_steps', type=int, default=1,
                   help="Number of updates steps to accumualte before performing a backward/update pass.")
    p.add_argument('--eval-period', type=int, default=2500)
    p.add_argument("--max_grad_norm", default=2.0, type=float,
                   help="Max gradient norm.")
    p.add_argument("--adam_epsilon", default=1e-8, type=float,
                   help="Epsilon for Adam optimizer.")

    # reranking / multi-task options
    p.add_argument("--neg-num", type=int, default=9,
                   help="how many neg/distant passage chains to use")
    p.add_argument("--shared-norm", action="store_true")
    p.add_argument("--qa-drop", default=0, type=float)
    p.add_argument("--rank-drop", default=0, type=float)
    p.add_argument("--sp-drop", default=0, type=float)
    p.add_argument("--final-metric", default="joint_f1")
    p.add_argument("--use-adam", action="store_true",
                   help="use adam or adamW")
    p.add_argument("--warmup-ratio", default=0, type=float,
                   help="Linear warmup over warmup_steps.")
    p.add_argument("--sp-weight", default=0, type=float,
                   help="weight of the sp loss")
    return p.parse_args()
| 4,948 | 54.606742 | 115 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/qa/qa_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
import json
import random
import torch
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
from .basic_tokenizer import SimpleTokenizer
from .utils import (find_ans_span_with_char_offsets, match_answer_span, para_has_answer, _is_whitespace)
def collate_tokens(values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False):
    """Convert a list of 1d tensors into a padded 2d tensor.

    Args:
        values: list of 1d tensors (higher-rank tensors are flattened first).
        pad_idx: fill value used for padding.
        eos_idx: EOS id; only consulted when `move_eos_to_beginning` is set.
        left_pad: pad on the left instead of the right.
        move_eos_to_beginning: rotate each sequence's trailing EOS to index 0.

    Returns:
        A (len(values), max_len) tensor with the inputs' dtype/device.
    """
    # flatten any accidentally multi-dimensional inputs
    if values[0].dim() > 1:
        values = [v.view(-1) for v in values]
    max_len = max(v.size(0) for v in values)
    out = values[0].new(len(values), max_len).fill_(pad_idx)

    def _fill(src, dst):
        assert dst.numel() == src.numel()
        if move_eos_to_beginning:
            # last token must be EOS; rotate it to the front
            assert src[-1] == eos_idx
            dst[0] = eos_idx
            dst[1:] = src[:-1]
        else:
            dst.copy_(src)

    for row, v in enumerate(values):
        target = out[row][max_len - len(v):] if left_pad else out[row][:len(v)]
        _fill(v, target)
    return out
def prepare(item, tokenizer, special_toks=["[SEP]", "[unused1]", "[unused2]"]):
    """
    Tokenize a question's passage chain and record alignment indices.

    The context string is built as "yes no [SEP] <para1> [SEP] <para2> ...",
    where each paragraph is "<title> [unused1] <sent1> [unused1] <sent2> ...":
    the "[unused1]" markers give sentence start positions for supporting-fact
    (SP) identification, and the leading "yes no" tokens let yes/no answers
    be extracted as ordinary spans.

    NOTE: `special_toks` is a mutable default argument; it is only read here
    and must not be mutated by callers.

    Side effect: stores the processed context under item["context_processed"]
    and returns the same `item`.
    """
    def _process_p(para):
        """
        Render one paragraph: title followed by "[unused1]"-prefixed sentences.
        """
        title, sents = para["title"].strip(), para["sents"]
        pre_sents = []
        for idx, sent in enumerate(sents):
            pre_sents.append("[unused1] " + sent.strip())
        return title + " " + " ".join(pre_sents)

    # mark passage boundaries with [SEP]
    contexts = []
    for para in item["passages"]:
        contexts.append(_process_p(para))
    context = " [SEP] ".join(contexts)

    doc_tokens = []
    char_to_word_offset = []
    prev_is_whitespace = True

    # "yes no" up front supports span extraction of yes/no answers
    context = "yes no [SEP] " + context

    # whitespace tokenization with a per-character token-offset map
    for c in context:
        if _is_whitespace(c):
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)
            else:
                doc_tokens[-1] += c
            prev_is_whitespace = False
        char_to_word_offset.append(len(doc_tokens) - 1)

    sent_starts = []
    orig_to_tok_index = []
    tok_to_orig_index = []
    all_doc_tokens = []
    for (i, token) in enumerate(doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        if token in special_toks:
            # keep special markers as single, unsplit sub-tokens
            if token == "[unused1]":
                sent_starts.append(len(all_doc_tokens))
            sub_tokens = [token]
        else:
            sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)

    item["context_processed"] = {
        "doc_tokens": doc_tokens,                    # whitespace tokens
        "char_to_word_offset": char_to_word_offset,  # char idx -> whitespace-token idx
        "orig_to_tok_index": orig_to_tok_index,      # whitespace-token idx -> first wordpiece idx
        "tok_to_orig_index": tok_to_orig_index,      # wordpiece idx -> whitespace-token idx
        "all_doc_tokens": all_doc_tokens,            # wordpiece tokens
        "context": context,
        "sent_starts": sent_starts                   # wordpiece idx of each "[unused1]"
    }
    return item
class QAEvalDataset(Dataset):
    """Inference-time dataset: one example per (question, candidate chain).

    Unlike QADataset this receives retriever results in memory instead of a
    file, and never has gold chain titles here (`sp_titles` stays None), so
    every example is labelled -1.
    """

    def __init__(self,
                 tokenizer,
                 retrievel_results,
                 max_seq_len,
                 max_q_len,
                 ):
        # retrievel_results: records with "question", "_id",
        # "candidate_chains" and optionally "answer".
        retriever_outputs = retrievel_results
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len
        self.max_q_len = max_q_len

        self.data = []
        for item in retriever_outputs:
            # strip a trailing question mark for tokenization consistency
            if item["question"].endswith("?"):
                item["question"] = item["question"][:-1]
            # for validation, add target predictions
            sp_titles = None  # no gold supporting titles at inference time
            gold_answer = item.get("answer", [])
            sp_gold = []
            for chain in item["candidate_chains"]:
                chain_titles = [_["title"] for _ in chain]
                if sp_titles:
                    label = int(set(chain_titles) == sp_titles)
                else:
                    label = -1
                self.data.append({
                    "question": item["question"],
                    "passages": chain,
                    "label": label,
                    "qid": item["_id"],
                    "gold_answer": gold_answer,
                    "sp_gold": sp_gold
                })

        print(f"Total instances size {len(self.data)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        item = prepare(self.data[index], self.tokenizer)
        context_ann = item["context_processed"]
        q_toks = self.tokenizer.tokenize(item["question"])[:self.max_q_len]
        para_offset = len(q_toks) + 2 # cls and seq
        item["wp_tokens"] = context_ann["all_doc_tokens"]
        # prepare() always prefixes the context with "yes no"
        assert item["wp_tokens"][0] == "yes" and item["wp_tokens"][1] == "no"
        item["para_offset"] = para_offset
        # -1 keeps one position free for the trailing special token
        max_toks_for_doc = self.max_seq_len - para_offset - 1
        if len(item["wp_tokens"]) > max_toks_for_doc:
            item["wp_tokens"] = item["wp_tokens"][:max_toks_for_doc]
        # NOTE(review): `is_pretokenized`/`max_length` follow an older
        # transformers encode_plus API -- confirm against the pinned version.
        item["encodings"] = self.tokenizer.encode_plus(q_toks, text_pair=item["wp_tokens"], max_length=self.max_seq_len, return_tensors="pt", is_pretokenized=True)
        # 1 on context positions only; question and specials are masked out
        item["paragraph_mask"] = torch.zeros(item["encodings"]["input_ids"].size()).view(-1)
        item["paragraph_mask"][para_offset:-1] = 1
        # kept so predicted wordpiece spans can be mapped back to raw text
        item["doc_tokens"] = context_ann["doc_tokens"]
        item["tok_to_orig_index"] = context_ann["tok_to_orig_index"]

        # filter sentence offsets exceeding max sequence length
        sent_labels, sent_offsets = [], []
        for idx, s in enumerate(item["context_processed"]["sent_starts"]):
            if s >= len(item["wp_tokens"]):
                break
            if "sp_sent_labels" in item:
                sent_labels.append(item["sp_sent_labels"][idx])
            sent_offsets.append(s + para_offset)
            # each recorded offset must land on a "[unused1]" marker token
            assert item["encodings"]["input_ids"].view(-1)[s+para_offset] == self.tokenizer.convert_tokens_to_ids("[unused1]")

        # supporting fact marker positions (as a tensor)
        item["sent_offsets"] = sent_offsets
        item["sent_offsets"] = torch.LongTensor(item["sent_offsets"])
        item["label"] = torch.LongTensor([item["label"]])
        return item
class QADataset(Dataset):
    """Reranking/QA dataset: one example per (question, passage chain).

    In training mode the gold (supporting) chain gets label 1 and retrieved
    non-gold chains get label 0; "ans_covered" marks chains that contain the
    answer (distant supervision for bridge questions). In eval mode every
    candidate chain is kept and labelled by whether its titles equal the
    gold titles (-1 when gold is unavailable).
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_seq_len,
                 max_q_len,
                 train=False,
                 no_sent_label=False
                 ):
        # data_path: jsonl file, one retriever-output record per line
        retriever_outputs = [json.loads(l) for l in tqdm(open(data_path).readlines())]
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len
        self.max_q_len = max_q_len
        self.train = train
        self.no_sent_label = no_sent_label
        self.simple_tok = SimpleTokenizer()

        self.data = []
        if train:
            # question id -> indices into self.data (used by MhopSampler)
            self.qid2gold = collections.defaultdict(list)
            self.qid2neg = collections.defaultdict(list)
            for item in retriever_outputs:
                # strip a trailing question mark for tokenization consistency
                if item["question"].endswith("?"):
                    item["question"] = item["question"][:-1]

                # per-sentence supporting-fact labels over the gold chain
                sp_sent_labels = []
                sp_gold = []
                if not self.no_sent_label:
                    for sp in item["sp"]:
                        for _ in sp["sp_sent_ids"]:
                            sp_gold.append([sp["title"], _])
                        for idx in range(len(sp["sents"])):
                            sp_sent_labels.append(int(idx in sp["sp_sent_ids"]))

                question_type = item["type"]
                # gold chain example (label 1)
                self.data.append({
                    "question": item["question"],
                    "passages": item["sp"],
                    "label": 1,
                    "qid": item["_id"],
                    "gold_answer": item["answer"],
                    "sp_sent_labels": sp_sent_labels,
                    "ans_covered": 1, # includes partial chains.
                    "sp_gold": sp_gold
                })
                self.qid2gold[item["_id"]].append(len(self.data) - 1)

                sp_titles = set([_["title"] for _ in item["sp"]])
                if question_type == "bridge":
                    # gold passages whose text actually contains the answer
                    ans_titles = set([p["title"] for p in item["sp"] if para_has_answer(item["answer"], "".join(p["sents"]), self.simple_tok)])
                else:
                    ans_titles = set()

                # top ranked negative chains
                ds_count = 0 # how many distantly supervised chains were seen
                # NOTE(review): ds_limit is never used -- the intended cap on
                # distant-supervision chains appears not to be enforced.
                ds_limit = 5
                for chain in item["candidate_chains"]:
                    chain_titles = [_["title"] for _ in chain]
                    if set(chain_titles) == sp_titles:
                        continue
                    if question_type == "bridge":
                        # chain overlaps an answer-bearing title -> distant positive for span loss
                        answer_covered = int(len(set(chain_titles) & ans_titles) > 0)
                        ds_count += answer_covered
                    else:
                        answer_covered = 0
                    self.data.append({
                        "question": item["question"],
                        "passages": chain,
                        "label": 0,
                        "qid": item["_id"],
                        "gold_answer": item["answer"],
                        "ans_covered": answer_covered,
                        "sp_gold": sp_gold
                    })
                    self.qid2neg[item["_id"]].append(len(self.data) - 1)
        else:
            for item in retriever_outputs:
                if item["question"].endswith("?"):
                    item["question"] = item["question"][:-1]
                # for validation, add target predictions
                sp_titles = set([_["title"] for _ in item["sp"]]) if "sp" in item else None
                gold_answer = item.get("answer", [])
                sp_gold = []
                if "sp" in item:
                    for sp in item["sp"]:
                        for _ in sp["sp_sent_ids"]:
                            sp_gold.append([sp["title"], _])
                # NOTE(review): chain deduplication via chain_seen is disabled;
                # the variable is kept but never populated.
                chain_seen = set()
                for chain in item["candidate_chains"]:
                    chain_titles = [_["title"] for _ in chain]
                    if sp_titles:
                        label = int(set(chain_titles) == sp_titles)
                    else:
                        label = -1
                    self.data.append({
                        "question": item["question"],
                        "passages": chain,
                        "label": label,
                        "qid": item["_id"],
                        "gold_answer": gold_answer,
                        "sp_gold": sp_gold
                    })

        print(f"Data size {len(self.data)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        item = prepare(self.data[index], self.tokenizer)
        context_ann = item["context_processed"]
        q_toks = self.tokenizer.tokenize(item["question"])[:self.max_q_len]
        para_offset = len(q_toks) + 2 # cls and seq
        item["wp_tokens"] = context_ann["all_doc_tokens"]
        # prepare() always prefixes the context with "yes no"
        assert item["wp_tokens"][0] == "yes" and item["wp_tokens"][1] == "no"
        item["para_offset"] = para_offset
        # -1 keeps one position free for the trailing special token
        max_toks_for_doc = self.max_seq_len - para_offset - 1
        if len(item["wp_tokens"]) > max_toks_for_doc:
            item["wp_tokens"] = item["wp_tokens"][:max_toks_for_doc]
        # NOTE(review): `is_pretokenized`/`max_length` follow an older
        # transformers encode_plus API -- confirm against the pinned version.
        item["encodings"] = self.tokenizer.encode_plus(q_toks, text_pair=item["wp_tokens"], max_length=self.max_seq_len, return_tensors="pt", is_pretokenized=True)
        # 1 on context positions only; question and specials are masked out
        item["paragraph_mask"] = torch.zeros(item["encodings"]["input_ids"].size()).view(-1)
        item["paragraph_mask"][para_offset:-1] = 1

        if self.train:
            # answer targets only for chains that can contain the answer
            if item["ans_covered"]:
                if item["gold_answer"][0] == "yes":
                    # "yes" token sits at para_offset
                    starts, ends= [para_offset], [para_offset]
                elif item["gold_answer"][0] == "no":
                    # "no" token sits at para_offset + 1
                    starts, ends= [para_offset + 1], [para_offset + 1]
                else:
                    # find every character match of the answer, then map each
                    # to wordpiece spans
                    matched_spans = match_answer_span(context_ann["context"], item["gold_answer"], self.simple_tok)
                    ans_starts, ans_ends= [], []
                    for span in matched_spans:
                        char_starts = [i for i in range(len(context_ann["context"])) if context_ann["context"].startswith(span, i)]
                        if len(char_starts) > 0:
                            char_ends = [start + len(span) - 1 for start in char_starts]
                            answer = {"text": span, "char_spans": list(zip(char_starts, char_ends))}
                            ans_spans = find_ans_span_with_char_offsets(
                                answer, context_ann["char_to_word_offset"], context_ann["doc_tokens"], context_ann["all_doc_tokens"], context_ann["orig_to_tok_index"], self.tokenizer)
                            for s, e in ans_spans:
                                ans_starts.append(s)
                                ans_ends.append(e)
                    # drop spans truncated away; shift the rest past the question
                    starts, ends = [], []
                    for s, e in zip(ans_starts, ans_ends):
                        if s >= len(item["wp_tokens"]):
                            continue
                        else:
                            s = min(s, len(item["wp_tokens"]) - 1) + para_offset
                            e = min(e, len(item["wp_tokens"]) - 1) + para_offset
                            starts.append(s)
                            ends.append(e)
                    if len(starts) == 0:
                        # no usable span: -1 is the loss's ignore_index
                        starts, ends = [-1], [-1]
            else:
                starts, ends= [-1], [-1]

            item["starts"] = torch.LongTensor(starts)
            item["ends"] = torch.LongTensor(ends)

            if item["label"]:
                # gold chains must carry one label per sentence marker
                assert len(item["sp_sent_labels"]) == len(item["context_processed"]["sent_starts"])
        else:
            # for answer extraction: map predicted spans back to raw text
            item["doc_tokens"] = context_ann["doc_tokens"]
            item["tok_to_orig_index"] = context_ann["tok_to_orig_index"]

        # filter sentence offsets exceeding max sequence length
        sent_labels, sent_offsets = [], []
        for idx, s in enumerate(item["context_processed"]["sent_starts"]):
            if s >= len(item["wp_tokens"]):
                break
            if "sp_sent_labels" in item:
                sent_labels.append(item["sp_sent_labels"][idx])
            sent_offsets.append(s + para_offset)
            # each recorded offset must land on a "[unused1]" marker token
            assert item["encodings"]["input_ids"].view(-1)[s+para_offset] == self.tokenizer.convert_tokens_to_ids("[unused1]")

        # supporting fact marker positions (as a tensor)
        item["sent_offsets"] = sent_offsets
        item["sent_offsets"] = torch.LongTensor(item["sent_offsets"])
        if self.train:
            item["sent_labels"] = sent_labels if len(sent_labels) != 0 else [0] * len(sent_offsets)
            item["sent_labels"] = torch.LongTensor(item["sent_labels"])
            item["ans_covered"] = torch.LongTensor([item["ans_covered"]])
        item["label"] = torch.LongTensor([item["label"]])
        return item
class MhopSampler(Sampler):
    """
    Shuffle QA pairs, not individual contexts, so all examples for one
    question stay contiguous within a batch.
    """

    def __init__(self, data_source, num_neg=9, n_gpu=8):
        """
        Args:
            data_source: dataset exposing qid2gold / qid2neg index maps.
            num_neg: negatives sampled per question each epoch.
            n_gpu: number of GPUs; the question count is trimmed to a multiple.
        """
        self.qid2gold = data_source.qid2gold
        self.qid2neg = data_source.qid2neg
        self.neg_num = num_neg
        self.n_gpu = n_gpu
        self.all_qids = list(self.qid2gold.keys())
        assert len(self.qid2gold) == len(self.qid2neg)
        # drop the remainder so every GPU sees the same number of questions
        self.q_num_per_epoch = len(self.qid2gold) - len(self.qid2gold) % self.n_gpu
        self._num_samples = self.q_num_per_epoch * (self.neg_num + 1)

    def __len__(self):
        return self._num_samples

    def __iter__(self):
        random.shuffle(self.all_qids)
        # under shared-normalization all passages of a question must land on
        # the same GPU, so emit gold + sampled negatives back to back
        indices = []
        for qid in self.all_qids[:self.q_num_per_epoch]:
            negatives = self.qid2neg[qid]
            random.shuffle(negatives)
            indices.extend(self.qid2gold[qid])
            indices.extend(negatives[:self.neg_num])
        return iter(indices)
def qa_collate(samples, pad_id=0):
    """Batch QADataset items into padded model inputs plus bookkeeping lists.

    Args:
        samples: list of per-example dicts produced by the dataset.
        pad_id: tokenizer pad token id, used for input_ids padding.

    Returns:
        Dict with per-example metadata lists and a "net_inputs" sub-dict of
        padded tensors; {} for an empty input list.
    """
    if not samples:
        return {}

    first = samples[0]
    net_inputs = {
        'input_ids': collate_tokens([s["encodings"]['input_ids'] for s in samples], pad_id),
        'attention_mask': collate_tokens([s["encodings"]['attention_mask'] for s in samples], 0),
        'paragraph_mask': collate_tokens([s['paragraph_mask'] for s in samples], 0),
        'label': collate_tokens([s["label"] for s in samples], -1),
        "sent_offsets": collate_tokens([s["sent_offsets"] for s in samples], 0),
    }

    # training-only supervision targets
    if "starts" in first:
        net_inputs["starts"] = collate_tokens([s['starts'] for s in samples], -1)
        net_inputs["ends"] = collate_tokens([s['ends'] for s in samples], -1)
        net_inputs["sent_labels"] = collate_tokens([s['sent_labels'] for s in samples], 0)
        net_inputs["ans_covered"] = collate_tokens([s['ans_covered'] for s in samples], 0)

    # roberta-style tokenizers produce no token_type_ids
    if "token_type_ids" in first["encodings"]:
        net_inputs["token_type_ids"] = collate_tokens([s["encodings"]['token_type_ids'] for s in samples], 0)

    batched = {
        "qids": [s["qid"] for s in samples],
        "passages": [s["passages"] for s in samples],
        "gold_answer": [s["gold_answer"] for s in samples],
        "sp_gold": [s["sp_gold"] for s in samples],
        "para_offsets": [s["para_offset"] for s in samples],
        "net_inputs": net_inputs,
    }

    # extra fields needed to project predicted spans back onto document text
    if "doc_tokens" in first:
        batched["doc_tokens"] = [s["doc_tokens"] for s in samples]
        batched["tok_to_orig_index"] = [s["tok_to_orig_index"] for s in samples]
        batched["wp_tokens"] = [s["wp_tokens"] for s in samples]
    return batched
| 18,839 | 39.603448 | 179 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/qa/qa_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from transformers import AutoModel, BertModel
import torch.nn as nn
from torch.nn import CrossEntropyLoss
import torch
import torch.nn.functional as F
class BertPooler(nn.Module):
    """Pool a sequence by passing its first token's hidden state through
    a dense layer with tanh activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "pooling" = keep only the first token's hidden state
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class QAModel(nn.Module):
    """Joint chain-reranking + span-extraction (+ optional supporting-fact) head.

    One shared encoder feeds three objectives: a binary chain-ranking score
    from the pooled representation, start/end span logits over context
    tokens, and (when `sp_pred`) a per-sentence supporting-fact score read
    off the "[unused1]" sentence-marker positions.
    """

    def __init__(self,
                 config,
                 args
                 ):
        super().__init__()
        self.model_name = args.model_name
        self.sp_weight = args.sp_weight  # weight of the supporting-fact loss term
        self.sp_pred = args.sp_pred      # whether to score supporting sentences
        self.encoder = AutoModel.from_pretrained(args.model_name)
        if "electra" in args.model_name:
            # electra exposes no pooled output of its own
            self.pooler = BertPooler(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)  # start/end logits
        self.rank = nn.Linear(config.hidden_size, 1)  # chain-ranking score
        if self.sp_pred:
            self.sp = nn.Linear(config.hidden_size, 1)
        # per-example CE (no reduction) so multiple gold spans can be marginalized;
        # -1 targets are ignored
        self.loss_fct = CrossEntropyLoss(ignore_index=-1, reduction="none")

    def forward(self, batch):
        outputs = self.encoder(batch['input_ids'], batch['attention_mask'], batch.get('token_type_ids', None))

        if "electra" in self.model_name:
            sequence_output = outputs[0]
            pooled_output = self.pooler(sequence_output)
        else:
            sequence_output, pooled_output = outputs[0], outputs[1]

        logits = self.qa_outputs(sequence_output)
        outs = [o.squeeze(-1) for o in logits.split(1, dim=-1)]
        # -inf outside the paragraph mask keeps predicted spans inside the context
        outs = [o.float().masked_fill(batch["paragraph_mask"].ne(1), float("-inf")).type_as(o) for o in outs]
        start_logits, end_logits = outs[0], outs[1]

        rank_score = self.rank(pooled_output)

        if self.sp_pred:
            # gather hidden states at the sentence-marker offsets
            gather_index = batch["sent_offsets"].unsqueeze(2).expand(-1, -1, sequence_output.size()[-1])
            sent_marker_rep = torch.gather(sequence_output, 1, gather_index)
            sp_score = self.sp(sent_marker_rep).squeeze(2)
        else:
            sp_score = None

        if self.training:
            rank_target = batch["label"]
            if self.sp_pred:
                sp_loss = F.binary_cross_entropy_with_logits(sp_score, batch["sent_labels"].float(), reduction="none")
                # NOTE(review): this multiplies the per-sentence loss by the raw
                # token offsets (weighting later sentences more), not by a 0/1
                # padding mask such as sent_offsets != 0 -- confirm intended.
                sp_loss = (sp_loss * batch["sent_offsets"]) * batch["label"]
                sp_loss = sp_loss.sum()

            start_positions, end_positions = batch["starts"], batch["ends"]
            rank_loss = F.binary_cross_entropy_with_logits(rank_score, rank_target.float(), reduction="sum")

            # marginal log-likelihood over all annotated answer spans
            start_losses = [self.loss_fct(start_logits, starts) for starts in torch.unbind(start_positions, dim=1)]
            end_losses = [self.loss_fct(end_logits, ends) for ends in torch.unbind(end_positions, dim=1)]
            loss_tensor = torch.cat([t.unsqueeze(1) for t in start_losses], dim=1) + torch.cat([t.unsqueeze(1) for t in end_losses], dim=1)
            log_prob = - loss_tensor
            # ignored (-1) targets yield exactly 0 loss; exclude them via -inf
            log_prob = log_prob.float().masked_fill(log_prob == 0, float('-inf')).type_as(log_prob)
            marginal_probs = torch.sum(torch.exp(log_prob), dim=1)
            m_prob = [marginal_probs[idx] for idx in marginal_probs.nonzero()]
            if len(m_prob) == 0:
                # no answerable example: all -1 targets are ignored, giving a
                # zero span loss while keeping the graph intact
                span_loss = self.loss_fct(start_logits, start_logits.new_zeros(
                    start_logits.size(0)).long()-1).sum()
            else:
                span_loss = - torch.log(torch.cat(m_prob)).sum()

            if self.sp_pred:
                loss = rank_loss + span_loss + sp_loss * self.sp_weight
            else:
                loss = rank_loss + span_loss

            return loss.unsqueeze(0)

        return {
            'start_logits': start_logits,
            'end_logits': end_logits,
            'rank_score': rank_score,
            "sp_score": sp_score
            }
| 4,377 | 38.8 | 139 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/qa/qa_trainer.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import os.path as osp
import random
from functools import partial
from pathlib import Path
from typing import NamedTuple, Optional
import collections
from torch.optim import lr_scheduler
from tqdm import tqdm
import apex
import attr
import numpy as np
import submitit
import torch
import torch.distributed
import torch.nn as nn
import torch.optim as optim
from apex import amp
from torch.utils.tensorboard import SummaryWriter
from transformers import (AdamW, AutoConfig, AutoTokenizer,
get_linear_schedule_with_warmup)
from config import ClusterConfig
from hotpot_evaluate_v1 import exact_match_score, f1_score, update_sp
from qa_model import QAModel
from reranking_datasets import RankingDataset, rank_collate, MhopSampler
from utils import AverageMeter, move_to_cuda, get_final_text
# register torch.einsum with apex amp so its inputs are cast to half precision
apex.amp.register_half_function(torch, 'einsum')
@attr.s(auto_attribs=True)
class TrainerState:
    """
    Contains the state of the Trainer.
    It can be saved to checkpoint the training and loaded to resume it.
    """
    epoch: int  # last completed/current epoch
    model: nn.Module
    optimizer: optim.Optimizer
    lr_scheduler: torch.optim.lr_scheduler._LRScheduler
    global_step: int  # number of optimizer updates performed so far

    def save(self, filename: str) -> None:
        """Serialize to `filename`, replacing live objects by their state dicts."""
        data = attr.asdict(self)
        # store only the state dict
        data["model"] = self.model.state_dict()
        data["optimizer"] = self.optimizer.state_dict()
        data["lr_scheduler"] = self.lr_scheduler.state_dict()
        torch.save(data, filename)

    @classmethod
    def load(cls, filename: str, default: "TrainerState", gpu: int) -> "TrainerState":
        """Load a checkpoint onto `gpu`, reusing `default`'s live objects.

        The stored state dicts are loaded into `default`'s already-constructed
        model/optimizer/scheduler instances, which then replace the dict
        entries before reconstructing the TrainerState.
        """
        data = torch.load(filename, map_location=lambda storage, loc: storage.cuda(gpu))
        # We need this default to load the state dict
        model = default.model
        model.load_state_dict(data["model"])
        data["model"] = model

        optimizer = default.optimizer
        optimizer.load_state_dict(data["optimizer"])
        data["optimizer"] = optimizer

        lr_scheduler = default.lr_scheduler
        lr_scheduler.load_state_dict(data["lr_scheduler"])
        data["lr_scheduler"] = lr_scheduler
        return cls(**data)
class Trainer:
    def __init__(self, train_cfg: NamedTuple, cluster_cfg: ClusterConfig) -> None:
        # only store configs here; heavy state is built later in _init_state()
        self._train_cfg = train_cfg
        self._cluster_cfg = cluster_cfg
    def __call__(self) -> Optional[float]:
        """
        Called by submitit for each task.

        Joins the distributed process group, builds the training state,
        then runs the training loop.

        :return: The master task returns the final accuracy of the model.
        """
        self._setup_process_group()
        self._init_state()
        final_acc = self._train()
        return final_acc
def log(self, log_data: dict):
job_env = submitit.JobEnvironment()
# z = {**vars(self._train_cfg), **log_data}
save_dir = Path(self._train_cfg.output_dir)
os.makedirs(save_dir, exist_ok=True)
with open(save_dir / 'log.txt', 'a') as f:
f.write(json.dumps(log_data) + '\n')
    def checkpoint(self, rm_init=True) -> submitit.helpers.DelayedSubmission:
        """Persist training state and return a requeue submission (called by
        submitit on preemption)."""
        job_env = submitit.JobEnvironment()
        save_dir = osp.join(self._train_cfg.output_dir, str(job_env.job_id))
        os.makedirs(save_dir, exist_ok=True)
        self._state.save(osp.join(save_dir, "checkpoint.pth"))
        # Trick here: when the job is requeued it will reuse the same init file,
        # which must not exist when the process group is initialized -- so we
        # delete it, but only when this method is called by submitit for requeue.
        if rm_init and osp.exists(self._cluster_cfg.dist_url[7:]):
            os.remove(self._cluster_cfg.dist_url[7:])  # strip the leading "file://"
        # A fresh Trainer drops any non-picklable state accumulated on this one.
        empty_trainer = Trainer(self._train_cfg, self._cluster_cfg)
        return submitit.helpers.DelayedSubmission(empty_trainer)
    def _setup_process_group(self) -> None:
        """Bind this task to its local GPU and join the distributed process group."""
        job_env = submitit.JobEnvironment()
        # one task per GPU: select the device by local rank
        torch.cuda.set_device(job_env.local_rank)
        torch.distributed.init_process_group(
            backend=self._cluster_cfg.dist_backend,
            init_method=self._cluster_cfg.dist_url,
            world_size=job_env.num_tasks,
            rank=job_env.global_rank,
        )
        print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def _init_state(self) -> None:
"""
Initialize the state and load it from an existing checkpoint if any
"""
job_env = submitit.JobEnvironment()
if job_env.global_rank == 0:
# config_path = Path(args.save_folder) / str(job_env.job_id) / 'config.json'
os.makedirs(self._train_cfg.output_dir, exist_ok=True)
config_path = Path(self._train_cfg.output_dir) / 'config.json'
with open(config_path, "w") as g:
g.write(json.dumps(self._train_cfg._asdict()))
print(f"Setting random seed {self._train_cfg.seed}", flush=True)
random.seed(self._train_cfg.seed)
np.random.seed(self._train_cfg.seed)
torch.manual_seed(self._train_cfg.seed)
print("Create data loaders", flush=True)
tokenizer = AutoTokenizer.from_pretrained(self._train_cfg.model_name)
collate_fc = partial(rank_collate, pad_id=tokenizer.pad_token_id)
train_set = RankingDataset(tokenizer, self._train_cfg.train_file, self._train_cfg.max_seq_len, self._train_cfg.max_q_len, train=True)
train_sampler = MhopSampler(train_set, num_neg=self._train_cfg.neg_num)
batch_size_per_gpu = (1 + self._train_cfg.neg_num) * self._train_cfg.num_q_per_gpu
n_gpu = torch.cuda.device_count()
print(f"Number of GPUs: {n_gpu}", flush=True)
print(f"Batch size per node: {batch_size_per_gpu * n_gpu}", flush=True)
self._train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size_per_gpu * n_gpu, num_workers=self._train_cfg.num_workers, collate_fn=collate_fc, sampler=train_sampler)
test_set = RankingDataset(tokenizer, self._train_cfg.predict_file, self._train_cfg.max_seq_len, self._train_cfg.max_q_len)
self._test_loader = torch.utils.data.DataLoader(
test_set,
batch_size=self._train_cfg.predict_batch_size,
num_workers=self._train_cfg.num_workers, collate_fn=collate_fc
)
print("Create model", flush=True)
print(f"Local rank {job_env.local_rank}", flush=True)
bert_config = AutoConfig.from_pretrained(self._train_cfg.model_name)
model = QAModel(bert_config, self._train_cfg)
model.cuda(job_env.local_rank)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': self._train_cfg.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if self._train_cfg.use_adam:
optimizer = optim.Adam(optimizer_parameters, lr=self._train_cfg.learning_rate)
else:
optimizer = AdamW(optimizer_parameters, lr=self._train_cfg.learning_rate)
# lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=2)
if self._train_cfg.fp16:
model, optimizer = amp.initialize(
model, optimizer, opt_level=self._train_cfg.fp16_opt_level)
t_total = len(self._train_loader) // self._train_cfg.gradient_accumulation_steps * self._train_cfg.num_train_epochs
warmup_steps = t_total * self._train_cfg.warmup_ratio
lr_scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
)
model = torch.nn.DataParallel(model)
self._state = TrainerState(
epoch=0, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, global_step=0
)
self.tb_logger = SummaryWriter(self._train_cfg.output_dir.replace("logs", "tflogs"))
checkpoint_fn = osp.join(self._train_cfg.output_dir, str(job_env.job_id), "checkpoint.pth")
# checkpoint_fn = osp.join(self._train_cfg.output_dir, "checkpoint.pth")
if os.path.isfile(checkpoint_fn):
print(f"Load existing checkpoint from {checkpoint_fn}", flush=True)
self._state = TrainerState.load(
checkpoint_fn, default=self._state, gpu=job_env.local_rank)
def _train(self) -> Optional[float]:
job_env = submitit.JobEnvironment()
batch_step = 0 # forward batch count
best_metric = 0
train_loss_meter = AverageMeter()
print(f"Start training", flush=True)
# Start from the loaded epoch
start_epoch = self._state.epoch
global_step = self._state.global_step
for epoch in range(start_epoch, self._train_cfg.num_train_epochs):
print(f"Start epoch {epoch}", flush=True)
self._state.model.train()
self._state.epoch = epoch
for batch in self._train_loader:
batch_step += 1
batch_inputs = move_to_cuda(batch["net_inputs"])
loss = self._state.model(batch_inputs)
if torch.cuda.device_count() > 1:
loss = loss.mean()
if self._train_cfg.gradient_accumulation_steps > 1:
loss = loss / self._train_cfg.gradient_accumulation_steps
if self._train_cfg.fp16:
with amp.scale_loss(loss, self._state.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
train_loss_meter.update(loss.item())
if (batch_step + 1) % self._train_cfg.gradient_accumulation_steps == 0:
if self._train_cfg.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(self._state.optimizer), self._train_cfg.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(
self._state.model.parameters(), self._train_cfg.max_grad_norm)
self._state.optimizer.step()
self._state.lr_scheduler.step()
self._state.model.zero_grad()
global_step += 1
self._state.global_step = global_step
self.tb_logger.add_scalar('batch_train_loss',
loss.item(), global_step)
self.tb_logger.add_scalar('smoothed_train_loss',
train_loss_meter.avg, global_step)
if job_env.global_rank == 0:
if self._train_cfg.eval_period != -1 and global_step % self._train_cfg.eval_period == 0:
metrics = self._eval()
for k, v in metrics.items():
self.tb_logger.add_scalar(k, v*100, global_step)
score = metrics[self._train_cfg.final_metric]
if best_metric < score:
print("Saving model with best %s %.2f -> em %.2f" % (self._train_cfg.final_metric, best_metric*100, score*100), flush=True)
torch.save(self._state.model.state_dict(), os.path.join(self._train_cfg.output_dir, f"checkpoint_best.pt"))
best_metric = score
# Checkpoint only on the master
if job_env.global_rank == 0:
self.checkpoint(rm_init=False)
metrics = self._eval()
for k, v in metrics.items():
self.tb_logger.add_scalar(k, v*100, global_step)
score = metrics[self._train_cfg.final_metric]
if best_metric < score:
print("Saving model with best %s %.2f -> em %.2f" % (self._train_cfg.final_metric, best_metric*100, score*100), flush=True)
torch.save(self._state.model.state_dict(), os.path.join(self._train_cfg.output_dir, f"checkpoint_best.pt"))
best_metric = score
self.log({
"best_score": best_metric,
"curr_score": score,
"smoothed_loss": train_loss_meter.avg,
"epoch": epoch
})
return best_metric
def _eval(self) -> dict:
print("Start evaluation of the model", flush=True)
job_env = submitit.JobEnvironment()
args = self._train_cfg
eval_dataloader = self._test_loader
model = self._state.model
model.eval()
id2result = collections.defaultdict(list)
id2answer = collections.defaultdict(list)
id2gold = {}
id2goldsp = {}
for batch in tqdm(eval_dataloader):
batch_to_feed = move_to_cuda(batch["net_inputs"])
batch_qids = batch["qids"]
batch_labels = batch["net_inputs"]["label"].view(-1).tolist()
with torch.no_grad():
outputs = model(batch_to_feed)
scores = outputs["rank_score"]
scores = scores.view(-1).tolist()
sp_scores = outputs["sp_score"]
sp_scores = sp_scores.float().masked_fill(batch_to_feed["sent_offsets"].eq(0), float("-inf")).type_as(sp_scores)
batch_sp_scores = sp_scores.sigmoid()
# ans_type_predicted = torch.argmax(outputs["ans_type_logits"], dim=1).view(-1).tolist()
outs = [outputs["start_logits"], outputs["end_logits"]]
for qid, label, score in zip(batch_qids, batch_labels, scores):
id2result[qid].append((label, score))
# answer prediction
span_scores = outs[0][:, :, None] + outs[1][:, None]
max_seq_len = span_scores.size(1)
span_mask = np.tril(np.triu(np.ones((max_seq_len, max_seq_len)), 0), args.max_ans_len)
span_mask = span_scores.data.new(max_seq_len, max_seq_len).copy_(torch.from_numpy(span_mask))
span_scores_masked = span_scores.float().masked_fill((1 - span_mask[None].expand_as(span_scores)).bool(), -1e10).type_as(span_scores)
start_position = span_scores_masked.max(dim=2)[0].max(dim=1)[1]
end_position = span_scores_masked.max(dim=2)[1].gather(
1, start_position.unsqueeze(1)).squeeze(1)
answer_scores = span_scores_masked.max(dim=2)[0].max(dim=1)[0].tolist()
para_offset = batch['para_offsets']
start_position_ = list(
np.array(start_position.tolist()) - np.array(para_offset))
end_position_ = list(
np.array(end_position.tolist()) - np.array(para_offset))
for idx, qid in enumerate(batch_qids):
id2gold[qid] = batch["gold_answer"][idx]
id2goldsp[qid] = batch["sp_gold"][idx]
rank_score = scores[idx]
sp_score = batch_sp_scores[idx].tolist()
start = start_position_[idx]
end = end_position_[idx]
span_score = answer_scores[idx]
tok_to_orig_index = batch['tok_to_orig_index'][idx]
doc_tokens = batch['doc_tokens'][idx]
wp_tokens = batch['wp_tokens'][idx]
orig_doc_start = tok_to_orig_index[start]
orig_doc_end = tok_to_orig_index[end]
orig_tokens = doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_tokens = wp_tokens[start:end+1]
tok_text = " ".join(tok_tokens)
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
pred_str = get_final_text(tok_text, orig_text, do_lower_case=True, verbose_logging=False)
pred_sp = []
passages = batch["passages"][idx]
for passage, sent_offset in zip(passages, [0, len(passages[0]["sents"])]):
for idx, _ in enumerate(passage["sents"]):
try:
if sp_score[idx + sent_offset] > 0.5:
pred_sp.append([passage["title"], idx])
except:
continue
id2answer[qid].append((pred_str.strip(), rank_score, span_score, pred_sp))
acc = []
for qid, res in id2result.items():
res.sort(key=lambda x: x[1], reverse=True)
acc.append(res[0][0] == 1)
print(f"evaluated {len(id2result)} questions...", flush=True)
print(f'chain ranking em: {np.mean(acc)}', flush=True)
best_em, best_f1, best_joint_em, best_joint_f1 = 0, 0, 0, 0
lambdas = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
for lambda_ in lambdas:
ems, f1s = [], []
sp_ems, sp_f1s = [], []
joint_ems, joint_f1s = [], []
for qid, res in id2result.items():
ans_res = id2answer[qid]
ans_res.sort(key=lambda x: lambda_ * x[1] + (1 - lambda_) * x[2], reverse=True)
top_pred = ans_res[0][0]
ems.append(exact_match_score(top_pred, id2gold[qid][0]))
f1, prec, recall = f1_score(top_pred, id2gold[qid][0])
f1s.append(f1)
top_pred_sp = ans_res[0][3]
metrics = {'sp_em': 0, 'sp_f1': 0, 'sp_prec': 0, 'sp_recall': 0}
update_sp(metrics, top_pred_sp, id2goldsp[qid])
sp_ems.append(metrics['sp_em'])
sp_f1s.append(metrics['sp_f1'])
# joint metrics
joint_prec = prec * metrics["sp_prec"]
joint_recall = recall * metrics["sp_recall"]
if joint_prec + joint_recall > 0:
joint_f1 = 2 * joint_prec * joint_recall / (joint_prec + joint_recall)
else:
joint_f1 = 0
joint_em = ems[-1] * sp_ems[-1]
joint_ems.append(joint_em)
joint_f1s.append(joint_f1)
if best_joint_f1 < np.mean(joint_f1s):
best_joint_f1 = np.mean(joint_f1s)
best_joint_em = np.mean(joint_ems)
best_f1 = np.mean(f1s)
best_em = np.mean(ems)
print(f".......Using combination factor {lambda_}......", flush=True)
print(f'answer em: {np.mean(ems)}, count: {len(ems)}', flush=True)
print(f'answer f1: {np.mean(f1s)}, count: {len(f1s)}', flush=True)
print(f'sp em: {np.mean(sp_ems)}, count: {len(sp_ems)}', flush=True)
print(f'sp f1: {np.mean(sp_f1s)}, count: {len(sp_f1s)}', flush=True)
print(f'joint em: {np.mean(joint_ems)}, count: {len(joint_ems)}', flush=True)
print(f'joint f1: {np.mean(joint_f1s)}, count: {len(joint_f1s)}', flush=True)
print(f"Best joint EM/F1 from combination {best_em}/{best_f1}", flush=True)
model.train()
return {"em": best_em, "f1": best_f1, "joint_em": best_joint_em, "joint_f1": best_joint_f1}
| 19,844 | 46.589928 | 193 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/interactive_retrieval.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Interactive multi-hop retrieval demo.

Loads a flat inner-product FAISS index over passage embeddings and the
trained question encoder, then answers typed questions with their
nearest-neighbor passage ids.
"""
from models.mhop_retriever import MhopRetriever
import faiss
import numpy as np
import torch
from tqdm import tqdm
from transformers import AutoConfig, AutoTokenizer
import json
import logging
import argparse
from .utils.utils import (load_saved, move_to_cuda)

parser = argparse.ArgumentParser()
parser.add_argument('--topk', type=int, default=2, help="topk paths")
parser.add_argument('--num-workers', type=int, default=10)
parser.add_argument('--max-q-len', type=int, default=70)
parser.add_argument('--max-c-len', type=int, default=300)
parser.add_argument('--max-q-sp-len', type=int, default=350)
parser.add_argument('--model-name', type=str, default='bert-base-uncased')
parser.add_argument('--gpu', action="store_true")
parser.add_argument('--shared-encoder', action="store_true")
parser.add_argument("--stop-drop", default=0, type=float)
args = parser.parse_args()

# Hard-coded artifact locations for this demo.
index_path = "index/abstracts_v0_fixed.npy"
corpus_path = "index/abstracts_id2doc.json"
model_path = "logs/08-05-2020/baseline_v0_fixed-seed16-bsz150-fp16True-lr2e-05-decay0.0-warm0.1-valbsz3000-sharedTrue-multi1-schemenone/checkpoint_best.pt"

print(f"Loading corpus and index...")
id2doc = json.load(open(corpus_path))
index_vectors = np.load(index_path).astype('float32')
index = faiss.IndexFlatIP(768)  # exact inner-product search over 768-d embeddings
index.add(index_vectors)
res = faiss.StandardGpuResources()
index = faiss.index_cpu_to_gpu(res, 1, index)

print(f"Loading retrieval model...")
bert_config = AutoConfig.from_pretrained("bert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = MhopRetriever(bert_config, args)
# Fix: the checkpoint path is the module-level constant above; `args`
# declares no `model_path` option (the old `args.model_path` raised
# AttributeError at runtime).
model = load_saved(model, model_path, exact=False)
cuda = torch.device('cuda')
model.to(cuda)
from apex import amp
model = amp.initialize(model, opt_level='O1')
model.eval()

while True:
    question = input("Type Question:")
    # Fix: encode the typed question. The old code overwrote the input with
    # a hard-coded debug query and then encoded the literal string
    # "question" instead of the variable.
    batch_q_encodes = tokenizer.batch_encode_plus([question], max_length=args.max_q_len, pad_to_max_length=True, return_tensors="pt")
    batch_q_encodes = move_to_cuda(dict(batch_q_encodes))
    q_embeds = model.encode_q(batch_q_encodes["input_ids"], batch_q_encodes["attention_mask"], batch_q_encodes.get("token_type_ids", None))
    q_embeds_numpy = q_embeds.cpu().contiguous().numpy()
    D, I = index.search(q_embeds_numpy, 1)
    print(I)
| 2,527 | 36.731343 | 155 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/mhop_trainer.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
submitit trainer for hyperparameter tuning
"""
import os
import os.path as osp
from typing import Optional, NamedTuple
import torch
import torch.distributed
import torch.nn as nn
import torch.optim as optim
import attr
import submitit
from functools import partial
import numpy as np
import random
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
import json
from transformers import (
AdamW, AutoConfig, AutoTokenizer, get_linear_schedule_with_warmup)
from torch.optim import Adam
from .utils.utils import move_to_cuda, AverageMeter
from .config import ClusterConfig
from .data.mhop_dataset import MhopDataset, mhop_collate
from .models.mhop_retriever import (MhopRetriever, RobertaRetriever)
from .criterions import (mhop_loss, mhop_eval)
from tqdm import tqdm
import apex
apex.amp.register_half_function(torch, 'einsum')
from apex import amp
@attr.s(auto_attribs=True)
class TrainerState:
    """
    Contains the state of the Trainer.
    It can be saved to checkpoint the training and loaded to resume it.
    """
    epoch: int
    model: nn.Module
    optimizer: optim.Optimizer
    lr_scheduler: torch.optim.lr_scheduler._LRScheduler
    global_step: int
    def save(self, filename: str) -> None:
        """Serialize counters plus model/optimizer/scheduler state dicts."""
        data = attr.asdict(self)
        # store only the state dict
        data["model"] = self.model.state_dict()
        data["optimizer"] = self.optimizer.state_dict()
        data["lr_scheduler"] = self.lr_scheduler.state_dict()
        torch.save(data, filename)
    @classmethod
    def load(cls, filename: str, default: "TrainerState", gpu: int) -> "TrainerState":
        """Restore a checkpoint onto GPU `gpu`.

        The objects held by `default` receive the saved state dicts in
        place, so callers keep working references to them.
        """
        data = torch.load(filename, map_location=lambda storage, loc: storage.cuda(gpu))
        # We need this default to load the state dict
        model = default.model
        model.load_state_dict(data["model"])
        data["model"] = model
        optimizer = default.optimizer
        optimizer.load_state_dict(data["optimizer"])
        data["optimizer"] = optimizer
        lr_scheduler = default.lr_scheduler
        lr_scheduler.load_state_dict(data["lr_scheduler"])
        data["lr_scheduler"] = lr_scheduler
        return cls(**data)
class Trainer:
    """Submitit task wrapper that trains/evaluates the multi-hop retriever.

    One instance is pickled per cluster task. submitit invokes ``__call__``
    to run the task and ``checkpoint`` on preemption so the job can be
    requeued and resumed from the last saved ``TrainerState``.
    """
    def __init__(self, train_cfg: NamedTuple, cluster_cfg: ClusterConfig) -> None:
        self._train_cfg = train_cfg
        self._cluster_cfg = cluster_cfg
    def __call__(self) -> Optional[float]:
        """
        Called by submitit for each task.
        :return: The master task return the final accuracy of the model.
        """
        self._setup_process_group()
        self._init_state()
        final_acc = self._train()
        return final_acc
    def log(self, log_data: dict):
        """Append one JSON object per line to <output_dir>/log.txt."""
        job_env = submitit.JobEnvironment()
        # z = {**vars(self._train_cfg), **log_data}
        save_dir = Path(self._train_cfg.output_dir)
        os.makedirs(save_dir, exist_ok=True)
        with open(save_dir / 'log.txt', 'a') as f:
            f.write(json.dumps(log_data) + '\n')
    def checkpoint(self, rm_init=True) -> submitit.helpers.DelayedSubmission:
        """Save the training state and return a requeue-able copy of self."""
        # will be called by submitit in case of preemption
        job_env = submitit.JobEnvironment()
        save_dir = osp.join(self._train_cfg.output_dir, str(job_env.job_id))
        os.makedirs(save_dir, exist_ok=True)
        self._state.save(osp.join(save_dir, "checkpoint.pth"))
        # Trick here: when the job will be requeue, we will use the same init file
        # but it must not exist when we initialize the process group
        # so we delete it, but only when this method is called by submitit for requeue
        if rm_init and osp.exists(self._cluster_cfg.dist_url[7:]):
            os.remove(self._cluster_cfg.dist_url[7:])  # remove file:// at the beginning
        # This allow to remove any non-pickable part of the Trainer instance.
        empty_trainer = Trainer(self._train_cfg, self._cluster_cfg)
        return submitit.helpers.DelayedSubmission(empty_trainer)
    def _setup_process_group(self) -> None:
        """Bind this task to its local GPU and join the distributed group."""
        job_env = submitit.JobEnvironment()
        torch.cuda.set_device(job_env.local_rank)
        torch.distributed.init_process_group(
            backend=self._cluster_cfg.dist_backend,
            init_method=self._cluster_cfg.dist_url,
            world_size=job_env.num_tasks,
            rank=job_env.global_rank,
        )
        print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
    def _init_state(self) -> None:
        """
        Initialize the state and load it from an existing checkpoint if any
        """
        job_env = submitit.JobEnvironment()
        if job_env.global_rank == 0:
            # Only the master dumps the run configuration next to the logs.
            # config_path = Path(args.save_folder) / str(job_env.job_id) / 'config.json'
            os.makedirs(self._train_cfg.output_dir, exist_ok=True)
            config_path = Path(self._train_cfg.output_dir) / 'config.json'
            with open(config_path, "w") as g:
                g.write(json.dumps(self._train_cfg._asdict()))
        print(f"Setting random seed {self._train_cfg.seed}", flush=True)
        random.seed(self._train_cfg.seed)
        np.random.seed(self._train_cfg.seed)
        torch.manual_seed(self._train_cfg.seed)
        torch.cuda.manual_seed_all(self._train_cfg.seed)
        print("Create data loaders", flush=True)
        tokenizer = AutoTokenizer.from_pretrained(self._train_cfg.model_name)
        collate_fc = partial(mhop_collate, pad_id=tokenizer.pad_token_id)
        train_set = MhopDataset(tokenizer, self._train_cfg.train_file, self._train_cfg.max_q_len, self._train_cfg.max_q_sp_len, self._train_cfg.max_c_len, train=True)
        self._train_loader = torch.utils.data.DataLoader(train_set, batch_size=self._train_cfg.train_batch_size, num_workers=self._train_cfg.num_workers, collate_fn=collate_fc, shuffle=True)
        test_set = MhopDataset(tokenizer, self._train_cfg.predict_file, self._train_cfg.max_q_len, self._train_cfg.max_q_sp_len, self._train_cfg.max_c_len)
        self._test_loader = torch.utils.data.DataLoader(
            test_set,
            batch_size=self._train_cfg.predict_batch_size,
            num_workers=self._train_cfg.num_workers, collate_fn=collate_fc, pin_memory=True
        )
        print("Create model", flush=True)
        print(f"Local rank {job_env.local_rank}", flush=True)
        bert_config = AutoConfig.from_pretrained(self._train_cfg.model_name)
        # Pick the encoder class matching the configured pretrained model.
        if "roberta" in self._train_cfg.model_name:
            model = RobertaRetriever(bert_config, self._train_cfg)
        else:
            model = MhopRetriever(bert_config, self._train_cfg)
        model.cuda(job_env.local_rank)
        # Standard transformer fine-tuning: no weight decay on biases/LayerNorm.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': self._train_cfg.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = Adam(optimizer_parameters, lr=self._train_cfg.learning_rate, eps=self._train_cfg.adam_epsilon)
        if self._train_cfg.fp16:
            # apex must wrap the model/optimizer before DataParallel.
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=self._train_cfg.fp16_opt_level)
        # Total optimizer steps over the full run, used for the warmup schedule.
        t_total = len(self._train_loader) // self._train_cfg.gradient_accumulation_steps * self._train_cfg.num_train_epochs
        warmup_steps = t_total * self._train_cfg.warmup_ratio
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
        )
        model = torch.nn.DataParallel(model)
        self._state = TrainerState(
            epoch=0, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, global_step=0
        )
        self.tb_logger = SummaryWriter(self._train_cfg.output_dir.replace("logs", "tflogs"))
        checkpoint_fn = osp.join(self._train_cfg.output_dir, str(job_env.job_id), "checkpoint.pth")
        # checkpoint_fn = osp.join(self._train_cfg.output_dir, "checkpoint.pth")
        if os.path.isfile(checkpoint_fn):
            print(f"Load existing checkpoint from {checkpoint_fn}", flush=True)
            self._state = TrainerState.load(
                checkpoint_fn, default=self._state, gpu=job_env.local_rank)
    def _train(self) -> Optional[float]:
        """Run the training loop; returns the best dev MRR seen."""
        job_env = submitit.JobEnvironment()
        batch_step = 0  # forward batch count
        best_mrr = 0
        train_loss_meter = AverageMeter()
        print(f"Start training", flush=True)
        # Start from the loaded epoch
        start_epoch = self._state.epoch
        global_step = self._state.global_step
        for epoch in range(start_epoch, self._train_cfg.num_train_epochs):
            print(f"Start epoch {epoch}", flush=True)
            self._state.model.train()
            self._state.epoch = epoch
            for batch in self._train_loader:
                batch_step += 1
                batch = move_to_cuda(batch)
                loss = mhop_loss(self._state.model, batch, self._train_cfg)
                if self._train_cfg.gradient_accumulation_steps > 1:
                    loss = loss / self._train_cfg.gradient_accumulation_steps
                if self._train_cfg.fp16:
                    with amp.scale_loss(loss, self._state.optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                train_loss_meter.update(loss.item())
                # Optimizer step only every gradient_accumulation_steps batches.
                if (batch_step + 1) % self._train_cfg.gradient_accumulation_steps == 0:
                    if self._train_cfg.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(self._state.optimizer), self._train_cfg.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            self._state.model.parameters(), self._train_cfg.max_grad_norm)
                    self._state.optimizer.step()
                    self._state.lr_scheduler.step()
                    self._state.model.zero_grad()
                    global_step += 1
                    self._state.global_step = global_step
                    self.tb_logger.add_scalar('batch_train_loss',
                                              loss.item(), global_step)
                    self.tb_logger.add_scalar('smoothed_train_loss',
                                              train_loss_meter.avg, global_step)
            # Checkpoint only on the master
            # if job_env.global_rank == 0:
            # NOTE(review): the rank guard above is commented out, so every
            # task checkpoints and evaluates each epoch — confirm intended.
            self.checkpoint(rm_init=False)
            mrrs = self._eval()
            mrr = mrrs["mrr_avg"]
            self.tb_logger.add_scalar('dev_mrr', mrr*100, epoch)
            # NOTE(review): this passes the metric as the `epoch` argument of
            # the linear-warmup LambdaLR — looks like a ReduceLROnPlateau
            # leftover (see the commented scheduler in _init_state); confirm.
            self._state.lr_scheduler.step(mrr)
            if best_mrr < mrr:
                print("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" % (best_mrr*100, mrr*100, epoch))
                torch.save(self._state.model.state_dict(), os.path.join(self._train_cfg.output_dir, f"checkpoint_best.pt"))
                best_mrr = mrr
            self.log({
                "best_mrr": best_mrr,
                "curr_mrr": mrr,
                "smoothed_loss": train_loss_meter.avg,
                "epoch": epoch
            })
        return best_mrr
    def _eval(self) -> float:
        """Compute dev mean reciprocal rank for both retrieval hops."""
        print("Start evaluation of the model", flush=True)
        job_env = submitit.JobEnvironment()
        args = self._train_cfg
        eval_dataloader = self._test_loader
        self._state.model.eval()
        rrs_1, rrs_2 = [], []  # reciprocal rank
        for batch in tqdm(eval_dataloader):
            batch_to_feed = move_to_cuda(batch)
            with torch.no_grad():
                outputs = self._state.model(batch_to_feed)
                eval_results = mhop_eval(outputs, args)
                _rrs_1, _rrs_2 = eval_results["rrs_1"], eval_results["rrs_2"]
                rrs_1 += _rrs_1
                rrs_2 += _rrs_2
        mrr_1 = np.mean(rrs_1)
        mrr_2 = np.mean(rrs_2)
        print(f"evaluated {len(rrs_1)} examples...")
        print(f'MRR-1: {mrr_1}')
        print(f'MRR-2: {mrr_2}')
        self._state.model.train()
        return {"mrr_1": mrr_1, "mrr_2": mrr_2, "mrr_avg": (mrr_1 + mrr_2) / 2}
| 12,533 | 41.778157 | 190 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/criterions.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
# def loss_single(model, batch, momentum=False):
# outputs = model(batch)
# q = outputs['q']
# c = outputs['c']
# neg_c = outputs['neg_c']
# product_in_batch = torch.mm(q, c.t())
# product_neg = (q * neg_c).sum(-1).unsqueeze(1)
# product = torch.cat([product_in_batch, product_neg], dim=-1)
# if momentum:
# queue_c = model.module.encode_queue_ctx()
# product_queue = torch.mm(q, queue_c.t())
# product = torch.cat([product, product_queue], dim=-1)
# model.module.dequeue_and_enqueue(batch)
# target = torch.arange(product.size(0)).to(product.device)
# loss = F.cross_entropy(product, target)
# return loss
# """
# multi-hop retrieval for NQ, train the model to recover from
# """
# def loss_nq_mhop(model, batch, momentum=False):
# outputs = model(batch)
# product_in_batch = torch.mm(outputs['q'], outputs['c'].t())
# product_neg = (outputs['q'] * outputs['neg']).sum(-1).unsqueeze(1)
# # product_neg1 = (outputs['q'] * outputs['dense_neg1']).sum(-1).unsqueeze(1)
# # product_neg2 = (outputs['q'] * outputs['dense_neg2']).sum(-1).unsqueeze(1)
# scores1 = torch.cat([product_in_batch, product_neg], dim=-1)
# product_in_batch_from_error = torch.mm(outputs["q_neg1"], outputs['c'].t())
# dense_neg = torch.cat([outputs["dense_neg1"].unsqueeze(1), outputs["dense_neg2"].unsqueeze(1)], dim=1)
# product_neg_from_error = torch.bmm(outputs["q_neg1"].unsqueeze(1), dense_neg.transpose(1,2)).squeeze(1)
# scores2 = torch.cat([product_in_batch_from_error, product_neg_from_error], dim=-1)
# if momentum:
# queue_neg_scores_1 = torch.mm(outputs['q'], model.module.queue.clone().detach().t())
# queue_neg_scores_2 = torch.mm(outputs["q_neg1"], model.module.queue.clone().detach().t())
# scores1 = torch.cat([scores1, queue_neg_scores_1], dim=1)
# scores2 = torch.cat([scores2, queue_neg_scores_2], dim=1)
# model.module.dequeue_and_enqueue(outputs["c"].detach())
# # model.module.momentum_update_key_encoder()
# target = torch.arange(scores1.size(0)).to(scores1.device)
# loss = F.cross_entropy(scores1, target) + F.cross_entropy(scores2, target)
# # loss = F.cross_entropy(scores1, target)
# return loss
# def eval_nq_mhop(model, batch):
# outputs = model(batch)
# product_in_batch = torch.mm(outputs['q'], outputs['c'].t())
# product_neg = (outputs['q'] * outputs['neg']).sum(-1).unsqueeze(1)
# # product_neg1 = (outputs['q'] * outputs['dense_neg1']).sum(-1).unsqueeze(1)
# # product_neg2 = (outputs['q'] * outputs['dense_neg2']).sum(-1).unsqueeze(1)
# scores1 = torch.cat([product_in_batch, product_neg], dim=-1)
# product_in_batch_from_error = torch.mm(outputs["q_neg1"], outputs['c'].t())
# dense_neg = torch.cat([outputs["dense_neg1"].unsqueeze(1), outputs["dense_neg2"].unsqueeze(1)], dim=1)
# product_neg_from_error = torch.bmm(outputs["q_neg1"].unsqueeze(1), dense_neg.transpose(1,2)).squeeze(1)
# scores2 = torch.cat([product_in_batch_from_error, product_neg_from_error], dim=-1)
# target = torch.arange(scores1.size(0)).to(scores1.device)
# rrs, rrs_2hop = [], []
# ranked = scores1.argsort(dim=1, descending=True)
# ranked_2hop = scores2.argsort(dim=1, descending=True)
# idx2rank = ranked.argsort(dim=1)
# for idx, t in enumerate(target.tolist()):
# rrs.append(1 / (idx2rank[idx][t].item() +1))
# idx2rank2hop = ranked_2hop.argsort(dim=1)
# for idx, t in enumerate(target.tolist()):
# rrs_2hop.append(1 / (idx2rank2hop[idx][t].item() +1))
# return rrs, rrs_2hop
# def eval_vanilla(outputs):
# """
# view the two sp passages as the same, no multi-hop modeling;
# select the passages from all passages in the batch
# """
# rrs = []
# q = outputs['q']
# c1 = outputs['c1']
# c2 = outputs['c2']
# c = torch.cat([c1.unsqueeze(1), c2.unsqueeze(1)], dim=1) # B x 2 x D
# c = c.view(-1, q.size(-1)) # 2B x D
# product_in_batch = torch.mm(q, c.t()) # Bx2B
# neg_c = outputs['neg_c']
# product_neg = (q * neg_c).sum(-1).unsqueeze(1)
# product = torch.cat([product_in_batch, product_neg], dim=-1)
# target = torch.arange(product.size(0)).to(product.device).unsqueeze(1)
# target = torch.cat([target*2, target*2+1], dim=1)
# ranked = product.argsort(dim=1, descending=True)
# # MRR
# idx2rank = ranked.argsort(dim=1)
# for idx, t in enumerate(target):
# correct_idx = t.tolist()
# for _ in correct_idx:
# rrs.append(1 / (idx2rank[idx][_].item() + 1))
# return rrs
def mhop_loss(model, batch, args):
    """Contrastive multi-hop retrieval loss.

    For each question the candidate pool is every c1/c2 passage embedding in
    the batch plus the question's own hard negatives. Hop 1 (q) is trained
    to select its c1 — its own hop-2 gold (column B+i) is masked to -inf —
    and hop 2 (q concatenated with sp1) is trained to select its c2.
    With ``args.momentum``, MoCo-style extra negatives come from the model's
    queue of past passage embeddings, which is refreshed with this batch.
    """
    outputs = model(batch)
    loss_fct = CrossEntropyLoss(ignore_index=-1)
    all_ctx = torch.cat([outputs['c1'], outputs['c2']], dim=0)
    neg_ctx = torch.cat([outputs["neg_1"].unsqueeze(1), outputs["neg_2"].unsqueeze(1)], dim=1) # B x 2 x h (one hard negative per hop; bmm below requires 3-D)
    scores_1_hop = torch.mm(outputs["q"], all_ctx.t())
    neg_scores_1 = torch.bmm(outputs["q"].unsqueeze(1), neg_ctx.transpose(1,2)).squeeze(1)
    scores_2_hop = torch.mm(outputs["q_sp1"], all_ctx.t())
    neg_scores_2 = torch.bmm(outputs["q_sp1"].unsqueeze(1), neg_ctx.transpose(1,2)).squeeze(1)
    # mask the 1st hop: row i must not be scored against its own hop-2 gold
    bsize = outputs["q"].size(0)
    scores_1_mask = torch.cat([torch.zeros(bsize, bsize), torch.eye(bsize)], dim=1).to(outputs["q"].device)
    scores_1_hop = scores_1_hop.float().masked_fill(scores_1_mask.bool(), float('-inf')).type_as(scores_1_hop)
    scores_1_hop = torch.cat([scores_1_hop, neg_scores_1], dim=1)
    scores_2_hop = torch.cat([scores_2_hop, neg_scores_2], dim=1)
    if args.momentum:
        # Extra negatives from the momentum queue (treated as constants).
        queue_neg_scores_1 = torch.mm(outputs["q"], model.module.queue.clone().detach().t())
        queue_neg_scores_2 = torch.mm(outputs["q_sp1"], model.module.queue.clone().detach().t())
        # queue_neg_scores_1 = queue_neg_scores_1 / args.temperature
        # queue_neg_scores_2 = queue_neg_scores_2 / args.temperature
        scores_1_hop = torch.cat([scores_1_hop, queue_neg_scores_1], dim=1)
        scores_2_hop = torch.cat([scores_2_hop, queue_neg_scores_2], dim=1)
        model.module.dequeue_and_enqueue(all_ctx.detach())
        # model.module.momentum_update_key_encoder()
    # Gold columns: row i targets column i for hop 1 and column B+i for hop 2.
    target_1_hop = torch.arange(outputs["q"].size(0)).to(outputs["q"].device)
    target_2_hop = torch.arange(outputs["q"].size(0)).to(outputs["q"].device) + outputs["q"].size(0)
    retrieve_loss = loss_fct(scores_1_hop, target_1_hop) + loss_fct(scores_2_hop, target_2_hop)
    return retrieve_loss
def mhop_eval(outputs, args):
    """Compute per-hop reciprocal ranks for a batch of retrieval outputs.

    Each query is ranked against a shared pool of all in-batch c1/c2
    embeddings plus its own two hard negatives. Hop-1 scoring masks out the
    query's own hop-2 gold passage (the eye block over the c2 half) so it
    cannot be retrieved one hop early. ``args`` is accepted for interface
    parity with the loss functions and is unused here.

    Returns a dict with lists "rrs_1" and "rrs_2", one reciprocal rank per
    query for hop 1 and hop 2 respectively.
    """
    q_emb = outputs["q"]
    q_sp_emb = outputs["q_sp1"]
    n_queries = q_emb.size(0)
    # Shared in-batch candidates: hop-1 passages stacked over hop-2 passages.
    pool = torch.cat([outputs["c1"], outputs["c2"]], dim=0)
    # B x 2 x h: this query's own hard negatives (one per hop).
    hard_negs = torch.cat(
        [outputs["neg_1"].unsqueeze(1), outputs["neg_2"].unsqueeze(1)], dim=1)

    def scores_for(query_emb):
        # Dense scores against the shared pool plus this query's negatives.
        pool_scores = torch.mm(query_emb, pool.t())
        neg_scores = torch.bmm(
            query_emb.unsqueeze(1), hard_negs.transpose(1, 2)).squeeze(1)
        return pool_scores, neg_scores

    hop1_pool, hop1_neg = scores_for(q_emb)
    hop2_pool, hop2_neg = scores_for(q_sp_emb)

    # Hop 1 must not retrieve the query's own hop-2 gold (column B + i).
    hop1_gold2_mask = torch.cat(
        [torch.zeros(n_queries, n_queries), torch.eye(n_queries)], dim=1
    ).to(q_emb.device)
    hop1_pool = hop1_pool.float().masked_fill(
        hop1_gold2_mask.bool(), float('-inf')).type_as(hop1_pool)

    hop1_scores = torch.cat([hop1_pool, hop1_neg], dim=1)
    hop2_scores = torch.cat([hop2_pool, hop2_neg], dim=1)

    # Gold columns: row i's hop-1 gold is column i, hop-2 gold is column B+i.
    gold_1 = torch.arange(n_queries).to(q_emb.device)
    gold_2 = torch.arange(n_queries).to(q_emb.device) + n_queries

    def reciprocal_ranks(score_mat, gold_cols):
        # argsort twice maps each column to its 0-based descending rank.
        col_ranks = score_mat.argsort(dim=1, descending=True).argsort(dim=1)
        return [1 / (col_ranks[row][gold].item() + 1)
                for row, gold in zip(range(n_queries), gold_cols)]

    return {
        "rrs_1": reciprocal_ranks(hop1_scores, gold_1),
        "rrs_2": reciprocal_ranks(hop2_scores, gold_2),
    }
def unified_loss(model, batch, args):
    """Unified retrieval loss: in-batch + hard-negative cross-entropy for both
    hops, plus a stop-prediction cross-entropy.

    The 2nd-hop retrieval term is weighted per example by the gold stop
    target, so it contributes nothing for examples whose target is 0.
    ``args`` is unused (uniform criterion signature).
    """
    outputs = model(batch)
    bsize = outputs["q"].size(0)
    device = outputs["q"].device
    # Candidate pool: all 1st-hop golds followed by all 2nd-hop golds.
    ctx_pool = torch.cat([outputs["c1"], outputs["c2"]], dim=0)
    # Per-question hard negatives: (bsize, 2, hidden).
    hard_negs = torch.cat([outputs["neg_1"].unsqueeze(1), outputs["neg_2"].unsqueeze(1)], dim=1)

    hop1_scores = torch.mm(outputs["q"], ctx_pool.t())
    hop1_neg = torch.bmm(outputs["q"].unsqueeze(1), hard_negs.transpose(1, 2)).squeeze(1)
    hop2_scores = torch.mm(outputs["q_sp1"], ctx_pool.t())
    hop2_neg = torch.bmm(outputs["q_sp1"].unsqueeze(1), hard_negs.transpose(1, 2)).squeeze(1)

    # mask for 1st hop: a question's own 2nd-hop gold must not compete.
    hop1_mask = torch.cat([torch.zeros(bsize, bsize), torch.eye(bsize)], dim=1).to(device)
    hop1_scores = hop1_scores.float().masked_fill(hop1_mask.bool(), float('-inf')).type_as(hop1_scores)
    hop1_scores = torch.cat([hop1_scores, hop1_neg], dim=1)
    hop2_scores = torch.cat([hop2_scores, hop2_neg], dim=1)

    stop_targets = batch["stop_targets"].view(-1)
    stop_loss = F.cross_entropy(outputs["stop_logits"], stop_targets, reduction="sum")

    hop1_targets = torch.arange(bsize).to(device)
    hop2_targets = hop1_targets + bsize
    retrieve_loss = F.cross_entropy(hop1_scores, hop1_targets, reduction="sum")
    # 2nd-hop loss is zeroed per example when the stop target is 0.
    retrieve_loss = retrieve_loss + (F.cross_entropy(hop2_scores, hop2_targets, reduction="none") * stop_targets).sum()
    return retrieve_loss + stop_loss
def unified_eval(outputs, batch):
    """Evaluate unified retrieval.

    Returns per-example stop-prediction accuracy plus reciprocal ranks,
    split by the gold stop label: nonzero -> both hops are scored
    (``rrs_1_mhop`` / ``rrs_2_mhop``), zero -> only the first hop
    (``rrs_nq``).
    """
    q_emb, q_sp_emb = outputs["q"], outputs["q_sp1"]
    bsize = q_emb.size(0)
    device = q_emb.device
    # Candidate pool: all 1st-hop golds followed by all 2nd-hop golds.
    ctx_pool = torch.cat([outputs["c1"], outputs["c2"]], dim=0)
    # Per-question hard negatives: (bsize, 2, hidden).
    hard_negs = torch.cat([outputs["neg_1"].unsqueeze(1), outputs["neg_2"].unsqueeze(1)], dim=1)

    hop1_scores = torch.mm(q_emb, ctx_pool.t())
    hop2_scores = torch.mm(q_sp_emb, ctx_pool.t())
    hop1_neg = torch.bmm(q_emb.unsqueeze(1), hard_negs.transpose(1, 2)).squeeze(1)
    hop2_neg = torch.bmm(q_sp_emb.unsqueeze(1), hard_negs.transpose(1, 2)).squeeze(1)

    # A question's own 2nd-hop gold must not compete in the 1st-hop ranking.
    hop1_mask = torch.cat([torch.zeros(bsize, bsize), torch.eye(bsize)], dim=1).to(device)
    hop1_scores = hop1_scores.float().masked_fill(hop1_mask.bool(), float('-inf')).type_as(hop1_scores)
    hop1_scores = torch.cat([hop1_scores, hop1_neg], dim=1)
    hop2_scores = torch.cat([hop2_scores, hop2_neg], dim=1)

    hop1_targets = torch.arange(bsize).to(device)
    hop2_targets = hop1_targets + bsize

    # stop accuracy
    stop_targets = batch["stop_targets"].view(-1)
    stop_acc = (outputs["stop_logits"].argmax(dim=1) == stop_targets).float().tolist()

    # idx2rank[i][j] is the descending rank of candidate j for question i.
    idx2rank_1 = hop1_scores.argsort(dim=1, descending=True).argsort(dim=1)
    idx2rank_2 = hop2_scores.argsort(dim=1, descending=True).argsort(dim=1)

    rrs_1_mhop, rrs_2_mhop, rrs_nq = [], [], []
    for row in range(bsize):
        rr_1 = 1 / (idx2rank_1[row][hop1_targets[row]].item() + 1)
        if stop_targets[row]:  # nonzero gold label: score both hops
            rrs_1_mhop.append(rr_1)
            rrs_2_mhop.append(1 / (idx2rank_2[row][hop2_targets[row]].item() + 1))
        else:  # single-hop question: only the 1st hop counts
            rrs_nq.append(rr_1)

    return {
        "stop_acc": stop_acc,
        "rrs_1_mhop": rrs_1_mhop,
        "rrs_2_mhop": rrs_2_mhop,
        "rrs_nq": rrs_nq
    }
| 11,894 | 46.390438 | 185 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/train_single.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
# DPR baseline shared encoder
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python train_single.py \
--do_train \
--prefix nq_dpr_shared \
--predict_batch_size 5000 \
--model_name bert-base-uncased \
--train_batch_size 256 \
--gradient_accumulation_steps 1 \
--accumulate_gradients 1 \
--learning_rate 2e-5 \
--fp16 \
--train_file /private/home/xwhan/data/nq-dpr/nq-with-neg-train.txt \
--predict_file /private/home/xwhan/data/nq-dpr/nq-with-neg-dev.txt \
--seed 16 \
--eval-period -1 \
--max_c_len 300 \
--max_q_len 50 \
--warmup-ratio 0.1 \
--shared-encoder \
--num_train_epochs 50
# WebQ single train
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python train_single.py \
--do_train \
--prefix wq_dpr_shared \
--predict_batch_size 5000 \
--model_name bert-base-uncased \
--train_batch_size 256 \
--gradient_accumulation_steps 1 \
--accumulate_gradients 1 \
--learning_rate 2e-5 \
--fp16 \
--train_file /private/home/xwhan/data/WebQ/wq-train-simplified.txt \
--predict_file /private/home/xwhan/data/WebQ/wq-dev-simplified.txt \
--seed 16 \
--eval-period -1 \
--max_c_len 300 \
--max_q_len 50 \
--warmup-ratio 0.1 \
--shared-encoder \
--num_train_epochs 50
# FEVER single-hop retrieval
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python train_single.py \
--do_train \
--prefix fever_single \
--predict_batch_size 5000 \
--model_name bert-base-uncased \
--train_batch_size 256 \
--gradient_accumulation_steps 1 \
--accumulate_gradients 1 \
--learning_rate 2e-5 \
--fp16 \
--train_file /private/home/xwhan/data/fever/retrieval/train_tfidf_neg.txt \
--predict_file /private/home/xwhan/data/fever/retrieval/dev_tfidf_neg.txt \
--seed 16 \
--eval-period -1 \
--max_c_len 400 \
--max_q_len 45 \
--shared-encoder \
--num_train_epochs 40
# HotpotQA single-hop
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python train_single.py \
--do_train \
--prefix hotpot_single \
--predict_batch_size 5000 \
--model_name roberta-base \
--train_batch_size 256 \
--gradient_accumulation_steps 1 \
--accumulate_gradients 1 \
--learning_rate 2e-5 \
--fp16 \
--train_file /private/home/xwhan/data/hotpot/hotpot_train_with_neg_v0.json \
--predict_file /private/home/xwhan/data/hotpot/hotpot_val_with_neg_v0.json \
--seed 16 \
--eval-period -1 \
--max_c_len 300 \
--max_q_len 70 \
--shared-encoder \
--warmup-ratio 0.1 \
--num_train_epochs 50
"""
import logging
import os
import random
from tqdm import tqdm
import numpy as np
import torch
from datetime import date
from torch.utils.data import DataLoader
from models.retriever import BertRetrieverSingle, RobertaRetrieverSingle, MomentumRetriever
from transformers import AdamW, AutoConfig, AutoTokenizer, get_linear_schedule_with_warmup
from torch.utils.tensorboard import SummaryWriter
from data.sp_datasets import SPDataset, sp_collate, NQMhopDataset, FeverSingleDataset
from utils.utils import move_to_cuda, AverageMeter, load_saved
from config import train_args
from criterions import loss_single
from torch.optim import Adam
from functools import partial
import apex
def main():
    """CLI entry point: configure output dir/logging/seeds, build the retriever
    and dataloaders, then run training (with periodic and per-epoch eval) or
    prediction only."""
    args = train_args()
    if args.fp16:
        # apex does not patch einsum by default; register it so it runs in half precision
        apex.amp.register_half_function(torch, 'einsum')
    date_curr = date.today().strftime("%m-%d-%Y")
    # Run name encodes the main hyperparameters for bookkeeping.
    model_name = f"{args.prefix}-seed{args.seed}-bsz{args.train_batch_size}-fp16{args.fp16}-lr{args.learning_rate}-decay{args.weight_decay}-warm{args.warmup_ratio}-{args.model_name}"
    args.output_dir = os.path.join(args.output_dir, date_curr, model_name)
    tb_logger = SummaryWriter(os.path.join(args.output_dir.replace("logs","tflogs")))

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        # warn only; training continues and may overwrite files in place
        print(
            f"output directory {args.output_dir} already exists and is not empty.")
    os.makedirs(args.output_dir, exist_ok=True)

    # Log to both a file inside the run directory and the console.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO,
                        handlers=[logging.FileHandler(os.path.join(args.output_dir, "log.txt")),
                                  logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    logger.info(args)

    if args.local_rank == -1 or args.no_cuda:
        # single-process mode: DataParallel over all visible GPUs (or CPU)
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        # distributed mode: one process per GPU
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device %s n_gpu %d distributed training %r",
                device, n_gpu, bool(args.local_rank != -1))

    # NOTE(review): batch size is divided by `accumulate_gradients` here while the
    # training loop scales the loss by `gradient_accumulation_steps` — these look
    # like two knobs for the same mechanism; confirm they are kept in sync.
    args.train_batch_size = int(
        args.train_batch_size / args.accumulate_gradients)
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_predict:
        raise ValueError(
            "At least one of `do_train` or `do_predict` must be True.")

    # Pick the retriever variant from the CLI flags / model name.
    bert_config = AutoConfig.from_pretrained(args.model_name)
    if args.momentum:
        model = MomentumRetriever(bert_config, args)
    elif "roberta" in args.model_name:
        model = RobertaRetrieverSingle(bert_config, args)
    else:
        model = BertRetrieverSingle(bert_config, args)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    collate_fc = partial(sp_collate, pad_id=tokenizer.pad_token_id)

    if args.do_train and args.max_c_len > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (args.max_c_len, bert_config.max_position_embeddings))

    # FEVER uses its own dataset class; everything else goes through SPDataset.
    if "fever" in args.predict_file:
        eval_dataset = FeverSingleDataset(tokenizer, args.predict_file, args.max_q_len, args.max_c_len)
    else:
        eval_dataset = SPDataset(tokenizer, args.predict_file, args.max_q_len, args.max_c_len)
    eval_dataloader = DataLoader(
        eval_dataset, batch_size=args.predict_batch_size, collate_fn=collate_fc, pin_memory=True, num_workers=args.num_workers)
    logger.info(f"Num of dev batches: {len(eval_dataloader)}")

    if args.init_checkpoint != "":
        # warm-start from an existing checkpoint
        model = load_saved(model, args.init_checkpoint)
    model.to(device)
    print(f"number of trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")

    if args.do_train:
        # No weight decay on biases and LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = Adam(optimizer_parameters, lr=args.learning_rate, eps=args.adam_epsilon)

        if args.fp16:
            model, optimizer = apex.amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    else:
        if args.fp16:
            # inference-only: initialize amp without an optimizer
            model = apex.amp.initialize(model, opt_level=args.fp16_opt_level)

    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if args.do_train:
        global_step = 0  # gradient update step
        batch_step = 0  # forward batch count
        best_mrr = 0
        train_loss_meter = AverageMeter()
        model.train()
        if "fever" in args.predict_file:
            train_dataset = FeverSingleDataset(tokenizer, args.train_file, args.max_q_len, args.max_c_len, train=True)
        else:
            train_dataset = SPDataset(tokenizer, args.train_file, args.max_q_len, args.max_c_len, train=True)
        train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size, pin_memory=True, collate_fn=collate_fc, num_workers=args.num_workers, shuffle=True)

        # Linear warmup then linear decay over the whole training run.
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        warmup_steps = t_total * args.warmup_ratio
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
        )

        logger.info('Start training....')
        for epoch in range(int(args.num_train_epochs)):
            for batch in tqdm(train_dataloader):
                batch_step += 1
                batch = move_to_cuda(batch)
                loss = loss_single(model, batch, args.momentum)

                if args.gradient_accumulation_steps > 1:
                    # average the loss across accumulated micro-batches
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                train_loss_meter.update(loss.item())

                if (batch_step + 1) % args.gradient_accumulation_steps == 0:
                    # clip on the fp32 master params when amp is active
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            apex.amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    global_step += 1

                    tb_logger.add_scalar('batch_train_loss',
                                         loss.item(), global_step)
                    tb_logger.add_scalar('smoothed_train_loss',
                                         train_loss_meter.avg, global_step)

                    # Optional mid-epoch evaluation every `eval_period` steps.
                    if args.eval_period != -1 and global_step % args.eval_period == 0:
                        mrr = predict(args, model, eval_dataloader,
                                      device, logger)
                        logger.info("Step %d Train loss %.2f MRR %.2f on epoch=%d" % (global_step, train_loss_meter.avg, mrr*100, epoch))

                        if best_mrr < mrr:
                            logger.info("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" %
                                        (best_mrr*100, mrr*100, epoch))
                            torch.save(model.state_dict(), os.path.join(
                                args.output_dir, f"checkpoint_best.pt"))
                            model = model.to(device)
                            best_mrr = mrr

            # End-of-epoch evaluation.
            mrr = predict(args, model, eval_dataloader, device, logger)
            logger.info("Step %d Train loss %.2f MRR %.2f on epoch=%d" % (
                global_step, train_loss_meter.avg, mrr*100, epoch))
            tb_logger.add_scalar('dev_mrr', mrr*100, epoch)
            if best_mrr < mrr:
                # NOTE(review): checkpoint_last.pt is only written when MRR improves,
                # so it is not a true "last epoch" snapshot — confirm intent.
                torch.save(model.state_dict(), os.path.join(
                    args.output_dir, f"checkpoint_last.pt"))
                logger.info("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" %
                            (best_mrr*100, mrr*100, epoch))
                torch.save(model.state_dict(), os.path.join(
                    args.output_dir, f"checkpoint_best.pt"))
                model = model.to(device)
                best_mrr = mrr

        logger.info("Training finished!")

    elif args.do_predict:
        acc = predict(args, model, eval_dataloader, device, logger)
        logger.info(f"test performance {acc}")
def predict(args, model, eval_dataloader, device, logger):
    """Evaluate single-hop retrieval over `eval_dataloader`.

    Each question is scored against all in-batch contexts plus its own hard
    negative. Logs top-1 accuracy and MRR, restores train mode, and returns
    the MRR. `args` and `device` are accepted for a uniform call signature.
    """
    model.eval()
    hit_count = 0
    example_count = 0.0
    reciprocal_ranks = []  # reciprocal rank
    for batch in tqdm(eval_dataloader):
        batch_on_device = move_to_cuda(batch)
        with torch.no_grad():
            encoded = model(batch_on_device)
            q_vec, c_vec, neg_vec = encoded['q'], encoded['c'], encoded['neg_c']
            # In-batch scores plus one extra column for the hard negative.
            in_batch_scores = torch.mm(q_vec, c_vec.t())
            neg_scores = (q_vec * neg_vec).sum(-1).unsqueeze(1)
            scores = torch.cat([in_batch_scores, neg_scores], dim=-1)
            gold = torch.arange(scores.size(0)).to(scores.device)

            # MRR: idx2rank[i][j] is the descending rank of candidate j.
            idx2rank = scores.argsort(dim=1, descending=True).argsort(dim=1)
            for row, t in enumerate(gold.tolist()):
                reciprocal_ranks.append(1 / (idx2rank[row][t].item() + 1))

            hits = scores.argmax(-1) == gold
            example_count += hits.size(0)
            hit_count += hits.sum(0)

    acc = hit_count / example_count
    mrr = np.mean(reciprocal_ranks)
    logger.info(f"evaluated {example_count} examples...")
    logger.info(f"avg. Acc: {acc}")
    logger.info(f'MRR: {mrr}')
    model.train()
    return mrr
if __name__ == "__main__":
    # Script entry point: parse CLI args and run training / prediction.
    main()
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/single_trainer.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
trainer defined for submitit hyperparameter tuning
"""
import os
import os.path as osp
from typing import Optional, NamedTuple
import torch
import torch.distributed
import torch.nn as nn
import torch.optim as optim
import attr
import submitit
import argparse
from functools import partial
from torch.nn import CrossEntropyLoss
import numpy as np
import random
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
from .utils import move_to_cuda, convert_to_half, AverageMeter
from .config import ClusterConfig
from .data.sp_datasets import SPDataset, sp_collate
from .models.retriever import BertForRetrieverSP
from transformers import AdamW, BertConfig, BertTokenizer
import json
import apex
apex.amp.register_half_function(torch, 'einsum')
from apex import amp
@attr.s(auto_attribs=True)
class TrainerState:
    """
    Contains the state of the Trainer.
    It can be saved to checkpoint the training and loaded to resume it.
    """

    # attrs generates __init__/asdict from these annotated fields.
    epoch: int
    model: nn.Module
    optimizer: optim.Optimizer
    lr_scheduler: torch.optim.lr_scheduler._LRScheduler
    global_step: int

    def save(self, filename: str) -> None:
        """Serialize this state to `filename` with torch.save."""
        data = attr.asdict(self)
        # store only the state dict
        data["model"] = self.model.state_dict()
        data["optimizer"] = self.optimizer.state_dict()
        data["lr_scheduler"] = self.lr_scheduler.state_dict()
        torch.save(data, filename)

    @classmethod
    def load(cls, filename: str, default: "TrainerState", gpu: int) -> "TrainerState":
        """Restore a state saved by `save`, mapping tensors onto GPU `gpu`.

        `default` supplies already-constructed model/optimizer/scheduler
        objects whose state dicts are then loaded in place.
        """
        data = torch.load(filename, map_location=lambda storage, loc: storage.cuda(gpu))
        # We need this default to load the state dict
        model = default.model
        model.load_state_dict(data["model"])
        data["model"] = model
        optimizer = default.optimizer
        optimizer.load_state_dict(data["optimizer"])
        data["optimizer"] = optimizer
        lr_scheduler = default.lr_scheduler
        lr_scheduler.load_state_dict(data["lr_scheduler"])
        data["lr_scheduler"] = lr_scheduler
        return cls(**data)
class Trainer:
    """Submitit-driven trainer for the single-hop retriever.

    Instances are pickled and executed on the cluster (`__call__`), can
    checkpoint themselves on preemption (`checkpoint`), and resume from the
    saved TrainerState in `_init_state`.
    """

    def __init__(self, train_cfg: NamedTuple, cluster_cfg: ClusterConfig) -> None:
        self._train_cfg = train_cfg
        self._cluster_cfg = cluster_cfg

    def __call__(self) -> Optional[float]:
        """
        Called by submitit for each task.
        :return: The master task return the final accuracy of the model.
        """
        self._setup_process_group()
        self._init_state()
        final_acc = self._train()
        return final_acc

    def log(self, log_data: dict):
        """Append one JSON line of metrics to <output_dir>/log.txt."""
        job_env = submitit.JobEnvironment()
        # z = {**vars(self._train_cfg), **log_data}
        save_dir = Path(self._train_cfg.output_dir)
        os.makedirs(save_dir, exist_ok=True)
        with open(save_dir / 'log.txt', 'a') as f:
            f.write(json.dumps(log_data) + '\n')

    def checkpoint(self, rm_init=True) -> submitit.helpers.DelayedSubmission:
        """Save state and return a fresh Trainer for submitit to requeue."""
        # will be called by submitit in case of preemption
        job_env = submitit.JobEnvironment()
        save_dir = osp.join(self._train_cfg.output_dir, str(job_env.job_id))
        os.makedirs(save_dir, exist_ok=True)
        self._state.save(osp.join(save_dir, "checkpoint.pth"))
        # Trick here: when the job will be requeue, we will use the same init file
        # but it must not exist when we initialize the process group
        # so we delete it, but only when this method is called by submitit for requeue
        if rm_init and osp.exists(self._cluster_cfg.dist_url[7:]):
            os.remove(self._cluster_cfg.dist_url[7:])  # remove file:// at the beginning
        # This allow to remove any non-pickable part of the Trainer instance.
        empty_trainer = Trainer(self._train_cfg, self._cluster_cfg)
        return submitit.helpers.DelayedSubmission(empty_trainer)

    def _setup_process_group(self) -> None:
        """Bind this task to its local GPU and join the process group."""
        job_env = submitit.JobEnvironment()
        torch.cuda.set_device(job_env.local_rank)
        torch.distributed.init_process_group(
            backend=self._cluster_cfg.dist_backend,
            init_method=self._cluster_cfg.dist_url,
            world_size=job_env.num_tasks,
            rank=job_env.global_rank,
        )
        print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")

    def _init_state(self) -> None:
        """
        Initialize the state and load it from an existing checkpoint if any
        """
        job_env = submitit.JobEnvironment()

        if job_env.global_rank == 0:
            # only the master task writes the run config
            # config_path = Path(args.save_folder) / str(job_env.job_id) / 'config.json'
            os.makedirs(self._train_cfg.output_dir, exist_ok=True)
            config_path = Path(self._train_cfg.output_dir) / 'config.json'
            with open(config_path, "w") as g:
                g.write(json.dumps(self._train_cfg._asdict()))

        print(f"Setting random seed {self._train_cfg.seed}", flush=True)
        random.seed(self._train_cfg.seed)
        np.random.seed(self._train_cfg.seed)
        torch.manual_seed(self._train_cfg.seed)

        print("Create data loaders", flush=True)
        tokenizer = BertTokenizer.from_pretrained(self._train_cfg.bert_model_name)
        collate_fc = sp_collate
        train_set = SPDataset(tokenizer, self._train_cfg.train_file, self._train_cfg.max_q_len, self._train_cfg.max_c_len, train=True)
        # train_sampler = torch.utils.data.distributed.DistributedSampler(
        #     train_set, num_replicas=job_env.num_tasks, rank=job_env.global_rank
        # )
        # self._train_loader = torch.utils.data.DataLoader(
        #     train_set,
        #     batch_size=self._train_cfg.train_batch_size,
        #     num_workers=4,
        #     sampler=train_sampler, collate_fn=collate_fc
        # )
        # NOTE(review): the distributed sampler above is commented out — every task
        # currently iterates the full training set; confirm this is intended.
        self._train_loader = torch.utils.data.DataLoader(train_set, batch_size=self._train_cfg.train_batch_size, num_workers=4, collate_fn=collate_fc)
        test_set = SPDataset(tokenizer, self._train_cfg.predict_file, self._train_cfg.max_q_len, self._train_cfg.max_c_len)
        self._test_loader = torch.utils.data.DataLoader(
            test_set,
            batch_size=self._train_cfg.predict_batch_size,
            num_workers=4, collate_fn=collate_fc
        )

        print(f"Per Node batch_size: {self._train_cfg.train_batch_size // job_env.num_tasks}", flush=True)
        print("Create model", flush=True)
        print(f"Local rank {job_env.local_rank}", flush=True)
        bert_config = BertConfig.from_pretrained(self._train_cfg.bert_model_name)
        model = BertForRetrieverSP(bert_config, self._train_cfg)
        model.cuda(job_env.local_rank)

        # No weight decay on biases and LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': self._train_cfg.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = AdamW(optimizer_parameters,
                          lr=self._train_cfg.learning_rate)
        # decay LR when the dev MRR plateaus (stepped with the MRR in _train)
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5)

        if self._train_cfg.fp16:
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=self._train_cfg.fp16_opt_level)
        model = torch.nn.DataParallel(model)  #
        self._state = TrainerState(
            epoch=0, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, global_step=0
        )

        self.tb_logger = SummaryWriter(os.path.join(self._train_cfg.output_dir, "tblog"))

        # Resume from a preemption checkpoint if one exists for this job id.
        checkpoint_fn = osp.join(self._train_cfg.output_dir, str(job_env.job_id), "checkpoint.pth")
        # checkpoint_fn = osp.join(self._train_cfg.output_dir, "checkpoint.pth")
        if os.path.isfile(checkpoint_fn):
            print(f"Load existing checkpoint from {checkpoint_fn}", flush=True)
            self._state = TrainerState.load(
                checkpoint_fn, default=self._state, gpu=job_env.local_rank)

    def _train(self) -> Optional[float]:
        """Run the epoch loop; checkpoints and evaluates once per epoch.

        Returns the best dev MRR observed.
        """
        job_env = submitit.JobEnvironment()
        loss_fct = CrossEntropyLoss()
        batch_step = 0  # forward batch count
        best_mrr = 0
        train_loss_meter = AverageMeter()
        print(f"Start training", flush=True)
        # Start from the loaded epoch
        start_epoch = self._state.epoch
        global_step = self._state.global_step
        for epoch in range(start_epoch, self._train_cfg.num_train_epochs):
            print(f"Start epoch {epoch}", flush=True)
            self._state.model.train()
            self._state.epoch = epoch
            for batch in self._train_loader:
                batch_step += 1
                batch = move_to_cuda(batch)
                outputs = self._state.model(batch)
                q = outputs['q']
                c = outputs['c']
                neg_c = outputs['neg_c']
                # In-batch scores plus one column for the hard negative.
                product_in_batch = torch.mm(q, c.t())
                product_neg = (q * neg_c).sum(-1).unsqueeze(1)
                product = torch.cat([product_in_batch, product_neg], dim=-1)
                target = torch.arange(product.size(0)).to(product.device)
                loss = loss_fct(product, target)

                if self._train_cfg.gradient_accumulation_steps > 1:
                    loss = loss / self._train_cfg.gradient_accumulation_steps
                if self._train_cfg.fp16:
                    with amp.scale_loss(loss, self._state.optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                train_loss_meter.update(loss.item())

                self.tb_logger.add_scalar('batch_train_loss',
                                          loss.item(), global_step)
                self.tb_logger.add_scalar('smoothed_train_loss',
                                          train_loss_meter.avg, global_step)

                if (batch_step + 1) % self._train_cfg.gradient_accumulation_steps == 0:
                    # clip on the fp32 master params when amp is active
                    if self._train_cfg.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(self._state.optimizer), self._train_cfg.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            self._state.model.parameters(), self._train_cfg.max_grad_norm)
                    self._state.optimizer.step()  # We have accumulated enought gradients
                    self._state.model.zero_grad()
                    global_step += 1
                    self._state.global_step = global_step

            # Checkpoint only on the master
            # if job_env.global_rank == 0:
            self.checkpoint(rm_init=False)

            mrr = self._eval()
            self.tb_logger.add_scalar('dev_mrr', mrr*100, epoch)
            # plateau scheduler is driven by the dev MRR (mode='max')
            self._state.lr_scheduler.step(mrr)
            if best_mrr < mrr:
                print("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" % (best_mrr*100, mrr*100, epoch))
                torch.save(self._state.model.state_dict(), os.path.join(self._train_cfg.output_dir, f"checkpoint_best.pt"))
                best_mrr = mrr

            self.log({
                "best_mrr": best_mrr,
                "curr_mrr": mrr,
                "smoothed_loss": train_loss_meter.avg,
                "epoch": epoch
            })

        return best_mrr

    def _eval(self) -> float:
        """Evaluate on the dev loader; prints accuracy and returns MRR."""
        print("Start evaluation of the model", flush=True)
        job_env = submitit.JobEnvironment()

        args = self._train_cfg
        eval_dataloader = self._test_loader

        num_correct = 0
        num_total = 0.0
        rrs = []  # reciprocal rank
        self._state.model.eval()
        for batch in self._test_loader:
            batch_to_feed = move_to_cuda(batch)
            with torch.no_grad():
                outputs = self._state.model(batch_to_feed)
                q = outputs['q']
                c = outputs['c']
                neg_c = outputs['neg_c']
                product_in_batch = torch.mm(q, c.t())
                product_neg = (q * neg_c).sum(-1).unsqueeze(1)
                product = torch.cat([product_in_batch, product_neg], dim=-1)
                target = torch.arange(product.size(0)).to(product.device)
                ranked = product.argsort(dim=1, descending=True)

                # MRR
                idx2rank = ranked.argsort(dim=1)
                for idx, t in enumerate(target.tolist()):
                    rrs.append(1 / (idx2rank[idx][t].item() +1))

                prediction = product.argmax(-1)
                pred_res = prediction == target
                num_total += pred_res.size(0)
                num_correct += pred_res.sum(0)

        acc = num_correct/num_total
        mrr = np.mean(rrs)
        print(f"evaluated {num_total} examples...", flush=True)
        print(f"avg. Acc: {acc}", flush=True)
        print(f'MRR: {mrr}', flush=True)
        self._state.model.train()
        return mrr
| 13,371 | 41.050314 | 150 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/models/retriever.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
single hop retrieval models
"""
from transformers import AutoModel
import torch.nn as nn
import torch
class BertRetrieverSingle(nn.Module):
    """Single-hop BERT retriever.

    Encodes questions and contexts into their [CLS] vectors; with
    ``args.shared_encoder`` one tower serves both sides, otherwise a
    separate question encoder is used.
    """

    def __init__(self,
                 config,
                 args
                 ):
        super().__init__()
        self.shared_encoder = args.shared_encoder
        self.encoder = AutoModel.from_pretrained(args.model_name)
        if not self.shared_encoder:
            # dedicated tower for questions
            self.encoder_q = AutoModel.from_pretrained(args.model_name)

    def _cls(self, encoder, input_ids, mask, type_ids):
        # [CLS] vector of the last hidden layer
        return encoder(input_ids, mask, type_ids)[0][:, 0, :]

    def forward(self, batch):
        ctx_vec = self._cls(self.encoder, batch['c_input_ids'], batch['c_mask'], batch['c_type_ids'])
        neg_vec = self._cls(self.encoder, batch['neg_input_ids'], batch['neg_mask'], batch['neg_type_ids'])
        q_encoder = self.encoder if self.shared_encoder else self.encoder_q
        q_vec = self._cls(q_encoder, batch['q_input_ids'], batch['q_mask'], batch['q_type_ids'])
        return {'q': q_vec, 'c': ctx_vec, 'neg_c': neg_vec}

    def encode_q(self, input_ids, q_mask, q_type_ids):
        q_encoder = self.encoder if self.shared_encoder else self.encoder_q
        return self._cls(q_encoder, input_ids, q_mask, q_type_ids)
class RobertaRetrieverSingle(nn.Module):
    """
    shared encoder with roberta-base

    One RoBERTa tower encodes questions, contexts, and negatives; the CLS
    representation is passed through a Linear + LayerNorm projection head.
    """

    def __init__(self,
                 config,
                 args
                 ):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(args.model_name)
        self.project = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps),
        )

    def encode_seq(self, input_ids, mask):
        # project the first-token (CLS) representation
        hidden = self.encoder(input_ids, mask)[0]
        return self.project(hidden[:, 0, :])

    def forward(self, batch):
        ctx_vec = self.encode_seq(batch['c_input_ids'], batch['c_mask'])
        neg_vec = self.encode_seq(batch['neg_input_ids'], batch['neg_mask'])
        q_vec = self.encode_seq(batch['q_input_ids'], batch['q_mask'])
        return {'q': q_vec, 'c': ctx_vec, 'neg_c': neg_vec}

    def encode_q(self, input_ids, q_mask, q_type_ids):
        # type ids are accepted for interface parity but not used by RoBERTa
        return self.encode_seq(input_ids, q_mask)
class MomentumRetriever(nn.Module):
    """Retriever with a fixed-size memory bank (queue) of past context token
    ids, re-encoded on demand to serve as extra negatives."""

    def __init__(self,
                 config,
                 args
                 ):
        super().__init__()
        # shared encoder for everything
        self.encoder = AutoModel.from_pretrained(args.model_name)
        self.max_c_len = args.max_c_len
        # queue of context token ids
        self.k = args.k # queue size
        # each row packs [input_ids | mask | type_ids], each max_c_len wide
        self.register_buffer("queue", torch.zeros(self.k, args.max_c_len*3, dtype=torch.long)) #
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    def forward(self, batch):
        # CLS vectors for question, gold context, and hard negative
        q_cls = self.encoder(batch['q_input_ids'], batch['q_mask'], batch.get('q_type_ids', None))[0][:, 0, :]
        c_cls = self.encoder(batch['c_input_ids'], batch['c_mask'], batch.get('c_type_ids', None))[0][:, 0, :]
        neg = self.encoder(batch['neg_input_ids'], batch['neg_mask'], batch.get('neg_type_ids', None))[0][:, 0, :]
        return {'q': q_cls, 'c':c_cls, 'neg_c':neg}

    def encode_q(self, input_ids, q_mask, q_type_ids):
        # [CLS] vector of the question
        return self.encoder(input_ids, q_mask, q_type_ids)[0][:, 0, :]

    @torch.no_grad()
    def encode_queue_ctx(self):
        """Re-encode all queued contexts (in chunks of 100, in eval mode)
        and return their CLS vectors."""
        queue = self.queue.clone().detach()
        # unpack the [ids | mask | type_ids] row layout
        input_ids = queue[:,:self.max_c_len]
        input_masks = queue[:,self.max_c_len:2*self.max_c_len]
        type_ids = queue[:,self.max_c_len*2:]
        queue_c_clss = []
        self.encoder.eval()
        with torch.no_grad():
            for batch_start in range(0, self.k, 100):
                queue_c_cls = self.encoder(input_ids[batch_start:batch_start+100], input_masks[batch_start:batch_start+100], type_ids [batch_start:batch_start+100])[0][:, 0, :]
                queue_c_clss.append(queue_c_cls)
        self.encoder.train()
        return torch.cat(queue_c_clss, dim=0)

    @torch.no_grad()
    def dequeue_and_enqueue(self, batch):
        """
        memory bank of previous contexts

        Writes this batch's context token ids/mask/type ids into the queue at
        the current pointer; a batch that would overflow is truncated to fit,
        and the pointer wraps modulo k.
        """
        # gather keys before updating queue
        batch_size = batch["c_input_ids"].shape[0]
        ptr = int(self.queue_ptr)

        if ptr + batch_size > self.k:
            # truncate so the write never runs past the end of the queue
            batch_size = self.k - ptr
            batch["c_input_ids"] = batch["c_input_ids"][:batch_size]
            batch["c_mask"] = batch["c_mask"][:batch_size]
            batch["c_type_ids"] = batch["c_type_ids"][:batch_size]
        batch_seq_len = batch["c_input_ids"].size(1)

        # if self.k % batch_size != 0:
        #     return

        # assert self.k % batch_size == 0  # for simplicity
        # replace the keys at ptr (dequeue and enqueue); sequences shorter than
        # max_c_len leave the (zero-initialized) tail of each section untouched
        self.queue[ptr:ptr + batch_size, :batch_seq_len] = batch["c_input_ids"]
        self.queue[ptr:ptr + batch_size, self.max_c_len:self.max_c_len+batch_seq_len] = batch["c_mask"]
        self.queue[ptr:ptr + batch_size, self.max_c_len*2:self.max_c_len*2+batch_seq_len] = batch["c_type_ids"]
        ptr = (ptr + batch_size) % self.k  # move pointer

        self.queue_ptr[0] = ptr
        return
"""
The following are models used to encode the corpus
"""
class CtxEncoder(nn.Module):
def __init__(self,
config,
args
):
super().__init__()
self.encoder_c = AutoModel.from_pretrained(args.model_name)
self.multi_vector = args.multi_vector
self.scheme = args.scheme
if self.scheme == "layerwise":
self.encoder_c.encoder.output_hidden_states = True
def forward(self, batch):
input_ids, attention_mask, type_ids = batch["input_ids"], batch["input_mask"], batch.get("input_type_ids", None)
if self.multi_vector > 1:
if self.scheme == "layerwise":
c_hiddens =self.encoder(batch['input_ids'], batch['input_mask'], batch.get('input_type_ids', None))[2][::-1]
c_cls = torch.cat([hidden[:,0,:].unsqueeze(1) for hidden in c_hiddens[:self.multi_vector]], dim=1)
elif self.scheme == "tokenwise":
c_cls = self.encoder(batch['input_ids'], batch['input_mask'], batch.get('input_type_ids', None))[0][:, :self.multi_vector, :]
else:
assert False
c_cls = c_cls.view(-1, c_cls.size(-1))
else:
c_cls = self.encoder_c(input_ids, attention_mask, type_ids)[0][:, 0, :]
return {'embed': c_cls}
class RobertaCtxEncoder(nn.Module):
    """Corpus-side RoBERTa encoder: one projected CLS vector per passage."""

    def __init__(self,
                 config,
                 args
                 ):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(args.model_name)
        self.project = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps),
        )

    def forward(self, batch):
        # project the first-token (CLS) representation of each passage
        hidden = self.encoder(batch["input_ids"], batch["input_mask"])[0]
        return {'embed': self.project(hidden[:, 0, :])}
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/models/unified_retriever.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from transformers import AutoModel
import torch.nn as nn
import torch
class UnifiedRetriever(nn.Module):
    """Unified multi-hop/single-hop retriever.

    One shared encoder (``encoder_c``) encodes questions, both gold contexts,
    both negatives, and the question+sp1 reformulation; a small linear head
    on the pooled q_sp representation predicts whether to stop after one hop.
    For RoBERTa models the CLS vector is additionally passed through a
    Linear + LayerNorm projection.
    """

    def __init__(self,
                 config,
                 args
                 ):
        super().__init__()
        self.encoder_c = AutoModel.from_pretrained(args.model_name)
        if "roberta" in args.model_name:
            self.roberta = True
            self.project = nn.Sequential(nn.Linear(config.hidden_size, config.hidden_size), nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps))
        else:
            self.roberta = False
        # binary stop/continue classifier over the pooled q_sp representation
        self.stop = nn.Linear(config.hidden_size, 2)
        self.stop_drop = nn.Dropout(args.stop_drop)

    def encode_seq(self, input_ids, mask, type_ids):
        """Encode one sequence into its (optionally projected) CLS vector."""
        if self.roberta:
            # BUG FIX: was `self.encoder`, which is never assigned in __init__;
            # the shared encoder attribute is `self.encoder_c`.
            cls_rep = self.encoder_c(input_ids, mask)[0][:, 0, :]
            vector = self.project(cls_rep)
        else:
            vector = self.encoder_c(input_ids, mask, type_ids)[0][:, 0, :]
        return vector

    def forward(self, batch):
        """Encode all six views of the batch and predict the stop logits."""
        c1 = self.encode_seq(batch['c1_input_ids'], batch['c1_mask'], batch.get('c1_type_ids', None))
        c2 = self.encode_seq(batch['c2_input_ids'], batch['c2_mask'], batch.get('c2_type_ids', None))
        neg_1 = self.encode_seq(batch['neg1_input_ids'], batch['neg1_mask'], batch.get('neg1_type_ids', None))
        neg_2 = self.encode_seq(batch['neg2_input_ids'], batch['neg2_mask'], batch.get('neg2_type_ids', None))
        q = self.encode_seq(batch['q_input_ids'], batch['q_mask'], batch.get('q_type_ids', None))
        q_sp1 = self.encode_seq(batch['q_sp_input_ids'], batch['q_sp_mask'], batch.get('q_sp_type_ids', None))
        # stop prediction uses the pooled output (index 1) of the q_sp encoding
        qsp_pooled = self.encoder_c(batch['q_sp_input_ids'], batch['q_sp_mask'], batch.get('q_sp_type_ids', None))[1]
        stop_logits = self.stop(self.stop_drop(qsp_pooled))
        return {'q': q, 'c1': c1, "c2": c2, "neg_1": neg_1, "neg_2": neg_2, "q_sp1": q_sp1, "stop_logits": stop_logits}

    def encode_qsp(self, input_ids, q_mask, q_type_ids):
        """Encode a question+sp1 input; returns (CLS vector, stop logits)."""
        sequence_output, pooled = self.encoder_c(input_ids, q_mask, q_type_ids)[:2]
        qsp_vector = sequence_output[:,0,:]
        stop_logits = self.stop(pooled)
        return qsp_vector, stop_logits

    def encode_q(self, input_ids, q_mask, q_type_ids):
        """Encode a plain question into its retrieval vector."""
        return self.encode_seq(input_ids, q_mask, q_type_ids)
class RobertaNQRetriever(nn.Module):
    """RoBERTa bi-encoder for NQ retrieval (shared encoder for Q and passages)."""

    def __init__(self,
                 config,
                 args
                 ):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(args.model_name)
        self.project = nn.Sequential(nn.Linear(config.hidden_size, config.hidden_size), nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps))

    def encode_seq(self, input_ids, mask):
        """Return the raw first-token representation, shape (B, hidden).

        NOTE(review): the original computed ``self.project(cls_rep)`` and then
        discarded it, returning ``cls_rep`` — dead code removed here. If the
        projected vector was the intended output (as in ``RobertaRetriever``),
        the return value should change to the projection — TODO confirm.
        """
        cls_rep = self.encoder(input_ids, mask)[0][:, 0, :]
        return cls_rep

    def forward(self, batch):
        """Encode positive/negative passages and the two query variants."""
        c = self.encode_seq(batch['c_input_ids'], batch['c_mask'])
        neg = self.encode_seq(batch['neg_input_ids'], batch['neg_mask'])
        q = self.encode_seq(batch['q_input_ids'], batch['q_mask'])
        q_neg1 = self.encode_seq(batch['q_neg1_input_ids'], batch['q_neg1_mask'])
        vectors = {'q': q, 'c': c, "neg": neg, "q_neg1": q_neg1}
        return vectors

    def encode_q(self, input_ids, q_mask, q_type_ids):
        """Query encoding; ``q_type_ids`` kept for API parity and ignored."""
        return self.encode_seq(input_ids, q_mask)
class BertNQRetriever(nn.Module):
    """BERT bi-encoder for NQ: one shared encoder, [CLS] vector as embedding."""

    def __init__(self, config, args):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(args.model_name)

    def encode_seq(self, input_ids, mask, type_ids):
        """Embed one sequence batch; returns the [CLS] vector (B, hidden)."""
        hidden_states = self.encoder(input_ids, mask, type_ids)[0]
        return hidden_states[:, 0, :]

    def forward(self, batch):
        """Encode all batch fields; keys of the result mirror the batch prefixes."""
        # encoding order matches the original implementation
        field_order = ("c", "neg", "q", "q_neg1", "dense_neg1", "dense_neg2")
        return {
            name: self.encode_seq(
                batch[f"{name}_input_ids"],
                batch[f"{name}_mask"],
                batch.get(f"{name}_type_ids", None),
            )
            for name in field_order
        }

    def encode_q(self, input_ids, q_mask, q_type_ids):
        """Query-side encoding helper used at inference time."""
        return self.encode_seq(input_ids, q_mask, q_type_ids)
class BertNQMomentumRetriever(nn.Module):
    """MoCo-style NQ retriever: a gradient-trained query encoder plus a
    momentum-updated key (passage) encoder, with a fixed-size queue of past
    passage embeddings serving as additional negatives."""

    def __init__(self,
                 config,
                 args
                 ):
        super().__init__()
        self.encoder_q = BertNQRetriever(config, args)
        self.encoder_k = BertNQRetriever(config, args)

        if args.init_retriever != "":
            print(f"Load pretrained retriever from {args.init_retriever}")
            self.load_retriever(args.init_retriever)

        # key encoder starts as an exact copy of the query encoder and is
        # never updated by backprop (momentum rule only)
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)  # initialize
            param_k.requires_grad = False  # not update by gradient

        self.k = args.k  # queue (memory bank) capacity
        self.m = args.m  # momentum coefficient for key-encoder updates
        self.register_buffer("queue", torch.randn(self.k, config.hidden_size))
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    def load_retriever(self, path):
        """Initialize the query encoder from a checkpoint, stripping any
        DataParallel 'module.' prefix and dropping keys the model lacks."""
        state_dict = torch.load(path)
        def filter(x): return x[7:] if x.startswith('module.') else x
        state_dict = {filter(k): v for (k, v) in state_dict.items() if filter(k) in self.encoder_q.state_dict()}
        self.encoder_q.load_state_dict(state_dict)
        return

    @torch.no_grad()
    def momentum_update_key_encoder(self):
        """
        Momentum update of the key encoder
        """
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)

    @torch.no_grad()
    def dequeue_and_enqueue(self, embeddings):
        """
        Push a batch of passage embeddings into the memory queue, overwriting
        the oldest entries at the current pointer position.
        """
        # gather keys before updating queue
        batch_size = embeddings.shape[0]
        ptr = int(self.queue_ptr)
        if ptr + batch_size > self.k:
            # truncate the batch instead of wrapping past the queue end
            batch_size = self.k - ptr
            embeddings = embeddings[:batch_size]
        # replace the keys at ptr (dequeue and enqueue)
        self.queue[ptr:ptr + batch_size, :] = embeddings
        ptr = (ptr + batch_size) % self.k  # move pointer
        self.queue_ptr[0] = ptr
        return

    def forward(self, batch):
        """Encode queries with the gradient encoder and passages with the
        momentum encoder; returns {'q', 'c', 'neg', 'q_neg1'} embeddings."""
        q = self.encoder_q.encode_seq(batch['q_input_ids'], batch['q_mask'], batch.get('q_type_ids', None))
        q_neg1 = self.encoder_q.encode_seq(batch['q_neg1_input_ids'], batch['q_neg1_mask'], batch.get('q_neg1_type_ids', None))

        if self.training:
            with torch.no_grad():
                c = self.encoder_k.encode_seq(batch['c_input_ids'], batch['c_mask'], batch.get('c_type_ids', None))
                neg = self.encoder_k.encode_seq(batch['neg_input_ids'], batch['neg_mask'], batch.get('neg_type_ids', None))
        else:
            # whether to use the momentum encoder for inference
            c = self.encoder_k.encode_seq(batch['c_input_ids'], batch['c_mask'], batch.get('c_type_ids', None))
            neg = self.encoder_k.encode_seq(batch['neg_input_ids'], batch['neg_mask'], batch.get('neg_type_ids', None))

        vectors = {'q': q, 'c': c, "neg": neg, "q_neg1": q_neg1}
        return vectors
| 8,004 | 41.807487 | 152 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/models/hop1_retriever.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from transformers import BertModel, BertConfig, BertPreTrainedModel
import torch.nn as nn
import torch
from torch.nn.parameter import Parameter
from torch.nn import CrossEntropyLoss
class Retriever1hop(nn.Module):
    """Single-hop dense retriever with separate question/context BERT encoders,
    plus sentence-start representations gathered from the context encoder."""

    def __init__(self, config, args):
        super().__init__()
        self.bert_q = BertModel.from_pretrained(args.bert_model_name)
        self.bert_c = BertModel.from_pretrained(args.bert_model_name)
        self.hidden_size = config.hidden_size

    def forward(self, batch):
        """Encode question, positive context and negative context batches."""
        q_states = self.bert_q(batch['q_input_ids'], batch['q_mask'], batch['q_type_ids'])[0]
        c_states = self.bert_c(batch['c_input_ids'], batch['c_mask'], batch['c_type_ids'])[0]
        neg_states = self.bert_c(batch['neg_input_ids'], batch['neg_mask'], batch['neg_type_ids'])[0]

        # sentence-level representations: pick the hidden state at every
        # sentence offset within the positive context, B x |S| x h
        offsets = batch["c_sent_offsets"].unsqueeze(2).expand(-1, -1, self.hidden_size)
        sent_reps = torch.gather(c_states, 1, offsets)

        return {
            'q': q_states[:, 0, :],
            'c': c_states[:, 0, :],
            "neg_c": neg_states[:, 0, :],
            "c_sent_rep": sent_reps,
        }
| 1,496 | 35.512195 | 109 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/models/mhop_retriever.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch import embedding
from transformers import AutoModel
import torch.nn as nn
import torch
class RobertaRetriever(nn.Module):
    """RoBERTa bi-encoder for multi-hop retrieval (MDR)."""

    def __init__(self, config, args):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(args.model_name)
        self.project = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps),
        )

    def encode_seq(self, input_ids, mask):
        """Project the first-token representation into the retrieval space."""
        first_token = self.encoder(input_ids, mask)[0][:, 0, :]
        return self.project(first_token)

    def forward(self, batch):
        """Encode every field of a multi-hop training batch.

        Output keys: q, c1, c2, neg_1, neg_2, q_sp1 (question + first hop).
        """
        # (output key, batch-field prefix); encoding order as in the original
        spec = (
            ("c1", "c1"),
            ("c2", "c2"),
            ("neg_1", "neg1"),
            ("neg_2", "neg2"),
            ("q", "q"),
            ("q_sp1", "q_sp"),
        )
        return {
            out: self.encode_seq(batch[f"{prefix}_input_ids"], batch[f"{prefix}_mask"])
            for out, prefix in spec
        }

    def encode_q(self, input_ids, q_mask, q_type_ids):
        """Query encoding; ``q_type_ids`` is accepted for API parity and ignored."""
        return self.encode_seq(input_ids, q_mask)
class RobertaMomentumRetriever(nn.Module):
    """MoCo-style multi-hop retriever: gradient-trained query encoder plus a
    momentum-updated key encoder and a fixed-size queue of past context
    embeddings used as extra negatives."""

    def __init__(self,
                 config,
                 args
                 ):
        super().__init__()
        self.encoder_q = RobertaRetriever(config, args)
        self.encoder_k = RobertaRetriever(config, args)

        if args.init_retriever != "":
            print(f"Load pretrained retriever from {args.init_retriever}")
            self.load_retriever(args.init_retriever)

        # key encoder mirrors the query encoder and is frozen w.r.t. backprop
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)  # initialize
            param_k.requires_grad = False  # not update by gradient

        self.k = args.k  # queue (memory bank) capacity
        self.m = args.m  # momentum coefficient
        self.register_buffer("queue", torch.randn(self.k, config.hidden_size))
        # add layernorm?
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    def load_retriever(self, path):
        """Initialize the query encoder from a checkpoint, stripping any
        DataParallel 'module.' prefix and dropping keys the model lacks."""
        state_dict = torch.load(path)
        def filter(x): return x[7:] if x.startswith('module.') else x
        state_dict = {filter(k): v for (k, v) in state_dict.items() if filter(k) in self.encoder_q.state_dict()}
        self.encoder_q.load_state_dict(state_dict)
        return

    @torch.no_grad()
    def momentum_update_key_encoder(self):
        """
        Momentum update of the key encoder
        """
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)

    @torch.no_grad()
    def dequeue_and_enqueue(self, embeddings):
        """
        memory bank of previous context embeddings, c1 and c2
        """
        # gather keys before updating queue
        batch_size = embeddings.shape[0]
        ptr = int(self.queue_ptr)
        if ptr + batch_size > self.k:
            # truncate the batch rather than wrapping past the queue end
            batch_size = self.k - ptr
            embeddings = embeddings[:batch_size]
        # if self.k % batch_size != 0:
        #     return
        # assert self.k % batch_size == 0  # for simplicity
        # replace the keys at ptr (dequeue and enqueue)
        self.queue[ptr:ptr + batch_size, :] = embeddings
        ptr = (ptr + batch_size) % self.k  # move pointer
        self.queue_ptr[0] = ptr
        return

    def forward(self, batch):
        """Queries go through the gradient encoder; contexts/negatives through
        the momentum encoder (no grad during training)."""
        q = self.encoder_q.encode_seq(batch['q_input_ids'], batch['q_mask'])
        q_sp1 = self.encoder_q.encode_seq(batch['q_sp_input_ids'], batch['q_sp_mask'])

        if self.training:
            with torch.no_grad():
                c1 = self.encoder_k.encode_seq(batch['c1_input_ids'], batch['c1_mask'])
                c2 = self.encoder_k.encode_seq(batch['c2_input_ids'], batch['c2_mask'])
                neg_1 = self.encoder_k.encode_seq(batch['neg1_input_ids'], batch['neg1_mask'])
                neg_2 = self.encoder_k.encode_seq(batch['neg2_input_ids'], batch['neg2_mask'])
        else:
            # whether to use the momentum encoder for inference
            c1 = self.encoder_k.encode_seq(batch['c1_input_ids'], batch['c1_mask'])
            c2 = self.encoder_k.encode_seq(batch['c2_input_ids'], batch['c2_mask'])
            neg_1 = self.encoder_k.encode_seq(batch['neg1_input_ids'], batch['neg1_mask'])
            neg_2 = self.encoder_k.encode_seq(batch['neg2_input_ids'], batch['neg2_mask'])

        vectors = {'q': q, 'c1': c1, "c2": c2, "neg_1": neg_1, "neg_2": neg_2, "q_sp1": q_sp1}
        return vectors
| 4,975 | 36.69697 | 148 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/utils/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sqlite3
import unicodedata
def load_saved(model, path, exact=True):
    """Load a checkpoint into *model*, stripping any DataParallel 'module.' prefix.

    Args:
        model: target ``nn.Module``.
        path: checkpoint file containing a state dict.
        exact: if True, keep every checkpoint key; otherwise keep only the
            keys that exist in ``model``'s own state dict (partial load).

    Returns:
        The same ``model`` instance with the weights loaded.
    """
    try:
        state_dict = torch.load(path)
    except RuntimeError:
        # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and unrelated errors. RuntimeError is what torch.load raises when a
        # GPU-saved checkpoint is loaded on a CUDA-less machine.
        state_dict = torch.load(path, map_location=torch.device('cpu'))

    def filter(x): return x[7:] if x.startswith('module.') else x
    if exact:
        state_dict = {filter(k): v for (k, v) in state_dict.items()}
    else:
        state_dict = {filter(k): v for (k, v) in state_dict.items() if filter(k) in model.state_dict()}
    model.load_state_dict(state_dict)
    return model
def move_to_cuda(sample):
    """Recursively move every tensor in *sample* (nested dicts/lists) to GPU.

    Non-tensor leaves are returned unchanged; an empty container yields {}.
    """
    if len(sample) == 0:
        return {}

    def _recurse(obj):
        if torch.is_tensor(obj):
            return obj.cuda()
        if isinstance(obj, dict):
            return {key: _recurse(value) for key, value in obj.items()}
        if isinstance(obj, list):
            return [_recurse(item) for item in obj]
        return obj

    return _recurse(sample)
def convert_to_half(sample):
    """Recursively cast every float32 tensor in *sample* to float16.

    Only tensors whose type is exactly ``torch.FloatTensor`` are converted;
    other leaves pass through untouched. Empty input yields {}.
    """
    if len(sample) == 0:
        return {}

    def _recurse(obj):
        if torch.is_tensor(obj) and obj.type() == "torch.FloatTensor":
            return obj.half()
        if isinstance(obj, dict):
            return {key: _recurse(value) for key, value in obj.items()}
        if isinstance(obj, list):
            return [_recurse(item) for item in obj]
        return obj

    return _recurse(sample)
class AverageMeter(object):
    """Tracks the latest value plus a running, count-weighted average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in *val*, observed *n* times, and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def normalize(text):
    """Canonicalize *text* to NFD so differently-encoded strings compare equal."""
    return unicodedata.normalize('NFD', text)
class DocDB(object):
    """Sqlite-backed document store.

    Exposes id listing and raw-text lookup over a ``documents(id, text)`` table;
    usable as a context manager.
    """

    def __init__(self, db_path=None):
        self.path = db_path
        self.connection = sqlite3.connect(self.path, check_same_thread=False)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def close(self):
        """Release the underlying sqlite connection."""
        self.connection.close()

    def get_doc_ids(self):
        """Return the ids of every document stored in the db."""
        cur = self.connection.cursor()
        cur.execute("SELECT id FROM documents")
        ids = [row[0] for row in cur.fetchall()]
        cur.close()
        return ids

    def get_doc_text(self, doc_id):
        """Return the raw text stored for *doc_id*, or None when absent."""
        cur = self.connection.cursor()
        cur.execute(
            "SELECT text FROM documents WHERE id = ?",
            (normalize(doc_id),)
        )
        row = cur.fetchone()
        cur.close()
        return None if row is None else row[0]
def para_has_answer(answer, para, tokenizer):
    """Return True if any gold answer appears, token-wise and uncased, in *para*.

    Args:
        answer: list of gold answer strings.
        para: paragraph text to scan.
        tokenizer: tokenizer whose ``tokenize`` result supports ``words(uncased=...)``.
    """
    assert isinstance(answer, list)
    para_tokens = tokenizer.tokenize(normalize(para))
    para_words = para_tokens.words(uncased=True)
    assert len(para_words) == len(para_tokens)

    for candidate in answer:
        cand_words = tokenizer.tokenize(normalize(candidate)).words(uncased=True)
        window = len(cand_words)
        # slide a window over the paragraph looking for an exact token match
        for start in range(0, len(para_words) - window + 1):
            if para_words[start:start + window] == cand_words:
                return True
    return False
def complex_ans_recall():
    """
    Calculate retrieval answer-recall for ComplexWebQ: the fraction of dev
    questions for which at least one retrieved passage chain contains a gold
    answer string. Reads hard-coded prediction/annotation paths and prints
    the count and the recall.
    """
    import json
    import numpy as np
    from basic_tokenizer import SimpleTokenizer
    tok = SimpleTokenizer()
    # hard-coded paths: retrieval predictions and the raw dev QA annotations
    predictions = json.load(open("/private/home/xwhan/code/learning_to_retrieve_reasoning_paths/results/complexwebq_retrieval_res.json"))
    raw_dev = [json.loads(l) for l in open("/private/home/xwhan/data/ComplexWebQ/complexwebq_dev_qas.txt").readlines()]
    id2qas = {_["id"]:_ for _ in raw_dev}
    assert len(predictions) == len(raw_dev)
    answer_recalls = []
    for item in predictions:
        qid = item["q_id"]
        title2passage = item["context"]
        gold_answers = id2qas[qid]["answer"]
        chain_coverage = []
        # a question counts as recalled if ANY retrieved chain covers an answer
        for chain in item["topk_titles"]:
            chain_text = " ".join([title2passage[_] for _ in chain])
            chain_coverage.append(para_has_answer(gold_answers, chain_text, tok))
        answer_recalls.append(np.sum(chain_coverage) > 0)
    print(len(answer_recalls))
    print(np.mean(answer_recalls))
if __name__ == "__main__":
complex_ans_recall() | 5,278 | 29.871345 | 137 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/data/unified_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import Dataset, Sampler
import torch
import json
import random
from .data_utils import collate_tokens
class UnifiedDataset(Dataset):
    """Training dataset mixing single-hop and multi-hop QA samples.

    Each item yields tokenized codes for the question, the question+first-hop
    query, the start/bridge positive passages, two negatives, and a ``stop``
    label (0 = single-hop, i.e. retrieval should stop; 1 = multi-hop).
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_len,
                 max_q_sp_len,
                 max_c_len,
                 train=False,
                 ):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_q_len = max_q_len
        self.max_c_len = max_c_len
        self.max_q_sp_len = max_q_sp_len
        self.train = train
        print(f"Loading data from {data_path}")
        self.data = [json.loads(line) for line in open(data_path).readlines()]
        if train:
            # training needs at least two negatives per sample
            self.data = [_ for _ in self.data if len(_["neg_paras"]) >= 2]
        print(f"Total sample count {len(self.data)}")

    def encode_para(self, para, max_len):
        """Tokenize a passage as (title, text) pair, truncated to *max_len*."""
        para_text = para["text"].strip()
        # NQ passages do not end with periods
        if para_text.endswith("."):
            para_text = para_text[:-1]
        return self.tokenizer.encode_plus(para["title"].strip(), text_pair=para_text, max_length=max_len, return_tensors="pt")

    def __getitem__(self, index):
        sample = self.data[index]
        question = sample['question']
        if question.endswith("?"):
            question = question[:-1]

        mhop = True
        if sample["type"] == "comparison":
            # comparison questions: hop order is arbitrary, so shuffle it
            random.shuffle(sample["pos_paras"])
            start_para, bridge_para = sample["pos_paras"]
        elif sample["type"] == "bridge":
            # the passage whose title matches sample["bridge"] is the 2nd hop
            for para in sample["pos_paras"]:
                if para["title"] != sample["bridge"]:
                    start_para = para
                else:
                    bridge_para = para
        elif sample["type"] == "single":
            mhop = False
            assert len(sample["pos_paras"]) == 1
            start_para = sample["pos_paras"][0]
            if len(sample["neg_paras"]) > 0:
                bridge_para = random.choice(sample["neg_paras"])  # not used as positive
            else:
                bridge_para = {"title": "dummy", "text": "dummy"}
        else:
            assert False

        if self.train:
            random.shuffle(sample["neg_paras"])

        start_para_codes = self.encode_para(start_para, self.max_c_len)
        bridge_para_codes = self.encode_para(bridge_para, self.max_c_len)
        if len(sample["neg_paras"]) >= 2:
            neg_codes_1 = self.encode_para(sample["neg_paras"][0], self.max_c_len)
            neg_codes_2 = self.encode_para(sample["neg_paras"][1], self.max_c_len)
        else:
            # pad with dummy negatives when the sample has fewer than two
            if not sample["neg_paras"]:
                neg_codes_1 = self.encode_para({"title": "dummy", "text": "dummy"}, self.max_c_len)
            else:
                neg_codes_1 = self.encode_para(sample["neg_paras"][0], self.max_c_len)
            neg_codes_2 = self.encode_para({"title": "dummy", "text": "dummy"}, self.max_c_len)

        q_sp_codes = self.tokenizer.encode_plus(question, text_pair=start_para["text"].strip(), max_length=self.max_q_sp_len, return_tensors="pt")
        q_codes = self.tokenizer.encode_plus(question, max_length=self.max_q_len, return_tensors="pt")

        return {
            "q_codes": q_codes,
            "q_sp_codes": q_sp_codes,
            "start_para_codes": start_para_codes,
            "bridge_para_codes": bridge_para_codes,
            "neg_codes_1": neg_codes_1,
            "neg_codes_2": neg_codes_2,
            "stop": torch.LongTensor([int(mhop)])  # 0 to stop
        }

    def __len__(self):
        return len(self.data)
import unicodedata
def normalize(text):
    """NFD-normalize *text* so unicode variants of the same title compare equal."""
    return unicodedata.normalize('NFD', text)
class FeverUnifiedDataset(Dataset):
    """FEVER claim-verification retrieval dataset covering both single- and
    multi-evidence claims; items mirror the UnifiedDataset output format with
    a ``stop`` label (0 = single-evidence, 1 = multi-evidence)."""

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_len,
                 max_q_sp_len,
                 max_c_len,
                 train=False,
                 ):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_q_len = max_q_len
        self.max_c_len = max_c_len
        self.max_q_sp_len = max_q_sp_len
        self.train = train
        print(f"Loading data from {data_path}")
        self.data = [json.loads(line) for line in open(data_path).readlines()]
        # index samples by evidence count so a sampler can re-balance them
        self.single_ids = [idx for idx, _ in enumerate(self.data) if len(_["correct_normalized"]) == 1]
        self.multi_ids = [idx for idx, _ in enumerate(self.data) if len(_["correct_normalized"]) > 1]
        print(f"Total sample count {len(self.data)}")
        print(f"Total single-evidence count {len(self.single_ids)}")
        print(f"Total multi-evidence count {len(self.multi_ids)}")

    def encode_para(self, para, max_len):
        """Tokenize a passage as (normalized title, text); mutates para["title"]."""
        para["title"] = normalize(para["title"])
        return self.tokenizer.encode_plus(para["title"].strip(), text_pair=para["text"].strip(), max_length=max_len, return_tensors="pt")

    def __getitem__(self, index):
        sample = self.data[index]
        question = sample['claim']
        mhop = len(sample["correct_normalized"]) > 1
        if mhop:
            neg_paras = sample["tfidf_neg"] + sample["linked_neg"]
            # keep only evidence sets that span more than one page
            evidence_multi = [e for e in sample["evidence"] if len(set([p["title"] for p in e])) > 1]
            if self.train:
                random.shuffle(neg_paras)
                random.shuffle(evidence_multi)
            start_para, bridge_para = evidence_multi[0][0], evidence_multi[0][1]
        else:
            neg_paras = sample["tfidf_neg"] + sample["linked_neg"]
            evidence = sample["evidence"]
            if self.train:
                random.shuffle(neg_paras)
                random.shuffle(evidence)
            start_para = evidence[0][0]
            if len(neg_paras) == 0:
                neg_paras.append({"title": "dummy", "text": "dummy"})
            bridge_para = random.choice(neg_paras)  # not used for training

        start_para_codes = self.encode_para(start_para, self.max_c_len)
        bridge_para_codes = self.encode_para(bridge_para, self.max_c_len)
        if len(neg_paras) >= 2:
            neg_codes_1 = self.encode_para(neg_paras[0], self.max_c_len)
            neg_codes_2 = self.encode_para(neg_paras[1], self.max_c_len)
        else:
            # pad with dummy negatives when fewer than two exist
            if not neg_paras:
                neg_codes_1 = self.encode_para({"title": "dummy", "text": "dummy"}, self.max_c_len)
            else:
                neg_codes_1 = self.encode_para(neg_paras[0], self.max_c_len)
            neg_codes_2 = self.encode_para({"title": "dummy", "text": "dummy"}, self.max_c_len)

        q_sp_codes = self.tokenizer.encode_plus(question, text_pair=start_para["text"].strip(), max_length=self.max_q_sp_len, return_tensors="pt")
        q_codes = self.tokenizer.encode_plus(question, max_length=self.max_q_len, return_tensors="pt")

        return {
            "q_codes": q_codes,
            "q_sp_codes": q_sp_codes,
            "start_para_codes": start_para_codes,
            "bridge_para_codes": bridge_para_codes,
            "neg_codes_1": neg_codes_1,
            "neg_codes_2": neg_codes_2,
            "stop": torch.LongTensor([int(mhop)])  # 0 to stop
        }

    def __len__(self):
        return len(self.data)
class FeverSampler(Sampler):
    """Re-balances FEVER claims so single-evidence examples don't dominate.

    Every epoch yields all multi-evidence indices plus ``ratio`` times as many
    randomly chosen single-evidence indices, in shuffled order.
    """

    def __init__(self, data_source, ratio=1):
        self.single_ids = data_source.single_ids
        self.multi_ids = data_source.multi_ids
        self.ratio = ratio
        self._num_samples = len(self.multi_ids) * (ratio + 1)

    def __len__(self):
        return self._num_samples

    def __iter__(self):
        random.shuffle(self.single_ids)
        quota = len(self.multi_ids) * self.ratio
        chosen = self.multi_ids + self.single_ids[:quota]
        random.shuffle(chosen)
        return iter(chosen)
def unified_collate(samples, pad_id=0):
    """Collate UnifiedDataset items into padded batch tensors.

    NOTE(review): ``pad_id`` is applied only to ``q_input_ids``; every other
    field (including ``q_sp_input_ids``) pads with 0 — confirm this asymmetry
    is intentional (it is harmless while pad_id == 0).
    """
    if len(samples) == 0:
        return {}

    batch = {
        'q_input_ids': collate_tokens([s["q_codes"]["input_ids"].view(-1) for s in samples], pad_id),
        'q_mask': collate_tokens([s["q_codes"]["attention_mask"].view(-1) for s in samples], 0),
        'q_sp_input_ids': collate_tokens([s["q_sp_codes"]["input_ids"].view(-1) for s in samples], 0),
        'q_sp_mask': collate_tokens([s["q_sp_codes"]["attention_mask"].view(-1) for s in samples], 0),
        'c1_input_ids': collate_tokens([s["start_para_codes"]["input_ids"] for s in samples], 0),
        'c1_mask': collate_tokens([s["start_para_codes"]["attention_mask"] for s in samples], 0),
        'c2_input_ids': collate_tokens([s["bridge_para_codes"]["input_ids"] for s in samples], 0),
        'c2_mask': collate_tokens([s["bridge_para_codes"]["attention_mask"] for s in samples], 0),
        'neg1_input_ids': collate_tokens([s["neg_codes_1"]["input_ids"] for s in samples], 0),
        'neg1_mask': collate_tokens([s["neg_codes_1"]["attention_mask"] for s in samples], 0),
        'neg2_input_ids': collate_tokens([s["neg_codes_2"]["input_ids"] for s in samples], 0),
        'neg2_mask': collate_tokens([s["neg_codes_2"]["attention_mask"] for s in samples], 0),
        'stop_targets': collate_tokens([s["stop"] for s in samples], 0)
    }

    # token_type_ids only exist for BERT-style tokenizers (not RoBERTa)
    if "token_type_ids" in samples[0]["q_codes"]:
        batch.update({
            'q_type_ids': collate_tokens([s["q_codes"]["token_type_ids"].view(-1) for s in samples], 0),
            'c1_type_ids': collate_tokens([s["start_para_codes"]["token_type_ids"] for s in samples], 0),
            'c2_type_ids': collate_tokens([s["bridge_para_codes"]["token_type_ids"] for s in samples], 0),
            "q_sp_type_ids": collate_tokens([s["q_sp_codes"]["token_type_ids"].view(-1) for s in samples], 0),
            'neg1_type_ids': collate_tokens([s["neg_codes_1"]["token_type_ids"] for s in samples], 0),
            'neg2_type_ids': collate_tokens([s["neg_codes_2"]["token_type_ids"] for s in samples], 0),
        })

    return batch
class NQUnifiedDataset(Dataset):
    """
    For each question, define two training targets
    1. Q -> P_pos
    2. (Q, P_neg1) -> P_pos
    i.e. the model should retrieve the positive passage both from the bare
    question and from the question paired with a hard dense negative.
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_len,
                 max_q_sp_len,
                 max_c_len,
                 train=False,
                 ):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_q_len = max_q_len
        self.max_c_len = max_c_len
        self.max_q_sp_len = max_q_sp_len
        self.train = train
        print(f"Loading data from {data_path}")
        self.data = [json.loads(line) for line in open(data_path).readlines()]
        # if train:
        # requires at least one DPR negative and two dense negatives per sample
        self.data = [_ for _ in self.data if len(_["dpr_neg"]) > 0 and len(_["top_neg"]) > 1]
        print(f"Total sample count {len(self.data)}")

    def encode_para(self, para, max_len):
        """Tokenize a passage as (title, text); falls back to title-only text."""
        para_text = para["text"].strip()
        if para_text == "":
            para_text = para["title"].strip()
        return self.tokenizer.encode_plus(para["title"].strip(), text_pair=para_text, max_length=max_len, return_tensors="pt")

    def encode_q(self, q):
        """Tokenize a bare question, prefixed with the [unused0] marker token."""
        q_toks = self.tokenizer.tokenize(q)
        q_toks = ['[unused0]'] + q_toks
        return self.tokenizer.encode_plus(q_toks, max_length=self.max_q_len, return_tensors="pt", is_pretokenized=True)

    def encode_q_neg(self, q, neg):
        """Tokenize (question, negative passage) with the [unused1] marker."""
        neg_para_toks = self.tokenizer.tokenize(neg["title"].strip() + " [SEP] " + neg["text"].strip())
        q_toks = ['[unused1]'] + self.tokenizer.tokenize(q)
        return self.tokenizer.encode_plus(q_toks, text_pair=neg_para_toks, max_length=self.max_q_sp_len, return_tensors="pt", is_pretokenized=True)

    def __getitem__(self, index):
        sample = self.data[index]
        question = sample['question']
        if question.endswith("?"):
            question = question[:-1]
        # assert len(sample["pos_paras"]) == 1
        if self.train:
            random.shuffle(sample["top_neg"])
            random.shuffle(sample["dpr_neg"])
            p_neg = sample["dpr_neg"][0]
            dense_neg1, dense_neg2 = sample["top_neg"][:2]
            # p_neg1, p_neg2 = sample["dpr_neg"][:2]
        else:
            p_neg = sample["dpr_neg"][0] if len(sample["dpr_neg"]) > 0 else {"title": "dummy", "text": "dummy"}
            dense_neg1, dense_neg2 = sample["top_neg"][:2]
            # p_neg2 = sample["dpr_neg"][1] if len(sample["dpr_neg"]) > 1 else {"title": "dummy", "text": "dummy"}
        # pos_para = sample["pos_paras"][0]
        if self.train:
            pos_para = random.choice(sample["pos_paras"])
        else:
            pos_para = sample["pos_paras"][0]

        # q_codes = self.tokenizer.encode_plus(question, max_length=self.max_q_len, return_tensors="pt")
        q_codes = self.encode_q(question)
        q_neg1_codes = self.encode_q_neg(question, dense_neg1)
        # q_neg1_codes = self.tokenizer.encode_plus(question, text_pair=dense_neg1["title"] + " [SEP] " + dense_neg1["text"].strip(), max_length=self.max_q_sp_len, return_tensors="pt")
        neg_codes = self.encode_para(p_neg, self.max_c_len)
        pos_codes = self.encode_para(pos_para, self.max_c_len)
        dense_neg1_codes = self.encode_para(dense_neg1, self.max_c_len)
        dense_neg2_codes = self.encode_para(dense_neg2, self.max_c_len)

        return {
            "q_codes": q_codes,
            "q_neg1_codes": q_neg1_codes,
            "neg_codes": neg_codes,
            "dense_neg1_codes": dense_neg1_codes,
            "dense_neg2_codes": dense_neg2_codes,
            "pos_codes": pos_codes
        }

    def __len__(self):
        return len(self.data)
def nq_unified_collate(samples, pad_id=0):
    """Collate NQUnifiedDataset items into padded batch tensors.

    NOTE(review): ``pad_id`` is applied only to ``q_input_ids``; other fields
    pad with 0 — harmless while pad_id == 0, confirm the asymmetry is intended.
    """
    if len(samples) == 0:
        return {}

    batch = {
        'q_input_ids': collate_tokens([s["q_codes"]["input_ids"].view(-1) for s in samples], pad_id),
        'q_mask': collate_tokens([s["q_codes"]["attention_mask"].view(-1) for s in samples], 0),
        'q_neg1_input_ids': collate_tokens([s["q_neg1_codes"]["input_ids"].view(-1) for s in samples], 0),
        'q_neg1_mask': collate_tokens([s["q_neg1_codes"]["attention_mask"].view(-1) for s in samples], 0),
        'c_input_ids': collate_tokens([s["pos_codes"]["input_ids"] for s in samples], 0),
        'c_mask': collate_tokens([s["pos_codes"]["attention_mask"] for s in samples], 0),
        'neg_input_ids': collate_tokens([s["neg_codes"]["input_ids"] for s in samples], 0),
        'neg_mask': collate_tokens([s["neg_codes"]["attention_mask"] for s in samples], 0),
        'dense_neg1_input_ids': collate_tokens([s["dense_neg1_codes"]["input_ids"] for s in samples], 0),
        'dense_neg1_mask': collate_tokens([s["dense_neg1_codes"]["attention_mask"] for s in samples], 0),
        'dense_neg2_input_ids': collate_tokens([s["dense_neg2_codes"]["input_ids"] for s in samples], 0),
        'dense_neg2_mask': collate_tokens([s["dense_neg2_codes"]["attention_mask"] for s in samples], 0),
    }

    # token_type_ids only exist for BERT-style tokenizers (not RoBERTa)
    if "token_type_ids" in samples[0]["q_codes"]:
        batch.update({
            'q_type_ids': collate_tokens([s["q_codes"]["token_type_ids"].view(-1) for s in samples], 0),
            'c_type_ids': collate_tokens([s["pos_codes"]["token_type_ids"] for s in samples], 0),
            "q_neg1_type_ids": collate_tokens([s["q_neg1_codes"]["token_type_ids"].view(-1) for s in samples], 0),
            'neg_type_ids': collate_tokens([s["neg_codes"]["token_type_ids"] for s in samples], 0),
            'dense_neg1_type_ids': collate_tokens([s["dense_neg1_codes"]["token_type_ids"] for s in samples], 0),
            'dense_neg2_type_ids': collate_tokens([s["dense_neg2_codes"]["token_type_ids"] for s in samples], 0),
        })

    return batch
| 16,160 | 41.528947 | 184 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/data/encode_datasets.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import csv
import json
import pdb
import numpy as np
from torch.utils.data import Dataset
from tqdm import tqdm
import codecs
from .data_utils import collate_tokens
import unicodedata
import re
import os
def normalize(text):
    """Return *text* in NFD form so unicode title variants compare equal."""
    return unicodedata.normalize('NFD', text)
def convert_brc(string):
    """Map Penn-Treebank bracket escapes (-LRB-, -COLON-, ...) back to literal
    punctuation; all other text passes through unchanged."""
    replacements = (
        ('-LRB-', '('),
        ('-RRB-', ')'),
        ('-LSB-', '['),
        ('-RSB-', ']'),
        ('-LCB-', '{'),
        ('-RCB-', '}'),
        ('-COLON-', ':'),
    )
    for token, char in replacements:
        string = string.replace(token, char)
    return string
class EmDataset(Dataset):
    """Corpus/query embedding dataset.

    In query mode, loads jsonl queries; in corpus mode, loads passages from
    tsv/jsonl (with a FEVER-specific branch) and writes an ``id2doc.json``
    index mapping row position -> (title, text, intro-flag) under *save_path*.
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_len,
                 max_c_len,
                 is_query_embed,
                 save_path
                 ):
        super().__init__()
        self.is_query_embed = is_query_embed
        self.tokenizer = tokenizer
        self.max_c_len = max_c_len

        if not os.path.exists(save_path):
            os.mkdir(save_path)
        save_path = os.path.join(save_path, "id2doc.json")  # ID to doc mapping

        print(f"Loading data from {data_path}")
        if self.is_query_embed:
            self.data = [json.loads(_.strip())
                         for _ in tqdm(open(data_path).readlines())]
        else:
            if data_path.endswith("tsv"):
                # DPR-style tsv: id \t text \t title, with a header row
                self.data = []
                with open(data_path) as tsvfile:
                    reader = csv.reader(tsvfile, delimiter='\t', )
                    for row in reader:
                        if row[0] != 'id':
                            id_, text, title = row[0], row[1], row[2]
                            self.data.append({"id": id_, "text": text, "title": title})
            elif "fever" in data_path:
                raw_data = [json.loads(l) for l in tqdm(open(data_path).readlines())]
                self.data = []
                for _ in raw_data:
                    # _["title"] = normalize(_["title"])
                    # _["title"] = convert_brc(_["title"])
                    # _["text"] = convert_brc(_["text"])
                    self.data.append(_)
            else:
                self.data = [json.loads(l) for l in open(data_path).readlines()]
            # corpus mode only: persist the row-index -> document mapping
            # (assumes source indentation placed this under the else branch
            #  -- TODO confirm against the repository)
            print(f"load {len(self.data)} documents...")
            id2doc = {}
            for idx, doc in enumerate(self.data):
                id2doc[idx] = (doc["title"], doc["text"], doc.get("intro", False))
            with open(save_path, "w") as g:
                json.dump(id2doc, g)

        self.max_len = max_q_len if is_query_embed else max_c_len
        print(f"Max sequence length: {self.max_len}")

    def __getitem__(self, index):
        sample = self.data[index]

        # RoBERTa cannot encode an empty text pair; fall back to the title
        if "Roberta" in self.tokenizer.__class__.__name__ and sample["text"].strip() == "":
            print(f"empty doc title: {sample['title']}")
            sample["text"] = sample["title"]

        # if sample["text"].endswith("."):
        #     sample["text"] = sample["text"][:-1]

        sent_codes = self.tokenizer.encode_plus(normalize(sample["title"].strip()), text_pair=sample['text'].strip(), max_length=self.max_len, return_tensors="pt")
        return sent_codes

    def __len__(self):
        return len(self.data)
def em_collate(samples):
    """Batch encoded samples, padding input ids and attention masks with 0."""
    if not samples:
        return {}
    input_ids = [sample['input_ids'].view(-1) for sample in samples]
    attention_masks = [sample['attention_mask'].view(-1) for sample in samples]
    batch = {
        'input_ids': collate_tokens(input_ids, 0),
        'input_mask': collate_tokens(attention_masks, 0),
    }
    # BERT-style encodings also carry segment ids; RoBERTa-style ones do not.
    if "token_type_ids" in samples[0]:
        type_ids = [sample['token_type_ids'].view(-1) for sample in samples]
        batch["input_type_ids"] = collate_tokens(type_ids, 0)
    return batch
| 3,991 | 33.713043 | 163 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/data/mhop_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import Dataset
import json
import random
from .data_utils import collate_tokens
class MhopDataset(Dataset):
    """Multi-hop dense retrieval dataset.

    Each line of the JSONL data file holds a question, positive paragraphs
    ("pos_paras"), negative paragraphs ("neg_paras"/"tfidf_neg"), a question
    "type" (comparison/bridge) and, for bridge questions, the title of the
    bridge paragraph retrieved in the second hop.
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_len,
                 max_q_sp_len,
                 max_c_len,
                 train=False,
                 ):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_q_len = max_q_len
        self.max_c_len = max_c_len
        self.max_q_sp_len = max_q_sp_len
        self.train = train
        print(f"Loading data from {data_path}")
        self.data = [json.loads(line) for line in open(data_path).readlines()]
        if train:
            # BUG FIX: removed a stray `import pdb; pdb.set_trace()` left over
            # from debugging -- it froze every training run at startup.
            # NOTE(review): overriding neg_paras with tfidf_neg was marked
            # upstream as "debug TODO: remove for final release" -- confirm
            # whether it should stay.
            for idx in range(len(self.data)):
                self.data[idx]["neg_paras"] = self.data[idx]["tfidf_neg"]
            # __getitem__ consumes two negatives per sample, so require >= 2.
            self.data = [_ for _ in self.data if len(_["neg_paras"]) >= 2]
        print(f"Total sample count {len(self.data)}")

    def encode_para(self, para, max_len):
        """Encodes a paragraph dict as a (title, text) pair."""
        return self.tokenizer.encode_plus(para["title"].strip(), text_pair=para["text"].strip(), max_length=max_len, return_tensors="pt")

    def __getitem__(self, index):
        sample = self.data[index]
        question = sample['question']
        if question.endswith("?"):
            question = question[:-1]
        if sample["type"] == "comparison":
            # For comparison questions the retrieval order does not matter.
            random.shuffle(sample["pos_paras"])
            start_para, bridge_para = sample["pos_paras"]
        else:
            # For bridge questions the bridge paragraph is the second hop.
            for para in sample["pos_paras"]:
                if para["title"] != sample["bridge"]:
                    start_para = para
                else:
                    bridge_para = para
        if self.train:
            random.shuffle(sample["neg_paras"])

        start_para_codes = self.encode_para(start_para, self.max_c_len)
        bridge_para_codes = self.encode_para(bridge_para, self.max_c_len)
        neg_codes_1 = self.encode_para(sample["neg_paras"][0], self.max_c_len)
        neg_codes_2 = self.encode_para(sample["neg_paras"][1], self.max_c_len)
        # Second-hop query: the question paired with the first-hop paragraph.
        q_sp_codes = self.tokenizer.encode_plus(question, text_pair=start_para["text"].strip(), max_length=self.max_q_sp_len, return_tensors="pt")
        q_codes = self.tokenizer.encode_plus(question, max_length=self.max_q_len, return_tensors="pt")
        return {
            "q_codes": q_codes,
            "q_sp_codes": q_sp_codes,
            "start_para_codes": start_para_codes,
            "bridge_para_codes": bridge_para_codes,
            "neg_codes_1": neg_codes_1,
            "neg_codes_2": neg_codes_2,
        }

    def __len__(self):
        return len(self.data)
def mhop_collate(samples, pad_id=0):
    """Collates multi-hop retrieval samples into padded training batches.

    Args:
      samples: List of dicts as produced by `MhopDataset.__getitem__`.
      pad_id: NOTE(review): currently unused -- every field is padded with 0,
        matching the original behavior. Confirm before switching to pad_id.

    Returns:
      Dict of padded tensors, or {} when `samples` is empty.
    """
    if len(samples) == 0:
        return {}
    batch = {
        'q_input_ids': collate_tokens([s["q_codes"]["input_ids"].view(-1) for s in samples], 0),
        'q_mask': collate_tokens([s["q_codes"]["attention_mask"].view(-1) for s in samples], 0),
        'q_sp_input_ids': collate_tokens([s["q_sp_codes"]["input_ids"].view(-1) for s in samples], 0),
        'q_sp_mask': collate_tokens([s["q_sp_codes"]["attention_mask"].view(-1) for s in samples], 0),
        'c1_input_ids': collate_tokens([s["start_para_codes"]["input_ids"] for s in samples], 0),
        'c1_mask': collate_tokens([s["start_para_codes"]["attention_mask"] for s in samples], 0),
        'c2_input_ids': collate_tokens([s["bridge_para_codes"]["input_ids"] for s in samples], 0),
        'c2_mask': collate_tokens([s["bridge_para_codes"]["attention_mask"] for s in samples], 0),
        'neg1_input_ids': collate_tokens([s["neg_codes_1"]["input_ids"] for s in samples], 0),
        'neg1_mask': collate_tokens([s["neg_codes_1"]["attention_mask"] for s in samples], 0),
        'neg2_input_ids': collate_tokens([s["neg_codes_2"]["input_ids"] for s in samples], 0),
        'neg2_mask': collate_tokens([s["neg_codes_2"]["attention_mask"] for s in samples], 0),
    }
    # BERT-style encodings additionally carry segment (token type) ids.
    if "token_type_ids" in samples[0]["q_codes"]:
        batch.update({
            'q_type_ids': collate_tokens([s["q_codes"]["token_type_ids"].view(-1) for s in samples], 0),
            'c1_type_ids': collate_tokens([s["start_para_codes"]["token_type_ids"] for s in samples], 0),
            'c2_type_ids': collate_tokens([s["bridge_para_codes"]["token_type_ids"] for s in samples], 0),
            "q_sp_type_ids": collate_tokens([s["q_sp_codes"]["token_type_ids"].view(-1) for s in samples], 0),
            'neg1_type_ids': collate_tokens([s["neg_codes_1"]["token_type_ids"] for s in samples], 0),
            'neg2_type_ids': collate_tokens([s["neg_codes_2"]["token_type_ids"] for s in samples], 0),
        })
    # Optional sentence-level supervision for the first-hop paragraph.
    if "sent_ids" in samples[0]["start_para_codes"]:
        batch["c1_sent_target"] = collate_tokens([s["start_para_codes"]["sent_ids"] for s in samples], -1)
        # BUG FIX: a trailing comma previously wrapped this value in a 1-tuple.
        batch["c1_sent_offsets"] = collate_tokens([s["start_para_codes"]["sent_offsets"] for s in samples], 0)
    return batch
| 5,278 | 42.270492 | 146 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/data/fever_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch import normal
from torch.utils.data import Dataset
import torch
import json
import random
import unicodedata
import re
def normalize(text):
    """Resolve different types of unicode encodings (NFD normalization)."""
    nfd_text = unicodedata.normalize('NFD', text)
    return nfd_text
def convert_brc(string):
    """Replace Penn-Treebank bracket/colon tokens with literal symbols."""
    token_map = {
        '-LRB-': '(',
        '-RRB-': ')',
        '-LSB-': '[',
        '-RSB-': ']',
        '-LCB-': '{',
        '-RCB-': '}',
        '-COLON-': ':',
    }
    for token, replacement in token_map.items():
        string = re.sub(token, replacement, string)
    return string
class FeverDataset(Dataset):
    """Multi-hop FEVER claim-verification retrieval dataset.

    Each JSONL record holds a "claim", "evidence" (a list of evidence chains,
    each a list of paragraph dicts with "title"/"text"), plus negative
    paragraphs under "tfidf_neg" and "linked_neg".
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_len,
                 max_q_sp_len,
                 max_c_len,
                 train=False,
                 ):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_q_len = max_q_len
        self.max_c_len = max_c_len
        self.max_q_sp_len = max_q_sp_len
        self.train = train
        print(f"Loading data from {data_path}")
        self.data = [json.loads(line) for line in open(data_path).readlines()]
        print(f"Total sample count {len(self.data)}")

    def encode_para(self, para, max_len):
        # Titles are NFD-normalized; bracket-token conversion is disabled.
        para["title"] = normalize(para["title"])
        # para["text"] = convert_brc(para["text"])
        return self.tokenizer.encode_plus(para["title"].strip(), text_pair=para["text"].strip(), max_length=max_len, return_tensors="pt")

    def __getitem__(self, index):
        sample = self.data[index]
        question = sample["claim"]
        # Keep only evidence chains spanning more than one distinct title,
        # i.e. genuinely multi-hop evidence.
        evidence_multi = [e for e in sample["evidence"] if len(set([p["title"] for p in e])) > 1]
        neg_paras = sample["tfidf_neg"] + sample["linked_neg"]
        if self.train:
            random.shuffle(evidence_multi)
            random.shuffle(neg_paras)
        # The first two paragraphs of the (possibly shuffled) first chain act
        # as first-hop and second-hop positives.
        start_para, bridge_para = evidence_multi[0][0], evidence_multi[0][1]
        start_para_codes = self.encode_para(start_para, self.max_c_len)
        bridge_para_codes = self.encode_para(bridge_para, self.max_c_len)
        neg_codes_1 = self.encode_para(neg_paras[0], self.max_c_len)
        neg_codes_2 = self.encode_para(neg_paras[1], self.max_c_len)
        # Second-hop query: the claim paired with the first-hop paragraph.
        q_sp_codes = self.tokenizer.encode_plus(question, text_pair=start_para["text"].strip(), max_length=self.max_q_sp_len, return_tensors="pt")
        q_codes = self.tokenizer.encode_plus(question, max_length=self.max_q_len, return_tensors="pt")
        return {
            "q_codes": q_codes,
            "q_sp_codes": q_sp_codes,
            "start_para_codes": start_para_codes,
            "bridge_para_codes": bridge_para_codes,
            "neg_codes_1": neg_codes_1,
            "neg_codes_2": neg_codes_2,
        }

    def __len__(self):
        return len(self.data)
| 2,992 | 33.802326 | 146 | py |
multihop_dense_retrieval | multihop_dense_retrieval-main/mdr/retrieval/data/sp_datasets.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Dataset classes for NQ expeirments
"""
from torch.utils.data import Dataset
import json
import random
from .data_utils import collate_tokens
class SPDataset(Dataset):
    """Strongly supervised single-hop data, following DPR.

    Each record holds a question plus positive ("pos_paras"/"pos_para") and
    negative ("neg_paras") paragraphs with "title"/"text" fields.
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_len,
                 max_c_len,
                 train=False,
                 ):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_q_len = max_q_len
        self.max_c_len = max_c_len
        self.train = train
        print(f"Loading data from {data_path}")
        self.data = [json.loads(line) for line in open(data_path).readlines()]

    def __getitem__(self, index):
        sample = self.data[index]
        question = sample['question']
        if question.endswith("?"):
            question = question[:-1]
        # Normalize records storing a list of positives under "pos_paras" into
        # a single chosen "pos_para" (random at train time, first otherwise).
        # NOTE(review): records lacking a "pos_paras" list are assumed to
        # already carry a "pos_para" entry -- confirm against the data format.
        if isinstance(sample["pos_paras"], list):
            if self.train:
                pos_para = random.choice(sample["pos_paras"])
            else:
                pos_para = sample["pos_paras"][0]
            sample["pos_para"] = pos_para
        pos_title = sample['pos_para']['title'].strip()
        paragraph = sample['pos_para']['text'].strip()
        if self.train:
            random.shuffle(sample["neg_paras"])
        if len(sample["neg_paras"]) == 0:
            # No mined negatives: at train time borrow another sample's
            # positive as a negative; at eval time use a dummy paragraph.
            if self.train:
                neg_item = random.choice(self.data)
                if "pos_paras" in neg_item:
                    neg_item["pos_para"] = neg_item["pos_paras"][0]
                neg_title = neg_item["pos_para"]["title"].strip()
                neg_paragraph = neg_item["pos_para"]["text"].strip()
            else:
                neg_title = "dummy"
                neg_paragraph = "dummy"
        else:
            neg_title = sample['neg_paras'][0]['title'].strip()
            neg_paragraph = sample['neg_paras'][0]['text'].strip()
        neg_codes = self.tokenizer.encode_plus(neg_title, text_pair=neg_paragraph, max_length=self.max_c_len, return_tensors="pt")
        q_codes = self.tokenizer.encode_plus(question, max_length=self.max_q_len, return_tensors="pt")
        pos_codes = self.tokenizer.encode_plus(pos_title, text_pair=paragraph, max_length=self.max_c_len, return_tensors="pt")
        return {
            "q_codes": q_codes,
            "pos_codes": pos_codes,
            "neg_codes": neg_codes,
        }

    def __len__(self):
        return len(self.data)
import unicodedata
def normalize(text):
    """Resolve different type of unicode encodings."""
    normalized_text = unicodedata.normalize('NFD', text)
    return normalized_text
class FeverSingleDataset(Dataset):
    """Strongly supervised single-hop FEVER data, following DPR.

    Positives are the deduplicated evidence paragraphs of a claim; negatives
    come from "tfidf_neg" and "linked_neg".
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_len,
                 max_c_len,
                 train=False,
                 ):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_q_len = max_q_len
        self.max_c_len = max_c_len
        self.train = train
        print(f"Loading data from {data_path}")
        self.data = [json.loads(line) for line in open(data_path).readlines()]

    def encode_para(self, para, max_len):
        # Titles are NFD-normalized before encoding.
        para["title"] = normalize(para["title"])
        return self.tokenizer.encode_plus(para["title"].strip(), text_pair=para["text"].strip(), max_length=max_len, return_tensors="pt")

    def __getitem__(self, index):
        sample = self.data[index]
        question = sample['claim']
        neg_paras = sample["tfidf_neg"] + sample["linked_neg"]
        # Deduplicate evidence paragraphs across chains by title.
        evidence_titles = set()
        pos_paras = []
        for e in sample["evidence"]:
            for p in e:
                if p["title"] not in evidence_titles:
                    pos_paras.append(p)
                    evidence_titles.add(p["title"])
        if self.train:
            random.shuffle(neg_paras)
            random.shuffle(pos_paras)
        pos_para = pos_paras[0]
        if len(neg_paras) == 0:
            # Fall back to a dummy negative when none were mined.
            neg_para = {"title": "dummy", "text": "dummy"}
        else:
            neg_para = neg_paras[0]
        neg_codes = self.encode_para(neg_para, self.max_c_len)
        q_codes = self.tokenizer.encode_plus(question, max_length=self.max_q_len, return_tensors="pt")
        pos_codes = self.encode_para(pos_para, self.max_c_len)
        return {
            "q_codes": q_codes,
            "pos_codes": pos_codes,
            "neg_codes": neg_codes,
        }

    def __len__(self):
        return len(self.data)
class NQMhopDataset(Dataset):
    """NQ dataset for second-hop (error-correcting) retrieval training.

    Each record holds a question, top retrieved negatives ("top_neg") and gold
    positives ("pos_paras"). The query pairs the question with the first
    (erroneous) retrieved paragraph.
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_sp_len,
                 max_c_len,
                 train=False,
                 ):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_c_len = max_c_len
        self.max_q_sp_len = max_q_sp_len
        self.train = train
        print(f"Loading data from {data_path}")
        self.data = [json.loads(line) for line in open(data_path).readlines()]
        # if train:
        # __getitem__ consumes two retrieved negatives per sample.
        self.data = [_ for _ in self.data if len(_["top_neg"]) >= 2]
        print(f"Total sample count {len(self.data)}")

    def encode_para(self, para, max_len):
        """Encodes a paragraph dict as a (title, text) pair."""
        return self.tokenizer.encode_plus(para["title"].strip(), text_pair=para["text"].strip(), max_length=max_len, return_tensors="pt")

    def encode_q(self, q, max_len, augment=True):
        """Tokenizes a question, optionally padding with [MASK] tokens.

        NOTE(review): not used by __getitem__ here; kept for external callers.
        """
        q_toks = self.tokenizer.tokenize(q.strip())
        q_toks = q_toks[:max_len-2]  # 2 special tokens
        if len(q_toks) < max_len - 2 and augment:
            # query augmentation
            q_toks = q_toks + [self.tokenizer.mask_token] * (max_len - 2 - len(q_toks))
        return self.tokenizer.encode_plus(q_toks, max_length=max_len, return_tensors="pt", is_pretokenized=True)

    def __getitem__(self, index):
        sample = self.data[index]
        question = sample['question']
        if self.train:
            random.shuffle(sample["top_neg"])
        error_para = sample["top_neg"][0]
        pos_para = sample["pos_paras"][0]
        neg_para = sample["top_neg"][1]
        # Empty paragraph texts fall back to the paragraph's own title.
        if error_para["text"].strip() == "":
            error_para["text"] = error_para["title"]
        q_codes = self.tokenizer.encode_plus(question, text_pair=error_para["text"].strip(), max_length=self.max_q_sp_len, return_tensors="pt")
        if pos_para["text"].strip() == "":
            # BUG FIX: this previously fell back to error_para["title"],
            # pairing the positive with the *negative* paragraph's title.
            pos_para["text"] = pos_para["title"]
        pos_codes = self.tokenizer.encode_plus(pos_para["title"].strip(), text_pair=pos_para["text"].strip(), max_length=self.max_c_len, return_tensors="pt")
        if neg_para["text"].strip() == "":
            neg_para["text"] = neg_para["title"]
        neg_codes = self.tokenizer.encode_plus(neg_para["title"].strip(), text_pair=neg_para["text"].strip(), max_length=self.max_c_len, return_tensors="pt")
        return {
            "q_codes": q_codes,
            "pos_codes": pos_codes,
            "neg_codes": neg_codes,
        }

    def __len__(self):
        return len(self.data)
def sp_collate(samples, pad_id=0):
    """Collate single-hop (question, positive, negative) encoded samples."""
    if not samples:
        return {}

    def stack(codes_key, field, pad):
        # Flatten each sample's tensor and pad the group to a common length.
        return collate_tokens([s[codes_key][field].view(-1) for s in samples], pad)

    batch = {
        'q_input_ids': stack("q_codes", "input_ids", pad_id),
        'q_mask': stack("q_codes", "attention_mask", 0),
        'c_input_ids': stack("pos_codes", "input_ids", pad_id),
        'c_mask': stack("pos_codes", "attention_mask", 0),
        'neg_input_ids': stack("neg_codes", "input_ids", pad_id),
        'neg_mask': stack("neg_codes", "attention_mask", 0),
    }
    # BERT-style encodings additionally carry segment (token type) ids.
    if "token_type_ids" in samples[0]["q_codes"]:
        batch['q_type_ids'] = stack("q_codes", "token_type_ids", 0)
        batch['c_type_ids'] = stack("pos_codes", "token_type_ids", 0)
        batch['neg_type_ids'] = stack("neg_codes", "token_type_ids", 0)
    return batch
class MHopDataset(Dataset):
    """Strongly supervised two-hop data, following DPR.

    Positives are ordered so the bridge paragraph is retrieved second; for
    comparison questions the retrieval order does not matter.
    """

    def __init__(self,
                 tokenizer,
                 data_path,
                 max_q_len,
                 max_c_len,
                 train=False,
                 ):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_q_len = max_q_len
        self.max_c_len = max_c_len
        self.train = train
        print(f"Loading data from {data_path}")
        self.data = [json.loads(line) for line in open(data_path).readlines()]
        if train:
            # __getitem__ consumes two distinct negatives per sample.
            self.data = [_ for _ in self.data if len(_["neg_paras"]) >= 2]
        print(f"Total sample count {len(self.data)}")

        # q_lens = [len(self.tokenizer.encode(_["question"])) for _ in self.data]
        #
        # print(f"Max q len {np.max(q_lens)}, mean {np.mean(q_lens)}")

    def __getitem__(self, index):
        sample = self.data[index]
        question = sample['question']
        if question.endswith("?"):
            question = question[:-1]
        if sample["type"] == "bridge":
            # make sure bridge is in the second hop
            if sample['pos_paras'][0]["title"].strip() == sample["bridge"].strip():
                sample["pos_paras"] = sample["pos_paras"][::-1]
        if sample["type"] == "comparison":
            # if comparison, then the retrieval order does not matter
            random.shuffle(sample["pos_paras"])
        pos_title_1 = sample['pos_paras'][0]['title'].strip()
        paragraph_1 = sample['pos_paras'][0]['text'].strip()
        pos_title_2 = sample['pos_paras'][1]['title'].strip()
        paragraph_2 = sample['pos_paras'][1]['text'].strip()
        if self.train:
            random.shuffle(sample["neg_paras"])

        # Earlier fallback for samples without negatives, kept for reference:
        # if len(sample["neg_paras"]) == 0:
        #     if self.train:
        #         neg_item = random.choice(self.data)
        #         neg_title = neg_item["pos_paras"][0]["title"].strip()
        #         neg_paragraph = neg_item["pos_paras"][0]["text"].strip()
        #     else:
        #         neg_title = "dummy"
        #         neg_paragraph = "dummy"
        # else:
        neg_title_1 = sample['neg_paras'][0]['title'].strip()
        neg_paragraph_1 = sample['neg_paras'][0]['text'].strip()
        neg_title_2 = sample['neg_paras'][1]['title'].strip()
        neg_paragraph_2 = sample['neg_paras'][1]['text'].strip()

        # # assert neg_title != pos_title_1 and neg_title != pos_title_2
        neg_codes_1 = self.tokenizer.encode_plus(neg_title_1, text_pair=neg_paragraph_1, max_length=self.max_c_len, return_tensors="pt")
        neg_codes_2 = self.tokenizer.encode_plus(neg_title_2, text_pair=neg_paragraph_2, max_length=self.max_c_len, return_tensors="pt")
        q_codes = self.tokenizer.encode_plus(question, max_length=self.max_q_len, return_tensors="pt")
        pos_codes_1 = self.tokenizer.encode_plus(pos_title_1, text_pair=paragraph_1, max_length=self.max_c_len, return_tensors="pt")
        pos_codes_2 = self.tokenizer.encode_plus(pos_title_2, text_pair=paragraph_2, max_length=self.max_c_len, return_tensors="pt")
        return {
            "q_codes": q_codes,
            "pos_codes_1": pos_codes_1,
            "pos_codes_2": pos_codes_2,
            "neg_codes_1": neg_codes_1,
            "neg_codes_2": neg_codes_2,
        }

    def __len__(self):
        return len(self.data)
def mhop_collate(samples, pad_id=0):
    """Collates two-hop (question, two positives, two negatives) samples.

    Robustness/consistency fix: like the sibling `sp_collate`, an empty sample
    list now returns {} instead of raising IndexError on `samples[0]`.

    Args:
      samples: List of dicts as produced by `MHopDataset.__getitem__`.
      pad_id: Padding value for input-id fields; masks/type ids pad with 0.

    Returns:
      Dict of padded tensors, or {} when `samples` is empty.
    """
    if len(samples) == 0:
        return {}
    batch = {
        'q_input_ids': collate_tokens([s["q_codes"]["input_ids"].view(-1) for s in samples], pad_id),
        'q_mask': collate_tokens([s["q_codes"]["attention_mask"].view(-1) for s in samples], 0),
        'c_input_ids_1': collate_tokens([s["pos_codes_1"]["input_ids"].view(-1) for s in samples], pad_id),
        'c_mask_1': collate_tokens([s["pos_codes_1"]["attention_mask"].view(-1) for s in samples], 0),
        'c_input_ids_2': collate_tokens([s["pos_codes_2"]["input_ids"].view(-1) for s in samples], pad_id),
        'c_mask_2': collate_tokens([s["pos_codes_2"]["attention_mask"].view(-1) for s in samples], 0),
        'neg_input_ids_1': collate_tokens([s["neg_codes_1"]["input_ids"].view(-1) for s in samples], pad_id),
        'neg_mask_1': collate_tokens([s["neg_codes_1"]["attention_mask"].view(-1) for s in samples], 0),
        'neg_input_ids_2': collate_tokens([s["neg_codes_2"]["input_ids"].view(-1) for s in samples], pad_id),
        'neg_mask_2': collate_tokens([s["neg_codes_2"]["attention_mask"].view(-1) for s in samples], 0),
    }
    # BERT-style encodings additionally carry segment (token type) ids.
    if "token_type_ids" in samples[0]["q_codes"]:
        batch.update({
            'q_type_ids': collate_tokens([s["q_codes"]["token_type_ids"].view(-1) for s in samples], 0),
            'c_type_ids_1': collate_tokens([s["pos_codes_1"]["token_type_ids"].view(-1) for s in samples], 0),
            'c_type_ids_2': collate_tokens([s["pos_codes_2"]["token_type_ids"].view(-1) for s in samples], 0),
            'neg_type_ids_1': collate_tokens([s["neg_codes_1"]["token_type_ids"].view(-1) for s in samples], 0),
            'neg_type_ids_2': collate_tokens([s["neg_codes_2"]["token_type_ids"].view(-1) for s in samples], 0),
        })
    return batch
| 13,614 | 37.678977 | 157 | py |
flaxformer | flaxformer-main/setup.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""setup.py for Flaxformer."""
import os
from setuptools import find_packages
from setuptools import setup
# Directory containing this setup.py; used to locate the README.
here = os.path.abspath(os.path.dirname(__file__))

# Read the long description from README.md; fall back to an empty string when
# the file is missing (e.g. in some packaging environments).
try:
    README = open(os.path.join(here, "README.md"), encoding="utf-8").read()
except IOError:
    README = ""

# Runtime dependencies.
install_requires = [
    "chex>=0.1.4",
    "numpy>=1.12",
    "jax>=0.2.21",
    "flax>=0.5.1",
    "aqtp[jax_legacy]>=0.0.10, <=0.1.0",
]

# Extra dependencies used only by the test suite (the "testing" extra below).
tests_require = [
    "absl-py", "pytest", "tensorflow>=2.4.1", "gin-config",
    "t5x @ git+https://github.com/google-research/t5x"
]

setup(
    name="flaxformer",
    version="0.8.2",
    description="Flaxformer: Transformer implementations in Flax",
    long_description="\n\n".join([README]),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3.7",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    keywords="",
    author="Flaxformer team",
    author_email="noreply@google.com",
    url="https://github.com/google/flaxformer",
    packages=find_packages(),
    zip_safe=False,
    install_requires=install_requires,
    extras_require={
        "testing": tests_require,
    },
)
| 1,963 | 29.215385 | 74 | py |
flaxformer | flaxformer-main/flaxformer/activation_partitioning_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for activation_partitioning."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from flax.linen import partitioning as flax_partitioning
from jax import numpy as jnp
from flaxformer import activation_partitioning
class ActivationPartitioningTest(parameterized.TestCase):
    """Tests for `activation_partitioning.with_sharding_migration`."""

    def test_with_sharding_migration_dims_unset(self):
        """dims=None routes to flax's with_sharding_constraint directly."""
        x = jnp.array([1, 2, 3])
        with mock.patch.object(
            flax_partitioning,
            "with_sharding_constraint",
            autospec=True,
        ) as mock_wsc:
            activation_partitioning.with_sharding_migration(
                x=x, activation_partitioning_dims=None, logical_axis_names=("foo",))
            mock_wsc.assert_called_once_with(x, ("foo",))

    def test_with_sharding_migration_dims_1_axes_calls_new(self):
        """dims=1 with logical axis rules present also uses the new API."""
        x = jnp.array([1, 2, 3])
        with mock.patch.object(
            flax_partitioning, "get_axis_rules", return_value=["rule"]):
            with mock.patch.object(
                flax_partitioning,
                "with_sharding_constraint",
                autospec=True,
            ) as mock_wsc:
                activation_partitioning.with_sharding_migration(
                    x=x, activation_partitioning_dims=1, logical_axis_names=("foo",))
                mock_wsc.assert_called_once_with(x, ("foo",))

    def test_with_sharding_migration_dims_2_errors(self):
        """dims=2 is incompatible with logical axis rules and must raise."""
        x = jnp.array([1, 2, 3])
        with mock.patch.object(
            flax_partitioning, "get_axis_rules", return_value=["rule"]):
            with self.assertRaisesRegex(ValueError, "rules.*dims.*present"):
                activation_partitioning.with_sharding_migration(
                    x=x, activation_partitioning_dims=2, logical_axis_names=("foo",))

    @parameterized.parameters(1, 2)
    def test_with_sharding_migration_no_logical_axis_rules(self, dims):
        """Without axis rules, the legacy with_sharding path is used."""
        x = jnp.array([1, 2, 3])
        with mock.patch.object(
            flax_partitioning, "get_axis_rules", return_value=()):
            with mock.patch.object(
                activation_partitioning,
                "with_sharding",
                autospec=True,
            ) as mock_ws:
                activation_partitioning.with_sharding_migration(
                    x=x, activation_partitioning_dims=dims, logical_axis_names=("foo",))
                mock_ws.assert_called_once_with(x, dims)


if __name__ == "__main__":
    absltest.main()
| 2,816 | 35.115385 | 80 | py |
flaxformer | flaxformer-main/flaxformer/param_conversion_util.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common parameter conversion utilities."""
import collections
import re
from typing import Any, Dict, List, Mapping, Tuple
import jax.numpy as jnp
import tensorflow as tf
def load_tf_params(checkpoint_path: str) -> Dict[str, Any]:
    """Loads all TF variables from the checkpoint at the given path."""
    reader = tf.train.load_checkpoint(checkpoint_path)
    params = {}
    for variable_name in reader.get_variable_to_dtype_map():
        params[variable_name] = reader.get_tensor(variable_name)
    return params
def convert_tf_params(
    tf_params: Dict[str, Any],
    mapping: Mapping[Tuple[str, ...], str],
    tf_prefix: str = '',
) -> Dict[str, Any]:
    """Builds a (partial) Flax parameter dict from TF checkpoint tensors.

    Each entry of `mapping` pairs a Flax parameter path (a tuple of at most
    two keys, i.e. a tree of depth one) with the TF variable name relative to
    `tf_prefix`. TF variables are looked up both with and without a ":0"
    suffix. The returned partial dicts are typically merged by the callers
    into the full Flax parameter tree.

    Args:
      tf_params: TF model parameters extracted from the checkpoint.
      mapping: Mapping from a slice of the Flax parameter dictionary to the
        corresponding TF variable names.
      tf_prefix: Shared prefix prepended to every TF variable name.

    Returns:
      Partial parameters for the Flax model.

    Raises:
      ValueError: If a mapped TF variable is absent from `tf_params`.
    """
    flax_params = collections.defaultdict(dict)
    for flax_path, tf_suffix in mapping.items():
        full_name = tf_prefix + tf_suffix
        if full_name in tf_params:
            value = tf_params[full_name]
        else:
            suffixed_name = f'{full_name}:0'
            if suffixed_name not in tf_params:
                raise ValueError(f'tf_params does not contain {full_name!r}')
            value = tf_params[suffixed_name]
        # Walk down to the parent dict of the leaf; the top level auto-creates
        # missing children because flax_params is a defaultdict(dict).
        subtree = flax_params
        for path_part in flax_path[:-1]:
            subtree = subtree[path_part]
        subtree[flax_path[-1]] = jnp.asarray(value)
    return flax_params
def get_int_regex_matches(pattern: str, state: Mapping[str, Any]) -> List[int]:
    """Matches a pattern with an integer capture group against state keys."""
    found = set()
    for key in state:
        match = re.match(pattern, key)
        if match is not None:
            found.add(int(match.group(1)))
    return sorted(found)
| 2,812 | 35.064103 | 80 | py |
flaxformer | flaxformer-main/flaxformer/testing_utils_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for testing_utils."""
from absl.testing import absltest
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
import jax.numpy as jnp
from flaxformer import testing_utils
class TestingUtilsTest(absltest.TestCase):
    """Tests for shape/dtype formatting helpers in `testing_utils`."""

    def test_format_params_shapes(self):
        """Short inner lists are collapsed onto a single line."""
        result = testing_utils.format_params_shapes({"foo[bar]": ["baz", 1, 2, 3]})
        self.assertEqual(result, """{
  "foo[bar]": ["baz", 1, 2, 3]
}""")

    def test_param_dtypes_shapes_axes(self):
        """Each leaf is rendered as [dtype, axis_name=dim, ...]."""
        params = nn.FrozenDict({
            "a": {
                "b": jnp.zeros([3, 7], dtype=jnp.float32),
                "c": {
                    "d": jnp.zeros([9], dtype=jnp.float32),
                },
            },
            "b": {
                "c": jnp.zeros([3, 7, 4], dtype=jnp.float32),
            },
        })
        # Axis metadata keys carry an `_axes` suffix that the helper strips.
        params_axes = nn.FrozenDict({
            "a": {
                "b_axes": nn_partitioning.AxisMetadata(names=("vocab", "embed")),
                "c": {
                    "d_axes": nn_partitioning.AxisMetadata(names=("embed",)),
                },
            },
            "b": {
                "c_axes":
                    nn_partitioning.AxisMetadata(names=("embed", "mlp", "output")),
            },
        })
        result = testing_utils.format_params_shapes(
            testing_utils.param_dtypes_shapes_axes(params, params_axes))
        self.assertEqual(
            result, """{
  "a": {
    "b": ["float32", "vocab=3", "embed=7"],
    "c": {
      "d": ["float32", "embed=9"]
    }
  },
  "b": {
    "c": ["float32", "embed=3", "mlp=7", "output=4"]
  }
}""")


if __name__ == "__main__":
    absltest.main()
| 2,132 | 26.346154 | 79 | py |
flaxformer | flaxformer-main/flaxformer/testing_utils.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test-only library."""
import functools
import json
import pathlib
import re
import types
from typing import Any, Dict, Mapping, Optional, Union
from absl.testing import absltest
from flax import linen as nn
from flax import traverse_util
from flax.core import frozen_dict
import jax
import jax.tree_util
from flaxformer.architectures.common import param_remapping
from flaxformer.types import PRNGKey
def param_shapes(params: Mapping[str, Any]) -> Dict[str, Any]:
    """Maps every parameter in the tree to its shape (as a list of ints)."""
    filtered = param_remapping.filter_out_metadata(params)
    unfrozen = frozen_dict.unfreeze(filtered)  # pytype: disable=wrong-arg-types
    return jax.tree_map(lambda arr: list(arr.shape), unfrozen)
def param_dtypes_shapes(params: Mapping[str, Any]) -> Dict[str, Any]:
    """Maps every parameter in the tree to [dtype_string, *shape]."""
    filtered = param_remapping.filter_out_metadata(params)
    unfrozen = frozen_dict.unfreeze(filtered)  # pytype: disable=wrong-arg-types

    def describe(arr):
        return [str(arr.dtype)] + list(arr.shape)

    return jax.tree_map(describe, unfrozen)
def param_dtypes_shapes_axes(params: Mapping[str, Any],
                             params_axes: Mapping[str, Any]) -> Dict[str, Any]:
    """Construct a tree of param info including dtypes, shapes, and axis names.

    The leaf of the constructed dtree are of format [<dtype>, <axis_dim>, ...],
    where each <axis_dim> is of format <axis_name>=<dim>.

    Args:
      params: Model params.
      params_axes: Axis annotations, typically under state["params_axes"].

    Returns:
      A pytree with params info.

    Raises:
      ValueError: If an axes leaf key lacks the `_axes` suffix, or a param's
        rank does not match its number of axis names.
    """
    params = param_remapping.filter_out_metadata(params)
    params_axes = param_remapping.filter_out_metadata(params_axes)
    params = frozen_dict.unfreeze(params)  # pytype: disable=wrong-arg-types

    def remove_axes_suffix(ks):
        # Axis entries are stored under "<param>_axes"; strip the suffix so
        # the axes tree mirrors the params tree key-for-key.
        if not ks[-1].endswith('_axes'):
            raise ValueError(
                f'Param axes name should end with `_axes`, found {ks[-1]}')
        return tuple(ks[:-1]) + (ks[-1][:-len('_axes')],)

    params_axes = frozen_dict.unfreeze(params_axes)  # pytype: disable=wrong-arg-types
    # Flatten, rename the leaf keys, then rebuild the nested structure so it
    # can be zipped leaf-wise with `params` below.
    flatten_axes = {
        remove_axes_suffix(ks): v
        for ks, v in traverse_util.flatten_dict(params_axes).items()
    }
    params_axes = traverse_util.unflatten_dict(flatten_axes)

    def _create_entry(param, param_axes):
        # Pair each array dimension with its logical axis name.
        output = [str(param.dtype)]
        # The param axes should be paired with param dimension, so we check that.
        if param.ndim != len(param_axes.names):
            raise ValueError('Length of param dimension does not match axes, '
                             f'{param.shape} != {param_axes.names}.')
        for dim, axis_name in zip(param.shape, param_axes.names):
            output.append(f'{axis_name}={dim}')
        return output

    return jax.tree_map(_create_entry, params, params_axes)
def format_params_shapes(params_shapes: Dict[str, Any]) -> str:
    """Formats a dictionary of parameter shapes into a string.

    The dictionary is JSON-serialized with indentation, then any innermost
    list that fits on one line (< 80 characters) is collapsed back onto a
    single line, since shape arrays are otherwise very verbose.

    Args:
      params_shapes: Dictionary of parameter shapes.

    Returns:
      String formatted result of those parameter shapes.
    """
    pretty = json.dumps(params_shapes, indent=2)

    def collapse(array_match) -> str:
        text = array_match.group(0)
        try:
            parsed = json.loads(text)
        except ValueError:
            # Not valid JSON on its own (e.g. contains a string with
            # brackets); leave the span untouched.
            return text
        one_line = json.dumps(parsed)  # no indent parameter
        return one_line if len(one_line) < 80 else text

    return re.sub(r'\[[^\[\]]+\]', collapse, pretty)
def abstract_init(
    module: nn.Module,
    *,
    inputs: Mapping[str, Any],
    static_kwargs: Mapping[str, Any] = types.MappingProxyType({}),
    rngs: Optional[Union[PRNGKey, Dict[str, PRNGKey]]] = None,
) -> Any:
  """Runs abstract (shape-only) initialization for a Flax module.

  Args:
    module: Flax module.
    inputs: Runtime inputs.
    static_kwargs: Static arguments to `module.init`, often `enable_dropout`
      currently.
    rngs: Optional override random number generators.

  Returns:
    Pytree with placeholder arrays that have shape and dtype information.
  """
  if rngs is None:
    rngs = jax.random.PRNGKey(0)
  # eval_shape traces init without allocating or computing real values.
  bound_init = functools.partial(module.init, **static_kwargs)
  return jax.eval_shape(bound_init, rngs, **inputs)
class ExpectedJsonFiles:
  """Helps check param shapes against JSON files with expected values.

  The JSON files with expected shapes contain the parameter pytree, for example,

      "mlp": {
        "wi": {
          "kernel": [13, 2048]
        },
        "wo": {
          "kernel": [2048, 13]
        }
      },

  If the dtype is also included, then it is provided before the shape, e.g.
  `"kernel": ["float32", 13, 2048]`.

  If the shapes don't match, then the expected shape is printed out. For
  intentional changes / regression testing, it can be appropriate to copy this
  to the expected shape JSON file.
  """

  def __init__(self, base_path: str):
    self.path = pathlib.Path(absltest.get_default_test_srcdir()) / base_path

  def get_params(self, filename: str) -> Dict[str, Any]:
    """Loads the expected params pytree from a JSON file."""
    with open(self.path / filename) as f:
      return json.load(f)

  def _check_against_file(self,
                          actual: Dict[str, Any],
                          expected_filename: str,
                          *,
                          print_expected: bool = False) -> None:
    """Shared compare/print/raise logic for the public check_* methods.

    Args:
      actual: Formatted param info derived from the actual model params.
      expected_filename: Name of the JSON file holding the expected values.
      print_expected: Whether to also print the expected values on mismatch.

    Raises:
      AssertionError: If `actual` doesn't match the JSON file contents.
    """
    expected = self.get_params(expected_filename)
    if actual == expected:
      return
    if print_expected:
      print('actual:\n', format_params_shapes(actual))
      print('expected:\n', format_params_shapes(expected))
    else:
      print(format_params_shapes(actual))
    raise AssertionError(
        f'Didn\'t match JSON params in {expected_filename}. See actual '
        'values above.')

  def check_params(
      self,
      actual_params: Mapping[str, Any],
      expected_filename: str,
  ) -> None:
    """Checks parameter dtypes and shapes against expected values."""
    self._check_against_file(
        param_dtypes_shapes(actual_params), expected_filename)

  def check_params_and_axes(
      self,
      actual_params: Mapping[str, Any],
      actual_params_axes: Mapping[str, Any],
      expected_filename: str,
  ) -> None:
    """Check parameter dtypes, shapes and axis names against expected values."""
    self._check_against_file(
        param_dtypes_shapes_axes(actual_params, actual_params_axes),
        expected_filename)

  def check_params_shapes_only(
      self,
      actual_params: Mapping[str, Any],
      expected_filename: str,
  ) -> None:
    """Checks parameter shapes against expected values."""
    # This variant also prints the expected tree, matching prior behavior.
    self._check_against_file(
        param_shapes(actual_params), expected_filename, print_expected=True)
| 7,432 | 33.253456 | 111 | py |
flaxformer | flaxformer-main/flaxformer/activation_partitioning.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs to assist with partitioning activations."""
import traceback
from typing import Optional, Tuple, TypeVar
from absl import logging
from flax.linen import partitioning as flax_partitioning
import jax
from jax.experimental.pjit import with_sharding_constraint as jax_pjit_wsc
def global_mesh_defined():
  """Checks if global xmap/pjit mesh resource environment is defined."""
  physical_mesh = jax.experimental.maps.thread_resources.env.physical_mesh
  # An empty device mesh shape means no global mesh has been installed.
  return physical_mesh.devices.shape != ()  # pylint: disable=g-explicit-bool-comparison
def with_sharding(x, partitioning_dims: int):
  """Annotate an activation for pjit sharding, no-op on cpu or outside pjit.

  Sharding annotations are semantically identity functions with regard to the
  program's output, but they constrain the sharding assignments that the XLA
  SPMD partitioner (exposed in JAX through `pjit`, `pmap`, and `xmap`) may
  choose for the annotated activation, forcing a resharding between
  differently-annotated producers and consumers.

  For transformer blocks trained with both data and model parallelism, the
  batch-by-sequence-by-model activations saved between forward and backward
  passes can be sharded either with data parallelism only
  (`partitioning_dims=1`) or with both data and model parallelism
  (`partitioning_dims=2`), and the partitioner has difficulty choosing well on
  its own. 1D sharding replicates these activations across the model axis
  (more memory, redundant layer-norm compute between model-parallel cores) and
  is essentially always faster for inference/decoding; 2D sharding saves that
  memory at the cost of extra scatter/gather communication, and can be the
  only way to fit the very largest models or substantially larger training
  batch sizes (note: combining 2D sharding with gradient accumulation is
  discouraged and might currently be buggy). This memory-vs-communication
  tradeoff is currently best left up to the user.

  Args:
    x: The array to be annotated with pjit sharding constraints.
    partitioning_dims: The number of model-parallel shards to create.

  Returns:
    `x` with the specified pjit constraints.

  Raises:
    ValueError: If `partitioning_dims` is not 1 or 2, or if 2D sharding is
      requested for an array that is not rank 3 or 4.
  """
  # Outside a pjit mesh (or on CPU) annotations would be meaningless.
  if jax.devices()[0].platform == 'cpu' or not global_mesh_defined():
    return x
  if partitioning_dims == 1:
    return jax_pjit_wsc(x, jax.sharding.PartitionSpec('data'))
  if partitioning_dims != 2:
    raise ValueError('only 1D or 2D activation partitioning is supported, '
                     f'got {partitioning_dims}')
  # 2D sharding: shard batch over 'data' and the trailing feature dim (axis 2
  # for rank-3, axis 2 of 4 for rank-4) over 'model'.
  if x.ndim == 3:
    spec = jax.sharding.PartitionSpec('data', None, 'model')  # pytype: disable=wrong-arg-count,wrong-arg-types
  elif x.ndim == 4:
    spec = jax.sharding.PartitionSpec('data', None, 'model', None)  # pytype: disable=wrong-arg-count,wrong-arg-types
  else:
    raise ValueError(
        f'do not know how to partition array of shape {x.shape}')
  return jax_pjit_wsc(x, spec)
T = TypeVar('T')


def with_sharding_migration(
    x: T,
    activation_partitioning_dims: Optional[int],
    logical_axis_names: Tuple[str, ...],
) -> T:
  """Helper function for migrating from old to new sharding annotations.

  Calls to this function were previously `with_sharding(x, dims)` (where the
  latter argument is defaulted to 1), and will become
  `flax_partitioning.with_sharding_constraint(x, logical_axis_names)`.

  If `activation_partitioning_dims` is unset, the new logic is used (it
  effectively issues no sharding annotation when there are no
  logical-to-physical mapping rules). If it is set, a deprecation warning is
  issued and the legacy path is taken unless logical axis rules are present,
  in which case only `activation_partitioning_dims == 1` is accepted (with
  standard logical axis rules the two paths are then equivalent).

  Args:
    x: Input array.
    activation_partitioning_dims: List of activation partitioning dimensions.
    logical_axis_names: List of names for each axis in `x`.

  Returns:
    Version of `x` with sharding annotations attached.

  Raises:
    ValueError: If both logical axis rules and an
      `activation_partitioning_dims` other than 1 are present.
  """
  if activation_partitioning_dims is None:
    return flax_partitioning.with_sharding_constraint(x, logical_axis_names)

  # Deprecated path: warn (rate-limited), pointing at the caller's location.
  caller_frame = traceback.extract_stack()[-2]
  logging.log_first_n(
      logging.WARNING,
      'In %s:%d, activation_partitioning_dims was set, but it '
      'is deprecated and will be removed soon.', 10, caller_frame.filename,
      caller_frame.lineno)
  if not flax_partitioning.get_axis_rules():
    # No logical axis rules present: fall back to the legacy annotations.
    return with_sharding(x, activation_partitioning_dims)
  if activation_partitioning_dims != 1:
    raise ValueError(
        'Both logical axis rules and activation_partitioning_dims'
        ' were present! This can typically be fixed by setting '
        '`ACTIVATION_PARTITIONING_DIMS = None` in your configuration so '
        'logical axis rules can be used instead.')
  return flax_partitioning.with_sharding_constraint(x, logical_axis_names)
| 7,241 | 43.158537 | 129 | py |
flaxformer | flaxformer-main/flaxformer/transformer_common.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common Transformer classes."""
import dataclasses
from typing import Any, Callable, Dict, List, Optional
from flax import linen as nn
from flaxformer.architectures.common import param_remapping
from flaxformer.types import Array
class LayerSequence(nn.Module, param_remapping.ParameterRemappable):
  """Common object responsible for holding and applying Transformer layers.

  Can be used in encoders/decoders and supports applying a range of sublayers.

  Attributes:
    num_layers: The number of Transformer layers to create for this encoder.
    make_layer: A function that returns a single encoder layer.
    extra_modules: Extra modules provided to each make_layer call as kwargs.
  """
  num_layers: int
  make_layer: Callable[..., Any]
  extra_modules: Optional[Dict[str, Optional[nn.Module]]] = None

  def setup(self):
    layer_kwargs = {} if self.extra_modules is None else self.extra_modules
    # NOTE: the attribute name `layers` determines the Flax parameter tree
    # naming, so it must not change.
    self.layers = [
        self.make_layer(**layer_kwargs) for _ in range(self.num_layers)
    ]

  def __call__(self, inputs: Array, *args, **kwargs) -> Array:
    """Applies all Transformer layers to the inputs sequentially.

    Args:
      inputs: The inputs to the first layer <float>[..., seq_len, hidden_size].
        Typically these are the embedded token IDs, combined with embedded
        position IDs (or sinusoidal position encodings) and segment IDs.
      *args: Positional arguments to be passed to each layer.
      **kwargs: Keyword arguments to be passed to each layer.

    Returns:
      The encoded inputs <float>[..., seq_len, hidden_size].
    """
    # Delegate to the range-based helper with the full [0, None) range.
    return self.apply_range_of_layers(0, None, inputs, *args, **kwargs)

  def apply_range_of_layers(self, start_idx: int, end_idx: Optional[int],
                            inputs: Array, *args, **kwargs) -> Array:
    """Passes the inputs to layers [start_idx, end_idx) and returns the output.

    Args:
      start_idx: The first layer to be applied to the inputs. Numeration starts
        from layer zero.
      end_idx: The last layer to be applied to the inputs. This layer is
        excluded from the interval, i.e. outputs will be returned from layers
        in the [start_idx, end_idx) interval. Set this to None to apply all
        layers starting from start_idx.
      inputs: The inputs to the first layer. [batch_size..., length, features]
      *args: Positional arguments to be passed to each layer.
      **kwargs: Keyword arguments to be passed to each layer.

    Returns:
      The output of the last layer that was applied.
    """
    activations = inputs
    for sublayer in self.layers[start_idx:end_idx]:
      activations = sublayer(activations, *args, **kwargs)
    return activations
@dataclasses.dataclass(frozen=True)
class TransparentLayerSequence:
  """Version of LayerSequence that doesn't add pytree keys.

  The layers themselves should be instantiated (and therefore named/owned) by
  a parent Flax module; this wrapper only sequences calls to them.

  Attributes:
    layers: List of nn.Modules, which should be owned by a parent Flax module.
  """
  layers: List[nn.Module]

  def __call__(self, inputs: Array, *args, **kwargs) -> Array:
    """Applies all Transformer layers to the inputs sequentially.

    Args:
      inputs: The inputs to the first layer <float>[..., seq_len, hidden_size].
        Typically these are the embedded token IDs, combined with embedded
        position IDs (or sinusoidal position encodings) and segment IDs.
      *args: Positional arguments to be passed to each layer.
      **kwargs: Keyword arguments to be passed to each layer.

    Returns:
      The encoded inputs <float>[..., seq_len, hidden_size].
    """
    # Delegate to the range-based helper with the full [0, None) range.
    return self.apply_range_of_layers(0, None, inputs, *args, **kwargs)

  def apply_range_of_layers(self, start_idx: int, end_idx: Optional[int],
                            inputs: Array, *args, **kwargs) -> Array:
    """Passes the inputs to layers [start_idx, end_idx) and returns the output.

    Args:
      start_idx: The first layer to be applied to the inputs. Numeration starts
        from layer zero.
      end_idx: The last layer to be applied to the inputs. This layer is
        excluded from the interval, i.e. outputs will be returned from layers
        in the [start_idx, end_idx) interval. Set this to None to apply all
        layers starting from start_idx.
      inputs: The inputs to the first layer. [batch_size..., length, features]
      *args: Positional arguments to be passed to each layer.
      **kwargs: Keyword arguments to be passed to each layer.

    Returns:
      The output of the last layer that was applied.
    """
    hidden = inputs
    for sublayer in self.layers[start_idx:end_idx]:
      hidden = sublayer(hidden, *args, **kwargs)  # pytype: disable=not-callable
    return hidden
| 5,500 | 40.674242 | 103 | py |
flaxformer | flaxformer-main/flaxformer/types.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX generic types used as pytype annotations throughout Flaxformer."""
from typing import Callable, Sequence

import jax.numpy as jnp

# Generic JAX array type used for activations, parameters, etc.
Array = jnp.ndarray
# JAX/numpy dtype object.
DType = jnp.dtype
# Random-number-generator key array.
PRNGKey = jnp.ndarray
# TODO: Fix types in flax.linen such that we can use `Tuple[int, ...]`.
Shape = Sequence[int]
# An activation function mapping one or more arrays to an array.
Activation = Callable[..., Array]
# Parameter initializers.
# Call signature: (rng_key, shape, dtype) -> initialized parameter array.
Initializer = Callable[[PRNGKey, Shape, DType], Array]
| 986 | 30.83871 | 74 | py |
flaxformer | flaxformer-main/flaxformer/sharding.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs for emitting sharding annotations from Flaxformer."""
import re
from flax import traverse_util
from flax.core import frozen_dict
from flax.linen import partitioning
class AxisNames(tuple):
  """Tuple of string names for each axis, for use outside of jax.jit.

  This is a distinct subclass so JAX's pytree utilities can distinguish it
  from a plain tuple: a plain tuple would be traversed as a pytree, whereas
  an `AxisNames` instance is treated as a single leaf.
  """
  # TODO: Use t5x.partitioning.AxisNames once this is migrated into t5x.
def axis_names(*names: str) -> partitioning.AxisMetadata:
  """Builds the axis-name metadata struct that gets sown into `params_axes`.

  Args:
    *names: Names for each parameter axis.

  Returns:
    partitioning.AxisMetadata metadata struct.
  """
  metadata = partitioning.AxisMetadata(names=names)
  return metadata
def reduce_fn(x, y):
  """Reduction function for sow() calls.

  Args:
    x: Existing value, or () if there was none.
    y: New axis names sown.

  Returns:
    New axis names.

  Raises:
    TypeError: If `y` is not axis metadata.
    ValueError: If `x` is axis metadata that disagrees with `y`.
  """
  if not isinstance(y, partitioning.AxisMetadata):
    raise TypeError(
        "Expected newly sown value to be an partitioning.AxisMetadata")
  if isinstance(x, partitioning.AxisMetadata):
    # Repeated sows are only allowed when they agree exactly.
    if x != y:
      raise ValueError("If axis names are sown twice, expected them to match. "
                       f"Got {x} and {y}.")
    return y
  if x:
    # Shouldn't happen, so raise a fairly internal error.
    raise AssertionError(f"Non-initial-or-AxisNames value encountered: {x}")
  return y
def _get_single_sowed_value(value) -> AxisNames:
  """Checks that a sown value is as expected.

  Args:
    value: Pytree leaf node, after calling traverse_util.flatten_dict().

  Returns:
    AxisNames metadata struct.

  Raises:
    TypeError: If any objects are of the wrong type.
  """
  if not isinstance(value, partitioning.AxisMetadata):
    # Bug fix: the second string fragment was missing its f-prefix, so the
    # offending value was rendered literally as "{value}".
    raise TypeError(
        "Expected partitioning.AxisMetadata, please make sure to use "
        f"`reduce_fn`. Got {value}")
  return AxisNames(value.names)
def get_axis_names(variables):
  """Gets axis names for variables.

  Args:
    variables: Flax variables struct, either from `model.init(...)` or
      `jax.eval_shape(model.init, ...)`.

  Returns:
    Struct matching `variables` with sown `AxisNames` as leaves.
  """
  unfrozen = frozen_dict.unfreeze(variables)  # pytype: disable=wrong-arg-types
  names_by_path = {}
  for path, metadata in traverse_util.flatten_dict(
      unfrozen["params_axes"]).items():
    # Strip the '_axes' suffix from the leaf key so the resulting tree
    # structurally matches the param tree.
    param_path = tuple(re.sub(r"_axes$", "", part) for part in path)
    names_by_path[param_path] = _get_single_sowed_value(metadata)
  return traverse_util.unflatten_dict(names_by_path)
def check_params_and_axis_names_match(variables):
  """Checks that parameters and axis names match.

  This means that every parameter should have axis name metadata associated
  with it. It also checks that each parameter dimension has a name.

  Args:
    variables: Flax variables struct, either from `model.init(...)` or
      `jax.eval_shape(model.init, ...)`.

  Raises:
    ValueError: If axis names don't exist, or don't match the param shape.
  """
  variables = frozen_dict.unfreeze(variables)

  def _flatten_with_joined_names(xs):
    # Collapse nested dict keys into slash-separated paths for easy lookup.
    return {"/".join(k): v for k, v in traverse_util.flatten_dict(xs).items()}

  flat_params = _flatten_with_joined_names(variables["params"])
  flat_axis_names = _flatten_with_joined_names(get_axis_names(variables))
  for key, array in flat_params.items():
    if key not in flat_axis_names:
      raise ValueError(f"Axis names were not sow'd for {key}.")
    names = flat_axis_names[key]
    if len(array.shape) != len(names):
      # Bug fix: the second fragment was missing its f-prefix, so the shape
      # was rendered literally as "{array.shape}" in the error message.
      raise ValueError(
          f"For {key}, axis names ({names}) doesn't contain one name "
          f"for each parameter dimension (shape {array.shape})")
flaxformer | flaxformer-main/flaxformer/sharding_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sharding."""
from absl.testing import absltest
from flax import linen as nn
from flax.core import unfreeze
from flax.linen import partitioning
import jax
from jax import numpy as jnp
from flaxformer import sharding
class BasicModule(nn.Module):
  """Minimal test module that sows axis metadata for its single parameter."""

  def setup(self):
    # One (1, 2) parameter plus matching axis-name metadata sown into the
    # 'params_axes' collection, using the sharding helpers under test.
    self.x = self.param("x", nn.initializers.ones, (1, 2))
    self.sow(
        "params_axes",
        "x_axes",
        sharding.axis_names("heads", "unmodeled"),
        reduce_fn=sharding.reduce_fn)

  def __call__(self, z):
    return self.x + z
class ShardingTest(absltest.TestCase):
  """Tests for the sharding helper module."""

  def test_axis_names(self):
    """axis_names() wraps its arguments into an AxisMetadata struct."""
    self.assertEqual(
        sharding.axis_names("embed", "unsharded"),
        partitioning.AxisMetadata(names=("embed", "unsharded")))

  def test_sowing_reduction(self):
    """Sown axis metadata survives init and a repeated (mutable) apply."""
    module = BasicModule()
    # Check the initial axes annotations.
    variables = module.init(jax.random.PRNGKey(0), jnp.array([[6, 7]]))
    self.assertDictEqual(
        unfreeze(variables["params_axes"]),
        {"x_axes": sharding.axis_names("heads", "unmodeled")},
    )
    # Re-run and make sure that axes are the same.
    _, variables = module.apply(variables, jnp.array([[6, 7]]), mutable=True)
    self.assertDictEqual(
        unfreeze(variables["params_axes"]),
        {"x_axes": sharding.axis_names("heads", "unmodeled")},
    )

  def test_check_params_and_axis_names_match_matches(self):
    """A params tree with one name per dimension passes the check."""
    sharding.check_params_and_axis_names_match(
        variables={
            "params": {
                "foo": {
                    "bar": jnp.array([1, 2, 3])
                }
            },
            "params_axes": {
                "foo": {
                    "bar_axes": sharding.axis_names("unsharded")
                }
            },
        })

  def test_check_params_and_axis_names_missing(self):
    """A param without any sown axis names raises ValueError."""
    with self.assertRaisesRegex(ValueError, ".*not sow.*foo/bar"):
      sharding.check_params_and_axis_names_match(variables={
          "params": {
              "foo": {
                  "bar": jnp.array([1, 2, 3])
              }
          },
          "params_axes": {},
      })

  def test_check_params_and_axis_names_wrong_size(self):
    """A name-count / dimension-count mismatch raises ValueError."""
    with self.assertRaisesRegex(ValueError, ".*foo/bar.*doesn't.*for each"):
      sharding.check_params_and_axis_names_match(
          variables={
              "params": {
                  "foo": {
                      "bar": jnp.array([1, 2, 3])
                  }
              },
              "params_axes": {
                  "foo": {
                      "bar_axes":
                          sharding.axis_names("unsharded", "model", "vocab")
                  }
              },
          })

  def test_get_axis_names(self):
    """get_axis_names() strips '_axes' suffixes and returns AxisNames leaves."""
    variables = {
        "params": {
            "foo": {
                # Make sure the method is not distracted by actual parameters.
                "bar": jnp.array([1, 2, 3])
            }
        },
        "params_axes": {
            "foo": {
                "bar_axes": sharding.axis_names("unsharded", "model", "vocab")
            }
        },
    }
    self.assertEqual(
        sharding.get_axis_names(variables),
        {"foo": {
            "bar": sharding.AxisNames(("unsharded", "model", "vocab"))
        }})


if __name__ == "__main__":
  absltest.main()
| 3,844 | 28.351145 | 78 | py |
flaxformer | flaxformer-main/flaxformer/transformer_common_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flaxformer.bert_model."""
from absl.testing import absltest
from flax import linen as nn
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer import transformer_common as common
class MockDecoderLayer(nn.Module):
  """A test layer that takes two inputs."""
  # Output feature dimension of the internal Dense projection.
  hidden_size: int

  @nn.compact
  def __call__(self, inputs, encoder_outputs):
    # Concatenate along the last (feature) axis, then project to hidden_size.
    return nn.Dense(self.hidden_size)(
        jnp.concatenate([inputs, encoder_outputs], -1))
class TransformerCommonTest(absltest.TestCase):
  """Tests for common.LayerSequence with encoder- and decoder-style layers."""

  def setUp(self):
    super().setUp()
    # Fixed RNG so the random inputs are reproducible across test methods.
    self.rng = random.PRNGKey(0)

  def test_encoder_layer_stack_output_shape_ok(self):
    """Tests that an encoder layer stack outputs are of the correct shape."""
    batch_size, seq_len, embedding_size, hidden_size = 2, 3, 4, 5
    input_shape = (batch_size, seq_len, embedding_size)
    inputs = random.uniform(self.rng, input_shape, dtype=jnp.float32)
    num_layers = 3

    def make_layer():
      return nn.Dense(hidden_size)

    model = common.LayerSequence(num_layers=num_layers, make_layer=make_layer)
    result, _ = model.init_with_output(self.rng, inputs)
    expected = (batch_size, seq_len, hidden_size)
    self.assertEqual(expected, result.shape)

  def test_encoder_layer_stack_applies_layer_range_correctly(self):
    """Tests that an encoder layer stack correctly applies a range of layers."""
    batch_size, seq_len, embedding_size, hidden_size = 2, 3, 4, 5
    input_shape = (batch_size, seq_len, embedding_size)
    inputs = random.uniform(self.rng, input_shape, dtype=jnp.float32)
    num_layers = 3

    def make_layer():
      return nn.Dense(hidden_size)

    model = common.LayerSequence(num_layers=num_layers, make_layer=make_layer)
    out, variables = model.init_with_output(self.rng, inputs)
    # Applying layer [0, 1) and then layers [1, end) must equal applying all
    # layers in one pass.
    partial_out = model.apply(
        variables, 0, 1, inputs, method=model.apply_range_of_layers)
    full_out = model.apply(
        variables, 1, None, partial_out, method=model.apply_range_of_layers)
    np.testing.assert_allclose(out, full_out, rtol=1e-5)

  def test_decoder_layer_stack_output_shape_ok(self):
    """Tests that a decoder layer stack outputs are of the correct shape."""
    batch_size, seq_len, embedding_size, hidden_size = 2, 3, 4, 5
    input_shape = (batch_size, seq_len, embedding_size)
    encoder_outputs_shape = (batch_size, seq_len, hidden_size)
    inputs = random.uniform(self.rng, input_shape, dtype=jnp.float32)
    encoder_outputs = random.uniform(
        self.rng, encoder_outputs_shape, dtype=jnp.float32)
    num_layers = 3

    def make_layer():
      return MockDecoderLayer(hidden_size)

    model = common.LayerSequence(num_layers=num_layers, make_layer=make_layer)
    result, _ = model.init_with_output(self.rng, inputs, encoder_outputs)
    expected = (batch_size, seq_len, hidden_size)
    self.assertEqual(expected, result.shape)

  def test_decoder_layer_stack_applies_layer_range_correctly(self):
    """Tests that a decoder layer stack correctly applies a range of layers."""
    batch_size, seq_len, embedding_size, hidden_size = 2, 3, 4, 5
    input_shape = (batch_size, seq_len, embedding_size)
    encoder_outputs_shape = (batch_size, seq_len, hidden_size)
    inputs = random.uniform(self.rng, input_shape, dtype=jnp.float32)
    encoder_outputs = random.uniform(
        self.rng, encoder_outputs_shape, dtype=jnp.float32)
    num_layers = 3

    def make_layer():
      return MockDecoderLayer(hidden_size)

    model = common.LayerSequence(num_layers=num_layers, make_layer=make_layer)
    out, variables = model.init_with_output(self.rng, inputs, encoder_outputs)
    # Split application (first layer, then the rest) must match a single pass.
    partial_out = model.apply(
        variables,
        0,
        1,
        inputs,
        encoder_outputs,
        method=model.apply_range_of_layers)
    full_out = model.apply(
        variables,
        1,
        None,
        partial_out,
        encoder_outputs,
        method=model.apply_range_of_layers)
    np.testing.assert_allclose(out, full_out, rtol=1e-5)
class TransparentLayerSequenceTest(absltest.TestCase):
  """Checks TransparentLayerSequence against LayerSequence."""

  def test_transparent_layer_sequence_equals_regular(self):
    """Both sequence types produce identical outputs for the same params."""
    batch_size, seq_len, embedding_size, hidden_size = 2, 3, 4, 5
    input_shape = (batch_size, seq_len, embedding_size)
    encoder_outputs_shape = (batch_size, seq_len, hidden_size)
    inputs = random.uniform(random.PRNGKey(0), input_shape, dtype=jnp.float32)
    encoder_outputs = random.uniform(
        random.PRNGKey(1), encoder_outputs_shape, dtype=jnp.float32)
    num_layers = 3

    def make_layer():
      return MockDecoderLayer(hidden_size)

    # Reference: a plain LayerSequence, whose variables are reused below.
    model = common.LayerSequence(num_layers=num_layers, make_layer=make_layer)
    out, variables = model.init_with_output(
        random.PRNGKey(2), inputs, encoder_outputs)

    class OuterModule(nn.Module):
      # Owns the layers itself (attribute name 'layers' must match the
      # LayerSequence param tree) and delegates to TransparentLayerSequence.

      def setup(self):
        self.layers = [make_layer() for _ in range(num_layers)]
        self.layer_sequence = common.TransparentLayerSequence(self.layers)

      def __call__(self, *args, **kwargs):
        return self.layer_sequence(*args, **kwargs)

    model2 = OuterModule()
    out2 = model2.apply(variables, inputs, encoder_outputs)
    np.testing.assert_allclose(out, out2, rtol=1e-5)


if __name__ == '__main__':
  absltest.main()
| 5,809 | 35.540881 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/components_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for scaling module."""
from absl.testing import absltest
from jax import numpy as jnp
from jax import random
import tensorflow as tf
from flaxformer.architectures.dual_encoder import components
class LearnableScalingTest(tf.test.TestCase):
  """Tests for components.LearnableScaling."""

  def test_logits_get_scaled_by_init_scaling_value_during_training(self):
    """At init, the module's output equals init_scaling_value * input."""
    rng = random.PRNGKey(0)
    key1, key2, key3 = random.split(rng, 3)
    # Random (but reproducible) scalar and input tensor.
    init_scaling_value = random.uniform(key1).item()
    x = random.normal(key2, (2, 3, 4))

    def _get_learnable_scaling(dtype):
      return components.LearnableScaling(
          dtype=dtype, init_scaling_value=init_scaling_value)

    model_fn = _get_learnable_scaling
    # init_with_output returns (output, variables); compare the output.
    y = model_fn(jnp.float32).init_with_output(key3, x, enable_dropout=True)
    self.assertAllClose(y[0], init_scaling_value * x)


if __name__ == '__main__':
  absltest.main()
| 1,429 | 31.5 | 76 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/dual_encoder_architecture_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the dual encoder architecture."""
import dataclasses
from typing import Any
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
from jax import numpy as jnp
from jax import random
import numpy as np
from flaxformer import testing_utils
from flaxformer.architectures.dual_encoder import components
from flaxformer.architectures.dual_encoder import dual_encoder_architecture
from flaxformer.architectures.dual_encoder import l2_norm
from flaxformer.architectures.dual_encoder import poolings
from flaxformer.architectures.dual_encoder import similarity_functions
from flaxformer.architectures.dual_encoder import single_tower_logit_functions
from flaxformer.architectures.t5 import t5_architecture as flaxformer_t5_architecture
from flaxformer.components import dense
from flaxformer.components import embedding
from flaxformer.components import layer_norm
from flaxformer.components import relative_position_biases
from flaxformer.components.attention import dense_attention
# Golden parameter-shape JSON files checked by the tests below.
expected_files = testing_utils.ExpectedJsonFiles(
    'flaxformer/architectures/dual_encoder/testdata/'
)
check_dual_encoder_params = expected_files.check_params_shapes_only

# Output dimensions of the projection layer and the FFNN similarity heads.
PROJECTION_DIM = 768
OUTPUT_DIM = 3

# Initializers used when wiring the small test models.
EMBEDDING_INIT = nn.initializers.normal(stddev=1.0)
RELPOS_BIAS_INIT = nn.initializers.variance_scaling(1.0, 'fan_avg', 'uniform')
ATTENTION_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'normal')
MLP_KERNEL_INIT = nn.initializers.variance_scaling(1.0, 'fan_in',
                                                   'truncated_normal')
FINAL_KERNEL_INIT = nn.initializers.variance_scaling(1.0, 'fan_in',
                                                     'truncated_normal')
BIAS_INIT = nn.initializers.normal(stddev=1e-6)
def make_token_emb1(vocab_size, dtype, name='token_embedder', num_features=13):
  """Builds the token embedder used throughout these tests."""
  embed_kwargs = dict(
      num_embeddings=vocab_size,
      features=num_features,
      cast_input_dtype=jnp.int32,
      dtype=dtype,
      # Attend in float32 for logit training stability.
      attend_dtype=jnp.float32,
      embedding_init=EMBEDDING_INIT,
      name=name,
  )
  return embedding.Embed(**embed_kwargs)
def make_attention1(num_attn_heads, dtype):
  """Builds the multi-head attention module used by the test encoder."""
  attention_config = dict(
      num_heads=num_attn_heads,
      dtype=dtype,
      qkv_features=512,
      head_dim=None,
      kernel_init=ATTENTION_KERNEL_INIT,
      bias_init=BIAS_INIT,
      use_bias=False,
      broadcast_dropout=True,
      dropout_rate=0.1,
  )
  return dense_attention.MultiHeadDotProductAttention(**attention_config)
def make_mlp1(dtype):
  """Builds the feed-forward (MLP) block used by the test encoder."""
  mlp_config = dict(
      intermediate_dim=2048,
      activations=('relu',),
      use_bias=False,
      kernel_init=MLP_KERNEL_INIT,
      bias_init=BIAS_INIT,
      intermediate_dropout_rate=0.1,
      final_dropout_rate=0.1,
      dtype=dtype,
  )
  return dense.MlpBlock(**mlp_config)
def _make_relpos_bias(
    num_attn_heads: int,
    dtype: Any) -> relative_position_biases.RelativePositionBiases:
  """Builds the shared relative position bias module for the test encoder."""
  bias_config = dict(
      num_buckets=32,
      max_distance=128,
      num_heads=num_attn_heads,
      dtype=dtype,
      embedding_init=RELPOS_BIAS_INIT,
  )
  return relative_position_biases.RelativePositionBiases(**bias_config)
def make_test_dual_encoder(
    similarity_fn: str,
    pool_method: str,
) -> dual_encoder_architecture.DualEncoder:
  """Builds a small DualEncoder wired with test components.

  Args:
    similarity_fn: Name of the similarity layer; one of 'batch_dot_product',
      'pointwise_ffnn', or 'single_tower_pointwise_ffnn'.
    pool_method: Pooling method for the encoder outputs; one of 'mean', 'max',
      'attention', or 'first' (take the first token, i.e. no pooler).

  Returns:
    A DualEncoder instance.

  Raises:
    ValueError: If `similarity_fn` or `pool_method` is not supported.
  """
  dtype = jnp.float32
  num_attn_heads = 8
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm

  def _make_encoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is not None
    return flaxformer_t5_architecture.EncoderLayer(
        attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        shared_relative_position_bias=shared_relative_position_bias)

  def _make_projection_layer():
    return dense.DenseGeneral(
        PROJECTION_DIM,
        use_bias=False,
        dtype=dtype,
        kernel_init=FINAL_KERNEL_INIT,
        bias_init=BIAS_INIT)

  def _make_encoder(*, shared_token_embedder=None):
    assert shared_token_embedder is None
    return flaxformer_t5_architecture.Encoder(
        num_layers=3,
        token_embedder_factory=lambda: make_token_emb1(4, dtype),
        layer_factory=_make_encoder_layer,
        input_dropout_factory=make_dropout,
        output_dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        shared_relative_position_bias_factory=lambda: _make_relpos_bias(  # pylint: disable=g-long-lambda
            num_attn_heads, dtype),
    )

  def _make_pooler(pool_method):
    """These make utilities are only for tests.

    In practice, the pooler functors are set in gin files.

    Arguments:
      pool_method: pooling method to obtain the encodings

    Returns:
      pooler functor.
    """
    if pool_method == 'mean':
      return poolings.MeanPooling()
    elif pool_method == 'max':
      return poolings.MaxPooling()
    elif pool_method == 'attention':
      return poolings.AttentionPooling()
    else:
      raise ValueError(f'Do not support pooling method: {pool_method}.')

  def _make_similarity_layer():
    if similarity_fn == 'batch_dot_product':
      return similarity_functions.BatchDotProduct(name=similarity_fn)
    if similarity_fn == 'pointwise_ffnn':
      make_dropout = lambda: nn.Dropout(rate=0.1)
      return similarity_functions.PointwiseFFNN(
          OUTPUT_DIM, dropout_factory=make_dropout, name=similarity_fn
      )
    if similarity_fn == 'single_tower_pointwise_ffnn':
      make_dropout = lambda: nn.Dropout(rate=0.1)
      return single_tower_logit_functions.SingleTowerPointwiseFFNN(
          OUTPUT_DIM, dropout_factory=make_dropout, name=similarity_fn
      )
    # Previously an unsupported name fell through and silently returned None,
    # which only surfaced later as a confusing setup() failure. Fail fast
    # instead, consistent with _make_pooler above.
    raise ValueError(f'Do not support similarity function: {similarity_fn}.')

  def _make_l2_norm():
    return l2_norm.L2Norm()

  return dual_encoder_architecture.DualEncoder(
      shared_token_embedder_factory=lambda: None,
      encoder_factory=_make_encoder,
      pooler_factory=None
      if pool_method == 'first'
      else lambda: _make_pooler(pool_method),
      l2_norm_factory=_make_l2_norm,
      projection_layer_factory=_make_projection_layer,
      similarity_layer_factory=_make_similarity_layer,
      dtype=dtype,  # pytype: disable=wrong-keyword-args
  )
class DualEncoderTest(parameterized.TestCase):
  """Output-shape tests for the dual encoder architecture."""

  @staticmethod
  def _make_input_batches():
    """Returns the shared (left, right) token-id fixtures (batch size 2)."""
    left_inputs = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)
    right_inputs = np.array(
        [
            # Batch 1.
            [101, 283, 85],
            # Batch 2.
            [101, 492, 17],
        ],
        dtype=np.int32)
    return left_inputs, right_inputs

  def _init_model(self, model, *inputs):
    """Initializes `model` with a fixed seed; returns (results, variables)."""
    return model.init_with_output(
        random.PRNGKey(0), *inputs, enable_dropout=False)

  @parameterized.named_parameters(
      ('max_pooling', 'max'), ('mean_pooling', 'mean'),
      ('attention_pooling', 'attention'), ('first_token', 'first'))
  def test_dual_encoder_with_batch_dot_product_shapes(self, pool_method):
    """Tests if the dual encoder with batch dot product has correct output shapes."""
    left_inputs, right_inputs = self._make_input_batches()
    model = make_test_dual_encoder(
        similarity_fn='batch_dot_product', pool_method=pool_method)
    results, variables = self._init_model(model, left_inputs, right_inputs)
    left_encoded = results.left_encoded
    right_encoded = results.right_encoded
    logits = results.logits

    # Compare parameter shapes against the golden JSON files.
    reformatted = model.apply({},
                              variables['params'],
                              method=model.to_save_format)
    if pool_method == 'attention':
      check_dual_encoder_params(
          reformatted,
          'dual_encoder_shapes_batch_dot_product_attention_pooling.json')
    else:
      check_dual_encoder_params(reformatted,
                                'dual_encoder_shapes_batch_dot_product.json')

    self.assertEqual(left_encoded.shape, (2, PROJECTION_DIM))
    self.assertEqual(right_encoded.shape, (2, PROJECTION_DIM))
    # Batch dot product scores every left example against every right example.
    self.assertEqual(logits.shape, (2, 2))

  def test_dual_encoder_with_batch_dot_product_negative_shapes(self):
    """Tests if the dual encoder with batch dot product and negative inputs has correct output shapes."""
    left_inputs, right_inputs = self._make_input_batches()
    right_negative_inputs = np.array(
        [
            # Batch 1.
            [101, 135, 27],
            # Batch 2.
            [101, 129, 76],
        ],
        dtype=np.int32)
    model = make_test_dual_encoder(
        similarity_fn='batch_dot_product', pool_method='mean')
    results, variables = self._init_model(model, left_inputs, right_inputs,
                                          right_negative_inputs)
    left_encoded = results.left_encoded
    right_encoded = results.right_encoded
    logits = results.logits

    reformatted = model.apply({},
                              variables['params'],
                              method=model.to_save_format)
    check_dual_encoder_params(reformatted,
                              'dual_encoder_shapes_batch_dot_product.json')

    left_batch_size = left_inputs.shape[0]
    right_batch_size = right_inputs.shape[0]
    negative_batch_size = right_negative_inputs.shape[0]
    self.assertEqual(left_encoded.shape, (left_batch_size, PROJECTION_DIM))
    self.assertEqual(right_encoded.shape, (right_batch_size, PROJECTION_DIM))
    # Hard negatives extend the candidate pool along the second logits axis.
    self.assertEqual(logits.shape,
                     (left_batch_size, right_batch_size + negative_batch_size))

  def test_dual_encoder_with_pointwise_ffnn_shapes(self):
    """Tests if the dual encoder with pointwise ffnn has correct output shapes."""
    left_inputs, right_inputs = self._make_input_batches()
    model = make_test_dual_encoder(
        similarity_fn='pointwise_ffnn', pool_method='first')
    results, variables = self._init_model(model, left_inputs, right_inputs)
    left_encoded = results.left_encoded
    right_encoded = results.right_encoded
    logits = results.logits

    reformatted = model.apply({},
                              variables['params'],
                              method=model.to_save_format)
    check_dual_encoder_params(reformatted,
                              'dual_encoder_shapes_pointwise_ffnn.json')

    self.assertEqual(left_encoded.shape, (2, PROJECTION_DIM))
    self.assertEqual(right_encoded.shape, (2, PROJECTION_DIM))
    self.assertEqual(logits.shape, (2, OUTPUT_DIM))

  def test_dual_encoder_with_single_tower_pointwise_ffnn_shapes(self):
    """Tests if DE with single tower pointwise ffnn has correct output shapes.
    """
    left_inputs, right_inputs = self._make_input_batches()
    model = make_test_dual_encoder(
        similarity_fn='single_tower_pointwise_ffnn', pool_method='first')
    results, variables = self._init_model(model, left_inputs, right_inputs)
    left_encoded = results.left_encoded
    right_encoded = results.right_encoded
    logits = results.logits

    reformatted = model.apply({},
                              variables['params'],
                              method=model.to_save_format)
    check_dual_encoder_params(
        reformatted, 'dual_encoder_shapes_single_tower_pointwise_ffnn.json')

    self.assertEqual(left_encoded.shape, (2, PROJECTION_DIM))
    self.assertEqual(right_encoded.shape, (2, PROJECTION_DIM))
    self.assertEqual(logits.shape, (2, OUTPUT_DIM))
if __name__ == '__main__':
absltest.main()
| 13,011 | 32.278772 | 105 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/l2_norm_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for L2 norm."""
from absl.testing import absltest
from jax import numpy as jnp
from jax import random
from flaxformer.architectures.dual_encoder import l2_norm
class L2NormTest(absltest.TestCase):
  """Unit tests for the L2 normalization layer."""

  def test_l2_norm(self):
    """Checks that L2Norm preserves shape and honors the requested dtype."""
    root_key = random.PRNGKey(0)
    data_key, f32_key, i32_key = random.split(root_key, 3)
    inputs = random.normal(data_key, (2, 3, 4))

    def _run(dtype, init_key):
      outputs, _ = l2_norm.L2Norm(dtype=dtype).init_with_output(
          init_key, inputs)
      return outputs

    float_out = _run(jnp.float32, f32_key)
    self.assertEqual(inputs.shape, float_out.shape)
    self.assertEqual(float_out.dtype, jnp.float32)

    int_out = _run(jnp.int32, i32_key)
    self.assertEqual(int_out.dtype, jnp.int32)
if __name__ == "__main__":
absltest.main()
| 1,333 | 31.536585 | 74 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/poolings_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pooling layers."""
from typing import Optional
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen
from jax import numpy as jnp
from jax import random
import numpy as np
from flaxformer.architectures.dual_encoder import poolings
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.components import dense
from flaxformer.components import layer_norm
from flaxformer.components.attention import dense_attention
def _get_layer_factory():
  """Returns a factory that builds a tiny T5 EncoderLayer for pooling tests."""

  def _build_layer(shared_relative_position_bias: Optional[linen.Module]):
    return t5_architecture.EncoderLayer(
        attention=dense_attention.MultiHeadDotProductAttention(
            num_heads=2, head_dim=2, use_bias=False, dtype=jnp.float32
        ),
        mlp=dense.MlpBlock(
            use_bias=False, intermediate_dim=2, activations=('relu',)
        ),
        dropout_factory=lambda: linen.Dropout(0.0),
        layer_norm_factory=layer_norm.T5LayerNorm,
        shared_relative_position_bias=shared_relative_position_bias,
    )

  return _build_layer
class PoolingsTest(parameterized.TestCase):
  """Shape/dtype tests for the pooling layers in `poolings`."""

  def setUp(self):
    super().setUp()
    # Small fixed dimensions shared by all test cases.
    self.batch_size = 2
    self.seq_len = 3
    self.hidden_size = 4
    # All-ones masks: every token position is treated as valid.
    self.input_masks = np.ones((self.batch_size, self.seq_len))
    self.encoder_mask = np.ones((self.batch_size, 1, 1, self.seq_len))

  @parameterized.named_parameters(
      ('max_pooling', poolings.MaxPooling()),
      ('mean_pooling', poolings.MeanPooling()),
      ('attention_pooling', poolings.AttentionPooling()),
      (
          'multihead_attention_pooling',
          poolings.MultiHeadAttentionPooling(
              num_heads=2, head_dim=2, layer_norm_factory=layer_norm.T5LayerNorm
          ),
      ),
      (
          'multi_layer_pooling',
          poolings.MultiLayerPooling(
              layer_factory=_get_layer_factory(),
              num_layers=2,
              layer_norm_factory=layer_norm.T5LayerNorm,
          ),
      ),
      (
          'multi_layer_pooling_with_mean_pooling',
          poolings.MultiLayerPooling(
              layer_factory=_get_layer_factory(),
              num_layers=2,
              layer_norm_factory=layer_norm.T5LayerNorm,
              pooler_factory=poolings.MeanPooling,
          ),
      ),
      ('last_token_pooling', poolings.LastTokenPooling()),
  )
  def test_poolings(self, pooler):
    """Test if the pooling layers have correct shapes and types."""
    rng = random.PRNGKey(0)
    key1, key2 = random.split(rng, 2)
    encoded_inputs = random.normal(
        key1, (self.batch_size, self.seq_len, self.hidden_size)
    )
    rngs = {'params': rng, 'dropout': key2}
    encodings, _ = pooler.init_with_output(
        rngs, encoded_inputs, self.input_masks
    )
    # Pooling collapses the sequence axis: [batch, seq, hidden] -> [batch, hidden].
    self.assertEqual(encodings.shape, (self.batch_size, self.hidden_size))
    self.assertEqual(encodings.dtype, jnp.float32)

  def test_last_token_poolings(self):
    """Checks LastTokenPooling returns the encoding of the last unmasked token."""
    rngs = {'params': random.PRNGKey(0)}
    encoded_inputs = jnp.array(
        [
            [[0.2, 0.4], [0.22, 0.42], [0.23, 0.43], [0.24, 0.44]],
            [[0.3, 0.6], [-0.32, 0.62], [0.33, -0.63], [-0.34, -0.64]],
            [[-0.4, 0.8], [0.42, -0.82], [-0.43, 0.83], [0.44, -0.84]],
        ],
        dtype=jnp.float32,
    )
    # Rows have 3, 4, and 2 valid tokens respectively.
    input_masks = jnp.array(
        [
            [1, 1, 1, 0],
            [1, 1, 1, 1],
            [1, 1, 0, 0],
        ],
        dtype=jnp.int32,
    )
    encodings, _ = poolings.LastTokenPooling().init_with_output(
        rngs, encoded_inputs, input_masks
    )
    # Expected: the encoding at index (num_valid_tokens - 1) of each row.
    np.testing.assert_array_equal(
        encodings,
        jnp.array(
            [
                [0.23, 0.43],
                [-0.34, -0.64],
                [0.42, -0.82],
            ],
            dtype=jnp.float32,
        ),
    )
if __name__ == '__main__':
absltest.main()
| 4,504 | 30.725352 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/dual_encoder_architecture.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains "architecture" classes for dual encoder models.
These are combinators which assemble components (L2Norm, MLP, etc.) into
networks.
"""
import inspect
from typing import Callable, Mapping, Optional, Union
from flax import linen as nn
import flax.struct
import jax.numpy as jnp
from typing_extensions import Protocol
from flaxformer.architectures.common import param_remapping
from flaxformer.architectures.t5 import t5_architecture as flaxformer_t5_architecture
from flaxformer.components import embedding
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
from flaxformer.types import DType
def check_use_negative_inputs(logit_creation_layer: nn.Module) -> bool:
  """Returns True if the layer's call signature accepts negative encodings.

  A similarity layer supports hard negatives when its signature declares a
  `right_additional_encodings` parameter.
  """
  signature = inspect.signature(logit_creation_layer)
  return 'right_additional_encodings' in signature.parameters
@flax.struct.dataclass
class DualEncoderOutput:
  """Outputs of T5X retrieval DualEncoder architecture."""

  # Per-example encodings produced by the left tower.
  left_encoded: Array
  # Per-example encodings produced by the right tower.
  right_encoded: Array
  # Similarity scores from the similarity layer (a single array, or a mapping
  # of named score arrays).
  logits: Union[Array, Mapping[str, Array]]
class MakeEncoderFn(Protocol):
  """Signature for functions that will make a low-level Encoder.

  `DualEncoder.setup` invokes such a factory once, forwarding the shared
  token embedder (if any) that it constructed.
  """

  def __call__(
      self,
      *,
      shared_token_embedder: Optional[embedding.Embed] = None,
  ) -> flaxformer_t5_architecture.Encoder:
    """Makes a low-level Encoder instance.

    Args:
      shared_token_embedder: Shared token embedder instance, which should be
        passed to the returned module. If this is non-None, you should use it
        instead of providing your own token embedder.

    Returns:
      Encoder instance.
    """
class DualEncoder(nn.Module, param_remapping.ParameterRemappable):
  """Dual encoder model.

  The left tower and the right tower share parameters.

  Attributes:
    encoder_factory: Factory which will make the lower-level Encoder object. If
      shared_token_embedder_factory is non-None, then the result of it will be
      passed as the `shared_token_embedder` argument to `encoder_factory`.
    shared_token_embedder_factory: A callable that returns an embedder that can
      be shared between the encoder and decoder.
    pooler_factory: Optional specialization of encoder output pooling layer.
    l2_norm_factory: Optional specialization of encoder output normalization.
    projection_layer_factory: Optional specialization of encoder output
      projection layer.
    similarity_layer_factory: Optional specialization of encoder output
      similarity layer.
    dtype: DType for dual encoder to cast embedded inputs, and for attention
      mask generation.
  """
  # Core components: shared token embedder and low-level encoder.
  encoder_factory: MakeEncoderFn
  shared_token_embedder_factory: Optional[Callable[[], embedding.Embed]] = None
  pooler_factory: Optional[Callable[[], nn.Module]] = None
  l2_norm_factory: Optional[Callable[[], nn.Module]] = None
  projection_layer_factory: Optional[Callable[[], nn.Module]] = None
  similarity_layer_factory: Optional[Callable[[], nn.Module]] = None

  # Configures behavior when the model is called. Many of these might eventually
  # be better as call parameters.
  dtype: DType = jnp.float32

  def setup(self):
    # Build the (optionally shared) token embedder first so it can be handed
    # to the encoder factory.
    self.token_embedder = (
        self.shared_token_embedder_factory()  # pylint: disable=not-callable
        if self.shared_token_embedder_factory else None)
    self.encoder = self.encoder_factory(
        shared_token_embedder=self.token_embedder)
    # Optional post-processing components; each is instantiated only when its
    # factory was provided.
    if self.pooler_factory:
      self.pooler = self.pooler_factory()  # pylint: disable=not-callable
    if self.l2_norm_factory:
      self.l2_norm = self.l2_norm_factory()  # pylint: disable=not-callable
    if self.projection_layer_factory:
      self.projection_layer = self.projection_layer_factory()  # pylint: disable=not-callable
    if self.similarity_layer_factory:
      self.similarity_layer = self.similarity_layer_factory()  # pylint: disable=not-callable

  def encode(self,
             encoder_input_tokens: jnp.ndarray,
             encoder_segment_ids=None,
             encoder_positions=None,
             *,
             enable_dropout: bool = True) -> Array:
    """Applies Transformer encoder-branch on the inputs.

    Args:
      encoder_input_tokens: input data to the encoder.
      encoder_segment_ids: encoder input segmentation info for packed examples.
      encoder_positions: encoder input subsequence positions for packed
        examples.
      enable_dropout: Enables dropout if set to True.

    Returns:
      encoded feature array from the transformer encoder.
    """
    # Make padding attention mask. Token id 0 is treated as padding.
    encoder_mask = dense_attention.make_attention_mask(
        encoder_input_tokens > 0, encoder_input_tokens > 0, dtype=self.dtype)
    # Add segmentation block-diagonal attention mask if using segmented data.
    if encoder_segment_ids is not None:
      encoder_mask = dense_attention.combine_masks(
          encoder_mask,
          dense_attention.make_attention_mask(
              encoder_segment_ids,
              encoder_segment_ids,
              jnp.equal,
              dtype=self.dtype))

    encoded = self.encoder(  # pytype: disable=attribute-error
        encoder_input_tokens,
        inputs_positions=encoder_positions,
        encoder_mask=encoder_mask,
        enable_dropout=enable_dropout)

    # Pool the per-token encodings to one vector per example.
    if self.pooler_factory:
      input_masks = jnp.array(encoder_input_tokens > 0, jnp.float32)
      encodings = self.pooler(
          encoded, input_masks, deterministic=not enable_dropout)
    else:
      # Fallback to use first token.
      encodings = encoded[:, 0, :]

    # Optional projection, then optional L2 normalization.
    if self.projection_layer_factory:
      projection_output = self.projection_layer(encodings)
    else:
      projection_output = encodings

    if self.l2_norm_factory:
      encoded = self.l2_norm(projection_output)
    else:
      encoded = projection_output

    return encoded

  @property
  def encoder_embedder(self) -> embedding.MultiEmbed:
    """Returns the underlying encoder's embedder."""
    return self.encoder.embedder

  def compute_similarity(self,
                         left_encoded: Array,
                         right_encoded: Array,
                         right_negative_encoded: Optional[Array] = None,
                         enable_dropout: bool = True) -> Array:
    """Applies the similarity layer to the encoded towers.

    Args:
      left_encoded: Encodings from the left tower.
      right_encoded: Encodings from the right tower.
      right_negative_encoded: Optional encodings of negative right inputs;
        only valid if the similarity layer accepts a
        `right_additional_encodings` argument.
      enable_dropout: Enables dropout if set to True.

    Returns:
      Similarity scores from the similarity layer.

    Raises:
      ValueError: If negatives are given but the similarity layer does not
        support them.
    """
    if right_negative_encoded is not None:
      if not check_use_negative_inputs(self.similarity_layer):
        raise ValueError(
            'Received negative inputs but similarity layer'
            f' "{self.similarity_layer.name}" does not support negative inputs.'
        )
      return self.similarity_layer(
          left_encoded,
          right_encoded,
          right_negative_encoded,
          enable_dropout=enable_dropout,
      )
    return self.similarity_layer(
        left_encoded, right_encoded, enable_dropout=enable_dropout
    )

  def __call__(self,
               left_encoder_input_tokens,
               right_encoder_input_tokens,
               right_negative_encoder_input_tokens=None,
               left_encoder_segment_ids=None,
               right_encoder_segment_ids=None,
               right_negative_encoder_segment_ids=None,
               left_encoder_positions=None,
               right_encoder_positions=None,
               right_negative_encoder_positions=None,
               *,
               enable_dropout: bool = True) -> DualEncoderOutput:
    """Applies Dual Encoder model on the inputs.

    Args:
      left_encoder_input_tokens: input data to the left encoder.
      right_encoder_input_tokens: input data to the right encoder.
      right_negative_encoder_input_tokens: input negative data to the right
        encoder.
      left_encoder_segment_ids: left encoder segmentation info for packed
        examples.
      right_encoder_segment_ids: right encoder segmentation info for packed
        examples.
      right_negative_encoder_segment_ids: right encoder segmentation info for
        packed negative examples.
      left_encoder_positions: left encoder subsequence positions for packed
        examples.
      right_encoder_positions: right encoder subsequence positions for packed
        examples.
      right_negative_encoder_positions: right encoder subsequence positions for
        packed negative examples.
      enable_dropout: Enables dropout if set to True.

    Returns:
      encodings and similarity scores from the dual encoder.
    """
    if self.similarity_layer_factory is None:
      raise ValueError(
          'DualEncoder instances without a similarity layer may only be used'
          ' for encoding inputs, not comparing them.'
      )

    # Both towers run the same (parameter-shared) encode path.
    left_encoded = self.encode(
        left_encoder_input_tokens,
        encoder_segment_ids=left_encoder_segment_ids,
        encoder_positions=left_encoder_positions,
        enable_dropout=enable_dropout)

    right_encoded = self.encode(
        right_encoder_input_tokens,
        encoder_segment_ids=right_encoder_segment_ids,
        encoder_positions=right_encoder_positions,
        enable_dropout=enable_dropout)

    right_negative_encoded = None
    if right_negative_encoder_input_tokens is not None:
      right_negative_encoded = self.encode(
          right_negative_encoder_input_tokens,
          encoder_segment_ids=right_negative_encoder_segment_ids,
          encoder_positions=right_negative_encoder_positions,
          enable_dropout=enable_dropout,
      )

    logits = self.compute_similarity(
        left_encoded,
        right_encoded,
        right_negative_encoded,
        enable_dropout=enable_dropout,
    )

    return DualEncoderOutput(left_encoded, right_encoded, logits)
| 10,154 | 35.397849 | 93 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/similarity_functions.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Similarity functions for dual encoder models.
We define a variety of similarity functions for dual encoder models.
Batch similarity functions are computed on all pairs of the left encodings and
right encodings so that the returned similarity matrix satisfies
S[i,j] = similarity(encodings1[i], encodings2[j]).
Pointwise similarity functions are computed just on the original pairs, and thus
return a vector of the same length as the batch size:
S[i] = similarity(encodings1[i], encodings2[i]).
"""
from typing import Any, Callable, Iterable, Optional, Tuple, Union
from flax import linen as nn
from flax.linen.linear import default_kernel_init
from jax import lax
import jax.numpy as jnp
from flaxformer.components import dense
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
# ============================ Pointwise Similarity ============================
class PointwiseFFNN(nn.Module):
  """Pointwise feed-forward NN similarity functions.

  The two encodings are concatenated and then fed into a fully-connected layers
  to produce the similarity.

  Optionally, other features of the two encodings can be computed: element-wise
  difference and element-wise product (see the InferSent paper:
  https://arxiv.org/abs/1705.02364).

  Attributes:
    features: tuple with numbers of output features.
    use_bias: whether to add a bias to the output (default: False).
    dtype: the dtype of the computation (default: float32).
    kernel_init: initializer function for the weight matrix.
    bias_init: initializer function for the bias.
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    use_concat_feature: Whether add the two encodings.
    use_difference_feature: Whether add the difference of two encodings.
    use_product_feature: Whether add the product of two encodings.
    dropout_factory: A callable that returns a new dropout instance. This is
      applied after the feature concatenation.
    intermediate_features: An iterable containing dimensions for intermediate
      layers. These are the hidden layers before the last hidden layer.
    intermediate_act_fn: An activation function for the hidden layers.
  """
  features: Union[Iterable[int], int]
  use_bias: bool = False
  dtype: DType = jnp.float32
  kernel_init: Initializer = default_kernel_init  # pytype: disable=annotation-type-mismatch # jax-types
  bias_init: Initializer = nn.initializers.zeros
  precision: Any = None
  act_fn: str = 'relu'
  use_concat_feature: bool = True
  use_difference_feature: bool = True
  use_product_feature: bool = True
  dropout_factory: Optional[Callable[[], nn.Module]] = None
  intermediate_features: Optional[Union[Iterable[int], int]] = None
  intermediate_act_fn: str = 'relu'

  def _build_layer(self, f):
    """Returns a DenseGeneral projecting the last axis to `f` features."""
    return dense.DenseGeneral(
        axis=-1,
        features=f,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        kernel_axis_names=['embed', 'affinity'],
        precision=self.precision)

  def setup(self):
    # Builds the dense stack (intermediate layers, then the output layer),
    # the optional dropout module, and the activation functions.
    layer_features = self.intermediate_features or []
    ffnn_layers = []
    # Build the layers
    for f in layer_features:
      ffnn_layers.append(self._build_layer(f))
    ffnn_layers.append(self._build_layer(self.features))
    self.ffnn_layers = ffnn_layers
    # dropout
    if self.dropout_factory:
      self.dropout = self.dropout_factory()  # pylint: disable=not-callable
    else:
      self.dropout = None
    # intermediate activations ('linear' means no activation)
    if self.intermediate_act_fn != 'linear':
      self.intermediate_activation = getattr(nn, self.intermediate_act_fn)
    else:
      self.intermediate_activation = None
    # final activation ('linear' means no activation)
    if self.act_fn != 'linear':
      self.final_activation = getattr(nn, self.act_fn)
    else:
      self.final_activation = None

  def __call__(self,
               encodings1: Array,
               encodings2: Optional[Array] = None,
               *,
               enable_dropout: bool = True) -> Array:
    """Compute the pointwise feed-forward NN similarity from 1 or 2 encodings.

    Args:
      encodings1: A 2-D tensor of (left) encodings with shape [batch size,
        encoding dim].
      encodings2: An optional 2-D tensor of (right) encodings with shape [batch
        size, encoding dim].
      enable_dropout: Whether to enable dropout layers.

    Returns:
      A 1-D tensor of similarities with shape [batch size].
    """
    inputs = []
    encodings1_dim = encodings1.shape[-1]
    if encodings2 is not None:
      encodings2_dim = encodings2.shape[-1]
      # Optionally add the two encodings as features.
      if self.use_concat_feature:
        inputs += [encodings1, encodings2]
      # If using element-wise features, enforce that the encodings have the same
      # dimension.
      if self.use_difference_feature or self.use_product_feature:
        if encodings1_dim != encodings2_dim:
          raise ValueError(
              'If using element-wise features, enforce that the encodings have '
              'the same dimension. The dimensions are: encodings1_dim: %d, '
              'encodings2_dim: %d' % (encodings1_dim, encodings2_dim))
        # Optionally add the element-wise difference as a feature.
        if self.use_difference_feature:
          inputs += [jnp.abs(encodings1 - encodings2)]
        # Optionally add the element-wise product as a feature.
        if self.use_product_feature:
          inputs += [lax.mul(encodings1, encodings2)]
    else:
      inputs = [encodings1]
    inputs = jnp.concatenate(inputs, axis=-1)

    # Dropout on the concatenated features.
    if self.dropout_factory:
      inputs = self.dropout(inputs, deterministic=not enable_dropout)

    # Pass through the hidden layers
    for layer in self.ffnn_layers[:-1]:
      inputs = layer(inputs)
      if self.intermediate_activation:
        inputs = self.intermediate_activation(inputs)
      if self.dropout:
        inputs = self.dropout(inputs, deterministic=not enable_dropout)

    # Pass through the final layer
    logits = self.ffnn_layers[-1](inputs)
    if self.final_activation:
      logits = self.final_activation(logits)

    return logits
class DotProduct(nn.Module):
  """Vanilla row-wise version of dot product similarity function."""

  def __call__(self, left_encodings: Array, right_encodings: Array,
               **params) -> Tuple[Array, ...]:
    """Computes per-row dot products of the paired encodings.

    Args:
      left_encodings: A 2-D tensor of (left) encodings with shape [batch size,
        encoding dim].
      right_encodings: A 2-D tensor of (right) encodings with shape [batch size,
        encoding dim].
      **params: Hyperparameters dict.

    Returns:
      A 2-D tensor of dot product similarities with shape [batch size, 1].
    """
    # Stateless; shaped as a module only for API consistency with the other
    # similarity functions.
    del self
    elementwise = left_encodings * right_encodings
    return jnp.sum(elementwise, axis=-1, keepdims=True)
# ============================ Batch Similarity ================================
class BatchDotProduct(nn.Module):
  """Batch version of dot product similarity function."""

  # If True, each left example is scored against the in-batch right encodings
  # plus only its own explicitly provided hard negatives; if False, hard
  # negatives are appended to a candidate pool shared by the whole batch.
  use_only_explicit_hard_negatives: bool = False

  @nn.compact
  def __call__(self,
               left_encodings: Array,
               right_encodings: Array,
               right_additional_encodings: Optional[Array] = None,
               **params) -> Tuple[Array, ...]:
    """Compute the batch dot product similarity from two encodings.

    Args:
      left_encodings: A 2-D tensor of (left) encodings with shape [batch size,
        encoding dim].
      right_encodings: A 2-D tensor of (right) encodings with shape [batch size,
        encoding dim].
      right_additional_encodings: An optional 2-D tensor of (right) additional
        encodings with shape [batch_size * num_hard_negatives, encoding_dim].
      **params: Hyperparameters dict.

    Returns:
      logits: A 2-D tensor of dot product similarities. If
      right_additional_encodings are provided, then the output shape is
      [batch_size, batch_size + num_hard_negatives] if
      use_only_explicit_hard_negatives is True, and
      [batch_size, num_hard_negatives * batch_size] if
      use_only_explicit_hard_negatives is False. If right_additional_encodings
      are not provided, then the output shape is [batch_size, batch_size].
    """
    if self.use_only_explicit_hard_negatives:
      # Compute in-batch logits of shape [batch_size, batch_size].
      logits = jnp.dot(left_encodings, right_encodings.transpose())
      if right_additional_encodings is not None:
        batch_size, encoding_dim = left_encodings.shape
        # Regroup flat negatives to [batch_size, num_hard_negatives, dim].
        right_additional_encodings = right_additional_encodings.reshape(
            [batch_size, -1, encoding_dim]
        )
        # Logits for explicitly provided hard negatives. The shape
        # is [batch_size, num_hard_negatives].
        additional_logits = jnp.sum(
            left_encodings[:, jnp.newaxis, :] * right_additional_encodings,
            axis=-1,
        )
        # Final logits of shape [batch_size, batch_size + num_hard_negatives].
        logits = jnp.concatenate([logits, additional_logits], axis=-1)
    else:
      if right_additional_encodings is not None:
        right_encodings = jnp.concatenate(
            [right_encodings, right_additional_encodings], axis=0
        )

      # Final logits. Each examples uses all other hard negatives in the batch,
      # so the shape is [batch_size, num_hard_negatives * batch_size].
      logits = jnp.dot(left_encodings, right_encodings.transpose())

    return logits
class DoNothing(nn.Module):
  """A no-op similarity function.

  Handy when the caller only needs the tower embeddings and computes any
  losses outside of this forward module.
  """

  @nn.compact
  def __call__(self,
               left_encodings: Array,
               right_encodings: Array,
               right_additional_encodings: Optional[Array] = None,
               **params) -> Tuple[Array, ...]:
    """Ignores every input and returns a scalar zero.

    Args:
      left_encodings: Unused. A 2-D tensor of (left) encodings with shape
        [batch size, encoding dim].
      right_encodings: Unused. A 2-D tensor of (right) encodings with shape
        [batch size, encoding dim].
      right_additional_encodings: Unused. An optional 2-D tensor of (right)
        additional encodings with shape [batch size, encoding dim].
      **params: Unused. Hyperparameters dict.

    Returns:
      A single 0 (a 0-d int32 tensor).
    """
    del left_encodings, right_encodings, right_additional_encodings, params
    return jnp.zeros((), dtype=jnp.int32)
class BatchAttentionSimilarity(nn.Module):
  """Batched attention-based similarity score.

  Cross-attends the token-level encodings of two towers, maps each attended
  token to a scalar with an MLP, and mask-averages the per-token scores into
  one similarity score per example (or per example pair).

  Attributes:
    attention: the attention module.
    mlp_layer: the MLP module, applied after attention. Must have out_dim 1.
    layer_norm_factory: A callable that returns a new layer norm. This is
      applied before the attention module and before the MLP.
    activation_fn: An activation function for the hidden layers. Default to
      'linear', where no activation is requested.
    dropout_factory: An optional callable that returns a new dropout instance.
      If it exists, it is applied after the attention module.
  """
  attention: nn.Module
  mlp_layer: nn.Module
  layer_norm_factory: Callable[[], nn.Module]
  activation_fn: str = 'linear'
  dropout_factory: Optional[Callable[[], nn.Module]] = None
  def setup(self):
    # One logit per token is required so the scores can be averaged into a
    # single scalar similarity in __call__.
    assert self.mlp_layer.out_dim == 1, (
        'Requires mlp layer to return a Tensor with output dimension 1. '
        f'Currently, mlp_layer.out_dim={self.mlp_layer.out_dim}'
    )
    self.pre_attention_layer_norm = self.layer_norm_factory()
    self.pre_mlp_layer_norm = self.layer_norm_factory()
    # Optional dropout module.
    if self.dropout_factory:
      self.dropout = self.dropout_factory()  # pylint: disable=not-callable
    else:
      self.dropout = None
    # 'linear' means identity: no activation is applied to the logits.
    if self.activation_fn == 'linear':
      self.activation = None
    else:
      self.activation = getattr(nn, self.activation_fn)
  def __call__(
      self,
      encoded_input_1: Array,
      encoded_input_2: Array,
      encoded_input_mask_1: Array,
      encoded_input_mask_2: Array,
      *,
      pointwise_similarity: bool = True,
      enable_dropout: bool = True,
  ) -> Array:
    """Computes the attention based similarity from two encodings.

    encoded_input_1 and encoded_input_2 are the encoded inputs from
    the two encoder towers. They share the same batch size and encoding
    dimension. If the left and right towers are asymmetric, or generate
    embeddings of different dimensionality, please add projection layers
    to map them into the same dimension.

    Args:
      encoded_input_1: A 3-D tensor of the shape [batch_size, sequence_length_1,
        encoding_dim].
      encoded_input_2: A 3-D tensor of the shape [batch_size, sequence_length_2,
        encoding_dim].
      encoded_input_mask_1: A 2-D tensor of the shape [batch_size,
        sequence_length_1], as a binary mask for the non-padded tokens of
        encoded_input_1.
      encoded_input_mask_2: A 2-D tensor of the shape [batch_size,
        sequence_length_2], as a binary mask for the non-padded tokens of
        encoded_input_2.
      pointwise_similarity: a bool indicating whether to return pointwise
        similarity only. Default to True. If False, it allows encoded_input_1
        and encoded_input_2 have different batch sizes, and returns a score
        matrix of the shape [batch_size_1, batch_size_2], with elements being
        Similarity(encoded_input_1[i], encoded_input_2[j]), where i in [0,
        batch_size_1) and j in [0, batch_size_2). If True, it requires the batch
        sizes of encoded_input_1 and encoded_input_2 to be the same, and returns
        a vector of Similarity(encoded_input_1[i], encoded_input_2[i]), where i
        in [0, batch_size).
      enable_dropout: a bool indicating whether to use dropout.

    Returns:
      similarity_tensor: a float tensor with similarity score.
      If pointwise_similarity = True, the tensor is of the shape [batch_size],
      otherwise [batch_size_1, batch_size_2].
    """
    batch_size_1 = encoded_input_1.shape[0]
    batch_size_2 = encoded_input_2.shape[0]
    if pointwise_similarity:
      assert batch_size_1 == batch_size_2, (
          'pointwise similarity requires both inputs have the same batch. '
          f'Current shape are {encoded_input_1.shape} and '
          f'{encoded_input_2.shape}.'
      )
    else:
      input_ndim = jnp.ndim(encoded_input_1)
      # To calculate the similarity between the i-th element in encoded_input_1
      # and the j-th element in encoded_input_2, we need to tile / repeat the
      # encoded_input_1/2.
      # The encoded_input_1 is tiled along the batch dimension by batch_size_2.
      # e.g. encoded_input_1 has 3 elements [[1-st], [2-nd], [3-rd]], and
      # batch_size_2 = 2. The tiled results will be
      # [[1-st], [2-nd], [3-rd], [1-st], [2-nd], [3-rd]].
      encoded_input_1 = jnp.tile(
          encoded_input_1, (batch_size_2,) + (1,) * (input_ndim - 1)
      )
      encoded_input_mask_1 = jnp.tile(
          encoded_input_mask_1,
          (batch_size_2,) + (1,) * (jnp.ndim(encoded_input_mask_1) - 1),
      )
      # The encoded_input_2 is repeated along the batch dimension by
      # batch_size_1, i.e. each element is duplicated batch_size_1 times in a
      # row. e.g. encoded_input_2 has 2 elements [[1-st], [2-nd]], and
      # batch_size_1 = 3. The repeated results will be
      # [[1-st], [1-st], [1-st], [2-nd], [2-nd], [2-nd]].
      # Combined with the tiling above, flat position k = i + j * batch_size_1
      # pairs encoded_input_1[i] with encoded_input_2[j]; the order='F'
      # reshape at the end of this function relies on this layout.
      encoded_input_2 = jnp.repeat(encoded_input_2, batch_size_1, axis=0)
      encoded_input_mask_2 = jnp.repeat(
          encoded_input_mask_2, batch_size_1, axis=0
      )
    dtype = encoded_input_1.dtype
    encoded_input_1 = self.pre_attention_layer_norm(encoded_input_1)
    encoded_input_2 = self.pre_attention_layer_norm(encoded_input_2)
    # Mask to remove padding tokens.
    encoded_mask = dense_attention.make_attention_mask(
        encoded_input_mask_1, encoded_input_mask_2, dtype=dtype
    )
    # attention map returns a tensor of the shape [batch_size,
    # sequence_length_1, embedding_dim]
    mlp_input = self.attention(
        encoded_input_1,
        encoded_input_2,
        encoded_mask,
        enable_dropout=enable_dropout,
    )
    if self.dropout is not None and enable_dropout:
      mlp_input = self.dropout(mlp_input, deterministic=not enable_dropout)
    mlp_input = self.pre_mlp_layer_norm(mlp_input)
    # mlp_layer returns [batch_size, sequence_length_1, 1]
    logits = self.mlp_layer(mlp_input, enable_dropout=enable_dropout)
    logits = jnp.reshape(logits, encoded_input_mask_1.shape)
    # Masked mean over the sequence dimension: padded tokens contribute
    # nothing to either the numerator or the denominator.
    avg_logits = jnp.sum(logits * encoded_input_mask_1, axis=1) / jnp.sum(
        encoded_input_mask_1, axis=1
    )
    if self.activation:
      avg_logits = self.activation(avg_logits)
    if not pointwise_similarity:
      # In case of element-wise similarity, the returned tensor will be a
      # matrix with [i, j]-th element being similarity(encoded_input_1[i],
      # encoded_input_2[j]). The column-major (order='F') reshape matches the
      # tile/repeat layout constructed above.
      avg_logits = jnp.reshape(
          avg_logits, [batch_size_1, batch_size_2], order='F'
      )
    return avg_logits
| 18,034 | 37.618844 | 105 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/components.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reusable component modules."""
from flax import linen as nn
from flax.linen import partitioning
import jax.numpy as jnp
from flaxformer import types
class LearnableScaling(nn.Module):
  """Scales its input by a single learnable scalar parameter."""

  dtype: types.DType = jnp.float32
  init_scaling_value: float = 100.0

  def setup(self):
    self.scalar = partitioning.param_with_axes(
        "learnable_scalar",
        nn.initializers.constant(self.init_scaling_value), (1,),
        jnp.float32,
        axes=("embed",))

  @nn.compact
  def __call__(self,
               x: jnp.ndarray,
               enable_dropout: bool = True) -> jnp.ndarray:
    outputs = jnp.asarray(x, self.dtype)
    if not enable_dropout:
      # Skip logit scaling during eval since the scaling does not change the
      # eval metrics.
      return outputs
    broadcast_scalar = jnp.expand_dims(self.scalar, axis=1)
    return outputs * broadcast_scalar
class ConstantScaling(nn.Module):
  """Multiplies its input by a fixed, non-learnable scalar."""

  scaling_value: float = 100.0

  @nn.compact
  def __call__(self, x: jnp.ndarray, train: bool = False) -> jnp.ndarray:
    # For backwards compatibility (from before logit scaling was called from
    # loss modules), the scaling is applied in both train and eval modes.
    del train  # Unused; kept for interface compatibility.
    return self.scaling_value * x
| 1,925 | 32.206897 | 78 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/single_tower_logit_functions_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for similarity functions."""
from absl.testing import absltest
from flax import linen as nn
from jax import random
from flaxformer.architectures.dual_encoder import single_tower_logit_functions
# Dimensions kept tiny so the shape-only checks below run fast.
OUTPUT_DIM = 3
BATCH_SIZE = 2
class SimilarityFunctionsTest(absltest.TestCase):
  def test_single_tower_pointwise_ffnn(self):
    """Test if the SingleTowerPointwiseFFNN logit function has correct shapes."""
    make_dropout = lambda: nn.Dropout(rate=0.1)
    model = single_tower_logit_functions.SingleTowerPointwiseFFNN(
        OUTPUT_DIM, dropout_factory=make_dropout)
    rng = random.PRNGKey(0)
    key1, key2, key3 = random.split(rng, 3)
    # Two towers of encodings with shape [batch, encoding_dim].
    x = random.normal(key1, (BATCH_SIZE, 8))
    y = random.normal(key2, (BATCH_SIZE, 8))
    z, _ = model.init_with_output(key3, x, y, enable_dropout=False)
    # Only the output shape is checked, not the values.
    self.assertEqual(z.shape, (BATCH_SIZE, OUTPUT_DIM))
if __name__ == "__main__":
  absltest.main()
| 1,483 | 34.333333 | 78 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/similarity_functions_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for similarity functions."""
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
from jax import random
import jax.numpy as jnp
import tensorflow as tf
from flaxformer.architectures.dual_encoder import similarity_functions
from flaxformer.components import dense
from flaxformer.components import layer_norm
from flaxformer.components.attention import dense_attention
# Shared test configuration; sizes are intentionally small and distinct so
# shape mismatches surface clearly in assertions.
BATCH_SIZE = 2
DTYPE = jnp.float32
NUM_ATTN_HEADS = 13
OUTPUT_DIM = 17
ATTENTION_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'normal'
)
BIAS_INIT = nn.initializers.normal(stddev=1e-6)
MLP_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'truncated_normal'
)
def make_attention():
  """Test configuration for attention.

  Returns a multi-head dot-product attention module used by the
  BatchAttentionSimilarity tests below.
  """
  return dense_attention.MultiHeadDotProductAttention(
      num_heads=NUM_ATTN_HEADS,
      dtype=DTYPE,
      qkv_features=512,
      head_dim=None,
      kernel_init=ATTENTION_KERNEL_INIT,
      bias_init=BIAS_INIT,
      use_bias=False,
      broadcast_dropout=True,
      dropout_rate=0.1,
  )
def make_dropout():
  """Test configuration for the dropout layer (10% drop rate)."""
  return nn.Dropout(rate=0.1)
def make_mlp():
  """Test configuration for the MLP.

  out_dim is 1 because BatchAttentionSimilarity requires a scalar logit per
  token (it asserts mlp_layer.out_dim == 1).
  """
  return dense.MlpBlock(
      use_bias=False,
      intermediate_dim=2048,
      out_dim=1,
      activations=('relu',),
      kernel_init=MLP_KERNEL_INIT,
      bias_init=BIAS_INIT,
      intermediate_dropout_rate=0.1,
      dtype=DTYPE,
  )
def make_batch_attention_similarity_model():
  """Test configuration for BatchAttentionSimilarity module."""
  return similarity_functions.BatchAttentionSimilarity(
      attention=make_attention(),
      mlp_layer=make_mlp(),
      layer_norm_factory=layer_norm.T5LayerNorm,
      activation_fn='linear',
      dropout_factory=make_dropout,
  )
class SimilarityFunctionsTest(absltest.TestCase):
  # All tests below check output shapes only, not values.
  def test_pointwise_ffnn(self):
    """Test if the PointwiseFFNN similarity function has correct shapes."""
    model = similarity_functions.PointwiseFFNN(
        OUTPUT_DIM, dropout_factory=make_dropout)
    rng = random.PRNGKey(0)
    key1, key2, key3 = random.split(rng, 3)
    x = random.normal(key1, (BATCH_SIZE, 8))
    y = random.normal(key2, (BATCH_SIZE, 8))
    z, _ = model.init_with_output(key3, x, y, enable_dropout=False)
    self.assertEqual(z.shape, (BATCH_SIZE, OUTPUT_DIM))
  def test_pointwise_ffnn_without_dropout(self):
    """Test if the PointwiseFFNN similarity function has correct shapes."""
    model = similarity_functions.PointwiseFFNN(OUTPUT_DIM, dropout_factory=None)
    rng = random.PRNGKey(0)
    key1, key2, key3 = random.split(rng, 3)
    x = random.normal(key1, (BATCH_SIZE, 8))
    y = random.normal(key2, (BATCH_SIZE, 8))
    z, _ = model.init_with_output(key3, x, y, enable_dropout=False)
    self.assertEqual(z.shape, (BATCH_SIZE, OUTPUT_DIM))
  def test_batch_dot_product(self):
    """Test if the BatchDotProduct similarity function has correct shapes."""
    rng = random.PRNGKey(0)
    key1, key2, key3 = random.split(rng, 3)
    x = random.normal(key1, (BATCH_SIZE, 8))
    y = random.normal(key2, (BATCH_SIZE, 8))
    model = similarity_functions.BatchDotProduct()
    z, _ = model.init_with_output(key3, x, y)
    # In-batch similarities: one score per (left, right) pair.
    self.assertEqual(z.shape, (BATCH_SIZE, BATCH_SIZE))
  def test_batch_dot_product_with_negative(self):
    """Test if the BatchDotProduct similarity function has correct shapes."""
    rng = random.PRNGKey(0)
    key1, key2, key3, key4 = random.split(rng, 4)
    left_encodings = random.normal(key1, (BATCH_SIZE, 8))
    right_encodings = random.normal(key2, (BATCH_SIZE, 8))
    right_negative_encodings = random.normal(key3, (BATCH_SIZE, 8))
    model = similarity_functions.BatchDotProduct()
    logits, _ = model.init_with_output(
        key4, left_encodings, right_encodings, right_negative_encodings
    )
    # The shape of logits equals [num_positive, num_positive + num_negative]
    # where both num_positive and num_negative equals BATCH_SIZE.
    self.assertEqual(logits.shape, (BATCH_SIZE, BATCH_SIZE + BATCH_SIZE))
  def test_batch_dot_product_with_negative_use_only_explicit_hard_negatives(
      self,
  ):
    """Test if the BatchDotProduct similarity function has correct shapes."""
    rng = random.PRNGKey(0)
    key1, key2, key3, key4 = random.split(rng, 4)
    left_encodings = random.normal(key1, (BATCH_SIZE, 8))
    right_encodings = random.normal(key2, (BATCH_SIZE, 8))
    right_negative_encodings = random.normal(key3, (BATCH_SIZE, 8))
    model = similarity_functions.BatchDotProduct(
        use_only_explicit_hard_negatives=True
    )
    logits, _ = model.init_with_output(
        key4, left_encodings, right_encodings, right_negative_encodings
    )
    # The shape of logits equals [num_positive, num_positive + 1]: each row
    # keeps only its own explicit hard negative.
    self.assertEqual(logits.shape, (BATCH_SIZE, BATCH_SIZE + 1))
  def test_pointwise_ffnn_with_multiple_layers(self):
    """Test the Multi-layer PointwiseFFNN has correct shapes."""
    model = similarity_functions.PointwiseFFNN(
        OUTPUT_DIM, dropout_factory=None, intermediate_features=[1024, 512, 55])
    rng = random.PRNGKey(0)
    key1, key2, key3 = random.split(rng, 3)
    x = random.normal(key1, (BATCH_SIZE, 8))
    y = random.normal(key2, (BATCH_SIZE, 8))
    z, _ = model.init_with_output(key3, x, y, enable_dropout=False)
    self.assertEqual(z.shape, (BATCH_SIZE, OUTPUT_DIM))
class SimilarityFunctionsParameterizedTest(
    tf.test.TestCase, parameterized.TestCase
):
  @parameterized.named_parameters(('pointwise', True), ('elementwise', False))
  def test_batch_attention_similarity(self, pointwise_similarity):
    """Test the BatchAttentionSimilarity."""
    model = make_batch_attention_similarity_model()
    if pointwise_similarity:
      batch_size_1 = BATCH_SIZE
      batch_size_2 = BATCH_SIZE
    else:
      # Elementwise similarity allows distinct batch sizes for the two towers.
      batch_size_1 = 2 * BATCH_SIZE
      batch_size_2 = 3 * BATCH_SIZE
    rng = random.PRNGKey(0)
    keys = random.split(rng, 3)
    # [batch, seq_len, encoding_dim]; both towers share encoding_dim = 23.
    left_encodings = random.normal(keys[0], (batch_size_1, 11, 23))
    right_encodings = random.normal(keys[1], (batch_size_2, 3, 23))
    left_mask = jnp.ones((batch_size_1, 11))
    right_mask = jnp.ones((batch_size_2, 3))
    output, _ = model.init_with_output(
        keys[2],
        left_encodings,
        right_encodings,
        left_mask,
        right_mask,
        enable_dropout=False,
        pointwise_similarity=pointwise_similarity,
    )
    if pointwise_similarity:
      self.assertEqual(output.shape, (batch_size_1,))
    else:
      self.assertEqual(output.shape, (batch_size_1, batch_size_2))
      # The following is to catch the ordering of the output, to ensure the
      # [i, j]-th element is the similarity calculated from
      # [left_encoding[i], right_encoding[j]].
      pointwise_output, _ = model.init_with_output(
          keys[2],
          left_encodings,
          right_encodings[:batch_size_1, :, :],
          left_mask,
          right_mask[:batch_size_1, :],
          enable_dropout=False,
          pointwise_similarity=True,
      )
      self.assertAllClose(jnp.diagonal(output), pointwise_output)
if __name__ == '__main__':
  absltest.main()
| 7,770 | 33.847534 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/poolings.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for pooling operations."""
import functools
from typing import Callable, Optional
from flax import linen as nn
from flax.linen import partitioning
from flax.linen.linear import default_kernel_init
import jax
from jax import lax
import jax.numpy as jnp
from flaxformer import activation_partitioning
from flaxformer import transformer_common as common
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.architectures.t5 import t5_common_layers
from flaxformer.components import dense
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
# Large negative additive bias used to suppress padded positions in softmax.
NEG_INF = -1e10
# Small constant guarding against division by zero in masked averaging.
EPSILON = 1e-10
def _gather_one_example(x: Array, idx: Array) -> Array:
  """Gathers the rows of a single (unbatched) example, i.e. x[idx].

  Args:
    x: A [num_in, ...] Array of data to gather from.
    idx: A [num_out] integer Array of row indices; every value is expected to
      lie in [0, num_in).

  Returns:
    A [num_out, ...] Array of gathered data.
  """
  return x[idx]


# Batched gather: maps `_gather_one_example` over the leading batch dimension
# of both arguments, taking [batch, num_in, ...] data and [batch, num_out]
# indices to [batch, num_out, ...] output.
batch_gather = jax.vmap(_gather_one_example, in_axes=[0, 0], out_axes=0)
class AttentionPooling(nn.Module):
  """Self attention pooling given a sequence of encodings.

  Learns a per-token score via a two-layer network, softmaxes the scores over
  the sequence dimension (masking out padding), and returns the weighted sum
  of the token encodings.

  Reference: https://arxiv.org/pdf/1712.02047.pdf.

  Attributes:
    kernel_init: Initializer for the dense layer kernel.
    dtype: The dtype of the computation (default: float32).
    act_fn: activation function applied between the two dense layers;
      'linear' means no activation.
  """
  kernel_init: Initializer = default_kernel_init  # pytype: disable=annotation-type-mismatch  # jax-types
  dtype: DType = jnp.float32
  act_fn: str = 'linear'
  @nn.compact
  def __call__(self, encoded_inputs: Array, input_masks: Array, **kwargs):
    """Apply attention pooling to the encoder output embeddings.

    Args:
      encoded_inputs: The inputs (e.g., token's embeddings) that come from the
        final layer of the encoder. <float32>[batch_size, seq_length,
        hidden_size].
      input_masks: The input masks that indicate the non padding position of the
        sequences. <float32>[batch_size, seq_length].
      **kwargs: Keyword based arguments, currently unused.

    Returns:
      An array of pooled encodings <float32>[batch_size, hidden_size].
    """
    encoding_size = encoded_inputs.shape[-1]
    attention_hidden = dense.DenseGeneral(
        features=encoding_size,
        use_bias=True,
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        kernel_axis_names=['embed', 'affinity'],
        name='attention_hidden',
    )(encoded_inputs)
    if self.act_fn != 'linear':
      attention_hidden = getattr(nn, self.act_fn)(attention_hidden)
    attention_logits = dense.DenseGeneral(
        features=encoding_size,
        use_bias=True,
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        kernel_axis_names=['embed', 'affinity'],
        name='attention_logits',
    )(attention_hidden)
    # Broadcast to the `hidden_size` dimension.
    input_masks = jnp.expand_dims(input_masks, axis=-1)
    # Padded positions receive a NEG_INF additive bias so their softmax
    # weights are effectively zero.
    attention_bias = lax.select(
        input_masks > 0,
        jnp.full(input_masks.shape, 0.0).astype(self.dtype),
        jnp.full(input_masks.shape, NEG_INF).astype(self.dtype),
    )
    logits = attention_logits + attention_bias
    # Softmax over the sequence dimension (axis=1).
    weights = jax.nn.softmax(logits, axis=1)
    encodings = jnp.sum(encoded_inputs * weights, axis=1)
    return encodings
class MultiHeadAttentionPooling(nn.Module):
  """Multihead attention pooling given a sequence of encodings.

  Implements multihead attention based pooling where query is a single
  learned vector and key/values are computed by projecting the encoded input.

  Attributes:
    num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
      should be divisible by the number of heads.
    head_dim: dimension of each attention head.
    layer_norm_factory: A callable that returns a layer norm.
    activation_partitioning_dims: When set to 2, partitions intermediate
      variables containing the input and output of the encoder layer.
    query_init: Initializer for the query vector.
    dropout_rate: dropout rate
    dtype: The dtype of the computation (default: float32).
  """
  num_heads: int
  head_dim: int
  layer_norm_factory: Callable[[], nn.Module]
  activation_partitioning_dims: int = 1
  query_init: Initializer = nn.initializers.zeros
  dropout_rate: float = 0.1
  dtype: DType = jnp.float32
  @nn.compact
  def __call__(
      self,
      encoded_inputs: Array,
      input_masks: Array,
      deterministic: bool = False,
  ):
    """Apply attention pooling to the encoder output embeddings.

    Args:
      encoded_inputs: The inputs (e.g., token's embeddings) that come from the
        final layer of the encoder. <float32>[batch_size, seq_length,
        hidden_size].
      input_masks: The input masks that indicate the non padding position of the
        sequences. <float32>[batch_size, seq_length].
      deterministic: Disables dropout if set to True.

    Returns:
      An array of pooled encodings <float32>[batch_size, hidden_size].
    """
    encoding_size = encoded_inputs.shape[-1]
    batch_size = encoded_inputs.shape[0]
    # Single learned query vector shared across the batch.
    query = partitioning.param_with_axes(
        'attention_query',
        self.query_init,
        (encoding_size,),
        self.dtype,
        axes=('embed',),
    )
    # [batch_size, 1 embedding_size]
    query_3d = jnp.tile(query, (batch_size, 1, 1))
    query_3d = activation_partitioning.with_sharding(
        query_3d, self.activation_partitioning_dims
    )
    x = self.layer_norm_factory()(query_3d)
    x = activation_partitioning.with_sharding(
        x, self.activation_partitioning_dims
    )
    # Also see the `attention_layer` function defined in
    # flaxformer/architectures/t5/t5_common_layers.
    # The query attends to every non-padded token of the encoded input.
    encoder_masks = dense_attention.make_attention_mask(
        jnp.ones([batch_size, 1], dtype=input_masks.dtype), input_masks
    )
    y = t5_common_layers.attention_layer(
        num_heads=self.num_heads,
        head_dim=self.head_dim,
        dropout_rate=self.dropout_rate,
        dtype=self.dtype,
    )(x, encoded_inputs, encoder_masks, enable_dropout=not deterministic)
    y = nn.Dropout(rate=self.dropout_rate, broadcast_dims=[])(
        y, deterministic=deterministic
    )
    # [batch_size, 1, embedding_size]
    y = activation_partitioning.with_sharding(
        y, self.activation_partitioning_dims
    )
    return jnp.reshape(y, (batch_size, encoding_size))
class MultiLayerPooling(nn.Module):
  """Multi-layer transformer pooling.

  Runs a small stack of encoder layers over the already-encoded input, then
  reduces the sequence to a single vector via an optional pooler (or by
  taking the first token's representation).

  Attributes:
    layer_factory: A callable that returns an EncoderLayer.
    layer_norm_factory: A callable that returns a layer norm.
    num_layers: Number of layers to generate.
    pooler_factory: Optional specialization of final pooling layer. If None,
      embedding representation for the first token is used as sequence
      representation.
    dtype: DType to cast the embedded inputs.
    shared_relative_position_bias_factory: A callable that returns a relative
      position bias instance which will be shared for all encoder layers. Only
      set this if using shared relative position biases.
  """
  layer_factory: t5_architecture.MakeEncoderLayerFn
  layer_norm_factory: Callable[[], nn.Module]
  num_layers: int
  pooler_factory: Optional[Callable[[], nn.Module]] = None
  dtype: DType = jnp.float32
  shared_relative_position_bias_factory: Optional[Callable[[], nn.Module]] = (
      None
  )
  def setup(self):
    # A single relative position bias instance is shared across all layers
    # when the factory is provided; otherwise each layer uses its own (None).
    self.relpos_bias = (
        self.shared_relative_position_bias_factory()  # pylint: disable=not-callable
        if self.shared_relative_position_bias_factory is not None
        else None
    )
    lyrf = lambda: self.layer_factory(  # pylint: disable=g-long-lambda
        shared_relative_position_bias=self.relpos_bias
    )
    self.layers = [lyrf() for _ in range(self.num_layers)]
    self.encoder = common.TransparentLayerSequence(self.layers)
    self.encoder_norm = self.layer_norm_factory()
    if self.pooler_factory:
      self.pooler = self.pooler_factory()  # pylint: disable=not-callable
  def __call__(
      self,
      encoded_inputs: Array,
      input_masks: Array,
      deterministic: bool = False,
  ):
    """Encodes the inputs through the layer stack and pools to one vector.

    Args:
      encoded_inputs: <float32>[batch_size, seq_length, hidden_size] encoder
        outputs to further transform.
      input_masks: <float32>[batch_size, seq_length] binary mask marking
        non-padding positions.
      deterministic: Disables dropout if set to True.

    Returns:
      An array of pooled encodings <float32>[batch_size, hidden_size].
    """
    encoder_mask = dense_attention.make_attention_mask(
        input_masks, input_masks, dtype=self.dtype
    )
    logit_mask = jnp.expand_dims(input_masks, axis=-1)
    encoded = self.encoder(
        encoded_inputs,
        encoder_mask=encoder_mask,
        logit_mask=logit_mask,
        enable_dropout=not deterministic,
    )
    encoded = self.encoder_norm(encoded)
    if self.pooler_factory:
      encodings = self.pooler(encoded, input_masks, deterministic=deterministic)
    else:
      # Fallback to use first token.
      encodings = encoded[:, 0, :]
    return encodings
class MeanPooling(nn.Module):
  """Masked mean pooling over the sequence dimension."""

  @nn.compact
  def __call__(self, encoded_inputs: Array, input_masks: Array, **kwargs):
    """Averages the non-padding token encodings of each sequence.

    Args:
      encoded_inputs: Token encodings from the final encoder layer,
        <float32>[batch_size, seq_length, hidden_size].
      input_masks: Binary mask marking non-padding positions,
        <float32>[batch_size, seq_length].
      **kwargs: Keyword arguments, currently unused.

    Returns:
      Pooled encodings <float32>[batch_size, hidden_size].
    """
    # Add a trailing axis so the mask broadcasts over `hidden_size`.
    masks = jnp.expand_dims(input_masks, axis=-1)
    masked_total = jnp.sum(encoded_inputs * masks, axis=1)
    # EPSILON guards against dividing by zero for all-padding sequences.
    token_count = jnp.maximum(masks.sum(axis=1), EPSILON)
    return masked_total / token_count
class MaxPooling(nn.Module):
  """Masked max pooling over the sequence dimension."""

  @nn.compact
  def __call__(self, encoded_inputs: Array, input_masks: Array, **kwargs):
    """Takes the element-wise max over each sequence's non-padding tokens.

    Args:
      encoded_inputs: Token encodings from the final encoder layer,
        <float32>[batch_size, seq_length, hidden_size].
      input_masks: Binary mask marking non-padding positions,
        <float32>[batch_size, seq_length].
      **kwargs: Keyword arguments, currently unused.

    Returns:
      Pooled encodings <float32>[batch_size, hidden_size].
    """
    # Add a trailing axis so the mask broadcasts over `hidden_size`.
    masks = jnp.expand_dims(input_masks, axis=-1)
    # Push padded positions to a very negative value so they never win the max.
    padding_fill = (1 - masks) * -1e9
    scores = encoded_inputs * masks + padding_fill
    return jnp.max(scores, 1)
class LastTokenPooling(nn.Module):
  """Returns the encoding of the last non-padding token of each sequence."""

  @nn.compact
  def __call__(self, encoded_inputs: Array, input_masks: Array, **kwargs):
    """Selects the last non-padding token's encoding per sequence.

    Args:
      encoded_inputs: Token encodings from the final encoder layer,
        <float32>[batch_size, seq_length, hidden_size].
      input_masks: Binary mask marking non-padding positions,
        <float32>[batch_size, seq_length].
      **kwargs: Keyword arguments, currently unused.

    Returns:
      Pooled encodings <float32>[batch_size, hidden_size].
    """
    # Number of non-padding tokens per sequence.
    token_counts = jnp.sum(input_masks, axis=1, dtype=jnp.int32)
    # Index of the last token; clamp at 0 for empty (all-padding) sequences.
    last_positions = jnp.asarray(
        jnp.maximum(token_counts - 1, 0), dtype=jnp.int32)
    return batch_gather(encoded_inputs, last_positions)
| 12,448 | 33.969101 | 105 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/single_tower_logit_functions.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logit functions for dual encoder models.
We define a variety of logit functions for dual encoder models. These
functions are similar to the similarity_functions, but are in this file because
they do not necessarily create logits for "similarity between 2 towers". Eg:
Creation of logits by applying fully connected layer on a single tower.
Pointwise logit_creation functions are computed just a single encoding and
return a vector of the same length as the batch size
LC[i] = logit_creation(encodings1[i]).
"""
from typing import Optional
from flaxformer.architectures.dual_encoder import similarity_functions
from flaxformer.types import Array
# TODO: Factor out the FFNN bits into a new class and use that
# from `PointwiseFFNN` and `SingleTowerPointwiseFFNN`
class SingleTowerPointwiseFFNN(similarity_functions.PointwiseFFNN):
  """Single Tower Pointwise feed-forward NN logit creation function.

  Attributes:
    tower_name: Which tower's encodings feed the FFNN: 'left' selects
      encodings1; any other value selects encodings2.
  """
  tower_name: str = 'left'

  def __call__(self,
               encodings1: Optional[Array] = None,
               encodings2: Optional[Array] = None,
               *,
               enable_dropout: bool = True) -> Array:
    """Apply fully connected layer on either tower to change dim to num_classes.

    Use this class if instead of using a similarity function between the logits
    of 2 towers, you want to apply a Fully Connected NN on top of an individual
    tower to create logits. Eg: Classification task on an individual tower.

    Args:
      encodings1: A 2-D tensor of (left) encodings with shape [batch size,
        encoding dim].
      encodings2: A 2-D tensor of (right) encodings with shape [batch size,
        encoding dim].
      enable_dropout: Whether to enable dropout layers.

    Returns:
      A 1-D tensor of logits with shape [B, num_classes].
    """
    chosen = encodings1 if self.tower_name == 'left' else encodings2
    return super().__call__(chosen, None, enable_dropout=enable_dropout)
| 2,595 | 37.176471 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/dual_encoder/l2_norm.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""L2 norm, which omits subtraction of mean or bias."""
from flax import linen as nn
from jax import lax
from jax import numpy as jnp
from flaxformer.types import Array
from flaxformer.types import DType
class L2Norm(nn.Module):
  """L2 normalization.

  Scales the last axis of the input to (approximately) unit L2 norm. Unlike
  layer norm, no mean subtraction and no learned bias/scale are applied.

  Attributes:
    epsilon: A small float added to variance to avoid dividing by zero.
    dtype: the dtype of the computation (default: float32).
  """
  epsilon: float = 1e-6
  dtype: DType = jnp.float32

  @nn.compact
  def __call__(self, x: Array) -> Array:
    """Applies l2 normalization on the input.

    Args:
      x: the inputs

    Returns:
      Normalized inputs (the same shape as inputs).
    """
    # Compute in float32 for numerical stability, then cast to self.dtype.
    inputs_f32 = jnp.asarray(x, jnp.float32)
    squared_norm = jnp.sum(lax.square(inputs_f32), axis=-1, keepdims=True)
    inverse_norm = lax.rsqrt(squared_norm + self.epsilon)
    return jnp.asarray(inputs_f32 * inverse_norm, self.dtype)
| 1,468 | 27.803922 | 74 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/hierarchical_relative_position_bias_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for hierarchical_relative_position_bias.py."""
from absl.testing import absltest
from absl.testing import parameterized
from jax import random
from flaxformer import testing_utils
from flaxformer.architectures.h_transformer import hierarchical_relative_position_bias as h_rpb
class RpbTest(parameterized.TestCase):
  """Test cases for HierarchicalRelativePositionBias."""

  def setUp(self):
    super().setUp()
    self.num_head = 2
    self.num_cluster = 4

  @parameterized.named_parameters(
      ('left_block', -1),
      ('right_block', 1),
      ('mid_block', 0),
  )
  def test_rpb_1d(self, block_coord: int):
    # Build the 1D relative position bias module and initialize it while
    # capturing its output for the given block coordinate.
    module_under_test = h_rpb.OneDimHierarchicalRelativePositionBias(
        num_cluster=self.num_cluster,
        num_head=self.num_head,
    )
    output, variables = module_under_test.init_with_output(
        random.PRNGKey(0), block_coord)
    # Leading two singleton dims broadcast over batch and block.
    self.assertEqual(
        output.shape,
        (1, 1, self.num_cluster, self.num_cluster, self.num_head))
    # Check the parameter dtype/shape/axis annotations.
    expected_param_specs = {
        '1d_relative_position_bias': [
            'float32',
            f'relpos_buckets={self.num_cluster*4-1}',
            f'heads={self.num_head}',
        ]
    }
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(variables['params'],
                                               variables['params_axes']),
        expected_param_specs)
# Standard absl test entry point: runs the test cases above when this file
# is executed as a script.
if __name__ == '__main__':
  absltest.main()
| 2,070 | 31.873016 | 95 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/token_hierarchy.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Token Hierarchy classes for the h-attention algorithm."""
import abc
import collections
import dataclasses
import enum
from typing import Dict, List, Optional, OrderedDict
from absl import logging
import gin
from jax import lax
import jax.numpy as jnp
import numpy as np
from flaxformer.types import Array
from flaxformer.types import DType
@enum.unique
class InputArrayName(enum.Enum):
  """Input array names.

  Identifies which attention input an array represents so that the token
  hierarchy can apply the matching coarsening strategy (see
  OneDimTokenHierarchy.hierarchical_coarsen).
  """
  QUERY = enum.auto()
  KEY = enum.auto()
  VALUE = enum.auto()
@enum.unique
class TokenBlockName(enum.Enum):
  """Token block names.

  At each level in the token hierarchy, the tokens are partitioned
  into equal-sized blocks. The attention among blocks corresponds
  to the attention matrix structure.
  """
  ANCHOR = enum.auto()  # Block offset 0 (see OneDimTokenHierarchy block_coord).
  # For one-dimension token sequences
  LEFT = enum.auto()  # Block offset -1 relative to the anchor block.
  RIGHT = enum.auto()  # Block offset +1 relative to the anchor block.
@dataclasses.dataclass()
class HierarchicalCoarsenResults:
  """Results of TokenHierarchy.hierarchical_coarsen().

  Attributes:
    packed_coarse_qkv: Coarsened Query/Key/Value arrays keyed by block name.
    packed_aggregated_key_padding_mask: Aggregated Key padding mask keyed by
      block name, or None when no padding mask was supplied.
  """
  packed_coarse_qkv: Dict[TokenBlockName, Array]
  packed_aggregated_key_padding_mask: Optional[Dict[TokenBlockName, Array]]
@dataclasses.dataclass()
class CoarsenPaddingMaskResults:
  """The results of coarsening the padding mask.

  Both fields are dicts keyed by hierarchy level. `denominator` is None when
  no padding mask was supplied (see _coarsen_padding_mask).

  Attributes:
    aggregated_padding_mask: The aggregated padding mask where each entry at
      level=k is the sum of a sub set of mask tokens at level=k-1.
    denominator: Each entry at level=k records the number of effective mask
      tokens at level=k-1 that contributes to the corresponding mask token in
      aggregated_padding_mask. For instance, if a sub set of mask tokens at
      level=k-1 is [1, 1, 0, 0], then the corresponding entry in denominator at
      level=k should be 2.
  """
  aggregated_padding_mask: Dict[int, Array]
  denominator: Optional[Dict[int, Array]]
@gin.constants_from_enum
class TokenCoarseningMethod(str, enum.Enum):
  """Names of the coarsening method (see OneDimTokenCoarsening.__call__)."""
  SAMPLE = 'sample'  # Keep only the first token of each cluster (causal-safe).
  SUM = 'sum'  # Sum the tokens within each cluster.
  CONST_AVERAGE = 'const_average'  # Sum, then divide by the coarsening ratio.
@gin.constants_from_enum
class ConvKernelType(str, enum.Enum):
  """Names of the convolution kernel type (see OneDimTokenInterpolation)."""
  CONST = 'const'  # All-ones kernel of length conv_kernel_size.
  LINEAR = 'linear'  # Fixed [0.5, 1.0, 0.5] kernel.
class OneDimTokenCoarsening:
  """Coarsening class for one-dimension sequence token hierarchy."""

  def __init__(
      self,
      method: TokenCoarseningMethod = TokenCoarseningMethod.SUM,
      coarsening_ratio: int = 2,
  ):
    """Initialize coarsening.

    Args:
      method: Coarsening method name.
      coarsening_ratio: The ratio of the token count at two adjacent levels.
        For instance, 2 means token count is reduced by a factor of 2 from
        level-k to level-(k+1) due to coarsening.
    """
    self._coarsening_ratio = coarsening_ratio
    self._method = method

  def __call__(self, inputs: Array) -> Array:
    """Coarsens or downscales sequence length by coarsening_ratio.

    Args:
      inputs: Input sequences, <float>[batch, seq_len, num_head, head_dim].

    Returns:
      Coarsened sequences, <float>[batch, seq_len//coarsening_ratio,
        num_head, head_dim].

    Raises:
      ValueError: This is triggered if sequence length does not divide
        coarsening_ratio.
    """
    batch, seq_len, num_head, head_dim = inputs.shape
    coarse_len, remainder = divmod(seq_len, self._coarsening_ratio)
    if remainder:
      raise ValueError(
          f'The sequence length {seq_len} does not divide coarsening_ratio '
          f'{self._coarsening_ratio}.'
      )
    # Group each run of coarsening_ratio tokens into its own axis.
    clustered = inputs.reshape(
        (batch, coarse_len, self._coarsening_ratio, num_head, head_dim))
    if self._method == TokenCoarseningMethod.SAMPLE:
      # For auto-regressive decoder, only sample the first position of
      # each block. This avoids leakage from future tokens.
      return clustered[:, :, 0, :, :]
    coarse = clustered.sum(axis=2, keepdims=False)
    if self._method == TokenCoarseningMethod.CONST_AVERAGE:
      coarse = coarse / self._coarsening_ratio
    return coarse
class OneDimTokenInterpolation:
  """Interpolation class for one-dimension sequence token hierarchy."""
  def __init__(
      self,
      conv_kernel_size: int = 2,
      conv_kernel_type: ConvKernelType = ConvKernelType.CONST,
      channel_dim: int = 1,
      interpolation_ratio: int = 2,
      dtype: DType = jnp.float32,
      use_edge_correction: bool = True,
  ):
    """Generates a static conv kernel for interpolation.
    Args:
      conv_kernel_size: Convolution kernel size used for interpolation.
      conv_kernel_type: Convolution kernel type for the interpolation.
      channel_dim: Size of Channel dimension.
      interpolation_ratio: The ratio of the token count at two adjacent levels.
        For instance, 2 means sequence length is increased by a factor of 2 from
        level-k to level-(k-1) due to interpolation.
      dtype: The dtype of the computation.
      use_edge_correction: Indicates if a correction is applied to the edge
        output entries.
    Raises:
      ValueError: This is triggered if method is not in the pre-determined set
        or coarsening_ratio is larger than conv_kernel_size.
    """
    if interpolation_ratio > conv_kernel_size:
      raise ValueError(
          f'interpolation_ratio {interpolation_ratio} is larger than '
          f'conv_kernel_size {conv_kernel_size}. This means some tokens '
          'will not be included in the interpolation and the final output will '
          'be wrong.'
      )
    self._interpolation_ratio = interpolation_ratio
    self._use_edge_correction = use_edge_correction
    self._conv_kernel_type = conv_kernel_type
    # The kernel weights are fixed (not learned); LINEAR ignores
    # conv_kernel_size and always uses a length-3 kernel.
    if conv_kernel_type == ConvKernelType.CONST:
      kernel = np.ones((conv_kernel_size,))
    elif conv_kernel_type == ConvKernelType.LINEAR:
      kernel = np.array([0.5, 1.0, 0.5])
    else:
      raise ValueError(f'Unsupported conv_kernel_type {conv_kernel_type}.')
    # Shape [width, in_channel=1, out_channel]; replicated per channel so the
    # depthwise conv below treats every channel independently.
    kernel = jnp.array(kernel[:, None, None], dtype=dtype)
    self._conv_kernel = (
        jnp.repeat(kernel, channel_dim, axis=2) if channel_dim > 1 else kernel
    )
  def __call__(self, inputs: Array) -> Array:
    """Interpolates or upscales sequence length by 2x.
    Args:
      inputs: Input sequences with shape <float>[batch, seq_len, channel_dim].
    Returns:
      Interpolated embeddings with shape <float>[batch, 2*seq_len, channel_dim].
    """
    dn = lax.conv_dimension_numbers(
        inputs.shape, self._conv_kernel.shape, ('NHC', 'HIO', 'NHC')
    )
    kernel_size = self._conv_kernel.shape[0]
    padding = ((kernel_size - 1, kernel_size - 1),)
    # lhs_dilation inserts zeros between input tokens, i.e. this is a
    # transposed convolution that upscales the sequence length by
    # interpolation_ratio.
    lhs_dilation = (self._interpolation_ratio,)
    y = lax.conv_general_dilated(
        inputs.astype(jnp.float32),
        self._conv_kernel.astype(jnp.float32),
        (1,),
        padding=padding,
        lhs_dilation=lhs_dilation,
        rhs_dilation=(1,),
        dimension_numbers=dn,
        # feature_group_count == channel_dim makes the conv depthwise.
        feature_group_count=inputs.shape[-1],
    )
    if self._conv_kernel_type == ConvKernelType.LINEAR:
      # Linear conv_kernel can potentially improve accuracy. But it has two
      # issues:
      # 1) It expands the output sequence length by one at both left and right
      #    end. This makes the output seq_len larger by one than necessary. So
      #    we need to truncate the first token in the output.
      # 2) The original sequence is zero padded before the conv kernel is
      #    applied. This makes the edge entries on both ends in the output less
      #    accurate than those obtained with the constant conv_kernel. We can
      #    compensate this artifact by simply multiplying the last entry by 2.
      #    See the unit test for this effect.
      output_sequence_length = inputs.shape[1] * 2
      y = y[:, -output_sequence_length:]
      if self._use_edge_correction:
        correction = np.ones((output_sequence_length,))
        correction[-1] = 2
        correction = jnp.array(correction[None, :, None])
        y *= correction
    return y
class TokenHierarchy(metaclass=abc.ABCMeta):
  """Base class for the Token Hierarchy."""
  def __init__(
      self,
      seq_len: int,
      num_cluster: int = 2,
      conv_kernel_size: int = 2,
      interpolation_kernel_type: ConvKernelType = ConvKernelType.LINEAR,
      for_self_attention: bool = True,
      causal_mask: bool = False,
      token_ratio: int = 2,
      dtype: DType = jnp.float32,
  ):
    """Initializes class attributes.
    Args:
      seq_len: Sequence length.
      num_cluster: Number of clusters at each level in the hierarchy.
      conv_kernel_size: Size of convolution kernels.
      interpolation_kernel_type: Type of interpolation convolution kernels.
      for_self_attention: This indicates if this is for the self attention.
      causal_mask: This specifies whether to apply a causal mask on the
        attention weights. If True, the output at timestep `t` will not depend
        on inputs at timesteps strictly greater than `t`.
      token_ratio: The ratio of the token count at two adjacent levels. For
        instance, 2 means token count is reduced by a factor of 2 from level-k
        to level-(k+1) due to coarsening.
      dtype: The dtype of the computation.
    Raises:
      ValueError: This is triggered if num_cluster is not an even number or
        seq_len and num_cluster are incompatible.
    """
    self._conv_kernel_size = conv_kernel_size
    self._interpolation_kernel_type = interpolation_kernel_type
    self._for_self_attention = for_self_attention
    self._causal_mask = causal_mask
    self._token_ratio = token_ratio
    self._dtype = dtype
    if num_cluster % 2 != 0:
      raise ValueError('num_cluster must be an even number.')
    max_num_cluster = seq_len // 2
    if max_num_cluster <= 1:
      raise ValueError(
          'max_num_cluster must be larger than 1; instead got '
          f'max_num_cluster={max_num_cluster}. This is caused '
          'by a small input length = 2.'
      )
    # num_cluster is silently clamped so that level 0 has at least 2 blocks.
    self._num_cluster = min(num_cluster, max_num_cluster)
    if self._num_cluster != num_cluster:
      logging.info(
          'num_cluster is reset from %d to %d because max_num_cluster = %d',
          num_cluster,
          self._num_cluster,
          max_num_cluster,
      )
    self._num_block_leaf_level = seq_len // self._num_cluster
    if self._num_block_leaf_level * self._num_cluster != seq_len:
      raise ValueError(
          'seq_len must be divisible by num_cluster; instead got '
          f'(seq_len, num_cluster)={seq_len, self._num_cluster}.'
      )
    if self._num_block_leaf_level < 2:
      raise ValueError(
          'num_block=seq_len/num_cluster>=2 is required; '
          f'instead got num_block={self._num_block_leaf_level}'
      )
    self._num_level = int(np.log2(self._num_block_leaf_level))
    if np.exp2(self._num_level) != self._num_block_leaf_level:
      raise ValueError(
          'num_block=seq_len/num_cluster must be power of 2;'
          f' instead got num_block={self._num_block_leaf_level}'
      )
    # The block count halves at each coarser level.
    self._num_block = [self._num_block_leaf_level] * self._num_level
    for level in range(1, self._num_level):
      self._num_block[level] = self._num_block[level - 1] // 2
    self._setup_block_hierarchy()
    logging.info('seq_len = %d', seq_len)
    logging.info('num_cluster = %d', self._num_cluster)
    logging.info('num_level = %d', self._num_level)
    logging.info('num_block = %s', self._num_block)
    logging.info('total_num_block = %d', self._total_num_block)
    logging.info('num_fine_block = %d', self._num_fine_block)
    logging.info('num_coarse_block = %d', self._num_coarse_block)
  def _setup_block_hierarchy(self):
    # Placeholder defaults; concrete subclasses overwrite these bookkeeping
    # attributes in their own _setup_block_hierarchy() (see
    # OneDimTokenHierarchy).
    self._num_fine_block = 0
    self._num_coarse_block = 0
    self._total_num_block = 0
    self._level_end_coarse_block_idx = []
    self._block_coord = OrderedDict()
    self._causal_block_names = []
    self._neighbor_block_names = []
  @abc.abstractmethod
  def hierarchical_coarsen(
      self,
      inputs: Array,
      input_array_name: InputArrayName = InputArrayName.QUERY,
      padding_mask: Optional[Array] = None,
  ) -> HierarchicalCoarsenResults:
    """Hierarchically coarsens inputs level by level.
    Args:
      inputs: Input Query/Key/Value.
      input_array_name: The name of the inputs.
      padding_mask: Query/Key padding mask.
    Returns:
      Packed coarse Query/Key/Value and optional packed coarse padding mask.
    """
  @abc.abstractmethod
  def interpolate_cumulative_sum(self, coarse_y: Array) -> Array:
    """Interpolates and cumulatively sums over all levels.
    This function performs two tasks: 1) Interpolates from coarse grid to fine
    grid level-by-level, starting from the coarsest level. 2) Cumulatively
    sums the interpolated results at each level.
    Args:
      coarse_y: Packed and coarsened embeddings.
    Returns:
      Interpolated and cumulatively summed embeddings over all levels.
    """
  @abc.abstractmethod
  def recover_input_shape(self, packed_coarse_qkv: Array, level: int) -> Array:
    """Recovers from blockwise partitioned shape to the input shape.
    Args:
      packed_coarse_qkv: Packed coarse qkv at the specified level.
      level: The hierarchy level where the packed_coarse_y sits.
    Returns:
      Reshaped coarse_qkv.
    """
  # Read-only views of the hierarchy bookkeeping computed in __init__ and
  # _setup_block_hierarchy().
  @property
  def block_coord(self) -> OrderedDict[TokenBlockName, int]:
    return self._block_coord
  @property
  def block_names(self) -> List[TokenBlockName]:
    # Causal attention only needs the anchor and left blocks.
    if self._causal_mask:
      return self._causal_block_names
    else:
      return list(self._block_coord.keys())
  @property
  def neighbor_block_names(self) -> List[TokenBlockName]:
    return self._neighbor_block_names
  @property
  def num_cluster(self) -> int:
    return self._num_cluster
  @property
  def num_block_cluster(self) -> int:
    return self._num_cluster
  @property
  def num_level(self) -> int:
    return self._num_level
  @property
  def num_block(self) -> List[int]:
    return self._num_block
  @property
  def num_fine_block(self) -> int:
    return self._num_fine_block
  @property
  def num_coarse_block(self) -> int:
    return self._num_coarse_block
  @property
  def total_num_block(self) -> int:
    return self._total_num_block
  @property
  def level_end_coarse_block_idx(self) -> List[int]:
    return self._level_end_coarse_block_idx
  @property
  def growth_factor(self) -> float:
    return self._token_ratio
  def gen_packed_zero_block_mask(
      self,
      batch_size: int,
      use_growth_factor: bool = False,
      trailing_ndim: int = 2,
  ) -> Dict[TokenBlockName, Array]:
    """Generates blockwise zero mask pattern in packed form.
    Args:
      batch_size: The batch size for training data.
      use_growth_factor: This indicates if the block entries at each level is
        enlarged by a factor exponential to the hierarchy level.
      trailing_ndim: Number of dimensions after the block dim. See notes below.
    Returns:
      Packed zero block mask: Dict with key=block_name, and value array has
        shape <float>[batch, packed_dim, num_cluster=1, num_head=1]
        or [batch, packed_dim, num_cluster=1, num_head=1, head_dim=1]
        or [batch, packed_dim, num_cluster=1, num_cluster=1, head_dim=1].
        The shapes are simply [batch, packed_dim, 1, 1] or
        [batch, packed_dim, 1, 1, 1].
    Note:
      This mask is used to zero out blocks in three types of arrays:
      1) The coarse token blocks with shape [batch, num_block, num_cluster,
         num_head, head_dim];
      2) The relative position bias with the shape [batch, num_block,
         num_cluster, num_cluster, num_head];
      3) The attention matrix row sum as softmax partition with the shape
         [batch, num_block, num_cluster, num_head].
      The mask shape should be compatible with them.
      The final shapes for the three types are different only in the number of
      trailing dimensions, 2 vs. 3. Hence we just need a flag to differentiate
      them.
    """
    growth_factor = self.growth_factor if use_growth_factor else 1
    packed_zero_block_mask = {}
    for block_name in self.block_names:
      # The anchor (diagonal) block is never masked out.
      if block_name == TokenBlockName.ANCHOR:
        continue
      block_mask_list = []
      scalar = 1.0
      for level in range(self.num_level):
        block_mask = self._gen_zero_block_mask(level, block_name, batch_size)
        # scalar grows by growth_factor per level when use_growth_factor=True.
        if scalar > 1.0:
          block_mask *= scalar
        block_mask_list.append(block_mask)
        scalar *= growth_factor
      packed_block_mask = jnp.concatenate(block_mask_list, axis=1)
      if trailing_ndim == 2:
        packed_block_mask = packed_block_mask[:, :, None, None]
      else:
        packed_block_mask = packed_block_mask[:, :, None, None, None]
      packed_zero_block_mask[block_name] = packed_block_mask
    return packed_zero_block_mask
  @abc.abstractmethod
  def _gen_zero_block_mask(
      self,
      level: int,
      block_name: TokenBlockName,
      batch_size: int,
  ) -> Array:
    """Generates a block mask.
    Args:
      level: The specified level in the hierarchy.
      block_name: The generated mask is for the token/attention block with this
        name.
      batch_size: The batch size for training data.
    Returns:
      Generated zero block mask with shape <float32>[batch_size, num_block]
    """
def _shift_blocks_1d(input_array: Array, block_name: TokenBlockName) -> Array:
  """Shifts array blocks along axis=1.

  Args:
    input_array: Input array with shape <float>[batch, num_block, ...].
    block_name: The name of the token block.

  Returns:
    Shifted array with the same shape as that of input_array.
  """
  if block_name not in (TokenBlockName.LEFT, TokenBlockName.RIGHT):
    # No shift for other block names (e.g. the anchor block).
    return input_array
  # A single all-zero block used to fill the vacated slot.
  padding_shape = (input_array.shape[0], 1) + tuple(input_array.shape[2:])
  zero_block = jnp.zeros(padding_shape, dtype=jnp.float32)
  if block_name == TokenBlockName.LEFT:
    # Shift top blocks downward and fill the vacancy with zero blocks.
    # This pushes out the bottom block.
    return jnp.concatenate((zero_block, input_array[:, :-1]), axis=1)
  # Shift bottom blocks upward and fill the vacancy with zero blocks.
  # This pushes out the top block.
  return jnp.concatenate((input_array[:, 1:], zero_block), axis=1)
class OneDimTokenHierarchy(TokenHierarchy):
"""Token hierarchy for one-dimensional sequences.
See arxiv.org/abs/2107.11906 for details on token hierarchy
for one-dimensional sequences.
"""
def _setup_block_hierarchy(self):
"""Sets up block hierarchy."""
# These are for bookkeeping.
self._num_fine_block = self._num_block[0]
self._num_coarse_block = sum(self._num_block[1:])
self._total_num_block = sum(self._num_block)
# This is used to access h-attention blocks.
self._block_coord = collections.OrderedDict({
TokenBlockName.ANCHOR: 0,
TokenBlockName.LEFT: -1,
TokenBlockName.RIGHT: 1,
})
self._causal_block_names = [TokenBlockName.ANCHOR, TokenBlockName.LEFT]
self._neighbor_block_names = list(self.block_names)
self._neighbor_block_names.remove(TokenBlockName.ANCHOR)
# This is used to unpack multilevel blocks.
self._level_end_coarse_block_idx = [0] * self._num_level
for level in range(1, self._num_level):
self._level_end_coarse_block_idx[level] = (
self._level_end_coarse_block_idx[level - 1] + self._num_block[level]
)
def hierarchical_coarsen(
self,
inputs: Array,
input_array_name: InputArrayName = InputArrayName.QUERY,
padding_mask: Optional[Array] = None,
) -> HierarchicalCoarsenResults:
"""Hierarchically coarsens inputs level by level.
Args:
inputs: Query/Key/Value, <float>[batch, seq_len, num_head, head_dim].
input_array_name: The name of the inputs.
padding_mask: padding mask, <float>[batch, seq_len, 1].
Returns:
packed_coarse_qkv: Packed and coarsened Query/Key/Value.
key: TokenBlockName.ANCHOR
value: <float>[batch, num_block[0], num_cluster, num_head, head_dim].
key: TokenBlockName.LEFT
value: <float>[batch, packed_dim, num_cluster, num_head, head_dim].
key: TokenBlockName.RIGHT
value: <float>[batch, packed_dim, num_cluster, num_head, head_dim].
Packed aggregated Key padding mask.
It has the same key-value pair as packed_coarse_qkv
Raises:
ValueError: This is triggered when inputs_q, query_padding_mask or
key_padding_mask has the wrong rank.
"""
if inputs.ndim != 4:
raise ValueError(f'inputs rank={inputs.ndim}, it must be 4.')
if padding_mask is not None:
if padding_mask.ndim != 3:
raise ValueError(
f'padding_mask rank = {padding_mask.ndim}, it must be 3.'
)
padding_mask = padding_mask[..., None]
decoder_only = self._for_self_attention and self._causal_mask
encoder_only = self._for_self_attention and not self._causal_mask
cross_attention = not self._for_self_attention
aggregated_padding_mask = None
if input_array_name == InputArrayName.QUERY:
if encoder_only or cross_attention:
# In both cases, q_denominator is needed for coarsening query.
coarse_mask_results = self._coarsen_padding_mask(
padding_mask, need_aggregation=False
)
q_denominator = coarse_mask_results.denominator
else:
q_denominator = None
# For auto-regressive decoder, only sample the first position of each
# coarsening cluster. This avoids the leakage from future positions.
coarse_qkv = self._coarsen_query_or_key(
inputs, denominator=q_denominator, use_sample=decoder_only
)
elif input_array_name == InputArrayName.KEY:
coarse_mask_results = self._coarsen_padding_mask(
padding_mask, need_aggregation=True
)
aggregated_padding_mask = coarse_mask_results.aggregated_padding_mask
coarse_qkv = self._coarsen_query_or_key(
inputs, denominator=coarse_mask_results.denominator, use_sample=False
)
else:
coarse_qkv = self._coarsen_value(inputs)
coarse_qkv = self._partition_sequences(coarse_qkv)
packed_coarse_qkv = self._pack_coarse_qkv(
coarse_qkv, input_name=input_array_name
)
if aggregated_padding_mask is not None:
aggregated_padding_mask = self._partition_sequences(
aggregated_padding_mask
)
packed_aggregated_padding_mask = self._pack_coarse_qkv(
aggregated_padding_mask, input_name=InputArrayName.KEY
)
else:
packed_aggregated_padding_mask = None
return HierarchicalCoarsenResults(
packed_coarse_qkv=packed_coarse_qkv,
packed_aggregated_key_padding_mask=packed_aggregated_padding_mask,
)
def recover_input_shape(self, packed_coarse_qkv: Array, level: int) -> Array:
"""Recovers from blockwise partitioned shape to the input sequence shape.
Args:
packed_coarse_qkv: Packed coarse qkv with shape <float>[batch, num_block,
num_cluster, features] or <float>[batch, num_block, num_cluster,
num_head, head_dim].
level: The hierarchy level where the packed_coarse_qkv sits.
Returns:
Reshaped coarse_qkv with shape <float>[batch, seq_len, features], where
seq_len = num_block * num_cluster,
features = num_head * head_dim
"""
if packed_coarse_qkv.ndim == 4:
(batch, _, num_cluster, features) = packed_coarse_qkv.shape
channel_dim = features
else:
(batch, _, num_cluster, num_head, head_dim) = packed_coarse_qkv.shape
channel_dim = num_head * head_dim
num_block = self.num_block[level]
new_shape = tuple((batch, num_block * num_cluster, channel_dim))
return packed_coarse_qkv.reshape(new_shape)
def interpolate_cumulative_sum(self, coarse_y: Array) -> Array:
"""Interpolates and cumulatively sums over all levels.
Args:
coarse_y: Packed and coarsened embeddings with shape <float>[batch,
num_coarse_block, num_cluster, features] or <float>[batch,
num_coarse_block, num_cluster, num_head, head_dim].
Returns:
Interpolated and cumulatively summed embeddings over all levels.
Its shape is <float>[batch, seq_len, features], where
seq_len = num_block[0] * num_cluster,
features = num_head * head_dim
"""
if coarse_y.ndim == 4:
channel_dim = coarse_y.shape[-1]
else:
# Flatten the last two dims since conv_general() inside interpolation
# only allows one channel_dim.
(num_head, head_dim) = coarse_y.shape[-2:]
channel_dim = num_head * head_dim
interpolation_fn = OneDimTokenInterpolation(
conv_kernel_size=self._conv_kernel_size,
conv_kernel_type=self._interpolation_kernel_type,
channel_dim=channel_dim,
interpolation_ratio=self._token_ratio,
)
cumulative_sum = 0.0 # Default value in case num_level==1.
for level in range(self.num_level - 1, 0, -1):
level_start = self.level_end_coarse_block_idx[level - 1]
level_end = self.level_end_coarse_block_idx[level]
current_level_coarse_y = self.recover_input_shape(
coarse_y[:, level_start:level_end], level
)
if level == self.num_level - 1:
# This starts the cumsum. So only assignment is done.
cumulative_sum = current_level_coarse_y
else:
cumulative_sum += current_level_coarse_y
cumulative_sum = interpolation_fn(cumulative_sum)
return cumulative_sum # pytype: disable=bad-return-type # jax-ndarray
def _coarsen_padding_mask(
self, padding_mask: Optional[Array], need_aggregation: bool = False
) -> CoarsenPaddingMaskResults:
"""Coarsens padding mask.
Args:
padding_mask: Query or Key/Value padding mask, <float>[batch, seq_len, 1].
need_aggregation: This indicates if aggregated padding mask is needed.
Returns:
aggregated_padding_mask: Aggregated padding mask.
key: level
value: <float>[batch, seq_len[level], 1]
denominator: The denominator to be used for normalization.
key: level
value: <float>[batch, seq_len[level], 1]
"""
if padding_mask is None:
return CoarsenPaddingMaskResults( # pytype: disable=wrong-arg-types # jax-ndarray
aggregated_padding_mask=padding_mask, denominator=None
)
coarsening_fn = OneDimTokenCoarsening(
method=TokenCoarseningMethod.SUM, coarsening_ratio=self._token_ratio
)
coarse_padding_mask = {0: padding_mask}
if need_aggregation:
aggregated_padding_mask = {0: coarse_padding_mask[0]}
denominator = {}
for level in range(1, self.num_level):
coarse_padding_mask[level] = coarsening_fn(coarse_padding_mask[level - 1])
# Sets zero entries to ones to avoid divide-by-zero later.
# Note: No need for denominator[0] since it will not be used.
denominator[level] = lax.select(
coarse_padding_mask[level] > 0,
coarse_padding_mask[level].astype(self._dtype),
jnp.ones(coarse_padding_mask[level].shape, dtype=self._dtype),
)
if need_aggregation:
if level == 1:
aggregated_padding_mask[level] = coarse_padding_mask[level]
else:
aggregated_padding_mask[level] = coarsening_fn(
aggregated_padding_mask[level - 1]
)
# Sets entries to 1/0 so that coarse_padding_mask at each level is still
# a binary mask. This is important to get the correct denominator.
# Note: No need to treat level-0 padding since it has not been aggregated.
coarse_padding_mask[level] = lax.select(
coarse_padding_mask[level] > 0,
jnp.ones(coarse_padding_mask[level].shape, dtype=self._dtype),
jnp.zeros(coarse_padding_mask[level].shape, dtype=self._dtype),
)
if not need_aggregation:
aggregated_padding_mask = None
return CoarsenPaddingMaskResults(
aggregated_padding_mask=aggregated_padding_mask, denominator=denominator
)
def _coarsen_query_or_key(
self,
inputs_qk: Array,
denominator: Optional[Array],
use_sample: bool = False,
) -> Dict[int, Array]:
"""Coarsens Query or Key.
Args:
inputs_qk: Query or Key, <float32>[batch, seq_len, num_head, head_dim].
denominator: <float32>[batch, seq_len, 1, 1].
use_sample: bool, indicating if sampling is used to coarsen.
Returns:
coarse_qk: Coarsened Query or Key.
key: level
value: <float>[batch, seq_len[level], num_head, head_dim]
"""
if use_sample:
method = TokenCoarseningMethod.SAMPLE
else:
if denominator is None:
method = TokenCoarseningMethod.CONST_AVERAGE
else:
method = TokenCoarseningMethod.SUM
coarsening_fn = OneDimTokenCoarsening(
method=method, coarsening_ratio=self._token_ratio
)
coarse_qk = {0: inputs_qk}
for level in range(1, self.num_level):
coarse_qk[level] = coarsening_fn(coarse_qk[level - 1])
if not use_sample and denominator is not None:
coarse_qk[level] /= denominator[level]
return coarse_qk
def _coarsen_value(self, inputs: Array) -> Dict[int, Array]:
"""Coarsens Value.
Args:
inputs: Value, <float32>[batch, seq_len, num_head, head_dim].
Returns:
coarse_v: Coarsened Value.
key: level
value: <float>[batch, seq_len[level], num_head, head_dim]
"""
coarsening_fn = OneDimTokenCoarsening(
method=TokenCoarseningMethod.SUM, coarsening_ratio=self._token_ratio
)
coarse_v = {0: inputs}
for level in range(1, self.num_level):
coarse_v[level] = coarsening_fn(coarse_v[level - 1])
return coarse_v
def _pack_coarse_qkv(
self, coarse_qkv: Dict[int, Array], input_name: InputArrayName
) -> Dict[TokenBlockName, Array]:
"""Packs coarse Query/Key/Value.
Args:
coarse_qkv: Coarse Query/Key/Value at multiple levels in a dict, where key
is a specific level, value is an array with shape <float>[batch,
seq_len[level], feature_dim]
input_name: Indicates if coarse_qkv is Query, Key or Value.
Returns:
packed_coarse_qkv: Packed coarse Query/Key/Value in a dict
key: TokenBlockName.ANCHOR,
value: Original Query/Key/Value at level=0. This is for computing
or being multiplied by attention[TokenBlockName.ANCHOR]. Its shape
is <float>[batch, num_block[0], num_cluster, feature_dim].
key: TokenBlockName.LEFT,
value: Coarsened Query/Key/Value at all levels. This is for
computing or being multiplied by attention[TokenBlockName.LEFT].
Its shape is <float>[batch, packed_dim, num_cluster, feature_dim].
If causal_mask==False, there is one more (key, value) pair.
key: TokenBlockName.RIGHT,
value: Coarsened Query/Key/Value at all levels. This is for
computing or being multiplied by attention[TokenBlockName.RIGHT].
Its shape is <float>[batch, packed_dim, num_cluster, feature_dim].
"""
to_replace = input_name == InputArrayName.QUERY
if to_replace:
batch_size = coarse_qkv[0].shape[0]
packed_zero_block_mask = self.gen_packed_zero_block_mask(
batch_size=batch_size, use_growth_factor=False, trailing_ndim=3
)
packed_coarse_qkv = {}
for block_name in self.block_names:
if block_name == TokenBlockName.ANCHOR:
packed_coarse_qkv[TokenBlockName.ANCHOR] = coarse_qkv[0]
else:
packed_list = []
for level in range(self.num_level):
if to_replace:
packed_list.append(coarse_qkv[level])
else:
packed_list.append(_shift_blocks_1d(coarse_qkv[level], block_name))
packed_coarse_qkv[block_name] = jnp.concatenate(packed_list, axis=1)
if to_replace:
packed_coarse_qkv[block_name] *= packed_zero_block_mask[block_name]
return packed_coarse_qkv
def _partition_sequences(
self, coarse_qkv: Dict[int, Array]
) -> Dict[int, Array]:
"""Partitions sequences at each level of the hierarchy into blockwise shape.
Args:
coarse_qkv: Coarse Query/Key/Value, where key=level, value=array with
shape <float>[batch, seq_len[level], num_head, head_dim].
Returns:
Partitioned Coarse Query/Key/Value.
"""
(batch, _, num_head, head_dim) = coarse_qkv[0].shape
reshaped_coarse_qkv = {}
for level in range(self.num_level):
new_shape = (
batch,
self.num_block[level],
self.num_cluster,
num_head,
head_dim,
)
reshaped_coarse_qkv[level] = coarse_qkv[level].reshape(new_shape)
return reshaped_coarse_qkv
def _gen_zero_block_mask(
    self,
    level: int,
    block_name: TokenBlockName,
    batch_size: int,
) -> Array:
  """Generates a block mask.

  The mask zeroes out the single boundary block that has no neighbor in the
  direction indicated by `block_name`: the first block for a left neighbor
  (coordinate -1) and the last block for a right neighbor (coordinate +1).

  Args:
    level: The specified level in the hierarchy.
    block_name: The generated mask is for the token/attention block with this
      name.
    batch_size: The batch size for training data.

  Returns:
    Generated zero block mask with shape <float32>[batch_size, num_block]
  """
  direction = self.block_coord[block_name]
  mask = np.ones((batch_size, self.num_block[level]), dtype=self._dtype)
  if direction == -1:
    # Leftmost block has no left neighbor.
    mask[:, 0] = 0.0
  elif direction == 1:
    # Rightmost block has no right neighbor.
    mask[:, -1] = 0.0
  return jnp.array(mask)
| 34,032 | 34.525052 | 89 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/h_attention.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Hierarchical attention classes."""
import abc
import enum
import functools
from typing import Any, Callable, Dict, Optional, Union, Tuple
from absl import logging
from flax import linen as nn
import gin
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer.architectures.h_transformer import hierarchical_relative_position_bias as h_rpb
from flaxformer.architectures.h_transformer import partitioning
from flaxformer.architectures.h_transformer import token_hierarchy as th
from flaxformer.components import dense
from flaxformer.types import Array
from flaxformer.types import Initializer
from flaxformer.types import PRNGKey
AXES = partitioning.AxisName
@gin.constants_from_enum
class MaxSimilarityMode(str, enum.Enum):
  """Names of the mode for finding max similarity.

  The mode controls how the similarity-normalization offset is estimated
  before the exponentiation in the softmax (see _find_similarity_offset).
  """
  # Samples only entry (0, 0) of each anchor block; cheapest, least accurate.
  SAMPLE_ANCHOR = 'sample_anchor'
  # Scans the full anchor (diagonal) blocks.
  SCAN_ANCHOR = 'scan_anchor'
  # Scans every attention block; most accurate, most expensive.
  SCAN_ALL = 'scan_all'
class HierarchicalAttention(nn.Module, metaclass=abc.ABCMeta):
  """Hierarchical attention base class.

  This computes hierarchical multi-head dot-product attention with linear
  complexity in memory usage and runtime.

  This class can be used for encoder-only or decoder-only by giving the same
  inputs_kv and inputs_q in the call parameters. The attribute causal_mask is
  to be used to separate these two cases.

  It can also be used for encoder-decoder cross attention by giving different
  inputs_kv and inputs_q in the call parameters. Note that the code assumes
  that the Query and Key/Value have the same spatial size (after padding)
  and they share the same token hierarchy. Hence this cross attention can
  be applied to the tasks like machine translation, but not to the tasks
  like summarization.

  Attributes:
    num_heads: Number of attention heads. Features (i.e. inputs_q.shape[-1])
      should be divisible by the number of heads.
    num_clusters: Number of clusters at each level in the hierarchy. At level=0,
      this is the diagonal block size in the attention matrix.
    causal_mask: This specifies whether to apply a causal mask on the attention
      weights. If True, the output at timestep `t` will not depend on inputs at
      timesteps strictly greater than `t`.
    dtype: The dtype of the computation.
    qkv_features: Feature dimension of the key, query, and value.
    out_features: Feature dimension of the output updated value.
    broadcast_dropout: This indicates if a broadcasted dropout for attention
      weights is used along the batch dimension.
    dropout_rate: Dropout rate.
    precision: Numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_init: Initializer for the kernel of the Dense layers.
    bias_init: Initializer for the bias of the Dense layers.
    use_bias: Whether the Dense layers use bias.
    split_head_kernel: whether to store QKVO projection kernels with a split
      head dimension. Default to False so the kernels are stored in 2D shape for
      the compatibility with Adafactor optimizer.
    rescale_logits: bool. Whether to explicitly rescale `query` logits by
      1/sqrt(depth_kq). Default is to do this implicitly by folding the
      rescaling into query_kernel_init.
    sharding_over_head_dimension: Whether to shard over the head dimension.
      Setting this to False when the number of heads is not divisible your
      activation num_partitions.
    use_rpb: Whether the hierarchical relative position bias is used. Default to
      True because this setting delivers better results.
    use_multihead_rpb: Whether the hierarchical relative position bias is
      different among multihead. If False, the same relative position bias is
      shared among all heads. Default to True so the bias array is stored in 2D
      shape for the compatibility with Adafactor optimizer.
    conv_kernel_size: Convolution kernel size used for interpolation. This is
      not used during interpolation if the attribute
      interpolation_kernel_type=ConvKernelType.LINEAR since the kernel size is
      fixed at 3.
    interpolation_kernel_type: Type of interpolation convolution kernels.
    use_mxu: Indicates if MXU function einsum is used.
    max_similarity_mode: Name of the mode to find max similarity.
    max_similarity_factor: This is a buffer factor to amplify the max similariy
      found for the anchor similarity block. We need a larger-than-one factor to
      approximate the global maximum similarity. This factor can be adjusted
      upward if we get a NAN runtime error which usually indicates the overflow
      in attention=exp(similarity). But excessively large offset could lead to
      underflow.
    use_row_sum: Indicates if row sum is used to compute softmax partition.
    multihead_projection: Indicates if the multihead projection is performed. In
      unit tests, turning this off avoids randomness in projection.
    output_projection: Project the output of `attention_fn` to `out_features`.
      If False, returns the output of `attention_fn` without a projection.
    softmax_temperature: Temperature parameter in softmax. Default=1.0. A larger
      temperature smooths the output distribution of the softmax.
    enable_param_axes: If True, the flaxformer DenseGeneral with named kernel
      axes is used for projections (supports parameter partitioning);
      otherwise the flax linen DenseGeneral is used.
    partitioner_factory: Factory for the partitioner used to annotate
      activations/attention arrays for model partitioning.
  """
  num_heads: int = 8
  num_clusters: int = 2
  causal_mask: bool = False
  dtype: jnp.dtype = jnp.float32
  qkv_features: Optional[int] = None
  out_features: Optional[int] = None
  broadcast_dropout: bool = True
  dropout_rate: float = 0.1
  precision: Optional[jax.lax.Precision] = None
  kernel_init: Initializer = nn.linear.default_kernel_init  # pytype: disable=annotation-type-mismatch  # jax-types
  bias_init: Initializer = nn.initializers.zeros
  use_bias: bool = True
  split_head_kernel: bool = False
  rescale_logits: bool = True
  sharding_over_head_dimension: bool = True
  use_rpb: bool = True
  use_multihead_rpb: bool = True
  conv_kernel_size: int = 2
  interpolation_kernel_type: th.ConvKernelType = th.ConvKernelType.CONST
  use_mxu: bool = True
  max_similarity_mode: MaxSimilarityMode = MaxSimilarityMode.SCAN_ALL
  max_similarity_factor: float = 3.
  use_row_sum: bool = False
  multihead_projection: bool = True
  output_projection: bool = True
  softmax_temperature: float = 1.0
  enable_param_axes: bool = True
  partitioner_factory: Callable[[], Any] = partitioning.Partitioner1D

  def setup(self):
    # Instantiated once per module; used for all partitioning annotations.
    self.partitioner = self.partitioner_factory()
@nn.compact
def __call__(self,
             inputs_q: Array,
             inputs_kv: Array,
             query_padding_mask: Optional[Array] = None,
             key_padding_mask: Optional[Array] = None,
             enable_dropout: Optional[bool] = False) -> Array:
  """Applies multi-head dot product hierarchical attention on input data.

  Args:
    inputs_q: Query, <float>[batch..., length, q_features].
    inputs_kv: Key/Value, <float>[batch..., length, kv_features].
    query_padding_mask: Query padding mask, <int>[batch..., length] or
      <int>[batch..., length, 1]. Zero entries mean the corresponding Query
      tokens are padding token.
    key_padding_mask: Key/Value padding mask, <int>[batch..., length] or
      <int>[batch..., length, 1]. Zero entries mean the corresponding
      Key/Value tokens are padding token.
    enable_dropout: Indicates if the attention weights are masked randomly
      with dropout.

  Returns:
    If output_projection is True, then output of shape
    `<float>[batch..., length, out_features]`, where out_features is set to
    features if not provided. If output_projection is False, then output of
    shape `<float>[batch..., length, num_heads, head_dim]`.
  """
  self._validate_call_parameters(inputs_q, inputs_kv, query_padding_mask,
                                 key_padding_mask)
  # Identity (not equality) check: self-attention is signalled by passing the
  # very same array object for Query and Key/Value.
  is_self_attention = inputs_q is inputs_kv
  inputs_q = self.partitioner.annotate_layer_activation(inputs_q)
  inputs_kv = self.partitioner.annotate_layer_activation(inputs_kv)
  # Applies padding_mask. Masks may come with or without a trailing
  # singleton axis; normalize to [..., length, 1] for broadcasting.
  if query_padding_mask is not None:
    if query_padding_mask.ndim == inputs_q.ndim - 1:
      query_padding_mask = query_padding_mask[..., None]
    inputs_q *= query_padding_mask
  if key_padding_mask is not None:
    if key_padding_mask.ndim == inputs_kv.ndim - 1:
      key_padding_mask = key_padding_mask[..., None]
    inputs_kv *= key_padding_mask
  # Performs Multihead projections.
  query, key, value = self._multihead_projection(inputs_q, inputs_kv)
  if self.sharding_over_head_dimension:
    query = self.partitioner.annotate_multihead_qkv(query)
    key = self.partitioner.annotate_multihead_qkv(key)
    value = self.partitioner.annotate_multihead_qkv(value)
  # Computes hierarchical attention and applies it to Value. The dropout RNG
  # is only drawn when actually needed, keeping eval runs deterministic.
  dropout_rng = None
  if enable_dropout and self.dropout_rate > 0.:
    dropout_rng = self.make_rng('dropout')
  updated_value = self._hierarchical_attention_fn(
      query,
      key,
      value,
      query_padding_mask=query_padding_mask,
      key_padding_mask=key_padding_mask,
      dropout_rng=dropout_rng,
      is_self_attention=is_self_attention)
  updated_value = self.partitioner.annotate_layer_activation(updated_value)
  if self.output_projection:
    # The updated_value no longer has multihead shape due to interpolation.
    # So it is a simple 2D projection. This means reshape_kernel=False.
    kwargs = dict(
        features=self.out_features or inputs_q.shape[-1],
        axis=-1,
        kernel_init=self.kernel_init,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
        name='out',
    )
    if self.enable_param_axes:
      # flaxformer DenseGeneral: supports named kernel axes for partitioning.
      kwargs['reshape_kernel'] = False
      kwargs['kernel_axis_names'] = [AXES.KV, AXES.EMBED]
      return dense.DenseGeneral(**kwargs)(updated_value)
    else:
      return nn.DenseGeneral(**kwargs)(updated_value)
  else:
    return updated_value
def _validate_call_parameters(self,
                              inputs_q: Array,
                              inputs_kv: Array,
                              query_padding_mask: Optional[Array] = None,
                              key_padding_mask: Optional[Array] = None):
  """Validates the parameters to the call method.

  Args:
    inputs_q: Query, <float>[batch..., length, q_features].
    inputs_kv: Key/Value, <float>[batch..., length, kv_features].
    query_padding_mask: Query padding mask.
    key_padding_mask: Key/Value padding mask.

  Raises:
    ValueError: This is triggered if any of the parameters have a wrong shape
      or if inputs_kv is missing.
  """
  # Bug fix: check inputs_kv first. The original ran the key_padding_mask and
  # ndim checks (which dereference inputs_kv) before this guard, so a missing
  # Key/Value surfaced as an AttributeError instead of this ValueError.
  if inputs_kv is None:
    raise ValueError('inputs_kv is not given.')

  def _validate_padding_mask_shape(padding_mask: Array, inputs: Array,
                                   mask_name: str):
    # A mask is valid either without or with a trailing singleton axis.
    expected_shape = inputs.shape[:
                                  -1] if padding_mask.ndim == inputs.ndim - 1 else inputs.shape[:-1] + (
                                      1,)
    if padding_mask.shape != expected_shape:
      raise ValueError(f'{mask_name} must have shape {expected_shape}; '
                       f' instead got shape {padding_mask.shape}')

  if query_padding_mask is not None:
    _validate_padding_mask_shape(query_padding_mask, inputs_q,
                                 'query_padding_mask')
  if key_padding_mask is not None:
    _validate_padding_mask_shape(key_padding_mask, inputs_kv,
                                 'key_padding_mask')
  if inputs_q.ndim != inputs_kv.ndim:
    raise ValueError(f'Mismatched inputs rank: expected '
                     f'inputs_q.ndim ({inputs_q.ndim}) == '
                     f'inputs_kv.ndim ({inputs_kv.ndim})')
  if inputs_q.ndim < 3:
    raise ValueError(f'Expected rank of inputs >= 3, was {inputs_q.ndim}')
  if inputs_q.shape[:-1] != inputs_kv.shape[:-1]:
    raise ValueError(f'Mismatched inputs_kv and inputs_q shape: expected '
                     f'inputs_q.shape[:-1] ({inputs_q.shape[:-1]}) == '
                     f'inputs_kv.shape[:-1] ({inputs_kv.shape[:-1]})')
  qkv_features = self.qkv_features or inputs_q.shape[-1]
  if qkv_features % self.num_heads != 0:
    raise ValueError(
        f'The features dimension {qkv_features} is not divisible by number '
        f'of heads {self.num_heads}.'
    )
@abc.abstractmethod
def _setup_hierarchy(
    self,
    features: Union[int, Tuple[int, int]],
    for_self_attention: bool,
) -> th.TokenHierarchy:
  """Sets up token hierarchy.

  Subclasses build the dimension-specific hierarchy (e.g. 1D for sequences).

  Args:
    features: Features dimension in inputs. Either a single sequence length
      or an (x, y) pair for two-dimensional inputs.
    for_self_attention: Indicating if this for the self attention.

  Returns:
    Instance of TokenHierarchy.
  """
@abc.abstractmethod
def _setup_position_bias(self,
                         hierarchy) -> h_rpb.HierarchicalRelativePositionBias:
  """Sets up hierarchical position bias.

  Args:
    hierarchy: Token hierarchy.

  Returns:
    Instance of HierarchicalRelativePositionBias.
  """
def _multihead_projection(self, inputs_q, inputs_kv):
  """Project inputs_q/kv to multi-headed query, key and value.

  Args:
    inputs_q: Query, <float>[batch..., length, q_features].
    inputs_kv: Key/Value, <float>[batch..., length, kv_features].

  Returns:
    query: Array with shape <float>[batch..., length, num_head, head_dim]`.
    key: Array with shape <float>[batch..., length, num_head, head_dim]`.
    value: Array with shape <float>[batch..., length, num_head, head_dim]`.
  """
  qkv_features = self.qkv_features or inputs_q.shape[-1]
  head_dim = qkv_features // self.num_heads
  if self.multihead_projection:
    kwargs = dict(
        axis=-1,
        features=(self.num_heads, head_dim),
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
    )
    if self.enable_param_axes:
      # flaxformer DenseGeneral supports named kernel axes for partitioning.
      dense_module = dense.DenseGeneral
      kwargs['kernel_axis_names'] = [AXES.EMBED, AXES.HEADS, AXES.KV]
      kwargs['reshape_kernel'] = not self.split_head_kernel
    else:
      dense_module = nn.DenseGeneral
    make_dense = functools.partial(dense_module, **kwargs)
    key = make_dense(
        kernel_init=self.kernel_init, name='key_multihead_projection')(
            inputs_kv)
    value = make_dense(
        kernel_init=self.kernel_init, name='value_multihead_projection')(
            inputs_kv)
    depth_scaling = jnp.sqrt(head_dim).astype(self.dtype)
    if self.rescale_logits:
      # The rescaling is done explicitly. This takes more memory but is
      # numerically more stable.
      inputs_q /= depth_scaling
      query_kernel_init = self.kernel_init
    else:
      # This folds logit rescaling into initializer.
      query_kernel_init = (
          lambda *args: self.kernel_init(*args) / depth_scaling)
    query = make_dense(
        kernel_init=query_kernel_init, name='query_multihead_projection')(
            inputs_q)
  else:
    # This is only for unit tests. It avoids the randomness in the projection.
    # Note this path requires q_features == num_heads * head_dim so the plain
    # reshape below is valid.
    projected_shape = inputs_q.shape[:-1] + tuple((self.num_heads, head_dim))
    if self.rescale_logits:
      # The rescaling is done explicitly.
      depth_scaling = jnp.sqrt(head_dim).astype(self.dtype)
      inputs_q /= depth_scaling
    query = inputs_q.reshape(projected_shape)
    key = inputs_kv.reshape(projected_shape)
    value = key
  return query, key, value
def _hierarchical_attention_fn(
    self,  # pytype: disable=annotation-type-mismatch  # jax-ndarray
    query: Array,
    key: Array,
    value: Array,
    query_padding_mask: Optional[Array] = None,
    key_padding_mask: Optional[Array] = None,
    dropout_rng: PRNGKey = None,
    is_self_attention: bool = True,
) -> Array:
  r"""Applies hierarchical attention given query, key, and value.

  Pipeline: coarsen Q/K/V over the token hierarchy, compute blockwise
  similarity, exponentiate into attention, normalize with a separately
  computed softmax partition, then multiply against coarse Value and
  interpolate back to the input resolution.

  Args:
    query: Multihead Query with shape <float>[batch..., length,
      num_heads, head_dim].
    key: Multihead Key with shape <float>[batch..., length,
      num_heads, head_dim].
    value: Multihead Value with shape <float>[batch..., length,
      num_heads, head_dim].
    query_padding_mask: Original query padding mask.
    key_padding_mask: Original key padding mask.
    dropout_rng: The key for generating random dropout.
    is_self_attention: Indicates if this is self-attention.

  Returns:
    Updated Value with shape <float>[batch..., length, features]`.
  """
  head_dim = query.shape[-1]
  if query.ndim == 5:
    # NOTE(review): ndim==5 presumably means a 2D token grid
    # [batch, y, x, num_heads, head_dim] — confirm against the 2D subclass.
    seq_length_x = query.shape[-3]
    seq_length_y = query.shape[-4]
    hierarchy = self._setup_hierarchy(
        features=(seq_length_x, seq_length_y),
        for_self_attention=is_self_attention,
    )
  else:
    seq_length = query.shape[-3]
    hierarchy = self._setup_hierarchy(
        seq_length, for_self_attention=is_self_attention
    )
  coarse_query = hierarchy.hierarchical_coarsen(
      query,
      input_array_name=th.InputArrayName.QUERY,
      padding_mask=query_padding_mask)
  packed_coarse_q = coarse_query.packed_coarse_qkv
  if self.sharding_over_head_dimension:
    packed_coarse_q = self.partitioner.annotate_coarse_qkv(packed_coarse_q)
  coarse_key = hierarchy.hierarchical_coarsen(
      key,
      input_array_name=th.InputArrayName.KEY,
      padding_mask=key_padding_mask)
  aggregated_key_padding_mask = coarse_key.packed_aggregated_key_padding_mask
  packed_coarse_k = coarse_key.packed_coarse_qkv
  if self.sharding_over_head_dimension:
    packed_coarse_k = self.partitioner.annotate_coarse_qkv(packed_coarse_k)
  similarity = self._compute_hierarchical_similarity(
      packed_coarse_q, packed_coarse_k, hierarchy
  )
  if self.sharding_over_head_dimension:
    similarity = self.partitioner.annotate_similarity(similarity)
  attention = self._compute_hierarchical_attention(similarity, hierarchy)
  if self.sharding_over_head_dimension:
    attention = self.partitioner.annotate_attention(attention)
  softmax_partition = self._compute_softmax_partition(
      attention, hierarchy, head_dim, query_padding_mask,
      aggregated_key_padding_mask)
  softmax_partition = self.partitioner.annotate_softmax_partition(
      softmax_partition
  )
  # Note:
  # Attention dropout should follow the computation of softmax_partition,
  # according to the implementation in flax.nn.attention.
  if dropout_rng is not None:
    attention = self._attention_dropout(attention, hierarchy, dropout_rng)  # pytype: disable=wrong-arg-types  # jax-ndarray
    if self.sharding_over_head_dimension:
      attention = self.partitioner.annotate_attention(attention)
  coarse_value = hierarchy.hierarchical_coarsen(
      value, input_array_name=th.InputArrayName.VALUE)
  coarse_value = coarse_value.packed_coarse_qkv
  if self.sharding_over_head_dimension:
    coarse_value = self.partitioner.annotate_coarse_qkv(coarse_value)
  updated_value = self._multiply_attention_value(attention, coarse_value,
                                                 hierarchy)
  updated_value = self.partitioner.annotate_layer_activation(updated_value)
  updated_value /= softmax_partition
  if query_padding_mask is not None:
    # Attention matrix has a few rows with all zeros. These rows correspond to
    # the zeros in query_padding_mask. As a consequence, the entries
    # corresponding to these rows in updated_value should be zeros.
    updated_value *= query_padding_mask
  updated_value = self.partitioner.annotate_layer_activation(updated_value)
  return updated_value
def _compute_hierarchical_similarity(
    self,
    query: Dict[th.TokenBlockName, Array],
    key: Dict[th.TokenBlockName, Array],
    hierarchy: th.TokenHierarchy,
) -> Dict[th.TokenBlockName, Array]:
  """Computes hierarchical similarity matrix.

  Computes the blockwise Q.K^T dot products, optionally adds the hierarchical
  relative position bias, and subtracts a global offset for numerical
  stability of the subsequent exp().

  Args:
    query: Packed coarse Query, value array shape is <float>[batch,
      packed_dim, num_clusters, num_heads, head_dim].
    key: Packed coarse Key, value array shape is <float>[batch, packed_dim,
      num_clusters, num_heads, head_dim].
    hierarchy: Token hierarchy.

  Returns:
    Similarity arrays for all token block interaction.
  """

  def _matmult(query: Array, key: Array) -> Array:
    # Contracts head_dim; output is [batch, packed, q_cluster, k_cluster,
    # num_heads]. einsum_str = 'bpqKhd, bpQkhd->bpqkh'
    if self.use_mxu:
      return jnp.einsum('bpqhd, bpkhd->bpqkh', query, key)
    else:
      return jnp.sum(
          query[..., None, :, :] * key[..., None, :, :, :], axis=-1)

  if self.use_rpb:
    batch_size = query[th.TokenBlockName.ANCHOR].shape[0]
    zero_block_mask = hierarchy.gen_packed_zero_block_mask(
        batch_size=batch_size, use_growth_factor=False, trailing_ndim=3)
    position_bias_fn = self._setup_position_bias(hierarchy)
  similarity = {}
  for block_name in hierarchy.block_names:
    similarity[block_name] = _matmult(query[block_name], key[block_name])
    if self.sharding_over_head_dimension:
      similarity[block_name] = self.partitioner.annotate_similarity(
          similarity[block_name]
      )
    if self.use_rpb:
      block_coord = hierarchy.block_coord[block_name]
      position_bias = position_bias_fn(block_coord)
      # Explicitly duplicates along batch. This is useful for model partition.
      batch = query[block_name].shape[0]
      position_bias = jnp.repeat(position_bias, batch, axis=0)
      if block_name == th.TokenBlockName.ANCHOR:
        similarity[block_name] += position_bias
      else:
        # Masked so the bias is not added to boundary blocks that have no
        # neighbor in this direction.
        similarity[block_name] += (
            position_bias * zero_block_mask[block_name])
  # The normalization below is critical to avoid NaN error. I suspect that
  # float32 is not enough to support the potentially very large value in
  # exp(similarity). Hence we need to subtract a reasonable constant
  # to reduce the value of exp(similarity). This will not change the
  # attention weights.
  similarity_offset = self._find_similarity_offset(similarity, hierarchy)
  for block_name in hierarchy.block_names:
    similarity[block_name] -= lax.stop_gradient(similarity_offset)
  return similarity
def _find_similarity_offset(
    self,
    similarity: Dict[th.TokenBlockName, Array],
    hierarchy: th.TokenHierarchy,
) -> Array:
  """Finds the offset to normalize the similarity array.

  The offset approximates the per-example, per-head maximum similarity so
  that exp(similarity - offset) stays in a safe numerical range.

  Args:
    similarity: Similarity arrays, value array has shape <float>[batch,
      packed_dim, num_clusters, num_clusters, num_heads].
    hierarchy: Token hierarchy.

  Returns:
    Similarity offset with shape<float>[batch, 1, 1, 1, num_heads].
  """
  # Axes covering packed blocks and both cluster dimensions; the batch and
  # num_heads axes are kept so each example/head gets its own offset.
  block_and_cluster_axes = (1, 2, 3)
  if self.max_similarity_mode == MaxSimilarityMode.SAMPLE_ANCHOR:
    # Cheapest estimate: only the (0, 0) entry of every anchor block.
    anchor_samples = similarity[th.TokenBlockName.ANCHOR][:, :, 0, 0, :]
    offset = jnp.max(anchor_samples, axis=(1,), keepdims=True)
    if self.max_similarity_factor > 1.:
      offset = offset * self.max_similarity_factor
    # Restore the two cluster axes dropped by the sampling above.
    offset = offset[:, :, None, None, :]
  elif self.max_similarity_mode == MaxSimilarityMode.SCAN_ANCHOR:
    # Reduces [batch, num_block, num_clusters, num_clusters, num_heads]
    # down to [batch, 1, 1, 1, num_heads].
    offset = jnp.max(
        similarity[th.TokenBlockName.ANCHOR],
        axis=block_and_cluster_axes,
        keepdims=True)
    if self.max_similarity_factor > 1.:
      offset = offset * self.max_similarity_factor
  else:
    # SCAN_ALL: exact maximum over every block; no buffer factor needed.
    per_block_maxima = [
        jnp.max(similarity[name], axis=block_and_cluster_axes, keepdims=True)
        for name in hierarchy.block_names
    ]
    # jnp.stack adds a leading axis which the final max removes again.
    offset = jnp.max(jnp.stack(per_block_maxima, axis=0), axis=0)
  if self.sharding_over_head_dimension:
    offset = self.partitioner.annotate_similarity(offset)
  return offset
def _compute_hierarchical_attention(
    self, similarity: Dict[th.TokenBlockName, Array],
    hierarchy: th.TokenHierarchy) -> Dict[th.TokenBlockName, Array]:
  """Computes the scaled dot-product attention hierarchically.

  Exponentiates the (already offset-normalized) similarities, then applies
  the inter-level overlap correction and, if requested, the causal mask.

  Args:
    similarity: Similarity arrays, value array has shape <float>[batch,
      packed_dim, num_clusters, num_clusters, num_heads].
    hierarchy: Token hierarchy.

  Returns:
    Attention arrays for all token block interaction. It value array has the
    same shape as that of similarity.
  """
  attention = {}
  assert self.softmax_temperature > 1e-10, 'Softmax temperature too small.'
  for block_name in hierarchy.block_names:
    attention[block_name] = jnp.exp(
        similarity[block_name] / self.softmax_temperature
    )
  if self.sharding_over_head_dimension:
    attention = self.partitioner.annotate_attention(attention)
  # This is to correct the overlapping between attention blocks for the
  # adjacent levels
  if hierarchy.num_level > 1:
    logging.info('Applying correction_mask')
    correction_mask = self._gen_correction_mask(hierarchy)
    for block_name in hierarchy.neighbor_block_names:
      attention[block_name] *= correction_mask[block_name]
  # This is for the auto-regressive decoding. We only need to explicitly
  # mask the attention[th.TokenBlockName.ANCHOR] because we can
  # simply skip the use of non-causal attention blocks in the
  # hierarchical attention-value multiplication. So no need to explicitly
  # mask non-causal attention blocks.
  if self.causal_mask:
    logging.info('Applying causal_mask.')
    attention[th.TokenBlockName.ANCHOR] *= self._gen_causal_mask(hierarchy)
  return attention
@abc.abstractmethod
def _gen_correction_mask(
    self, hierarchy: th.TokenHierarchy) -> Dict[th.TokenBlockName, Array]:
  """Generates correction mask.

  The mask removes double-counted interactions between attention blocks of
  adjacent hierarchy levels; subclasses provide the dimension-specific form.

  Args:
    hierarchy: Token hierarchy.

  Returns:
    The correction mask.
  """
def _gen_causal_mask(self, hierarchy: th.TokenHierarchy) -> Array:
  """Generates causal mask.

  Args:
    hierarchy: Token hierarchy.

  Returns:
    Causal mask with shape <float>[num_block_cluster, num_block_cluster, 1].
  """
  size = hierarchy.num_block_cluster
  # Lower-triangular ones: query position q may only attend to keys k <= q.
  lower_triangle = jnp.tril(jnp.ones((size, size), dtype=self.dtype))
  # The trailing singleton axis broadcasts over num_heads; the leading batch
  # and packed-block axes of the anchor attention broadcast implicitly.
  return jnp.expand_dims(lower_triangle, axis=-1)
def _compute_softmax_partition(
    self,
    attention: Dict[th.TokenBlockName, Array],
    hierarchy: th.TokenHierarchy,
    head_dim: int,
    query_padding_mask: Optional[Array] = None,
    aggregated_key_padding_mask: Optional[Array] = None) -> Array:
  """Computes softmax partition.

  The partition (softmax denominator) is computed by multiplying the
  attention against either the aggregated key padding mask or an all-ones
  surrogate, i.e. a masked row sum of the full attention matrix.

  Args:
    attention: Attention arrays for all blocks, value array has shape
      <float>[batch, pack_dim, num_clusters, num_clusters, num_heads].
    hierarchy: Token hierarchy.
    head_dim: head dimension.
    query_padding_mask: Original query padding mask.
    aggregated_key_padding_mask: Packed aggregated key padding mask. Its value
      array has shape [batch, packed_dim, num_clusters, 1].

  Returns:
    softmax_partition: Partition for the softmax calculation. Array shape
    is [batch, length, 1].
  """
  if aggregated_key_padding_mask is not None:
    # Expands from [batch, packed_dim, num_clusters, 1] to
    # [batch, packed_dim, num_clusters, num_heads] in preparation
    # to compute attention*all_ones.
    all_ones = {}
    for block_name in hierarchy.block_names:
      all_ones[block_name] = jnp.repeat(
          aggregated_key_padding_mask[block_name], self.num_heads, axis=3)
    softmax_partition = self._multiply_attention_value(
        attention, all_ones, hierarchy)
  else:
    if self.use_row_sum:
      softmax_partition = self._row_sum(attention, hierarchy)
    else:
      (batch_size, packed_dim, num_cluster, _, num_heads) = (
          attention[th.TokenBlockName.ANCHOR].shape)
      all_ones = hierarchy.gen_packed_zero_block_mask(
          batch_size=batch_size, use_growth_factor=True, trailing_ndim=2)
      # Expands from [batch_size, packed_dim, 1, 1] to
      # [batch_size, packed_dim, num_clusters, num_heads] in preparation
      # to compute attention*all_ones.
      for block_name in hierarchy.neighbor_block_names:
        repeated_all_ones = jnp.repeat(
            all_ones[block_name], num_cluster, axis=2)
        repeated_all_ones = jnp.repeat(repeated_all_ones, num_heads, axis=3)
        all_ones[block_name] = repeated_all_ones
      # Special treatment for anchor since it is not created by the function
      # gen_packed_zero_block_mask().
      all_ones[th.TokenBlockName.ANCHOR] = jnp.ones(
          (batch_size, packed_dim, num_cluster, num_heads), dtype=self.dtype)
      softmax_partition = self._multiply_attention_value(
          attention, all_ones, hierarchy)
  # Sets entries corresponding to padding tokens to 1.
  if query_padding_mask is not None:
    softmax_partition = softmax_partition * query_padding_mask + (
        1. - query_padding_mask)
  # Filters out potentially very small entries which can lead to NaN.
  very_small_entry = 1e-6
  softmax_partition = lax.select(
      softmax_partition > very_small_entry,
      softmax_partition.astype(self.dtype),
      jnp.ones(softmax_partition.shape, dtype=self.dtype))
  return self._duplicate_heads(softmax_partition, head_dim)
def _row_sum(self, attention: Dict[th.TokenBlockName, Array],
             hierarchy: th.TokenHierarchy) -> Array:
  """Computes softmax partition by summing attention matrix rows.

  If there is no padding_mask, simple row summation correctly
  computes softmax_partition. We need packed_zero_block_mask to
  account for the scaling factor 2^k in coarsening at level-k.

  Args:
    attention: Attention arrays for all blocks, value array has shape
      <float>[batch, pack_dim, num_clusters, num_clusters, num_heads].
    hierarchy: Token hierarchy.

  Returns:
    softmax_partition: Partition for the softmax calculation. Array shape
    is [batch, length, 1].
  """
  batch_size = attention[th.TokenBlockName.ANCHOR].shape[0]
  zero_block_mask = hierarchy.gen_packed_zero_block_mask(
      batch_size=batch_size, use_growth_factor=True, trailing_ndim=2)
  # NOTE(review): assumes block_names contains ANCHOR plus at least one
  # neighbor block, so fine_partition and coarse_partition are both bound
  # before the merge below — confirm against TokenHierarchy.
  first_block = True
  for block_name in hierarchy.block_names:
    # Summing over cluster column indexes in each block.
    block_result = jnp.sum(attention[block_name], axis=-2)
    if block_name == th.TokenBlockName.ANCHOR:
      fine_partition = block_result
    else:
      block_result *= zero_block_mask[block_name]
      if first_block:
        coarse_partition = block_result
        first_block = False
      else:
        coarse_partition += block_result
  # Merges coarse_partition and fine_partition at level=0 since they have
  # the same shape.
  fine_partition += coarse_partition[:, :hierarchy.num_fine_block]
  softmax_partition = hierarchy.recover_input_shape(fine_partition, level=0)
  softmax_partition += hierarchy.interpolate_cumulative_sum(
      coarse_partition[:, hierarchy.num_fine_block:])
  return softmax_partition
@abc.abstractmethod
def _duplicate_heads(self, softmax_partition: Array, head_dim: int) -> Array:
  """Duplicates entries in softmax_partition by head_dim times.

  Args:
    softmax_partition: Partition for the softmax calculation. Array shape is
      [batch..., length, num_heads]. This array does not have head_dim axis
      which is to be added here.
    head_dim: The head dimension size.

  Returns:
    New softmax_partition with added duplicated entries.
  """
def _multiply_attention_value(self, attention: Dict[th.TokenBlockName, Array],
                              value: Dict[th.TokenBlockName, Array],
                              hierarchy: th.TokenHierarchy) -> Array:
  """Compute y=attention*value using hierarchical attention.

  Args:
    attention: The attention weights for all token blocks. Its dict value
      shape is <float>[batch, packed_dim, num_clusters, num_clusters,
      num_heads].
    value: Packed coarse Value for all blocks. The dict value array shape is
      <float>[batch, packed_dim, num_clusters, num_heads, head_dim].
    hierarchy: Token hierarchy.

  Returns:
    Multiplication result of y = attention * value with shape
    <float>[batch..., length, features]
  """

  def _matmul(attention: Array, value: Array) -> Array:
    # Supports both 5D value (with head_dim) and 4D value (e.g. the all-ones
    # surrogate used for the softmax partition).
    if value.ndim == 5:
      if self.use_mxu:
        result = jnp.einsum('bpqkh, bpkhd->bpqhd', attention, value)
      else:
        # einsum_str = 'bpqkhD, bpQkhd->bpqhd'
        result = jnp.sum(
            attention[..., None] * value[..., None, :, :, :], axis=3)
    else:
      if self.use_mxu:
        result = jnp.einsum('bpqkh, bpkh->bpqh', attention, value)
      else:
        # einsum_str = 'bpqkh, bpQkh->bpqh'
        result = jnp.sum(attention * value[..., None, :, :], axis=3)
    return result

  # NOTE(review): assumes block_names contains ANCHOR plus at least one
  # neighbor block, so fine_y and coarse_y are bound before the merge below.
  first_block = True
  for block_name in hierarchy.block_names:
    block_result = _matmul(attention[block_name], value[block_name])
    if block_name == th.TokenBlockName.ANCHOR:
      fine_y = block_result
    else:
      if first_block:
        coarse_y = block_result
        first_block = False
      else:
        coarse_y += block_result
  # Merge coarse_y and fine_y at level=0 since they have the same shape.
  fine_y += coarse_y[:, :hierarchy.num_fine_block]
  result_y = hierarchy.recover_input_shape(fine_y, level=0)
  result_y += hierarchy.interpolate_cumulative_sum(
      coarse_y[:, hierarchy.num_fine_block:])
  return result_y
def _attention_dropout(self, attention: Array, hierarchy: th.TokenHierarchy,
                       dropout_rng: PRNGKey) -> Array:
  """Apply dropout to the hierarchical attention weights.

  Args:
    attention: Attention arrays for all blocks, value array has shape
      <float>[batch, pack_dim, num_clusters, num_clusters, num_heads].
    hierarchy: Token hierarchy.
    dropout_rng: The key for generating random dropout.

  Returns:
    New attention arrays for all blocks with randomly zeroed out entries.
  """

  def dropout_multiplier(attention_block, dropout_rng):
    # Bug fix: jax.lax.tie_in became a no-op with omnistaging and was later
    # removed from JAX, so calling it raises AttributeError on current
    # versions. The plain Python scalar is the correct replacement.
    keep_prob = 1.0 - self.dropout_rate
    if self.broadcast_dropout:
      (_, num_block, _, num_clusters, _) = attention_block.shape
      # The dropout is broadcast across batch and num_heads.
      dropout_shape = (1, num_block, num_clusters, num_clusters, 1)
      keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)
    else:
      keep = random.bernoulli(dropout_rng, keep_prob, attention_block.shape)
    # This roughly preserves the raw sum of each attention row.
    multiplier = (
        keep.astype(self.dtype) / jnp.asarray(keep_prob, dtype=self.dtype))
    return multiplier

  # NOTE(review): every block reuses the same dropout_rng, so blocks with the
  # same shape draw identical masks — preserved from the original behavior.
  for block_name in hierarchy.block_names:
    attention[block_name] *= dropout_multiplier(attention[block_name],
                                                dropout_rng)
  return attention
class OneDimHierarchicalAttention(HierarchicalAttention):
  """One-dimensional hierarchical attention for sequences.

  See arxiv.org/abs/2107.11906 for algorithm details.
  """

  def _setup_hierarchy(
      self, features: Union[int, Tuple[int, int]], for_self_attention: bool
  ) -> th.OneDimTokenHierarchy:
    """Builds the 1D token hierarchy used by this attention module."""
    hierarchy_kwargs = dict(
        num_cluster=self.num_clusters,
        for_self_attention=for_self_attention,
        causal_mask=self.causal_mask,
        conv_kernel_size=self.conv_kernel_size,
        interpolation_kernel_type=self.interpolation_kernel_type,
        dtype=self.dtype)
    return th.OneDimTokenHierarchy(features, **hierarchy_kwargs)

  def _setup_position_bias(
      self, hierarchy: th.OneDimTokenHierarchy
  ) -> h_rpb.OneDimHierarchicalRelativePositionBias:
    """Sets up the hierarchical relative position bias.

    Args:
      hierarchy: OneDimTokenHierarchy.

    Returns:
      Instance of OneDimHierarchicalRelativePositionBias.
    """
    # A single shared bias (num_head=1) is used unless per-head biases were
    # requested via use_multihead_rpb.
    rpb_heads = self.num_heads if self.use_multihead_rpb else 1
    return h_rpb.OneDimHierarchicalRelativePositionBias(
        num_cluster=hierarchy.num_cluster,
        num_head=rpb_heads,
        enable_param_axes=self.enable_param_axes,
        name='1d_relative_position_bias',
    )

  def _gen_correction_mask(
      self, hierarchy: th.TokenHierarchy) -> Dict[th.TokenBlockName, Array]:
    """Generates the correction masks for the coarse neighbor blocks.

    Args:
      hierarchy: Token hierarchy.

    Returns:
      The correction masks. Each dict value array has shape
      <float>[packed_dim, num_clusters, num_clusters, 1].
    """
    nc = hierarchy.num_cluster
    half_nc = nc // 2
    # Fine-level blocks need no correction, hence all ones.
    fine_ones = jnp.ones(
        (hierarchy.num_fine_block, nc, nc, 1), dtype=self.dtype)

    def coarse_mask(zero_rows, zero_cols):
      # Static mask built in numpy so it folds into program constants.
      mask = np.ones(
          (hierarchy.num_coarse_block, nc, nc, 1), dtype=self.dtype)
      mask[:, zero_rows, zero_cols] = 0
      return mask

    return {
        th.TokenBlockName.RIGHT: jnp.concatenate(
            (fine_ones,
             coarse_mask(slice(half_nc, None), slice(None, half_nc))),
            axis=0),
        th.TokenBlockName.LEFT: jnp.concatenate(
            (fine_ones,
             coarse_mask(slice(None, half_nc), slice(half_nc, None))),
            axis=0),
    }

  def _duplicate_heads(self, softmax_partition: Array, head_dim: int) -> Array:
    """Duplicates each entry of softmax_partition head_dim times.

    Args:
      softmax_partition: Partition for the softmax calculation with shape
        [batch, length, num_heads]; the head_dim axis is added here.
      head_dim: The head dimension size.

    Returns:
      New softmax_partition of shape [batch, length, num_heads * head_dim]
      where every per-head entry is repeated head_dim times.
    """
    # broadcast_to over a trailing singleton axis duplicates each entry,
    # which is equivalent to jnp.repeat along a new last axis.
    expanded = jnp.broadcast_to(
        softmax_partition[..., None],
        softmax_partition.shape + (head_dim,))
    merged_shape = expanded.shape[:2] + (self.num_heads * head_dim,)
    return expanded.reshape(merged_shape)
class OneDimDecoderSelfAttention(OneDimHierarchicalAttention):
  """Decoder self-attention for one-dimension sequences."""

  # Decoder self-attention must not look at future tokens.
  causal_mask: bool = True

  def __call__(
      self,  # pytype: disable=signature-mismatch  # overriding-default-value-checks
      inputs: Array,
      padding_mask: Array,
      enable_dropout: Optional[bool] = False) -> Array:
    """Runs causal self-attention: `inputs` act as both queries and keys."""
    shared_kwargs = dict(
        query_padding_mask=padding_mask,
        key_padding_mask=padding_mask,
        enable_dropout=enable_dropout)
    return super().__call__(inputs, inputs, **shared_kwargs)
class OneDimEncoderSelfAttention(OneDimHierarchicalAttention):
  """Encoder self-attention for one-dimension sequences."""

  # Encoder self-attention is bidirectional.
  causal_mask: bool = False

  def __call__(
      self,  # pytype: disable=signature-mismatch  # overriding-default-value-checks
      inputs: Array,
      padding_mask: Optional[Array] = None,
      enable_dropout: Optional[bool] = False,
  ) -> Array:
    """Runs bidirectional self-attention over `inputs`."""
    shared_kwargs = dict(
        query_padding_mask=padding_mask,
        key_padding_mask=padding_mask,
        enable_dropout=enable_dropout)
    return super().__call__(inputs, inputs, **shared_kwargs)
# Cross-attention reuses the full hierarchical attention implementation
# unchanged; callers simply pass distinct query and key/value inputs.
OneDimCrossAttention = OneDimHierarchicalAttention
| 40,618 | 39.700401 | 126 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/h_transformer_1d_architecture_test_utils.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilties for h_transformer_1d_architecture_test."""
from typing import Callable
from flax import linen as nn
from jax import numpy as jnp
from flaxformer.architectures.h_transformer import h_attention
from flaxformer.architectures.h_transformer import h_transformer_1d_architecture
from flaxformer.architectures.h_transformer import h_transformer_utils as utils
from flaxformer.components import dense
from flaxformer.components import embedding
from flaxformer.components import layer_norm
from flaxformer.components.attention import dense_attention
_EMBEDDING_INIT = nn.initializers.normal(stddev=1.0)
_ATTENTION_KERNEL_INIT = nn.initializers.variance_scaling(
1.0, 'fan_in', 'normal')
_MLP_KERNEL_INIT = nn.initializers.variance_scaling(1.0, 'fan_in',
'truncated_normal')
_BIAS_INIT = nn.initializers.normal(stddev=1e-6)
def _token_embedder_factory(vocab_size: int, embed_size: int) -> nn.Module:
  """Returns a test token embedder with the given vocab and embedding sizes."""
  embedder_kwargs = dict(
      num_embeddings=vocab_size,
      features=embed_size,
      cast_input_dtype=jnp.int32,
      dtype=jnp.float32,
      attend_dtype=jnp.float32,
      embedding_init=_EMBEDDING_INIT,
      name='token_embedder')
  return embedding.Embed(**embedder_kwargs)  # pytype: disable=wrong-arg-types  # jax-types
def _mlp_factory(dropout_rate: float = 0.1, embed_size: int = 13) -> nn.Module:
  """Returns a test MLP block whose hidden width is twice the embedding size."""
  mlp_kwargs = dict(
      use_bias=False,
      intermediate_dim=2 * embed_size,
      activations=('relu',),
      intermediate_dropout_rate=dropout_rate,
      final_dropout_rate=dropout_rate,
      kernel_init=_MLP_KERNEL_INIT,
      bias_init=_BIAS_INIT,
      dtype=jnp.float32)
  return dense.MlpBlock(**mlp_kwargs)
def _encoder_self_attention_factory(num_heads: int, num_clusters: int,
                                    qkv_features: int, use_rpb: bool,
                                    use_multihead_rpb: bool) -> nn.Module:
  """Returns a non-causal 1D hierarchical self-attention for encoder tests."""
  return h_attention.OneDimEncoderSelfAttention(  # pytype: disable=wrong-arg-types  # jax-types
      num_heads=num_heads,
      num_clusters=num_clusters,
      qkv_features=qkv_features,
      dtype=jnp.float32,
      kernel_init=_ATTENTION_KERNEL_INIT,
      bias_init=_BIAS_INIT,
      use_rpb=use_rpb,
      use_multihead_rpb=use_multihead_rpb,
  )
def _decoder_self_attention_factory(num_heads: int, num_clusters: int,
                                    qkv_features: int, use_rpb: bool,
                                    use_multihead_rpb: bool) -> nn.Module:
  """Returns a causal 1D hierarchical self-attention for decoder tests."""
  # Return annotation added for consistency with the encoder factory above.
  return h_attention.OneDimDecoderSelfAttention(  # pytype: disable=wrong-arg-types  # jax-types
      num_heads=num_heads,
      num_clusters=num_clusters,
      qkv_features=qkv_features,
      dtype=jnp.float32,
      kernel_init=_ATTENTION_KERNEL_INIT,
      bias_init=_BIAS_INIT,
      use_rpb=use_rpb,
      use_multihead_rpb=use_multihead_rpb,
  )
def _cross_attention_factory(num_heads: int, qkv_features: int) -> nn.Module:
  """Returns a standard (dense) multi-head attention used for cross-attention."""
  # Note: cross-attention here uses ordinary dense attention, not the
  # hierarchical variant.
  return dense_attention.MultiHeadDotProductAttention(  # pytype: disable=wrong-arg-types  # jax-types
      num_heads=num_heads,
      qkv_features=qkv_features,
      dtype=jnp.float32,
      kernel_init=_ATTENTION_KERNEL_INIT,
      bias_init=_BIAS_INIT,
      head_dim=None,
      use_bias=False,
      broadcast_dropout=True,
      dropout_rate=0.1)
# The default numbers are consistent with the testdata files.
def config_encoder(
    embed_size: int = 13,
    scan_layers: bool = False,
    layer_remat: utils.LayerRematOptions = utils.LayerRematOptions.LEGACY,
    layer_norm_factory: Callable[..., nn.Module] = layer_norm.T5LayerNorm,
    dropout_factory: Callable[
        ..., nn.Module] = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,)),
    num_layers: int = 3,
    vocab_size: int = 2000,
    qkv_features: int = 512,
    dropout_rate: float = 0.1,
    num_heads: int = 4,
    num_clusters: int = 2,
    use_rpb: bool = True,
    use_multihead_rpb: bool = True,
) -> h_transformer_1d_architecture.Encoder:
  """Configures an h-transformer encoder.

  The default values are kept consistent with the testdata files.
  """

  def _encoder_layer_factory():
    # Every layer gets its own attention and MLP instance.
    attention = _encoder_self_attention_factory(
        num_heads, num_clusters, qkv_features, use_rpb, use_multihead_rpb)
    mlp = _mlp_factory(dropout_rate=dropout_rate, embed_size=embed_size)
    return h_transformer_1d_architecture.EncoderLayer(
        attention=attention,
        mlp=mlp,
        dropout_factory=dropout_factory,
        layer_norm_factory=layer_norm_factory,
        scanned=scan_layers)

  def _make_token_embedder():
    return _token_embedder_factory(vocab_size, embed_size)

  return h_transformer_1d_architecture.Encoder(
      layer_factory=_encoder_layer_factory,
      input_dropout_factory=dropout_factory,
      output_dropout_factory=dropout_factory,
      layer_norm_factory=layer_norm_factory,
      num_layers=num_layers,
      layer_remat=layer_remat,
      scan_layers=scan_layers,
      token_embedder_factory=_make_token_embedder)
# The default numbers are consistent with the testdata files.
def config_decoder_only(
    embed_size: int = 13,
    scan_layers: bool = False,
    layer_remat: utils.LayerRematOptions = utils.LayerRematOptions.LEGACY,
    layer_norm_factory: Callable[..., nn.Module] = layer_norm.T5LayerNorm,
    dropout_factory: Callable[
        ..., nn.Module] = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,)),
    num_layers: int = 3,
    vocab_size: int = 2000,
    qkv_features: int = 512,
    dropout_rate: float = 0.1,
    num_heads: int = 4,
    num_clusters: int = 4,
    use_rpb: bool = True,
    use_multihead_rpb: bool = True,
) -> h_transformer_1d_architecture.DecoderOnly:
  """Configures an h-transformer DecoderOnly.

  The default values are kept consistent with the testdata files.
  """

  def _decoder_only_layer_factory():
    # Every layer gets its own causal self-attention and MLP instance.
    attention = _decoder_self_attention_factory(
        num_heads, num_clusters, qkv_features, use_rpb, use_multihead_rpb)
    mlp = _mlp_factory(dropout_rate=dropout_rate, embed_size=embed_size)
    return h_transformer_1d_architecture.DecoderOnlyLayer(
        attention=attention,
        mlp=mlp,
        dropout_factory=dropout_factory,
        layer_norm_factory=layer_norm_factory,
        scanned=scan_layers)

  def _make_token_embedder():
    return _token_embedder_factory(vocab_size, embed_size)

  return h_transformer_1d_architecture.DecoderOnly(
      layer_factory=_decoder_only_layer_factory,
      input_dropout_factory=dropout_factory,
      output_dropout_factory=dropout_factory,
      layer_norm_factory=layer_norm_factory,
      num_layers=num_layers,
      layer_remat=layer_remat,
      scan_layers=scan_layers,
      token_embedder_factory=_make_token_embedder)
# The default numbers are consistent with the testdata files.
def config_decoder(
    embed_size: int = 13,
    scan_layers: bool = False,
    parallel: bool = False,
    layer_remat: utils.LayerRematOptions = utils.LayerRematOptions.LEGACY,
    layer_norm_factory: Callable[..., nn.Module] = layer_norm.T5LayerNorm,
    dropout_factory: Callable[
        ..., nn.Module] = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,)),
    num_layers: int = 3,
    vocab_size: int = 2000,
    qkv_features: int = 512,
    dropout_rate: float = 0.1,
    num_heads: int = 4,
    num_clusters: int = 2,
    use_rpb: bool = True,
    use_multihead_rpb: bool = True,
) -> h_transformer_1d_architecture.Decoder:
  """Configures an h-transformer Decoder.

  The default values are kept consistent with the testdata files.
  """

  def _decoder_layer_factory():
    # No cross-attention: this decoder is configured without an encoder side.
    self_attention = _decoder_self_attention_factory(
        num_heads, num_clusters, qkv_features, use_rpb, use_multihead_rpb)
    mlp = _mlp_factory(dropout_rate=dropout_rate, embed_size=embed_size)
    return h_transformer_1d_architecture.DecoderLayer(
        self_attention=self_attention,
        encoder_decoder_attention=None,
        mlp=mlp,
        dropout_factory=dropout_factory,
        layer_norm_factory=layer_norm_factory,
        parallel=parallel,
        scanned=scan_layers)

  def _make_token_embedder():
    return _token_embedder_factory(vocab_size, embed_size)

  return h_transformer_1d_architecture.Decoder(
      layer_factory=_decoder_layer_factory,
      input_dropout_factory=dropout_factory,
      output_dropout_factory=dropout_factory,
      layer_norm_factory=layer_norm_factory,
      num_layers=num_layers,
      layer_remat=layer_remat,
      scan_layers=scan_layers,
      token_embedder_factory=_make_token_embedder)
# The default numbers are consistent with the testdata files.
def config_encoder_decoder(
    embed_size: int = 13,
    scan_layers: bool = False,
    layer_remat: utils.LayerRematOptions = utils.LayerRematOptions.LEGACY,
    layer_norm_factory: Callable[..., nn.Module] = layer_norm.T5LayerNorm,
    dropout_factory: Callable[
        ..., nn.Module] = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,)),
    num_layers: int = 3,
    vocab_size: int = 2000,
    qkv_features: int = 512,
    dropout_rate: float = 0.1,
    num_heads: int = 4,
    num_clusters: int = 2,
    use_rpb: bool = True,
    use_multihead_rpb: bool = True,
) -> h_transformer_1d_architecture.EncoderDecoder:
  """Configures an h-transformer EncoderDecoder.

  The default values are kept consistent with the testdata files.
  """

  def _encoder_layer_factory():
    # NOTE(review): the encoder layer here is wired with
    # _decoder_self_attention_factory (causal OneDimDecoderSelfAttention),
    # unlike config_encoder above which uses
    # _encoder_self_attention_factory. Confirm this is intentional (e.g. for
    # testdata compatibility) rather than a copy-paste slip.
    return h_transformer_1d_architecture.EncoderLayer(
        attention=_decoder_self_attention_factory(num_heads, num_clusters,
                                                  qkv_features, use_rpb,
                                                  use_multihead_rpb),
        mlp=_mlp_factory(dropout_rate=dropout_rate, embed_size=embed_size),
        dropout_factory=dropout_factory,
        layer_norm_factory=layer_norm_factory,
        scanned=scan_layers)

  def _decoder_layer_factory():
    # Decoder layers combine causal self-attention with dense cross-attention
    # over the encoder output.
    return h_transformer_1d_architecture.DecoderLayer(
        self_attention=_decoder_self_attention_factory(num_heads, num_clusters,
                                                       qkv_features, use_rpb,
                                                       use_multihead_rpb),
        encoder_decoder_attention=_cross_attention_factory(
            num_heads, qkv_features),
        mlp=_mlp_factory(dropout_rate=dropout_rate, embed_size=embed_size),
        dropout_factory=dropout_factory,
        layer_norm_factory=layer_norm_factory,
        scanned=scan_layers)

  def _encoder_factory(shared_token_embedder):
    # Encoder and decoder each build their own token embedder; a shared
    # embedder is not supported by this test configuration.
    assert shared_token_embedder is None
    return h_transformer_1d_architecture.Encoder(
        layer_factory=_encoder_layer_factory,
        input_dropout_factory=dropout_factory,
        output_dropout_factory=dropout_factory,
        layer_norm_factory=layer_norm_factory,
        num_layers=num_layers,
        layer_remat=layer_remat,
        scan_layers=scan_layers,
        token_embedder_factory=(
            lambda: _token_embedder_factory(vocab_size, embed_size)))

  def _decoder_factory(shared_token_embedder):
    assert shared_token_embedder is None
    return h_transformer_1d_architecture.Decoder(
        layer_factory=_decoder_layer_factory,
        input_dropout_factory=dropout_factory,
        output_dropout_factory=dropout_factory,
        layer_norm_factory=layer_norm_factory,
        num_layers=num_layers,
        layer_remat=layer_remat,
        scan_layers=scan_layers,
        token_embedder_factory=(
            lambda: _token_embedder_factory(vocab_size, embed_size)))

  return h_transformer_1d_architecture.EncoderDecoder(
      encoder_factory=_encoder_factory,
      decoder_factory=_decoder_factory,
      scan_layers=scan_layers,
      shared_token_embedder_factory=lambda: None,
  )
| 11,786 | 38.159468 | 102 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/hierarchical_relative_position_bias.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utility classes to compute the hierarchical relative position bias (h-RPB).
The notion of the relative position bias is the same as that used in T5.
The h-RPB is added to the hierarchical similarity matrix which is then used
to compute the hierarchical attention (h-attention) matrix.
But the h-RPB is tightly coupled with the token hierarchy established by
the h-attention algorithm. At each level in the hierarchy, each token block
only attends to its immediate neighboring token blocks left and right.
So the total number of relative positions at each level is independent of
the sequence length. Since the same h-RPB is shared by all levels in the
hierarchy, the memory footprint of the h-RPB is independent of the
sequence length. Experiments have shown that it adds very little overhead
to the overall model training memory usage and runtime.
"""
from flax import linen as nn
from flax.linen import partitioning
import jax.numpy as jnp
import numpy as np
from flaxformer.architectures.h_transformer.partitioning import AxisName
from flaxformer.types import Initializer
class HierarchicalRelativePositionBias(nn.Module):
  """Base class for the Hierarchical Relative Position Bias (h-RPB).

  The bias is shared by all levels of the token hierarchy, so its parameter
  count is independent of the sequence length.

  Attributes:
    position_bias_init: Positional bias initializer.
    num_cluster: Number of clusters in h_attention.
    num_head: Number of heads with different h-RPB. Setting num_head=1 means
      all heads share the same h-RPB.
    enable_param_axes: Whether to declare partitioning axes for the params.
  """
  position_bias_init: Initializer = nn.initializers.normal(stddev=0.1)  # pytype: disable=annotation-type-mismatch  # jax-types
  num_cluster: int = 2
  num_head: int = 1
  enable_param_axes: bool = True

  def _create_1d_relative_position_bias(
      self, param_name: str = '1d_relative_position_bias') -> jnp.ndarray:
    """Creates a trainable one-dimensional relative position bias array.

    Args:
      param_name: Name for the trainable bias parameter.

    Returns:
      Trainable relative position bias array of shape
      <float>[num_cluster, 3*num_cluster, num_head].

    Notes:
      The static index arithmetic is done with numpy so it folds into
      program constants; the memory footprint stays O(num_cluster).
    """
    # Key tokens span coordinates [0, 3*nc); the anchor query block occupies
    # the middle third, coordinates [nc, 2*nc).
    num_keys = 3 * self.num_cluster
    num_queries = self.num_cluster
    key_coords = np.arange(num_keys)
    query_coords = np.arange(num_queries) + self.num_cluster
    # Relative position of each (query, key) pair via broadcasting.
    relative_positions = (
        key_coords[np.newaxis, :] - query_coords[:, np.newaxis])
    # The indices feed a bias lookup, so shift them to start at zero.
    relative_positions -= np.min(relative_positions)
    total_positions = num_queries + num_keys - 1
    if self.enable_param_axes:
      bias_params = partitioning.param_with_axes(
          param_name,
          self.position_bias_init,
          (total_positions, self.num_head),
          jnp.float32,
          axes=(AxisName.RELPOS_BUCKETS, AxisName.HEADS),
      )
    else:
      bias_params = self.param(
          param_name, self.position_bias_init, (total_positions, self.num_head)
      )
    return jnp.take(bias_params, relative_positions, axis=0)
class OneDimHierarchicalRelativePositionBias(HierarchicalRelativePositionBias):
  """Computes 1D Hierarchical Relative Position Bias."""

  def setup(self):
    # Shape (nc, 3*nc, num_head): bias of the anchor query block against the
    # left, center, and right key blocks laid side by side.
    full_bias = self._create_1d_relative_position_bias(
        '1d_relative_position_bias')
    nc = self.num_cluster
    key_blocks = jnp.split(full_bias, [nc, 2 * nc], axis=1)
    # Prepend singleton (batch=1, num_block=1) axes so each block matches the
    # similarity array shape (batch, num_block, nc, nc, num_head) used by the
    # h-attention algorithm. Keys '0'/'1'/'2' map to left/center/right.
    self.position_bias_blocks = {
        str(block_index): jnp.expand_dims(key_block, axis=(0, 1))
        for block_index, key_block in enumerate(key_blocks)
    }

  def __call__(self, block_coord: int) -> jnp.ndarray:
    """Retrieves the 1D relative position bias block.

    Args:
      block_coord: Position of the key block relative to the query block:
        -1 for left, 0 for center (where the query block sits), 1 for right.

    Returns:
      The bias block for `block_coord`, with shape
      <float>[batch=1, num_block=1, num_cluster, num_cluster, num_head].
    """
    return self.position_bias_blocks[str(block_coord + 1)]
| 5,722 | 41.392593 | 127 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/h_transformer_1d_architecture.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines architecture classes for h_transformer_1d models."""
import inspect
from typing import Callable, Optional, Any
from absl import logging
from flax import linen as nn
from jax import numpy as jnp
from typing_extensions import Protocol
from flaxformer import transformer_common as common
from flaxformer.architectures.common import param_remapping
from flaxformer.architectures.h_transformer import h_transformer_utils as utils
from flaxformer.architectures.h_transformer import partitioning
from flaxformer.components import embedding
from flaxformer.components import transforms
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
_SCAN_AXIS = 1
class MakeEncoderFn(Protocol):
  """Signature for functions that will make a low-level Encoder.

  Being a Protocol, any callable matching `__call__` below conforms; no
  explicit subclassing is required.
  """

  def __call__(
      self,
      *,
      shared_token_embedder: Optional[embedding.Embed] = None,
      spmd_annotations: Any = None,
  ) -> 'Encoder':
    """Makes a low-level Encoder instance.

    Args:
      shared_token_embedder: Shared token embedder instance, which should be
        passed to the returned module. If this is non-None, you should use it
        instead of providing your own token embedder.
      spmd_annotations: Optional SPMD annotations for scanned layers.

    Returns:
      Encoder instance.
    """
class EncoderLayer(nn.Module, param_remapping.ParameterRemappable):
  """H-Transformer encoder layer.

  Attributes:
    attention: The h_attention module.
    mlp: The MLP module, applied after attention.
    dropout_factory: A callable that returns a new dropout instance. This is
      applied after the attention module.
    layer_norm_factory: A callable that returns a new layer norm. This is
      applied before the attention module and before the MLP.
    partitioner_factory: A callable that returns the activation partitioner.
    parallel: Whether to call attention and mlp in parallel.
    sow_intermediates: Whether to track intermediates using Module.sow.
    scanned: Whether this layer is being scanned over.
  """
  attention: nn.Module
  mlp: nn.Module
  dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  partitioner_factory: Callable[[], Any] = partitioning.Partitioner1D
  parallel: bool = False
  sow_intermediates: bool = False
  scanned: bool = False

  def setup(self):
    """Instantiates the sub-modules used by `__call__`."""
    self.pre_attention_layer_norm = self.layer_norm_factory()
    self.post_attention_dropout = self.dropout_factory()
    self.partitioner = self.partitioner_factory()
    # The parallel formulation uses a single shared layer norm and dropout,
    # so the MLP-specific ones are only needed in the sequential case.
    if not self.parallel:
      self.pre_mlp_layer_norm = self.layer_norm_factory()
      self.post_mlp_dropout = self.dropout_factory()

  def _validate_inputs(self, inputs):
    """Raises ValueError unless `inputs` has rank 3 (batch, length, emb)."""
    if inputs.ndim != 3:
      raise ValueError(f'Expect inputs.ndim=3, but inputs.ndim={inputs.ndim}')

  def __call__(self,
               inputs: Array,
               inputs_mask: Array,
               *,
               enable_dropout: bool = True) -> Array:
    """Applies a single h_transformer encoder layer.

    Args:
      inputs: Input data with shape <float>[batch, length, emb_dim].
      inputs_mask: Input padding mask with shape <bool>[batch, length, emb_dim].
        Entries are True for non-padding tokens and False for padding tokens.
      enable_dropout: Enables dropout if set to True.

    Returns:
      Outputs from an h-transformer encoder layer.

    Raises:
      ValueError: This is triggered if inputs array has the wrong rank.
    """
    self._validate_inputs(inputs)
    layer_input = self.partitioner.annotate_layer_activation(inputs)
    layer_input = self.pre_attention_layer_norm(layer_input)
    layer_input = self.partitioner.annotate_layer_activation(layer_input)
    if self.parallel:
      # Parallel formulation: attention and MLP read the same normed input
      # and their outputs are summed.
      y = (
          self.attention(
              layer_input, inputs_mask, enable_dropout=enable_dropout) +
          self.mlp(layer_input, enable_dropout=enable_dropout))
      # This scaling follows t5_architecture.py for compatibility. I suspect
      # that it is to make the scale comparable to that of layer_input. It is
      # possible that leaving it out makes no difference to the final results.
      # TODO: Remove this scaling once the integration tests confirm
      # that it is unnecessary.
      y *= 2**-0.5
      y = layer_input + self.post_attention_dropout(
          y, deterministic=not enable_dropout)
    else:
      # Attention block with residual connection.
      x = self.attention(
          layer_input, inputs_mask, enable_dropout=enable_dropout)
      x = layer_input + self.post_attention_dropout(
          x, deterministic=not enable_dropout)
      x = self.partitioner.annotate_layer_activation(x)
      # MLP block with residual connection.
      y = self.pre_mlp_layer_norm(x)
      y = self.partitioner.annotate_layer_activation(y)
      y = self.mlp(y, enable_dropout=enable_dropout)
      y = x + self.post_mlp_dropout(y, deterministic=not enable_dropout)
    y = self.partitioner.annotate_layer_activation(y)
    if self.sow_intermediates:
      self.sow('intermediates', 'activations', y)
    # Scan expects functions to have a signature: fn(carry, in) --> carry, out
    if self.scanned:
      return y, None  # pytype: disable=bad-return-type  # jax-ndarray
    else:
      return y
class EncoderAndDecoderLayers(nn.Module, param_remapping.ParameterRemappable):
  """Base class for Encoder and Decoder layers.

  Attributes:
    layer_factory: A callable that returns an EncoderLayer or DecoderOnlyLayer.
    input_dropout_factory: A callable that returns the dropout to apply to the
      input.
    output_dropout_factory: A callable that returns the dropout to apply to the
      output. Perhaps for legacy rather than essential reasons, the broadcasting
      pattern is sometimes different from input_dropout_factory().
    layer_norm_factory: A callable that returns a layer norm.
    num_layers: Number of layers to generate.
    layer_remat: Whether and how to apply jax.remat to each layer to perform
      recomputation in the backward pass.
    scan_layers: Whether to scan over layers.
    spmd_annotations: The spmd annotations needed for scanned layers.
  """
  layer_factory: Callable[[], nn.Module]
  input_dropout_factory: Callable[[], nn.Module]
  output_dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  num_layers: int
  layer_remat: utils.LayerRematOptions = utils.LayerRematOptions.LEGACY
  scan_layers: bool = False
  spmd_annotations: Any = None

  def setup(self):
    self.input_dropout = self.input_dropout_factory()
    self.output_layer_norm = self.layer_norm_factory()
    self.output_dropout = self.output_dropout_factory()

  def _setup_layers(self, module_name: str,
                    num_arguments: int) -> Callable[..., Array]:
    """Builds the layer stack, either as a scanned layer or a plain list.

    Args:
      module_name: Key used to look up SPMD annotations ('encoder'/'decoder').
      num_arguments: Number of call arguments each layer takes; all are
        broadcast (not scanned over) when scan_layers is True.

    Returns:
      A callable applying the whole stack of num_layers layers.
    """
    # Optionally wrap the layer factory with jax.remat per layer_remat.
    lyrf = utils.maybe_remat(
        self.layer_factory,
        self.layer_remat,
        self.scan_layers,
        static_argnums=(2,))
    logging.info(
        'Finished setting up a set of %d h-transformer encoder/decoder layers,',
        self.num_layers)
    if self.scan_layers:
      initializing = self.is_mutable_collection('params')
      # We scan the parameters along axis 1 as an XLA layout optimization.
      params_spec = _SCAN_AXIS if initializing else transforms.ScanIn(
          _SCAN_AXIS)
      cache_spec = 0
      scan_annotation = (
          self.spmd_annotations[module_name]
          if self.spmd_annotations is not None else None)
      # All per-call arguments are broadcast to every scanned layer.
      in_axes = (nn.broadcast,) * num_arguments
      lyrf = transforms.factory_scan(
          lyrf,
          in_axes=in_axes,
          variable_axes={
              'params': params_spec,
              'cache': cache_spec
          },
          split_rngs={
              'params': True,
              'dropout': True
          },
          length=self.num_layers,
          data_transform=transforms.inner_scan_spmd(scan_annotation,
                                                    _SCAN_AXIS),
          axes_collections=('params', 'cache'),
      )
      return lyrf()
    else:
      # Unscanned path: materialize num_layers independent layer instances.
      self.layers = [lyrf() for _ in range(self.num_layers)]
      return common.TransparentLayerSequence(self.layers)
class EncoderAndDecoderBase(EncoderAndDecoderLayers):
  """Base class for Encoder and Decoder classes.

  Attributes:
    token_embedder_factory: A callable that returns a token embedder. Please
      provide either this or `shared_token_embedder`.
    shared_token_embedder: A callable that returns a token embedder shared
      between both encoder and decoder.
  """
  # Exactly one of the two embedder attributes below must be set.
  token_embedder_factory: Optional[Callable[[], embedding.Embed]] = None
  shared_token_embedder: Optional[embedding.Embed] = None

  def setup(self):
    super().setup()
    self._setup_embedders()

  def _setup_embedders(self):
    """Resolves `self.embedder` from exactly one of the two attributes."""
    has_factory = self.token_embedder_factory is not None
    has_shared = self.shared_token_embedder is not None
    if has_factory == has_shared:
      # Either both were given or neither was.
      raise ValueError(
          'Please set exactly one of token_embedder_factory or '
          'shared_token_embedder. The token_embedder_factory was '
          f'{self.token_embedder_factory}, and shared_token_embedder was '
          f'{self.shared_token_embedder}.')
    if has_shared:
      self.embedder = self.shared_token_embedder
    else:
      self.token_embedder_factory: Callable[[], embedding.Embed]
      self.embedder = self.token_embedder_factory()
    logging.info('Finished setting up an embedder for h-transformer.')
class Encoder(EncoderAndDecoderBase):
  """A stack of input encoder layers."""

  def setup(self):
    super().setup()
    self.encoder = self._setup_layers('encoder', num_arguments=2)
    logging.info('Finished setting up h-transformer encoder.')

  def __call__(self,
               inputs: Array,
               inputs_mask: Optional[Array] = None,
               *,
               enable_dropout: bool = True) -> Array:
    """Applies the H-Transformer encoder to `inputs`.

    Args:
      inputs: Input token data with shape [batch, length]; fed through the
        token embedder.
      inputs_mask: Input padding mask with shape <bool>[batch, length], where
        True marks non-padding tokens and False marks padding.
      enable_dropout: Enables dropout if set to True.

    Returns:
      Outputs of an h-transformer encoder.

    Raises:
      ValueError: If `inputs` does not have rank 2.
    """
    if inputs.ndim != 2:  # (batch, len)
      raise ValueError(f'Expect inputs.ndim=2, but inputs.ndim={inputs.ndim}')

    embedded = self.embedder(inputs)
    embedded = self.input_dropout(embedded, deterministic=not enable_dropout)

    # Apply all encoder layers.
    outputs = self.encoder(embedded, inputs_mask, enable_dropout=enable_dropout)
    if self.scan_layers:
      # Scanned layers return (carry, scan_output); keep the carry.
      outputs = outputs[0]

    # Post-process the outputs of the final encoder layer.
    outputs = self.output_layer_norm(outputs)
    return self.output_dropout(outputs, deterministic=not enable_dropout)
class MakeDecoderFn(Protocol):
  """Signature for functions that make a low-level Decoder instance.

  Being a Protocol, any callable matching `__call__` below conforms; no
  explicit subclassing is required.
  """

  def __call__(
      self,
      *,
      shared_token_embedder: Optional[embedding.Embed] = None,
      spmd_annotations: Any = None,
  ) -> 'Decoder':
    """Makes a low-level Decoder instance.

    Args:
      shared_token_embedder: Shared token embedder instance, which should be
        passed to the returned module. If this is non-None, you should use it
        instead of providing your own token embedder.
      spmd_annotations: Optional SPMD annotations for scanned layers.

    Returns:
      Decoder instance.
    """
class DecoderOnlyLayer(EncoderLayer):
  """H-Transformer decoder-only layer.

  This decoder-only layer does not have cross attention. It is designed to be
  used by DecoderOnly below.

  A decoder performs exactly the same set of operations on the input as
  those by an encoder. This is because all input tokens are available due to
  standard teacher-forcing method during training phase. The implementation
  for both encoder and decoder is the same if we do not use the cache for
  decoder. This is fine for h-transformer Decoder because it has a linear
  complexity. So the runtime gain from using the cache is smaller.

  The only difference is that the attention component is an instance of
  OneDimDecoderSelfAttention which does not attend to future tokens.

  Attributes:
    attention: An instance of a OneDimDecoderSelfAttention module.
  """
  # Re-declared from EncoderLayer; per the docstring, callers are expected to
  # supply a causal OneDimDecoderSelfAttention here.
  attention: nn.Module
class DecoderOnly(EncoderAndDecoderBase):
  """A stack of DecoderOnly layers.
  Attributes:
    output_logits_factory: A callable that returns the output logits. If not
      provided, then the token embedders are used.
    sow_intermediates: Whether to track intermediates using Module.sow.
  """
  output_logits_factory: Optional[Callable[[], nn.Module]] = None
  sow_intermediates: bool = False
  def setup(self):
    super().setup()
    # num_arguments=2: each layer is called as layer(inputs, inputs_mask).
    self.decoder = self._setup_layers('decoder', num_arguments=2)
    # Re-declare attribute types for pytype ahead of the conditional
    # assignment below.
    self.output_logits_factory: Callable[[], nn.Module]
    self.output_logits: Optional[nn.Module]
    self.output_logits = (
        self.output_logits_factory() if self.output_logits_factory else None)
    logging.info('Finished setting up h-transformer decoder-only.')
  def __call__(self,
               inputs: Array,
               inputs_mask: Optional[Array] = None,
               decoder_target_tokens: Optional[Array] = None,
               decoder_segment_ids: Optional[Array] = None,
               decoder_positions: Optional[Array] = None,
               decoder_causal_attention: Optional[Array] = None,
               decode: Optional[bool] = False,
               *,
               enable_dropout: bool = True) -> Array:
    """Applies H-Transformer model on the inputs.
    Args:
      inputs: Input token ids with shape <int>[batch, length].
      inputs_mask: Input padding mask with shape <bool>[batch, length], where
        True for non-padding tokens and False for padding.
      decoder_target_tokens: target token to the decoder.
      decoder_segment_ids: decoder segmentation info for packed examples.
      decoder_positions: decoder subsequence positions for packed examples.
      decoder_causal_attention: a binary mask indicating the "inputs" portion of
        the concatenated sequence for a prefix LM.
      decode: Whether to prepare and use an autoregressive cache. This is unused
        in h-transformer.
      enable_dropout: Enables dropout if set to True.
    Returns:
      Output logits of the h-transformer decoder-only stack.
    Raises:
      ValueError: This is triggered if inputs array has the wrong rank or
        an unsupported argument is passed.
    """
    if decoder_segment_ids is not None or decoder_positions is not None:
      raise ValueError('Packed examples (segment IDs, positions) are not '
                       'supported by H-Transformer.')
    if inputs.ndim != 2:  # (batch, len)
      raise ValueError(f'Expect inputs.ndim=2, but inputs.ndim={inputs.ndim}')
    # These are in the argument list to conform to t5x.models.DecoderOnlyModel.
    # They are not used.
    del decoder_target_tokens
    del decoder_segment_ids
    del decoder_positions
    del decoder_causal_attention
    del decode
    embedded_inputs = self.embedder(inputs)
    embedded_inputs = self.input_dropout(
        embedded_inputs, deterministic=not enable_dropout)
    decoder_outputs = self.decoder(
        embedded_inputs, inputs_mask, enable_dropout=enable_dropout)
    if self.scan_layers:
      # Scanned layers return (carry, scan_output); the carry holds the
      # layer activations.
      decoder_outputs = decoder_outputs[0]
    # Post-process the outputs of the final decoder layer.
    decoder_outputs = self.output_layer_norm(decoder_outputs)
    decoder_outputs = self.output_dropout(
        decoder_outputs, deterministic=not enable_dropout)
    logit_mask = dense_attention.get_decoder_logit_mask(inputs,
                                                        decoder_outputs.dtype)
    # Zero out activations at padding positions before computing logits.
    decoder_outputs = logit_mask * decoder_outputs
    if self.sow_intermediates:
      self.sow('intermediates', 'pre_logits_layer', decoder_outputs)
    # Decoded Logits
    if self.output_logits is not None:
      self.output_logits: nn.Module
      logits = self.output_logits(decoder_outputs)
    else:
      # No dedicated output head: tie weights with the token embedder.
      logits = self.embedder.attend(decoder_outputs)  # pytype: disable=attribute-error
      # Correctly normalizes pre-softmax logits for this shared embedder case.
      logits = logits / jnp.sqrt(decoder_outputs.shape[-1])
    return logits
class DecoderLayer(nn.Module, param_remapping.ParameterRemappable):
  """H-Transformer decoder layer with cross attention.
  Attributes:
    self_attention: An instance of DecoderSelfAttention module.
    encoder_decoder_attention: An instance of encoder-decoder cross-attention.
      If this is None, then this is a decoder-only layer.
    mlp: The MLP module, applied after both attention modules.
    dropout_factory: A callable that returns a new dropout instance. This is
      applied after the attention module.
    layer_norm_factory: A callable that returns a new layer norm. This is
      applied before the attention module and before the MLP.
    partitioner_factory: A callable that returns a partitioner used to
      annotate layer activations for SPMD sharding.
    parallel: whether to call attention and mlp in parallel
    sow_intermediates: whether to track intermediates using Module.sow.
    scanned: whether this layer runs inside jax.lax.scan, which changes the
      return convention to (carry, out).
  """
  self_attention: nn.Module
  encoder_decoder_attention: Optional[nn.Module]
  mlp: nn.Module
  dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  partitioner_factory: Callable[[], Any] = partitioning.Partitioner1D
  parallel: bool = False
  sow_intermediates: bool = False
  scanned: bool = False
  def setup(self):
    self.pre_self_attention_layer_norm = self.layer_norm_factory()
    self.partitioner = self.partitioner_factory()
    if self.parallel:
      # Parallel formulation sums the branches and applies a single shared
      # dropout, so the per-branch dropout/layer-norm modules are not built.
      self.dropout = self.dropout_factory()
    else:
      self.post_self_attention_dropout = self.dropout_factory()
      self.pre_cross_attention_layer_norm = self.layer_norm_factory()
      self.post_cross_attention_dropout = self.dropout_factory()
      self.pre_mlp_layer_norm = self.layer_norm_factory()
      self.post_mlp_dropout = self.dropout_factory()
  def __call__(self,
               decoder_inputs: Array,
               decoder_inputs_mask: Array,
               *,
               enable_dropout: bool = True,
               encoder_outputs: Optional[Array] = None,
               encoder_decoder_mask: Optional[Array] = None,
               logit_mask: Optional[Array] = None) -> Array:
    """Applies a single h_transformer decoder layer.
    Args:
      decoder_inputs: Input data for decoder with shape <float>[batch_size,
        decoder_seq_length, decoder_hidden_size].
      decoder_inputs_mask: Inputs mask for decoder with shape <bool>[batch_size,
        decoder_seq_length].
      enable_dropout: Enables dropout if set to True.
      encoder_outputs: Encoder outputs with shape [batch_size,
        encoder_seq_length, decoder_hidden_size]. If None, this is a DecoderOnly
        layer.
      encoder_decoder_mask: encoder-decoder attention mask with shape
        <bool>[batch_size, 1, decoder_seq_length, encoder_seq_length].
      logit_mask: a mask (e.g., padding logit mask) to be applied to the
        attention logits, with shape <bool>[batch_size, decoder_seq_length, 1].
    Returns:
      Outputs from an h-transformer decoder layer with shape <float>[batch_size,
      decoder_seq_length, decoder_hidden_size].
    Raises:
      ValueError: This is triggered if decoder_inputs array has the wrong rank
        or self.encoder_decoder_attention is not provided when encoder_outputs
        is not None.
    """
    if decoder_inputs.ndim != 3:
      raise ValueError('Expect decoder_inputs.ndim=3, but decoder_inputs.ndim='
                       f'{decoder_inputs.ndim}')
    if encoder_outputs is not None and self.encoder_decoder_attention is None:
      raise ValueError('Expected encoder_decoder_attention to be populated.')
    layer_inputs = self.partitioner.annotate_layer_activation(decoder_inputs)
    x = self.pre_self_attention_layer_norm(layer_inputs)
    x = self.partitioner.annotate_layer_activation(x)
    if self.parallel:
      # Parallel formulation: self-attention, MLP (and cross-attention if
      # present) all read the same normalized input; their outputs are summed.
      # NOTE(review): logit_mask is never applied to x in this branch, unlike
      # the sequential branch below — confirm this is intentional.
      y = (
          self.self_attention(
              x, decoder_inputs_mask, enable_dropout=enable_dropout) +
          self.mlp(x, enable_dropout=enable_dropout))
      if encoder_outputs is not None:
        y += self.encoder_decoder_attention(
            x,
            encoder_outputs,
            mask=encoder_decoder_mask,
            enable_dropout=enable_dropout)
      # Rescale the branch sum by 1/sqrt(number of branches).
      y *= (3 if encoder_outputs is not None else 2)**-0.5
      # Single residual connection around the combined branches.
      z = layer_inputs + self.dropout(y, deterministic=not enable_dropout)
    else:
      if logit_mask is not None:
        x = logit_mask * x
      x = self.self_attention(
          x, decoder_inputs_mask, enable_dropout=enable_dropout)
      # Residual connection around self-attention.
      x = layer_inputs + self.post_self_attention_dropout(
          x, deterministic=not enable_dropout)
      x = self.partitioner.annotate_layer_activation(x)
      # Encoder-Decoder block.
      if encoder_outputs is None:
        # If encoder outputs not provided, skip attending from decoder to
        # encoder. This results in a decoder only layer.
        y = x
      else:
        y = self.pre_cross_attention_layer_norm(x)
        y = self.partitioner.annotate_layer_activation(y)
        if logit_mask is not None:
          y = logit_mask * y
        y = self.encoder_decoder_attention(
            y,
            encoder_outputs,
            mask=encoder_decoder_mask,
            enable_dropout=enable_dropout)
        # Residual connection around cross-attention.
        y = x + self.post_cross_attention_dropout(
            y, deterministic=not enable_dropout)
        y = self.partitioner.annotate_layer_activation(y)
      # MLP block.
      z = self.pre_mlp_layer_norm(y)
      z = self.partitioner.annotate_layer_activation(z)
      if logit_mask is not None:
        z = logit_mask * z
      z = self.mlp(z, enable_dropout=enable_dropout)
      # Residual connection around the MLP.
      z = y + self.post_mlp_dropout(z, deterministic=not enable_dropout)
      z = self.partitioner.annotate_layer_activation(z)
    if self.sow_intermediates:
      self.sow('intermediates', 'activations', z)
    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    # TODO: automate this detail.
    if self.scanned:
      return z, None  # pytype: disable=bad-return-type  # jax-ndarray
    else:
      return z
class Decoder(EncoderAndDecoderBase):
  """A stack of Decoder layers.
  This module can be used with or without the encoder stack. To use without an
  encoder, pass in encoder_outputs=None. This will bypass the encoder-decoder
  attention and hence results in a decoder-only block.
  Attributes:
    output_logits_factory: A callable that returns the output logits. If not
      provided, then the token embedders are used.
    sow_intermediates: Whether to track intermediates using Module.sow.
  """
  output_logits_factory: Optional[Callable[[], nn.Module]] = None
  sow_intermediates: bool = False
  def setup(self):
    super().setup()
    # num_arguments=5: each layer is called with (decoder_inputs, decoder_mask,
    # encoder_outputs, encoder_decoder_mask, logit_mask).
    self.decoder = self._setup_layers('decoder', num_arguments=5)
    # Re-declare attribute types for pytype ahead of the conditional
    # assignment below.
    self.output_logits_factory: Callable[[], nn.Module]
    self.output_logits: Optional[nn.Module]
    self.output_logits = (
        self.output_logits_factory() if self.output_logits_factory else None)
    logging.info('Finished setting up h-transformer decoder.')
  def __call__(self,
               decoder_input_tokens: Array,
               encoder_outputs: Optional[Array] = None,
               decoder_positions: Optional[Array] = None,
               decoder_mask: Optional[Array] = None,
               encoder_decoder_mask: Optional[Array] = None,
               *,
               segment_ids: Optional[Array] = None,
               enable_dropout: bool = True,
               decode: Optional[bool] = False,
               max_decode_length: Optional[int] = None,
               prefill: Optional[bool] = False,
               prefill_lengths: Optional[Array] = None) -> Array:
    """Applies H-Transformer model on the decoder_input_tokens.
    Args:
      decoder_input_tokens: Decoder input tokens with shape <int>[batch,
        decoder_seq_length].
      encoder_outputs: Encoder outputs with shape <float>[batch,
        encoder_seq_length, encoder_hidden_size]. If None, decoder hidden layer
        does not attend to encoder outputs, resulting in a decoder only block.
      decoder_positions: Decoder subsequence positions for packed examples. This
        is unused in h-transformer.
      decoder_mask: Decoder input padding mask with shape <bool>[batch, length],
        where True for non-padding tokens and False for padding tokens.
      encoder_decoder_mask: The attention mask for the encoder outputs.
      segment_ids: decoder segmentation info for packed examples. This is unused
        in h-transformer.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache. This is unused
        in h-transformer.
      max_decode_length: An optional integer specifying the maximum decoding
        length. This is unused in h-transformer.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache. This is unused in h-transformer.
    Returns:
      Outputs from an h-transformer decoder block with shape <float>[batch_size,
      decoder_seq_length, decoder_hidden_size].
    Raises:
      ValueError: This is triggered if decoder_input_tokens or encoder_outputs
        has the wrong rank or an unsupported argument is passed.
    """
    if decoder_input_tokens.ndim != 2:
      raise ValueError(
          f'Expect decoder_input_tokens.ndim=2, but decoder_input_tokens.ndim={decoder_input_tokens.ndim}'
      )
    if encoder_outputs is not None and encoder_outputs.ndim != 3:
      raise ValueError(
          f'Expect encoder_outputs.ndim=3, but encoder_outputs.ndim={encoder_outputs.ndim}'
      )
    if segment_ids is not None or decoder_positions is not None:
      raise ValueError('Packed examples (segment IDs, positions) are not '
                       'supported by H-Transformer.')
    if prefill or decode or prefill_lengths is not None:
      raise ValueError(
          'Autoregressive cache is not supported by H-Transformer.')
    # These are in the argument list to conform to t5_architecture.Decoder.
    # They are not used.
    del decoder_positions
    del segment_ids
    del decode
    del max_decode_length
    del prefill
    del prefill_lengths
    embedded_decoder_inputs = self.embedder(decoder_input_tokens)
    embedded_decoder_inputs = self.input_dropout(
        embedded_decoder_inputs, deterministic=not enable_dropout)
    logit_mask = None
    if encoder_outputs is not None:
      # Only needs logit_mask for the dense cross_attention attending to
      # the encoder_outputs.
      logit_mask = dense_attention.get_decoder_logit_mask(
          decoder_input_tokens, embedded_decoder_inputs.dtype)
    decoder_outputs = self.decoder(
        embedded_decoder_inputs,
        decoder_mask,
        enable_dropout=enable_dropout,
        encoder_outputs=encoder_outputs,
        encoder_decoder_mask=encoder_decoder_mask,
        logit_mask=logit_mask)
    if self.scan_layers:
      # Scanned layers return (carry, scan_output); the carry holds the
      # layer activations.
      decoder_outputs = decoder_outputs[0]
    # Post-process the outputs of the final decoder layer.
    decoder_outputs = self.output_layer_norm(decoder_outputs)
    decoder_outputs = self.output_dropout(
        decoder_outputs, deterministic=not enable_dropout)
    if logit_mask is not None:
      # Zero out activations at padding positions before computing logits.
      decoder_outputs = logit_mask * decoder_outputs
    if self.sow_intermediates:
      self.sow('intermediates', 'pre_logits_layer', decoder_outputs)
    # Decoded Logits
    if self.output_logits is not None:
      self.output_logits: nn.Module
      logits = self.output_logits(decoder_outputs)
    else:
      # No dedicated output head: tie weights with the token embedder.
      logits = self.embedder.attend(decoder_outputs)  # pytype: disable=attribute-error
      # Correctly normalizes pre-softmax logits for this shared embedder case.
      logits = logits / jnp.sqrt(decoder_outputs.shape[-1])
    if self.sow_intermediates:
      self.sow('intermediates', 'logits', logits)
    return logits
class EncoderDecoder(nn.Module, param_remapping.ParameterRemappable):
  """H-Transformer EncoderDecoder Model for sequence to sequence translation.
  Attributes:
    encoder_factory: A callable that returns the lower-level Encoder object. If
      shared_token_embedder_factory is non-None, then the result of it will be
      passed as the `shared_token_embedder` argument to `encoder_factory`.
    decoder_factory: A callable that returns the lower-level Decoder object. If
      shared_token_embedder_factory is non-None, then the result of it will be
      passed as the `shared_token_embedder` argument to `decoder_factory`.
    scan_layers: whether to scan over layers.
    spmd_annotations: Optional SPMD annotations; if set, must be a mapping with
      'encoder' and 'decoder' entries.
    shared_token_embedder_factory: A callable that returns an embedder that can
      be shared between the encoder and decoder.
  """
  encoder_factory: MakeEncoderFn
  decoder_factory: MakeDecoderFn
  scan_layers: bool = False
  spmd_annotations: Any = None
  shared_token_embedder_factory: Optional[Callable[[], embedding.Embed]] = None
  def setup(self):
    # Re-declare attribute type for pytype; the factory may be None.
    self.shared_token_embedder_factory: Callable[[], embedding.Embed]
    self.token_embedder = (
        self.shared_token_embedder_factory()
        if self.shared_token_embedder_factory else None)
    # TODO: Clean up SPMD annotation code.
    if self.spmd_annotations is None:
      encoder_annotations = None
      decoder_annotations = None
    else:
      encoder_annotations = self.spmd_annotations['encoder']
      decoder_annotations = self.spmd_annotations['decoder']
    # Pass spmd_annotations only to factories whose signature accepts the
    # keyword, so older factory signatures keep working.
    encoder_factory_params = tuple(
        inspect.signature(self.encoder_factory).parameters.keys())
    if 'spmd_annotations' in encoder_factory_params:
      self.encoder = self.encoder_factory(
          shared_token_embedder=self.token_embedder,
          spmd_annotations=encoder_annotations)
    else:
      self.encoder = self.encoder_factory(
          shared_token_embedder=self.token_embedder)
    decoder_factory_params = tuple(
        inspect.signature(self.decoder_factory).parameters.keys())
    if 'spmd_annotations' in decoder_factory_params:
      self.decoder = self.decoder_factory(
          shared_token_embedder=self.token_embedder,
          spmd_annotations=decoder_annotations)
    else:
      self.decoder = self.decoder_factory(
          shared_token_embedder=self.token_embedder)
    logging.info('Finished setting up h-transformer encoder-decoder.')
  @property
  def encoder_embedder(self) -> embedding.Embed:
    # Token embedder actually used by the encoder (shared or per-module).
    return self.encoder.embedder
  @property
  def decoder_embedder(self) -> embedding.Embed:
    # Token embedder actually used by the decoder (shared or per-module).
    return self.decoder.embedder
  def __call__(self,
               encoder_input_tokens: Array,
               decoder_input_tokens: Array,
               decoder_target_tokens: Array,
               encoder_segment_ids: Optional[Array] = None,
               decoder_segment_ids: Optional[Array] = None,
               encoder_positions: Optional[Array] = None,
               decoder_positions: Optional[Array] = None,
               *,
               enable_dropout: bool = True,
               decode: Optional[bool] = False,
               max_decode_length: Optional[int] = None) -> Array:
    """Applies H-Transformer encoder-decoder model on the inputs.
    This method requires both decoder_target_tokens and decoder_input_tokens,
    which is a shifted version of the former.
    Args:
      encoder_input_tokens: Inputs to encoder with shape <int>[batch, length].
      decoder_input_tokens: Inputs to decoder with shape <int>[batch, length].
      decoder_target_tokens: Target tokens to the decoder with shape
        <int>[batch, length].
      encoder_segment_ids: encoder segmentation info for packed examples. This
        is not used.
      decoder_segment_ids: decoder segmentation info for packed examples. This
        is not used.
      encoder_positions: encoder subsequence positions for packed examples. This
        is not used.
      decoder_positions: decoder subsequence positions for packed examples. This
        is not used.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache. This is not
        used.
      max_decode_length: An optional integer specifying the maximum decoding
        length. This is not used.
    Returns:
      logits array from h-transformer with shape <float>[batch_size,
      decoder_seq_length, decoder_hidden_size].
    """
    if encoder_segment_ids is not None or decoder_segment_ids is not None or (
        encoder_positions is not None or decoder_positions is not None):
      raise ValueError('Packed examples (segment IDs, positions) are not '
                       'supported by H-Transformer.')
    # These are here to conform to t5_architecture.EncoderDecoder.
    # They are not used.
    del encoder_segment_ids
    del decoder_segment_ids
    del encoder_positions
    del decoder_positions
    del decode
    del max_decode_length
    # Non-zero token ids are treated as real tokens (id 0 is padding).
    encoder_mask = encoder_input_tokens > 0
    encoder_outputs = self.encoder(
        encoder_input_tokens, encoder_mask, enable_dropout=enable_dropout)
    # The decoder padding mask is derived from the targets, not the
    # (shifted) decoder inputs.
    decoder_mask = decoder_target_tokens > 0
    encoder_decoder_mask = dense_attention.make_attention_mask(
        decoder_mask, encoder_mask, dtype=encoder_outputs.dtype)
    return self.decoder(
        decoder_input_tokens,
        encoder_outputs=encoder_outputs,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        enable_dropout=enable_dropout)
| 34,420 | 38.609896 | 106 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/h_transformer_utils.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes and functions for h_transformer architectures."""
import enum
from typing import Callable, Tuple
from flax import linen as nn
import jax
from flaxformer.components import transforms
@enum.unique
class LayerRematOptions(enum.Enum):
  """Per-layer rematerialization (jax.remat) policies.

  Attributes:
    NONE: Do not apply jax.remat at all.
    MINIMAL: Recompute only the non-matmul operations during backprop.
    FULL: Recompute the whole layer during backprop.
    LEGACY: Backwards-compatible behavior for existing configs: resolves to
      NONE when scan_layers=False and to FULL when scan_layers=True.
  """
  NONE = enum.auto()
  MINIMAL = enum.auto()
  FULL = enum.auto()
  LEGACY = enum.auto()
def maybe_remat(lyrf: Callable[[], nn.Module], layer_remat: LayerRematOptions,
                scan_layers: bool,
                static_argnums: Tuple[int, ...]) -> Callable[[], nn.Module]:
  """Optionally wraps a layer factory with jax.remat.

  Args:
    lyrf: Encoder or decoder layer factory.
    layer_remat: Per-layer remat policy; see the comments on LayerRematOptions.
    scan_layers: Whether jax.lax.scan is used for the stack of layers. Only
      consulted to resolve the LEGACY option.
    static_argnums: The static_argnums forwarded to the jax.remat call.

  Returns:
    The layer factory, wrapped with remat unless the resolved policy is NONE.

  Raises:
    ValueError: If an unsupported layer_remat option is passed.
  """
  # LEGACY reproduces the historical coupling between scanning and remat.
  if layer_remat is LayerRematOptions.LEGACY:
    layer_remat = (
        LayerRematOptions.FULL if scan_layers else LayerRematOptions.NONE)
  if layer_remat is LayerRematOptions.NONE:
    return lyrf
  elif layer_remat is LayerRematOptions.FULL:
    # A policy of None means jax recomputes everything in the layer.
    remat_policy = None
  elif layer_remat is LayerRematOptions.MINIMAL:
    remat_policy = jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims
  else:
    raise ValueError('Unsupported layer_remat option.')
  return transforms.factory_remat(
      lyrf,
      concrete=False,
      prevent_cse=False,
      static_argnums=static_argnums,
      policy=remat_policy)
| 2,591 | 31 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/h_transformer_1d_architecture_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for h_transformer_1d_architecture."""
from absl.testing import absltest
from absl.testing import parameterized
from jax import random
import numpy as np
from flaxformer import testing_utils
from flaxformer.architectures.h_transformer import h_transformer_1d_architecture_test_utils as h_transformer_test_utils
from flaxformer.architectures.h_transformer import h_transformer_utils as utils
# Directory containing golden JSON files with the expected parameter shapes.
testdata_dir = 'flaxformer/architectures/h_transformer/testdata'
expected_files = testing_utils.ExpectedJsonFiles(testdata_dir)
# Compares only the parameter tree shapes (not values) against a golden file.
check_params = expected_files.check_params_shapes_only
class EncoderTest(parameterized.TestCase):
  """Tests for the h-transformer encoder architecture."""
  def setUp(self):
    super().setUp()
    # Two token sequences of length 4; values are arbitrary vocab ids.
    self.inputs = np.array([
        [101, 183, 20, 75],
        [101, 392, 19, 7],
    ],
                           dtype=np.int32)
    self.embed_size = 13
    batch, seq_len = self.inputs.shape
    # Encoder output preserves batch/length and has embed_size channels.
    self.expected_output_shape = (batch, seq_len, self.embed_size)
    self.rng_key = random.PRNGKey(0)
  @parameterized.named_parameters(
      dict(testcase_name='scan', scan_layers=True),
      dict(testcase_name='no_scan', scan_layers=False),
  )
  def test_encoder_run(self, scan_layers):
    # Smoke test: the encoder initializes and produces the expected shape.
    encoder = h_transformer_test_utils.config_encoder(
        embed_size=self.embed_size, scan_layers=scan_layers)
    output, _ = encoder.init_with_output(
        self.rng_key,
        self.inputs,
        enable_dropout=False,
    )
    self.assertEqual(output.shape, self.expected_output_shape)
  @parameterized.named_parameters(
      dict(
          testcase_name='scan',
          scan_layers=True,
          layer_remat_options=[
              utils.LayerRematOptions.MINIMAL, utils.LayerRematOptions.FULL
          ]),
      dict(
          testcase_name='no_scan',
          scan_layers=False,
          layer_remat_options=[
              utils.LayerRematOptions.NONE, utils.LayerRematOptions.MINIMAL,
              utils.LayerRematOptions.FULL
          ]),
  )
  def test_scan_and_remat(self, scan_layers, layer_remat_options):
    """Tests if encoder returns the same output for different scan/remat."""
    outputs = []
    for layer_remat in layer_remat_options:
      encoder = h_transformer_test_utils.config_encoder(
          embed_size=self.embed_size,
          scan_layers=scan_layers,
          layer_remat=layer_remat)
      output, _ = encoder.init_with_output(
          self.rng_key,
          self.inputs,
          enable_dropout=False,
      )
      outputs.append(output)
    # Remat must not change numerics beyond float tolerance.
    for other_output in outputs[1:]:
      np.testing.assert_allclose(outputs[0], other_output, rtol=1.5e-5)
  def test_encoder_shapes_per_layer(self):
    encoder = h_transformer_test_utils.config_encoder()
    output1, variables = encoder.init_with_output(
        self.rng_key,
        self.inputs,
        enable_dropout=False,
    )
    # Round-trip the params through the checkpoint save format and compare
    # shapes against the golden file.
    reformatted = encoder.apply({},
                                variables['params'],
                                method=encoder.to_save_format)
    check_params(reformatted, 'encoder_shapes_per_layer.json')
    self.assertEqual(output1.shape, (2, 4, 13))
    # Convert back to Flax module structure format and test again.
    params2 = encoder.apply({}, reformatted, method=encoder.from_save_format)
    output2 = encoder.apply(
        {'params': params2},
        self.inputs,
        enable_dropout=False,
    )
    np.testing.assert_allclose(output1, output2, rtol=1e-8)
class DecoderOnlyTest(parameterized.TestCase):
  """Tests for the h-transformer decoder-only architecture."""
  def setUp(self):
    super().setUp()
    # Two token sequences of length 16; values are arbitrary vocab ids.
    self.inputs = np.array([
        [101, 183, 20, 75, 76, 78, 91, 102, 122, 187, 23, 76, 76, 87, 94, 121],
        [101, 392, 19, 7, 76, 78, 91, 102, 122, 187, 23, 76, 76, 87, 94, 121],
    ],
                           dtype=np.int32)
    self.embed_size = 13
    self.vocab_size = 2000
    (batch, seq_len) = self.inputs.shape
    # Decoder-only output is a logits array over the vocabulary.
    self.expected_output_shape = (batch, seq_len, self.vocab_size)
  @parameterized.named_parameters(
      dict(testcase_name='scan', scan_layers=True),
      dict(testcase_name='no_scan', scan_layers=False),
  )
  def test_decoder_run(self, scan_layers):
    # Smoke test: the decoder initializes and produces the expected shape.
    decoder = h_transformer_test_utils.config_decoder_only(
        embed_size=self.embed_size,
        vocab_size=self.vocab_size,
        scan_layers=scan_layers)
    output, _ = decoder.init_with_output(
        random.PRNGKey(0), self.inputs, enable_dropout=False)
    self.assertEqual(output.shape, self.expected_output_shape)
  @parameterized.named_parameters(
      dict(
          testcase_name='scan',
          scan_layers=True,
          layer_remat_options=[
              utils.LayerRematOptions.MINIMAL, utils.LayerRematOptions.FULL
          ]),
      dict(
          testcase_name='no_scan',
          scan_layers=False,
          layer_remat_options=[
              utils.LayerRematOptions.NONE, utils.LayerRematOptions.MINIMAL,
              utils.LayerRematOptions.FULL
          ]),
  )
  def test_scan_and_remat(self, scan_layers, layer_remat_options):
    """Tests if decoder returns the same output for different scan/remat."""
    outputs = []
    for layer_remat in layer_remat_options:
      decoder = h_transformer_test_utils.config_decoder_only(
          embed_size=self.embed_size,
          vocab_size=self.vocab_size,
          scan_layers=scan_layers,
          layer_remat=layer_remat)
      output, _ = decoder.init_with_output(
          random.PRNGKey(0), self.inputs, enable_dropout=False)
      outputs.append(output)
    # Remat must not change numerics beyond float tolerance.
    for other_output in outputs[1:]:
      np.testing.assert_allclose(
          outputs[0], other_output, atol=1e-5, rtol=1.5e-5)
  def test_decoder_shapes_per_layer(self):
    decoder = h_transformer_test_utils.config_decoder_only(
        embed_size=self.embed_size, vocab_size=self.vocab_size)
    output1, variables = decoder.init_with_output(
        random.PRNGKey(0),
        self.inputs,
        enable_dropout=False,
    )
    # Round-trip the params through the checkpoint save format and compare
    # shapes against the golden file.
    reformatted = decoder.apply({},
                                variables['params'],
                                method=decoder.to_save_format)
    with self.subTest(name='check_params_and_output_shape'):
      check_params(reformatted, 'decoder_only_shapes_per_layer.json')
      self.assertEqual(output1.shape, self.expected_output_shape)
    # Convert back to Flax module structure format and test again.
    params2 = decoder.apply({}, reformatted, method=decoder.from_save_format)
    output2 = decoder.apply(
        {'params': params2},
        self.inputs,
        enable_dropout=False,
    )
    with self.subTest(name='check_flax_module_outputs'):
      np.testing.assert_allclose(output1, output2, rtol=1e-8)
class DecoderTest(parameterized.TestCase):
  """Tests for the h-transformer decoder (with optional cross attention)."""
  def setUp(self):
    super().setUp()
    # Two token sequences of length 4; values are arbitrary vocab ids.
    self.inputs = np.array([
        [101, 183, 20, 75],
        [101, 392, 19, 7],
    ],
                           dtype=np.int32)
    self.embed_size = 13
    self.vocab_size = 2000
    (batch, seq_len) = self.inputs.shape
    # Decoder output is a logits array over the vocabulary.
    self.expected_output_shape = (batch, seq_len, self.vocab_size)
  @parameterized.named_parameters(
      dict(testcase_name='scan_no_parallel', scan_layers=True, parallel=False),
      dict(
          testcase_name='no_scan_no_parallel',
          scan_layers=False,
          parallel=False),
      dict(testcase_name='scan_parallel', scan_layers=True, parallel=True),
      dict(testcase_name='no_scan_parallel', scan_layers=False, parallel=True),
  )
  def test_decoder_run(self, scan_layers, parallel):
    # Smoke test over the scan x parallel configuration grid.
    decoder = h_transformer_test_utils.config_decoder(
        embed_size=self.embed_size,
        vocab_size=self.vocab_size,
        scan_layers=scan_layers,
        parallel=parallel)
    output, _ = decoder.init_with_output(
        random.PRNGKey(0), self.inputs, enable_dropout=False)
    self.assertEqual(output.shape, self.expected_output_shape)
  @parameterized.named_parameters(
      dict(
          testcase_name='scan',
          scan_layers=True,
          layer_remat_options=[
              utils.LayerRematOptions.MINIMAL, utils.LayerRematOptions.FULL
          ]),
      dict(
          testcase_name='no_scan',
          scan_layers=False,
          layer_remat_options=[
              utils.LayerRematOptions.NONE, utils.LayerRematOptions.MINIMAL,
              utils.LayerRematOptions.FULL
          ]),
  )
  def test_scan_and_remat(self, scan_layers, layer_remat_options):
    """Tests if decoder returns the same output for different scan/remat."""
    outputs = []
    for layer_remat in layer_remat_options:
      decoder = h_transformer_test_utils.config_decoder(
          embed_size=self.embed_size,
          vocab_size=self.vocab_size,
          scan_layers=scan_layers,
          layer_remat=layer_remat)
      output, _ = decoder.init_with_output(
          random.PRNGKey(0), self.inputs, enable_dropout=False)
      outputs.append(output)
    # Remat must not change numerics beyond float tolerance.
    for other_output in outputs[1:]:
      np.testing.assert_allclose(
          outputs[0], other_output, atol=1e-5, rtol=1.5e-5)
  def test_decoder_shapes_per_layer(self):
    decoder = h_transformer_test_utils.config_decoder(
        embed_size=self.embed_size, vocab_size=self.vocab_size)
    output1, variables = decoder.init_with_output(
        random.PRNGKey(0),
        self.inputs,
        enable_dropout=False,
    )
    # Round-trip the params through the checkpoint save format and compare
    # shapes against the golden file.
    reformatted = decoder.apply({},
                                variables['params'],
                                method=decoder.to_save_format)
    with self.subTest(name='check_params_and_output_shape'):
      check_params(reformatted, 'decoder_shapes_per_layer.json')
      self.assertEqual(output1.shape, self.expected_output_shape)
    # Convert back to Flax module structure format and test again.
    params2 = decoder.apply({}, reformatted, method=decoder.from_save_format)
    output2 = decoder.apply(
        {'params': params2},
        self.inputs,
        enable_dropout=False,
    )
    with self.subTest(name='check_flax_module_outputs'):
      np.testing.assert_allclose(output1, output2, rtol=1e-8)
class EncoderDecoderTest(parameterized.TestCase):
def setUp(self):
super().setUp()
inputs = np.array([
[101, 183, 20, 75],
[101, 392, 19, 7],
], dtype=np.int32)
self.encoder_input_tokens = inputs
self.decoder_input_tokens = inputs
self.decoder_target_tokens = inputs
self.embed_size = 13
self.vocab_size = 2000
(batch, seq_len) = inputs.shape
self.expected_output_shape = (batch, seq_len, self.vocab_size)
@parameterized.named_parameters(
dict(testcase_name='scan', scan_layers=True),
dict(testcase_name='no_scan', scan_layers=False),
)
def test_encoder_decoder_run(self, scan_layers):
encoder_decoder = h_transformer_test_utils.config_encoder_decoder(
embed_size=self.embed_size,
vocab_size=self.vocab_size,
scan_layers=scan_layers)
output, _ = encoder_decoder.init_with_output(
random.PRNGKey(0),
encoder_input_tokens=self.encoder_input_tokens,
decoder_input_tokens=self.decoder_input_tokens,
decoder_target_tokens=self.decoder_target_tokens,
enable_dropout=False,
)
self.assertEqual(output.shape, self.expected_output_shape)
@parameterized.named_parameters(
dict(
testcase_name='scan',
scan_layers=True,
layer_remat_options=[
utils.LayerRematOptions.MINIMAL, utils.LayerRematOptions.FULL
]),
dict(
testcase_name='no_scan',
scan_layers=False,
layer_remat_options=[
utils.LayerRematOptions.NONE, utils.LayerRematOptions.MINIMAL,
utils.LayerRematOptions.FULL
]),
)
def test_scan_and_remat(self, scan_layers, layer_remat_options):
"""Tests if encoder_decoder returns the same output for different scan/remat."""
outputs = []
for layer_remat in layer_remat_options:
encoder_decoder = h_transformer_test_utils.config_encoder_decoder(
embed_size=self.embed_size,
vocab_size=self.vocab_size,
scan_layers=scan_layers,
layer_remat=layer_remat)
output, _ = encoder_decoder.init_with_output(
random.PRNGKey(0),
encoder_input_tokens=self.encoder_input_tokens,
decoder_input_tokens=self.decoder_input_tokens,
decoder_target_tokens=self.decoder_target_tokens,
enable_dropout=False,
)
outputs.append(output)
for other_output in outputs[1:]:
np.testing.assert_allclose(
outputs[0], other_output, atol=1e-5, rtol=1.5e-5)
  def test_encoder_decoder_shapes_per_layer(self):
    """Round-trips parameters through the save format and compares outputs.

    Checks that converting params to the save format and back yields a
    parameter tree that produces numerically identical model outputs.
    """
    encoder_decoder = h_transformer_test_utils.config_encoder_decoder(
        embed_size=self.embed_size, vocab_size=self.vocab_size)
    output1, variables = encoder_decoder.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens=self.encoder_input_tokens,
        decoder_input_tokens=self.decoder_input_tokens,
        decoder_target_tokens=self.decoder_target_tokens,
        enable_dropout=False,
    )
    # Convert the Flax parameter tree into the flat save format.
    reformatted = encoder_decoder.apply({},
                                        variables['params'],
                                        method=encoder_decoder.to_save_format)
    with self.subTest(name='check_params_and_output_shape'):
      # Golden-file check of the reformatted parameter shapes.
      check_params(reformatted, 'encoder_decoder_shapes_per_layer.json')
      self.assertEqual(output1.shape, self.expected_output_shape)
    # Convert back to Flax module structure format and test again.
    params2 = encoder_decoder.apply({},
                                    reformatted,
                                    method=encoder_decoder.from_save_format)
    output2 = encoder_decoder.apply(
        {'params': params2},
        encoder_input_tokens=self.encoder_input_tokens,
        decoder_input_tokens=self.decoder_input_tokens,
        decoder_target_tokens=self.decoder_target_tokens,
        enable_dropout=False,
    )
    with self.subTest(name='check_flax_module_outputs'):
      # Outputs must match bit-for-bit up to a very tight tolerance since the
      # parameters are the same values, only reformatted.
      np.testing.assert_allclose(output1, output2, rtol=1e-8)
# Standard absl test entry point when the module is run directly.
if __name__ == '__main__':
  absltest.main()
| 14,614 | 35.5375 | 119 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/partitioning.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""SPMD model partitioning utilities for h-transformer."""
import abc
import dataclasses
import functools
from typing import Any, Dict, Tuple, Union
from flax.linen import spmd
from flaxformer.types import Array
# This adopts the standard_logical_axis_rules in t5x.partitioning.
# If a different set of rules is used, the names should be changed
# accordingly. A few new axis names specific to h-attention have
# been appended. Since no partitioning is expected along these
# axes, there is no need to add them to the logical_axis_rules. They
# will be assigned 'None' by default instead of 'data' or 'model'.
class AxisName:
  """All axis names supported in h-transformer."""
  # Standard Transformer axis names. These are supported by
  # T5x standard_logical_axis_rules and hence should work in
  # param_with_axes().
  BATCH: str = 'batch'
  VOCAB: str = 'vocab'
  EMBED: str = 'embed'
  MLP: str = 'mlp'
  HEADS: str = 'heads'
  KV: str = 'kv'
  JOINED_KV: str = 'joined_kv'
  LENGTH: str = 'length'
  RELPOS_BUCKETS: str = 'relpos_buckets'
  # For 2d images or video frames.
  HEIGHT: str = 'height'
  WIDTH: str = 'width'
  # The h-attention specific axis annotation.
  # These are NOT supported by T5x standard_logical_axis_rules.
  # So do not use them with param_with_axes() if T5x train loop is used.
  # The exceptions are: 1) Add them to T5x standard_logical_axis_rules;
  # 2) Use PAX train loop.
  PACKED_DIM: str = 'packed_dim'
  BLOCK: str = 'block'
  CLUSTER: str = 'cluster'
  ROW_CLUSTER: str = 'row_cluster'
  COL_CLUSTER: str = 'col_cluster'
  NEIGHBOR: str = 'neighbor'
  REL_POSITION: str = 'rel_position'
  UNMODELED: str = 'unmodeled'
  UNMODELED_HEADS: str = 'unmodeled_heads'
  UNMODELED_KV: str = 'unmodeled_kv'
@dataclasses.dataclass()
class PartitionerBase(metaclass=abc.ABCMeta):
  """Base class for h-transformer SPMD partitioners.

  The class attributes are tuples of logical axis names describing the rank
  and axis semantics of the arrays used by h-attention. Subclasses override
  `layer_output_axis_names` and `qkv_axis_names` for 1D (sequence) and 2D
  (image) inputs. Each `annotate_*` method attaches logical sharding
  constraints to a single array, or to every array in a dict, via
  `flax.linen.spmd.with_logical_constraint`.
  """
  # Axis names for layer output activations; set by subclasses.
  layer_output_axis_names: Tuple[str, ...] = ()
  # Axis names for the multihead query/key/value arrays; set by subclasses.
  qkv_axis_names: Tuple[str, ...] = ()
  coarse_qkv_axis_names: Tuple[str, ...] = (
      AxisName.BATCH,
      AxisName.PACKED_DIM,
      AxisName.CLUSTER,
      AxisName.HEADS,
      AxisName.KV,
  )
  attention_axis_names: Tuple[str, ...] = (
      AxisName.BATCH,
      AxisName.PACKED_DIM,
      AxisName.ROW_CLUSTER,
      AxisName.COL_CLUSTER,
      AxisName.HEADS,
  )
  correction_mask_axis_names: Tuple[str, ...] = (
      AxisName.PACKED_DIM,
      AxisName.ROW_CLUSTER,
      AxisName.COL_CLUSTER,
      AxisName.HEADS,
  )
  causal_mask_axis_names: Tuple[str, ...] = (
      AxisName.ROW_CLUSTER,
      AxisName.COL_CLUSTER,
      AxisName.HEADS,
  )
  padding_mask_axis_names: Tuple[str, ...] = (
      AxisName.BATCH,
      AxisName.LENGTH,
      # Last dim is added embedding_dim=1. So no need to partition.
      AxisName.UNMODELED,
  )
  coarse_padding_mask_axis_names: Tuple[str, ...] = (
      AxisName.BATCH,
      AxisName.PACKED_DIM,
      AxisName.CLUSTER,
      # This is added heads=1. So no need to partition.
      AxisName.UNMODELED_HEADS,
      # This is added kv=1. So no need to partition.
      AxisName.UNMODELED_KV,
  )
  singlehead_rpb_axis_names: Tuple[str, ...] = (
      AxisName.BATCH,
      AxisName.PACKED_DIM,
      AxisName.ROW_CLUSTER,
      AxisName.COL_CLUSTER,
      # This is for heads=1. So no need to partition.
      AxisName.UNMODELED_HEADS,
  )

  def _generic_annotation_fn(
      self,
      y: Union[Array, Dict[Any, Array]],
      axis_names: Tuple[str, ...],
  ) -> Union[Array, Dict[Any, Array]]:
    """Annotates `y` (or every array in dict `y`) with `axis_names`.

    Args:
      y: An array, or a dict of arrays, whose rank must equal
        `len(axis_names)`.
      axis_names: Logical axis names attached as sharding constraints.

    Returns:
      The same structure as `y` with sharding constraints attached.
    """
    annotation_fn = functools.partial(
        spmd.with_logical_constraint,
        logical_axis_resources=axis_names,
    )
    axis_count = len(axis_names)
    if isinstance(y, dict):
      annotation = {}
      for key, value in y.items():
        # Bug fix: the failure message used to say 'does match', which is the
        # opposite of what a failed assertion means.
        assert axis_count == value.ndim, (
            f'Axis count {axis_count} does not match {key} array ndim '
            f'{value.ndim}.')
        annotation[key] = annotation_fn(value)
    else:
      assert axis_count == y.ndim, (
          f'Axis count {axis_count} does not match array ndim {y.ndim}.')
      annotation = annotation_fn(y)
    return annotation

  def annotate_layer_activation(
      self,
      y: Union[Array, Dict[Any, Array]],
  ) -> Union[Array, Dict[Any, Array]]:
    """Annotates a layer output activation."""
    return self._generic_annotation_fn(y, self.layer_output_axis_names)

  def annotate_multihead_qkv(
      self,
      qkv: Union[Array, Dict[Any, Array]],
  ) -> Union[Array, Dict[Any, Array]]:
    """Annotates a multihead query/key/value array."""
    return self._generic_annotation_fn(qkv, self.qkv_axis_names)

  def annotate_softmax_partition(
      self,
      partition: Union[Array, Dict[Any, Array]],
  ) -> Union[Array, Dict[Any, Array]]:
    """Annotates a softmax partition; same layout as layer activations."""
    return self.annotate_layer_activation(partition)

  def annotate_coarse_qkv(
      self,
      qkv: Union[Array, Dict[Any, Array]],
  ) -> Union[Array, Dict[Any, Array]]:
    """Annotates a coarsened query/key/value array."""
    return self._generic_annotation_fn(qkv, self.coarse_qkv_axis_names)

  def annotate_attention(
      self,
      attention: Union[Array, Dict[Any, Array]],
  ) -> Union[Array, Dict[Any, Array]]:
    """Annotates an attention array."""
    return self._generic_annotation_fn(attention, self.attention_axis_names)

  # The similarity and attention have the same shape and axis names.
  # The same holds true for rpb and zero_block_mask.
  def annotate_similarity(
      self,
      similarity: Union[Array, Dict[Any, Array]],
  ) -> Union[Array, Dict[Any, Array]]:
    """Annotates a similarity array; same layout as attention."""
    return self.annotate_attention(similarity)
@dataclasses.dataclass()
class Partitioner1D(PartitionerBase):
  """Partitioner for h-transformer-1d (sequence inputs)."""
  # Activations have shape (batch, length, embed).
  layer_output_axis_names: Tuple[str, ...] = (
      AxisName.BATCH,
      AxisName.LENGTH,
      AxisName.EMBED,
  )
  # Multihead q/k/v have shape (batch, length, heads, kv).
  qkv_axis_names: Tuple[str, ...] = (
      AxisName.BATCH,
      AxisName.LENGTH,
      AxisName.HEADS,
      AxisName.KV,
  )
@dataclasses.dataclass()
class Partitioner2D(PartitionerBase):
  """Partitioner for h-transformer-2d (image/video-frame inputs)."""
  # Activations have shape (batch, height, width, embed).
  layer_output_axis_names: Tuple[str, ...] = (
      AxisName.BATCH,
      AxisName.HEIGHT,
      AxisName.WIDTH,
      AxisName.EMBED,
  )
  # Multihead q/k/v have shape (batch, height, width, heads, kv).
  qkv_axis_names: Tuple[str, ...] = (
      AxisName.BATCH,
      AxisName.HEIGHT,
      AxisName.WIDTH,
      AxisName.HEADS,
      AxisName.KV,
  )
| 6,712 | 29.103139 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/h_attention_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for h_attention.py."""
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen
from jax import random
import jax.numpy as jnp
import numpy as onp
from flaxformer import testing_utils
from flaxformer.architectures.h_transformer import h_attention
from flaxformer.architectures.h_transformer import token_hierarchy as th
class HAttention1DTest(parameterized.TestCase):
  """Test cases for h_attention."""
  def setUp(self):
    super().setUp()
    # Common dimensions shared by all tests below.
    self.batch_size = 2
    self.num_heads = 4
    self.head_dim = 2
    self.feature_size = self.num_heads * self.head_dim
  def test_bad_input_shape(self):
    """A 4D query input must be rejected with ValueError."""
    # Deliberately sets a wrong shape here to trigger the ValueError.
    inputs_q = jnp.ones((1, 1, 8, 2))
    with self.assertRaises(ValueError):
      attention_module = h_attention.OneDimEncoderSelfAttention(
          num_heads=self.num_heads, num_clusters=2, use_rpb=True)
      rng = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
      attention_module.init(rng, inputs_q, padding_mask=None)
  def test_bad_padding_mask_shape(self):
    """A 4D padding mask must be rejected with ValueError."""
    # Deliberately sets a wrong shape here to trigger the ValueError.
    inputs_q = jnp.ones((self.batch_size, 16, self.feature_size))
    padding_mask = jnp.ones((self.batch_size, 16, 1, 1))
    with self.assertRaises(ValueError):
      attention_module = h_attention.OneDimEncoderSelfAttention(
          num_heads=self.num_heads, num_clusters=2, use_rpb=True)
      rng = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
      attention_module.init(rng, inputs_q, padding_mask=padding_mask)
  def test_large_num_clusters(self):
    """Regression test: num_clusters larger than seq_len//2 must work."""
    # Deliberately sets num_clusters > sequence_length//2. This used to trigger
    # a bug. It has been fixed. So this should always pass.
    num_clusters = 16
    seq_len = 4
    inputs_q = jnp.ones((self.batch_size, seq_len, self.feature_size))
    rng = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
    attention_module = h_attention.OneDimEncoderSelfAttention(
        num_heads=self.num_heads, num_clusters=num_clusters, use_rpb=True)
    result, _ = attention_module.init_with_output(
        rng, inputs_q, padding_mask=None)
    expected_shape = inputs_q.shape
    self.assertEqual(result.shape, expected_shape)
  @parameterized.named_parameters(
      dict(
          testcase_name='decoder_no_rpb_singlehead',
          use_rpb=False,
          use_multihead_rpb=False),
      dict(
          testcase_name='decoder_rpb_singlehead',
          use_rpb=True,
          use_multihead_rpb=False),
      dict(
          testcase_name='decoder_rpb_multihead',
          use_rpb=True,
          use_multihead_rpb=True),
  )
  def test_decoder_runs(self, use_rpb, use_multihead_rpb):
    """Decoder self-attention runs and preserves the input shape."""
    num_clusters = 2
    num_level = 4
    num_block = int(onp.exp2(num_level))
    seq_len = num_clusters * num_block
    inputs_q = jnp.ones((self.batch_size, seq_len, self.feature_size))
    rng = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
    attention_module = h_attention.OneDimDecoderSelfAttention(
        num_heads=self.num_heads,
        num_clusters=num_clusters,
        use_rpb=use_rpb,
        use_multihead_rpb=use_multihead_rpb)
    result, _ = attention_module.init_with_output(
        rng, inputs_q, padding_mask=None)
    expected_shape = inputs_q.shape
    self.assertEqual(result.shape, expected_shape)
  @parameterized.named_parameters(
      dict(
          testcase_name='encoder_no_rpb_singlehead',
          use_rpb=False,
          use_multihead_rpb=False),
      dict(
          testcase_name='encoder_rpb_singlehead',
          use_rpb=True,
          use_multihead_rpb=False),
      dict(
          testcase_name='encoder_rpb_multihead',
          use_rpb=True,
          use_multihead_rpb=True),
  )
  def test_encoder_runs(self, use_rpb, use_multihead_rpb):
    """Encoder self-attention runs and preserves the input shape."""
    num_clusters = 2
    num_level = 4
    num_block = int(onp.exp2(num_level))
    seq_len = num_clusters * num_block
    inputs_q = jnp.ones((self.batch_size, seq_len, self.feature_size))
    rng = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
    attention_module = h_attention.OneDimEncoderSelfAttention(
        num_heads=self.num_heads,
        num_clusters=num_clusters,
        use_rpb=use_rpb,
        use_multihead_rpb=use_multihead_rpb)
    result, _ = attention_module.init_with_output(
        rng, inputs_q, padding_mask=None)
    expected_shape = inputs_q.shape
    self.assertEqual(result.shape, expected_shape)
  @parameterized.named_parameters(
      dict(
          testcase_name='cross_attention_no_rpb_singlehead',
          use_rpb=False,
          use_multihead_rpb=False),
      dict(
          testcase_name='cross_attention_rpb_singlehead',
          use_rpb=True,
          use_multihead_rpb=False),
      dict(
          testcase_name='cross_attention_rpb_multihead',
          use_rpb=True,
          use_multihead_rpb=True),
  )
  def test_cross_attention_runs(self, use_rpb, use_multihead_rpb):
    """Cross-attention runs and preserves the query shape."""
    num_clusters = 2
    num_level = 4
    num_block = int(onp.exp2(num_level))
    seq_len = num_clusters * num_block
    inputs_q = jnp.ones((self.batch_size, seq_len, self.feature_size))
    rng = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
    attention_module = h_attention.OneDimCrossAttention(
        num_heads=self.num_heads,
        num_clusters=num_clusters,
        use_rpb=use_rpb,
        use_multihead_rpb=use_multihead_rpb)
    result, _ = attention_module.init_with_output(rng, inputs_q, inputs_q)
    expected_shape = inputs_q.shape
    self.assertEqual(result.shape, expected_shape)
  def test_attention_params(self):
    """Checks dtypes, shapes and axis names of all created parameters."""
    num_block = 16
    num_clusters = 4
    seq_len = num_clusters * num_block
    inputs_q = jnp.ones((self.batch_size, seq_len, self.feature_size))
    rng = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
    attention_module = h_attention.OneDimEncoderSelfAttention(
        num_heads=self.num_heads,
        num_clusters=num_clusters,
        use_rpb=True,
        use_multihead_rpb=True,
        split_head_kernel=True,
    )
    result, variables = attention_module.init_with_output(
        rng, inputs_q, padding_mask=None)
    expected_shape = inputs_q.shape
    self.assertEqual(result.shape, expected_shape)
    expected_embed = f'embed={self.feature_size}'
    expected_heads = f'heads={self.num_heads}'
    expected_kv = f'kv={self.head_dim}'
    expected_relpos = f'relpos_buckets={4*num_clusters - 1}'
    # The bias term does not have split head shape. The heads are always merged.
    expected_merged_kv = f'kv={self.feature_size}'
    expected_params = {
        'query_multihead_projection': {
            'bias': ['float32', expected_merged_kv],
            'kernel': ['float32', expected_embed, expected_heads, expected_kv],
        },
        'key_multihead_projection': {
            'bias': ['float32', expected_merged_kv],
            'kernel': ['float32', expected_embed, expected_heads, expected_kv],
        },
        'value_multihead_projection': {
            'bias': ['float32', expected_merged_kv],
            'kernel': ['float32', expected_embed, expected_heads, expected_kv],
        },
        'out': {
            'bias': ['float32', expected_embed],
            'kernel': ['float32', expected_merged_kv, expected_embed],
        },
        '1d_relative_position_bias': {
            '1d_relative_position_bias': [
                'float32', expected_relpos, expected_heads
            ],
        },
    }
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(variables['params'],
                                               variables['params_axes']),
        expected_params)
  def test_self_attention_output(self):
    """Compares h-attention output to a hand-computed dense reference."""
    x = jnp.array([[
        0.08482573, 0.29561728, 0.33432317, 0.6481298, -0.7824855, 0.6298023,
        -0.3278767, -1.6607414
    ],
                   [
                       1.9097669, 1.1209449, -0.8260815, 1.0434877, -0.453946,
                       0.8152592, -1.1234418, 0.2729053
                   ]],
                  dtype=jnp.float32).T
    x = jnp.expand_dims(x, 0)
    # The H-similarity matrix computed by hand for the above input without
    # projections, using constant interpolation and coarsening:
    s = jnp.array([[
        3.654405017, 2.165819418, -1.549263898, 2.047796353, 0.2592372306,
        0.2592372306, -0.8335717156, -0.8335717156
    ],
                   [
                       2.165819418, 1.343907045, -0.8271601383, 1.361290584,
                       0.2592372306, 0.2592372306, -0.8335717156, -0.8335717156
                   ],
                   [
                       -1.549263898, -0.8271601383, 0.7941826266, -0.6453210751,
                       0.1133933598, -0.4629130414, -0.5346589167, -0.5346589167
                   ],
                   [
                       2.047796353, 1.361290584, -0.6453210751, 1.508938818,
                       -0.9808392381, 1.258906586, -0.5346589167, -0.5346589167
                   ],
                   [
                       0.2592372306, 0.2592372306, 0.1133933598, -0.9808392381,
                       0.8183505286, -0.8628948204, 0.7665406749, 1.175621795
                   ],
                   [
                       0.2592372306, 0.2592372306, -0.4629130414, 1.258906586,
                       -0.8628948204, 1.0612985, -1.122393763, -0.8234501969
                   ],
                   [
                       -0.8335717156, -0.8335717156, -0.5346589167,
                       -0.5346589167, 0.7665406749, -1.122393763, 1.369624608,
                       0.2379251883
                   ],
                   [
                       -0.8335717156, -0.8335717156, -0.5346589167,
                       -0.5346589167, 1.175621795, -0.8234501969, 0.2379251883,
                       2.8325393
                   ]])
    # Reference computation: scaled similarity -> softmax -> weighted values.
    s = s / jnp.sqrt(2)
    a = linen.softmax(s, axis=1)
    target_out = a @ x[0]
    # Disable projections/RPB/dropout so the module reduces to plain
    # softmax-attention and is comparable to the reference above.
    attn = h_attention.OneDimEncoderSelfAttention(
        num_heads=1,
        num_clusters=2,
        out_features=2,
        broadcast_dropout=False,
        dropout_rate=0.0,
        use_rpb=False,
        rescale_logits=True,
        use_mxu=True,
        interpolation_kernel_type=th.ConvKernelType.CONST,
        max_similarity_mode='scan_all',
        use_row_sum=False,
        multihead_projection=False,
        output_projection=False,
    )
    mask = jnp.ones((1, 8, 1))
    key = random.PRNGKey(0)
    variables = attn.init(key, x, mask)
    out = attn.apply(variables, x, mask)
    self.assertTrue(jnp.allclose(out, target_out, rtol=5e-5))
# Standard absl test entry point when the module is run directly.
if __name__ == '__main__':
  absltest.main()
| 11,252 | 37.145763 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/h_transformer/token_hierarchy_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for token_hierarchy.py."""
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
from flaxformer.architectures.h_transformer import token_hierarchy
class OneDimTokenCoarseningTest(parameterized.TestCase):
  """Test cases for OneDimTokenCoarsening."""
  @parameterized.named_parameters(
      ('sample', token_hierarchy.TokenCoarseningMethod.SAMPLE,
       np.array([[[[1.], [2.]], [[5.], [6.]]]])),
      ('sum', token_hierarchy.TokenCoarseningMethod.SUM,
       np.array([[[[4.], [6.]], [[12.], [14.]]]])),
      ('const_average', token_hierarchy.TokenCoarseningMethod.CONST_AVERAGE,
       np.array([[[[2.], [3.]], [[6.], [7.]]]])),
  )
  def test_coarsening(self, method, expected_result):
    """Coarsening a 1..16 ramp by ratio 2 matches the expected array."""
    # Input shape is (batch=1, seq_len=4, num_heads=2, head_dim=1), filled
    # with consecutive integers 1..8 so the coarsened values are easy to
    # verify by hand.
    input_shape = (1, 4, 2, 1)
    num_elements = np.prod(input_shape)
    inputs = jnp.arange(1, num_elements + 1).reshape(input_shape)
    coarsening_fn = token_hierarchy.OneDimTokenCoarsening(
        method=method, coarsening_ratio=2)
    result = coarsening_fn(inputs)
    logging.info('method = %s', method)
    logging.info('result = %s', result)
    logging.info('expected_result = %s', expected_result)
    np.testing.assert_array_almost_equal(result, expected_result)
class OneDimInterpolationTest(parameterized.TestCase):
  """Test cases for OneDimTokenInterpolation."""
  @parameterized.named_parameters(
      ('const', token_hierarchy.ConvKernelType.CONST, False,
       np.array([[[1.], [1.], [2.], [2.], [3.], [3.]]])),
      ('linear_no_correction', token_hierarchy.ConvKernelType.LINEAR, False,
       np.array([[[1.], [1.5], [2.], [2.5], [3.], [1.5]]])),
      ('linear_with_correction', token_hierarchy.ConvKernelType.LINEAR, True,
       np.array([[[1.], [1.5], [2.], [2.5], [3.], [3.]]])),
  )
  def test_interpolation(self, conv_kernel_type, use_edge_correction,
                         expected_result):
    """Interpolating [1, 2, 3] by a factor of 2 matches the expected array."""
    batch_size = 1
    seq_len = 3
    feature_size = 1
    seq_shape = [batch_size, seq_len, feature_size]
    data_size = np.prod(seq_shape)
    # Inputs are the consecutive integers 1..3, one per sequence position.
    inputs = jnp.arange(1, data_size + 1).reshape(tuple(seq_shape))
    interpolation_fn = token_hierarchy.OneDimTokenInterpolation(
        conv_kernel_size=2,
        conv_kernel_type=conv_kernel_type,
        use_edge_correction=use_edge_correction)
    result = interpolation_fn(inputs)
    logging.info('conv_kernel_type = %s', conv_kernel_type)
    logging.info('result = %s', result)
    logging.info('expected_result = %s', expected_result)
    np.testing.assert_array_almost_equal(result, expected_result)
class OneDimTokenHierarchyTest(parameterized.TestCase):
  """Test cases for OneDimTokenHierarchy."""
  @parameterized.named_parameters(
      ('odd_num_cluster', 3, 4),
      ('wrong_num_block', 3, 7),
  )
  def test_bad_attribute_value(self, num_cluster, num_block):
    """Invalid (num_cluster, num_block) combinations raise ValueError."""
    seq_len = num_cluster * num_block
    with self.assertRaises(ValueError):
      token_hierarchy.OneDimTokenHierarchy(
          seq_len=seq_len, num_cluster=num_cluster)
  @parameterized.named_parameters(('causal_mask', True, {
      token_hierarchy.TokenBlockName.ANCHOR:
          np.array([[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]],
                     [[9., 10.], [11., 12.]], [[13., 14.], [15., 16.]]]]),
      token_hierarchy.TokenBlockName.LEFT:
          np.array([[[[0., 0.], [0., 0.]], [[5., 6.], [7., 8.]],
                     [[9., 10.], [11., 12.]], [[13., 14.], [15., 16.]],
                     [[0., 0.], [0., 0.]], [[9., 10.], [13., 14.]]]]),
  }), ('non_causal_mask', False, {
      token_hierarchy.TokenBlockName.ANCHOR:
          np.array([[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]],
                     [[9., 10.], [11., 12.]], [[13., 14.], [15., 16.]]]]),
      token_hierarchy.TokenBlockName.LEFT:
          np.array([[[[0., 0.], [0., 0.]], [[5., 6.], [7., 8.]],
                     [[9., 10.], [11., 12.]], [[13., 14.], [15., 16.]],
                     [[0., 0.], [0., 0.]], [[10., 11.], [14., 15.]]]]),
      token_hierarchy.TokenBlockName.RIGHT:
          np.array([[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]],
                     [[9., 10.], [11., 12.]], [[0., 0.], [0., 0.]],
                     [[2., 3.], [6., 7.]], [[0., 0.], [0., 0.]]]]),
  }))
  def test_hierarchical_coarsen_without_padding(self, causal_mask,
                                                expected_coarse_query):
    """Checks coarsened Q/K/V block values on an unpadded 1..16 ramp."""
    batch_size = 1
    num_head = 2
    head_dim = 1
    num_cluster = 2
    num_block = 4
    seq_len = num_block * num_cluster
    seq_shape = [batch_size, seq_len, num_head, head_dim]
    # Consecutive integers 1..16 so the coarsened blocks can be verified by
    # hand against the parameterized expectations above.
    inputs = jnp.arange(1, 17).reshape(tuple(seq_shape))
    hierarchy = token_hierarchy.OneDimTokenHierarchy(
        seq_len=seq_len,
        num_cluster=num_cluster,
        for_self_attention=True,
        causal_mask=causal_mask)
    results = hierarchy.hierarchical_coarsen(
        inputs, input_array_name=token_hierarchy.InputArrayName.QUERY)
    coarse_query = results.packed_coarse_qkv
    results = hierarchy.hierarchical_coarsen(
        inputs, input_array_name=token_hierarchy.InputArrayName.KEY)
    coarse_key = results.packed_coarse_qkv
    results = hierarchy.hierarchical_coarsen(
        inputs, input_array_name=token_hierarchy.InputArrayName.VALUE)
    coarse_value = results.packed_coarse_qkv
    partitioned_shape = tuple((batch_size, num_block, num_cluster, num_head))
    diag_qkv = inputs.reshape(partitioned_shape)
    expected_coarse_key = {
        token_hierarchy.TokenBlockName.ANCHOR:
            diag_qkv,
        token_hierarchy.TokenBlockName.LEFT:
            np.array([[[[0., 0.], [0., 0.]], [[1., 2.], [3., 4.]],
                       [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]],
                       [[0., 0.], [0., 0.]], [[2., 3.], [6., 7.]]]]),
        token_hierarchy.TokenBlockName.RIGHT:
            np.array([[[[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]],
                       [[13., 14.], [15., 16.]], [[0., 0.], [0., 0.]],
                       [[10., 11.], [14., 15.]], [[0., 0.], [0., 0.]]]])
    }
    expected_coarse_value = {
        token_hierarchy.TokenBlockName.ANCHOR:
            diag_qkv,
        token_hierarchy.TokenBlockName.LEFT:
            np.array([[[[0., 0.], [0., 0.]], [[1., 2.], [3., 4.]],
                       [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]],
                       [[0., 0.], [0., 0.]], [[4., 6.], [12., 14.]]]]),
        token_hierarchy.TokenBlockName.RIGHT:
            np.array([[[[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]],
                       [[13., 14.], [15., 16.]], [[0., 0.], [0., 0.]],
                       [[20., 22.], [28., 30.]], [[0., 0.], [0., 0.]]]])
    }
    for dict_key, coarse_q in coarse_query.items():
      logging.info('coarse_q[%s] = %s', dict_key, coarse_q)
      logging.info('expected_coarse_q = %s',
                   expected_coarse_query[dict_key][..., None])
      np.testing.assert_array_almost_equal(
          coarse_q, expected_coarse_query[dict_key][..., None])
      logging.info('coarse_value = %s', coarse_value[dict_key])
      logging.info('expected_coarse_value = %s',
                   expected_coarse_value[dict_key][..., None])
      np.testing.assert_array_almost_equal(
          coarse_value[dict_key], expected_coarse_value[dict_key][..., None])
      logging.info('coarse_key = %s', coarse_key[dict_key])
      logging.info('expected_coarse_key = %s',
                   expected_coarse_key[dict_key][..., None])
      np.testing.assert_array_almost_equal(
          coarse_key[dict_key], expected_coarse_key[dict_key][..., None])
  @parameterized.named_parameters(
      ('boolean_mask', True),
      ('int_mask', False),
  )
  def test_hierarchical_coarsen_with_padding(self, boolean_mask):
    """Checks coarsened query and aggregated key mask with trailing padding."""
    batch_size = 1
    num_head = 2
    head_dim = 1
    num_level = 2
    num_cluster = 2
    num_block = int(np.exp2(num_level))
    seq_len = num_block * num_cluster
    padding_len = 3
    # Zero out the last `padding_len` positions; both bool and int masks are
    # exercised via the parameterization.
    padding_mask = np.ones((batch_size, seq_len, 1))
    padding_mask[:, -padding_len:] = 0
    if boolean_mask:
      padding_mask = padding_mask > 0
    seq_shape = [batch_size, seq_len, num_head, head_dim]
    data_size = np.prod(seq_shape)
    inputs = jnp.arange(1, data_size + 1).reshape(tuple(seq_shape))
    inputs *= padding_mask[..., None]
    hierarchy = token_hierarchy.OneDimTokenHierarchy(
        seq_len=seq_len,
        num_cluster=num_cluster,
        for_self_attention=True,
        causal_mask=False)
    results = hierarchy.hierarchical_coarsen(
        inputs,
        input_array_name=token_hierarchy.InputArrayName.QUERY,
        padding_mask=padding_mask)
    coarse_query = results.packed_coarse_qkv
    results = hierarchy.hierarchical_coarsen(
        inputs,
        input_array_name=token_hierarchy.InputArrayName.KEY,
        padding_mask=padding_mask)
    aggregated_padding_mask = results.packed_aggregated_key_padding_mask
    partitioned_shape = tuple((batch_size, num_block, num_cluster, num_head, 1))
    diag_qkv = inputs.reshape(partitioned_shape)
    expected_coarse_query = {
        token_hierarchy.TokenBlockName.ANCHOR:
            diag_qkv,
        token_hierarchy.TokenBlockName.LEFT:
            np.array([[[[0., 0.], [0., 0.]], [[5., 6.], [7., 8.]],
                       [[9., 10.], [0., 0.]], [[0., 0.], [0., 0.]],
                       [[0., 0.], [0., 0.]], [[9., 10.], [0., 0.]]]])[...,
                                                                      None],
        token_hierarchy.TokenBlockName.RIGHT:
            np.array([[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]],
                       [[9., 10.], [0., 0.]], [[0., 0.], [0., 0.]],
                       [[2., 3.], [6., 7.]], [[0., 0.], [0., 0.]]]])[..., None],
    }
    partitioned_mask_shape = tuple((batch_size, num_block, num_cluster, 1, 1))
    diag_padding_mask = padding_mask.reshape(partitioned_mask_shape).astype(
        jnp.float32)
    expected_padding_mask = {
        token_hierarchy.TokenBlockName.ANCHOR:
            diag_padding_mask,
        token_hierarchy.TokenBlockName.LEFT:
            np.array([[[[0.], [0.]], [[1.], [1.]], [[1.], [1.]], [[1.], [0.]],
                       [[0.], [0.]], [[2.], [2.]]]])[..., None],
        token_hierarchy.TokenBlockName.RIGHT:
            np.array([[[[1.], [1.]], [[1.], [0.]], [[0.], [0.]], [[0.], [0.]],
                       [[1.], [0.]], [[0.], [0.]]]])[..., None],
    }
    for dict_key, coarse_q in coarse_query.items():
      logging.info('aggregated_padding_mask = %s',
                   aggregated_padding_mask[dict_key])
      logging.info('expected_padding_mask = %s',
                   expected_padding_mask[dict_key])
      np.testing.assert_array_equal(aggregated_padding_mask[dict_key],
                                    expected_padding_mask[dict_key])
      logging.info('coarse_q = %s', coarse_q)
      logging.info('expected_coarse_q = %s', expected_coarse_query[dict_key])
      np.testing.assert_array_almost_equal(coarse_q,
                                           expected_coarse_query[dict_key])
  @parameterized.named_parameters(
      ('const', token_hierarchy.ConvKernelType.CONST,
       np.array([0, 0, 1, 1, 3, 3, 4, 4, 6, 6, 7, 7, 9, 9, 10, 10]).reshape(
           (1, 16, 1))),
      ('linear_with_correction', token_hierarchy.ConvKernelType.LINEAR,
       np.array([
           0, 0.75, 1.5, 2.25, 3, 3.75, 4.5, 5.25, 6, 6.75, 7.5, 8.25, 9, 9.5,
           10, 10
       ]).reshape((1, 16, 1))),
  )
  def test_interpolate_cumulative_sum(self, interpolation_kernel_type,
                                      expected_results):
    """Checks interpolation of a coarse cumulative sum back to full length."""
    num_level = 3
    num_cluster = 2
    num_block = int(np.exp2(num_level))
    seq_len = num_block * num_cluster
    hierarchy = token_hierarchy.OneDimTokenHierarchy(
        seq_len=seq_len,
        num_cluster=num_cluster,
        interpolation_kernel_type=interpolation_kernel_type,
        for_self_attention=True,
        causal_mask=False)
    coarse_y = np.array([[[[0], [1]], [[2], [3]], [[4], [5]], [[6], [7]],
                          [[0], [1]], [[2], [3]]]])
    actual_results = hierarchy.interpolate_cumulative_sum(coarse_y)
    logging.info('interpolation_kernel_type=%s', interpolation_kernel_type)
    logging.info('expected_results=%s', expected_results)
    logging.info('actual_results=%s', actual_results)
    np.testing.assert_array_almost_equal(actual_results, expected_results)
# Standard absl test entry point when the module is run directly.
if __name__ == '__main__':
  absltest.main()
| 13,102 | 41.267742 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/rotary_embedding_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rotary_embedding."""
from absl.testing import absltest
import jax.numpy as jnp
import numpy as np
from flaxformer.architectures.perceiver_ar import rotary_embedding
from flaxformer.components import embedding
class RotaryTest(absltest.TestCase):
  def test_rotary_embedding(self):
    """Checks the output shapes with separate query and key head counts."""
    batch = 2
    qlen = 3
    qheads = 4
    d = 2 * 5  # Head dim; rotary embeddings require an even dimension.
    klen = 6
    kheads = 7
    maxlen = 8
    q = np.ones((batch, qlen, qheads, d))
    k = np.ones((batch, klen, kheads, d))
    cos = np.ones((maxlen, d))
    sin = np.ones((maxlen, d))
    out_q, out_k = rotary_embedding.apply_rotary_embedding(q, k, cos, sin)
    self.assertEqual(out_q.shape, q.shape)
    self.assertEqual(out_k.shape, k.shape)
  def test_rotary_embedding_multiquery(self):
    """Checks shapes for multi-query attention (keys have no heads axis)."""
    batch = 2
    qlen = 3
    qheads = 4
    d = 2 * 5
    klen = 6
    maxlen = 8
    q = np.ones((batch, qlen, qheads, d))
    k = np.ones((batch, klen, d))  # 3D: shared across query heads.
    cos = np.ones((maxlen, d))
    sin = np.ones((maxlen, d))
    out_q, out_k = rotary_embedding.apply_rotary_embedding(q, k, cos, sin)
    self.assertEqual(out_q.shape, q.shape)
    self.assertEqual(out_k.shape, k.shape)
  def test_rotary_embedding_decode(self):
    """Checks shapes in single-step decode mode with a rotary_index."""
    batch = 2
    qlen = 1  # Decoding processes one query position at a time.
    qheads = 4
    d = 2 * 5
    klen = 6
    maxlen = 8
    q = np.ones((batch, qlen, qheads, d))
    k = np.ones((batch, klen, d))
    cos = np.ones((maxlen, d))
    sin = np.ones((maxlen, d))
    rotary_index = np.ones((batch,), dtype=np.int32)
    out_q, out_k = rotary_embedding.apply_rotary_embedding(
        q, k, cos, sin, decode=True, rotary_index=rotary_index)
    self.assertEqual(out_q.shape, q.shape)
    self.assertEqual(out_k.shape, k.shape)
  def test_rotary_embedding_q_offset(self):
    """Offset short queries must match the corresponding full-query slice."""
    batch = 2
    qlen = 3
    qheads = 4
    d = 2 * 5
    klen = 6
    kheads = 7
    maxlen = 8
    sin, cos = embedding.generate_fixed_pos_embedding(
        d, maxlen, max_timescale=maxlen)
    # First, generate with queries as long as keys.
    q = np.ones((batch, klen, qheads, d))
    k = np.ones((batch, klen, kheads, d))
    out_full_q, out_full_k = rotary_embedding.apply_rotary_embedding(
        q, k, cos, sin)
    self.assertEqual(out_full_q.shape, q.shape)
    self.assertEqual(out_full_k.shape, k.shape)
    # Then with shorter queries and an offset.
    short_q = np.ones((batch, qlen, qheads, d))
    out_short_q, out_short_k = rotary_embedding.apply_rotary_embedding(
        short_q, k, cos, sin, q_position_offset=jnp.array([2, 3]))
    self.assertEqual(out_short_q.shape, short_q.shape)
    self.assertEqual(out_short_k.shape, k.shape)
    # Keys are unaffected by the query offset.
    np.testing.assert_allclose(out_short_k, out_full_k)
    # The shorter queries with offsets should be equivalent to a slice of the
    # full query output.
    np.testing.assert_allclose(out_short_q[0], out_full_q[0, 2:5])
    np.testing.assert_allclose(out_short_q[1], out_full_q[1, 3:])
  def test_rotary_embedding_to_subset(self):
    """With fraction_to_rotate=0.5 only the first half of dims changes."""
    batch = 2
    qheads = 4
    d = 2 * 6
    qklen = 6
    kheads = 7
    maxlen = 8
    # First, generate with queries as long as keys.
    q = np.ones((batch, qklen, qheads, d))
    k = np.ones((batch, qklen, kheads, d))
    out_halfrot_q, out_halfrot_k = rotary_embedding.apply_rotary_embedding_to_subset(
        q, k, max_timescale=maxlen, fraction_to_rotate=0.5)
    self.assertEqual(out_halfrot_q.shape, q.shape)
    self.assertEqual(out_halfrot_k.shape, k.shape)
    # First half of dims should be rotated and therefore not the same as input.
    # Second half should match input.
    with np.testing.assert_raises(AssertionError):
      np.testing.assert_allclose(out_halfrot_q[..., :d // 2], q[..., :d // 2])
    np.testing.assert_allclose(out_halfrot_q[..., d // 2:], q[..., d // 2:])
    with np.testing.assert_raises(AssertionError):
      np.testing.assert_allclose(out_halfrot_k[..., :d // 2], k[..., :d // 2])
    np.testing.assert_allclose(out_halfrot_k[..., d // 2:], k[..., d // 2:])
# Standard absl test entry point when the module is run directly.
if __name__ == '__main__':
  absltest.main()
| 4,786 | 31.344595 | 85 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/attention_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for attention."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
from flaxformer.architectures.perceiver_ar import attention
class AttentionTest(parameterized.TestCase):
  """Tests for Perceiver AR causal and decoder mask construction."""
  def test_make_causal_mask_with_padding(self):
    """Padded positions get no special treatment in the causal mask."""
    x = jnp.array([[7, 0, 0], [8, 5, 0]])
    sequence_lengths = jnp.array([3, 3])
    y = attention.make_causal_mask(
        x, num_latents=3, sequence_lengths=sequence_lengths)
    # Shape is [batch, 1 (broadcast over heads), num_latents, kv_length].
    self.assertEqual(y.shape, (2, 1, 3, 3))
    # Padding is not treated in a special way. So they need to be zeroed out
    # separately.
    expected_y = jnp.array([[[1., 0., 0.], [1., 1., 0.], [1., 1., 1.]]],
                           jnp.float32)
    np.testing.assert_allclose(y[0], expected_y)
    np.testing.assert_allclose(y[1], expected_y)
  def test_make_causal_mask(self):
    """num_latents == sequence length yields the standard triangular mask."""
    x = jnp.ones((1, 3))
    sequence_lengths = jnp.array([3])
    y = attention.make_causal_mask(
        x, num_latents=3, sequence_lengths=sequence_lengths)
    self.assertEqual(y.shape, (1, 1, 3, 3))
    expected_y = jnp.array([[[[1., 0., 0.], [1., 1., 0.], [1., 1., 1.]]]],
                           jnp.float32)
    np.testing.assert_allclose(y, expected_y)
  def test_make_causal_mask_fewer_latents_vary_sequence_lengths(self):
    """With fewer latents, latents align with the end of each sequence."""
    x = jnp.ones((4, 3))
    sequence_lengths = jnp.array([0, 1, 2, 3])
    y = attention.make_causal_mask(
        x, num_latents=2, sequence_lengths=sequence_lengths)
    # Query dim is num_latents (2), key dim is the full input length (3).
    self.assertEqual(y.shape, (4, 1, 2, 3))
    # Only the longest sequence (length 3) shifts the latent window right.
    expected_y = jnp.array([
        [[[1., 0., 0.], [1., 1., 0.]]],
        [[[1., 0., 0.], [1., 1., 0.]]],
        [[[1., 0., 0.], [1., 1., 0.]]],
        [[[1., 1., 0.], [1., 1., 1.]]],
    ], jnp.float32)
    np.testing.assert_allclose(y, expected_y)
  def test_make_decoder_mask_lm(self):
    """Plain LM: causal mask with padding positions fully masked out."""
    decoder_target_tokens = jnp.array([[6, 7, 3, 0]])
    mask = attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        num_latents=4,
        sequence_lengths=jnp.array([3]),
        dtype=jnp.float32)
    # Last row/column (the padding token) is zeroed.
    expected_mask = jnp.array([[[[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0],
                                 [0, 0, 0, 0]]]])
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_lm_smaller_latents(self):
    """Plain LM with num_latents < length: only the final rows remain."""
    decoder_target_tokens = jnp.array([[6, 7, 3, 0]])
    mask = attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        num_latents=2,
        sequence_lengths=jnp.array([3]),
        dtype=jnp.float32)
    expected_mask = jnp.array([[[[1, 1, 0, 0], [1, 1, 1, 0]]]])
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_prefix_lm(self):
    """Prefix LM: the prefix attends bidirectionally, the rest causally."""
    decoder_target_tokens = jnp.array([[5, 6, 7, 3, 4, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 1, 0, 0, 0]])
    mask = attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        num_latents=6,
        sequence_lengths=jnp.array([5]),
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    # First three rows (the prefix) see the whole prefix; padding is zeroed.
    expected_mask = jnp.array(
        [[[[1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0],
           [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0]]]],
        dtype=jnp.float32)
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_prefix_lm_smaller_latents(self):
    """Prefix LM with num_latents < length keeps only the last query rows."""
    decoder_target_tokens = jnp.array([[5, 6, 7, 3, 4, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 1, 0, 0, 0]])
    mask = attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        num_latents=2,
        sequence_lengths=jnp.array([5]),
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    expected_mask = jnp.array([[[[1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0]]]],
                              dtype=jnp.float32)
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_prefix_lm_multiple_elements(self):
    """Per-example prefix lengths produce per-example masks."""
    decoder_target_tokens = jnp.array([[6, 7, 3, 0], [4, 5, 0, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0], [1, 0, 0, 0]])
    mask = attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        num_latents=4,
        sequence_lengths=jnp.array([3, 2]),
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    expected_mask0 = jnp.array([[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0],
                                [0, 0, 0, 0]])
    expected_mask1 = jnp.array([[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0],
                                [0, 0, 0, 0]])
    self.assertEqual(mask.shape, (2, 1, 4, 4))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
    np.testing.assert_array_equal(mask[1, 0], expected_mask1)
  def test_make_decoder_mask_prefix_lm_multiple_elements_smaller_latents(self):
    """Per-example prefixes combined with a reduced latent window."""
    decoder_target_tokens = jnp.array([[6, 7, 3, 0], [4, 5, 0, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0], [1, 0, 0, 0]])
    mask = attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        num_latents=2,
        sequence_lengths=jnp.array([3, 2]),
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    expected_mask0 = jnp.array([[1, 1, 0, 0], [1, 1, 1, 0]])
    expected_mask1 = jnp.array([[1, 0, 0, 0], [1, 1, 0, 0]])
    self.assertEqual(mask.shape, (2, 1, 2, 4))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
    np.testing.assert_array_equal(mask[1, 0], expected_mask1)
# Standard absltest entry point so this test file can be run as a script.
if __name__ == '__main__':
  absltest.main()
| 6,168 | 40.682432 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/slicing.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver AR slicing utilities."""
import functools
import jax
from jax import lax
from jax.experimental import maps
import jax.numpy as jnp
from t5x import partitioning
from flaxformer.types import Array
def get_sequence_lengths(decoder_target_tokens: Array) -> Array:
  """Return non-padding lengths of sequences in the batch.

  A position counts toward the length iff its token id is positive; id 0 is
  treated as padding.
  """
  nonpad_positions = decoder_target_tokens > 0
  return nonpad_positions.sum(axis=-1).astype(jnp.int32)
def sequence_slice_start(sequence_length: Array, num_latents: int) -> Array:
  """Calculate start index for slicing a sequence.

  The slice ends at max(num_latents, sequence_length), so the start is the
  sequence length minus the number of latents, clamped at zero.
  """
  return jnp.maximum(sequence_length - num_latents, 0)


def _slice_sequences(x: Array, sequence_length: Array, num_latents: int,
                     axis: int) -> Array:
  """Slice `num_latents` positions ending at the sequence's last real token."""
  slice_start = sequence_slice_start(
      sequence_length=sequence_length, num_latents=num_latents)
  return lax.dynamic_slice_in_dim(x, slice_start, num_latents, axis=axis)


def slice_sequences_vmap(x: Array, sequence_lengths: Array, num_latents: int,
                         axis_within_vmap: int) -> Array:
  """Slice sequences using vmap for Perceiver AR usage.

  Each sequence in the batch is sliced to start at
  max(num_latents, length) - num_latents with a length of num_latents.

  Args:
    x: Array to slice, expected to be of shape [batch, ...].
    sequence_lengths: Length of the supplied sequences with shape [batch].
    num_latents: Number of Perceiver AR latents.
    axis_within_vmap: Axis to slice, from within the vmap where the batch axis
      will be hidden.

  Returns:
    Sliced input array.
  """
  def _slice_one(example, example_length):
    # Inside vmap the batch dimension is hidden, so `axis_within_vmap` indexes
    # into the per-example array.
    return _slice_sequences(
        example, example_length, num_latents=num_latents,
        axis=axis_within_vmap)

  return jax.vmap(_slice_one)(x, sequence_lengths)
def slice_sequences_xmap(x: Array, sequence_lengths: Array, num_latents: int,
                         axis_within_xmap: int) -> Array:
  """Slice sequences using xmap for Perceiver AR usage.

  Given the length of sequences and the number of latents, each sequence within
  the batch will be sliced to start at max(num_latents, length) - num_latents
  with a length of num_latents.

  This method should be used for slicing sequences that are partitioned, such
  as the inputs to self-attention.

  xmap is used to work around XLA partitioning issues with gathers.
  If regular vmap is used, a bunch of extra allgathers are added.

  Requires the following flags:
  --experimental_xmap_spmd_lowering=True
  --experimental_xmap_spmd_lowering_manual=True

  Args:
    x: Array to slice, expected to be of shape [batch, length, embedding].
    sequence_lengths: Length of the supplied sequences with shape [batch].
    num_latents: Number of Perceiver AR latents.
    axis_within_xmap: Axis to slice, from within the xmap where the batch and
      embedding axis will be hidden.

  Returns:
    Sliced input array.
  """
  # With a non-CPU backend and a global mesh defined, map the batch axis onto
  # the 'data' mesh dimension and the embedding axis onto 'model'; otherwise
  # run unpartitioned (empty axis_resources, embedding axis unmapped).
  if (jax.devices()[0].platform != 'cpu' and
      partitioning.global_mesh_defined()):
    xmap_axis_resources = {'batch': 'data', 'embed': 'model'}
    xmap_embed_axis = 'embed'
  else:
    xmap_axis_resources = {}
    xmap_embed_axis = None
  # in_axes: x is ['batch', length (unmapped), 'embed']; sequence_lengths is
  # ['batch']. The per-example slice then runs with those axes hidden.
  return maps.xmap(
      functools.partial(
          _slice_sequences, num_latents=num_latents, axis=axis_within_xmap),
      in_axes=(['batch', None, xmap_embed_axis], ['batch']),
      out_axes=['batch', None, xmap_embed_axis],
      axis_resources=xmap_axis_resources)(x, sequence_lengths)
| 4,025 | 34.946429 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/perceiver_ar_architecture_test_utils.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for perceiver_ar_architecture_test."""
from flax import linen as nn
from jax import numpy as jnp
from flaxformer.architectures.perceiver_ar import decoder_layer
from flaxformer.architectures.perceiver_ar import dense_attention
from flaxformer.architectures.perceiver_ar import parallel_fused_decoder
from flaxformer.architectures.perceiver_ar import perceiver_ar_architecture
from flaxformer.architectures.t5 import t5_architecture_test_utils
from flaxformer.components import dense
from flaxformer.components import layer_norm
def make_attention1(num_attn_heads, dtype, use_rotary_embedding=False):
  """First test configuration for attention."""
  # Collected in a dict first so the config values read as one unit.
  attention_kwargs = dict(
      num_heads=num_attn_heads,
      dtype=dtype,
      qkv_features=512,
      head_dim=None,
      kernel_init=t5_architecture_test_utils.ATTENTION_KERNEL_INIT,
      bias_init=t5_architecture_test_utils.BIAS_INIT,
      use_bias=False,
      broadcast_dropout=True,
      dropout_rate=0.1,
      use_rotary_embedding=use_rotary_embedding,
  )
  # pytype: disable=wrong-arg-types  # jax-types
  return dense_attention.MultiHeadDotProductAttention(**attention_kwargs)
def test_make_decoder_only1(
    num_latents: int, parallel: bool) -> perceiver_ar_architecture.DecoderOnly:
  """Builds a small 2-layer DecoderOnly test configuration."""
  dtype = jnp.float32
  num_attn_heads = 8

  def _dropout_factory():
    return nn.Dropout(rate=0.1, broadcast_dims=(-2,))

  _layer_norm_factory = layer_norm.T5LayerNorm

  def _decoder_layer_factory(shared_relative_position_bias):
    # No shared relative position bias is expected for this config.
    assert shared_relative_position_bias is None
    return decoder_layer.DecoderLayer(
        self_attention=make_attention1(
            num_attn_heads, dtype, use_rotary_embedding=True),
        encoder_decoder_attention=None,
        mlp=t5_architecture_test_utils.make_mlp1(dtype),
        dropout_factory=_dropout_factory,
        layer_norm_factory=_layer_norm_factory,
        relative_position_bias_factory=None,
        num_latents=num_latents,
        parallel=parallel)

  def _output_logits_factory():
    return dense.DenseGeneral(  # pytype: disable=wrong-arg-types  # jax-types
        4,
        dtype=dtype,
        kernel_init=t5_architecture_test_utils.FINAL_KERNEL_INIT,
        bias_init=t5_architecture_test_utils.BIAS_INIT,
        use_bias=False)

  def _decoder_factory(*, shared_token_embedder=None):
    assert shared_token_embedder is None
    return perceiver_ar_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=(
            lambda: t5_architecture_test_utils.make_token_emb1(4, dtype)),
        layer_factory=_decoder_layer_factory,
        dropout_factory=_dropout_factory,
        layer_norm_factory=_layer_norm_factory,
        output_logits_factory=_output_logits_factory,
        dtype=dtype,
        num_latents=num_latents,
    )

  return perceiver_ar_architecture.DecoderOnly(
      decoder_factory=_decoder_factory, num_latents=num_latents)
def make_parallel_fused_transformer_config(
    num_latents: int) -> perceiver_ar_architecture.DecoderOnly:
  """Returns a DecoderOnly with parallel=True.

  Builds a 2-layer test config using multi-query attention with rotary
  embeddings and a fused MLP inside ParallelFusedDecoderLayer, in bfloat16.
  """
  dtype = jnp.bfloat16
  num_attn_heads = 8
  # Embedding width; also used as the attention out_features and matches the
  # MLP out_dim (13) below.
  num_features = 13
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  def _make_mq_attention(num_attn_heads, dtype):
    """First test configuration for attention."""
    return dense_attention.MultiQueryDotProductAttention( # pytype: disable=wrong-arg-types # jax-types
        num_heads=num_attn_heads,
        dtype=dtype,
        qkv_features=512,
        out_features=num_features,
        head_dim=None,
        kernel_init=t5_architecture_test_utils.ATTENTION_KERNEL_INIT,
        bias_init=t5_architecture_test_utils.BIAS_INIT,
        use_bias=False,
        broadcast_dropout=True,
        dropout_rate=0.1,
        rescale_logits=True,
        use_rotary_embedding=True)
  def _make_fusion_mlp(dtype):
    """First test configuration for the MLP."""
    return dense.MlpBlock(
        use_bias=False,
        intermediate_dim=2048,
        out_dim=13,
        precomputed_intermediates=True,
        fuse_kernels=False,
        activations=('swish', 'linear'),
        kernel_init=t5_architecture_test_utils.MLP_KERNEL_INIT,
        bias_init=t5_architecture_test_utils.BIAS_INIT,
        intermediate_dropout_rate=0.1,
        final_dropout_rate=0.1,
        dtype=dtype)
  def _make_decoder_layer(shared_relative_position_bias):
    # This config does not use a shared relative position bias.
    assert shared_relative_position_bias is None
    return parallel_fused_decoder.ParallelFusedDecoderLayer(
        self_attention=_make_mq_attention(num_attn_heads, dtype),
        mlp=_make_fusion_mlp(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=None,
        num_latents=num_latents)
  def _make_output_logits():
    # Small 4-way output vocabulary for tests.
    return dense.DenseGeneral( # pytype: disable=wrong-arg-types # jax-types
        4,
        dtype=dtype,
        kernel_init=t5_architecture_test_utils.FINAL_KERNEL_INIT,
        bias_init=t5_architecture_test_utils.BIAS_INIT,
        use_bias=False)
  def _embedder():
    # 2000-token input embedder with num_features-wide embeddings.
    return t5_architecture_test_utils.make_token_emb1(2_000, dtype,
                                                      num_features)
  def _make_decoder(shared_token_embedder):
    assert shared_token_embedder is None
    return perceiver_ar_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=_embedder,
        layer_factory=_make_decoder_layer,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        output_logits_factory=_make_output_logits,
        dtype=dtype,
        num_latents=num_latents)
  return perceiver_ar_architecture.DecoderOnly(
      decoder_factory=_make_decoder, num_latents=num_latents)
| 6,286 | 36.646707 | 105 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/dense_attention.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dense attention classes and mask/weighting functions."""
# pylint: disable=attribute-defined-outside-init,g-bare-generic
import functools
from typing import Callable, Optional, Tuple
from aqt.jax_legacy.jax import flax_layers as aqt_flax_layers
from aqt.jax_legacy.jax import quant_config as aqt_config
from aqt.jax_legacy.jax import quantization as aqt
from flax import linen as nn
from flax.core import variables
from flax.linen import initializers
from flax.linen import partitioning as flax_partitioning
from flax.linen.linear import default_kernel_init
from flax.training import common_utils
import jax
from jax import lax
import jax.numpy as jnp
from flaxformer import activation_partitioning
from flaxformer.architectures.perceiver_ar import rotary_embedding
from flaxformer.components import dense
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
RulesFallback = flax_partitioning.RulesFallback
class MultiHeadDotProductAttention(nn.Module, dense_attention.DenseAttention):
"""Multi-head dot-product attention.
Forked from the main Flaxformer implementation to allow passing in query
position offset information.
Attributes:
num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
use_bias: bool: whether pointwise QKVO dense transforms use bias.
dtype: the dtype of the computation (default: float32)
qkv_features: dimension of the key, query, and value.
head_dim: dimension of each head. If unspecified, it defaults to
qkv_features // num_heads.
out_features: dimension of the last projection
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rate: dropout rate
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the kernel of the Dense layers.
qkv_kernel_init: optional initializer for the fused qkv kernel. If None,
kernel_init will be used instead.
kv_kernel_init: optional initializer for the fused kv kernel. If None,
kernel_init will be used instead.
q_kernel_init: optional initializer for the query (q) kernel. If None,
kernel_init will be used instead.
bias_init: initializer for the bias of the Dense layers.
attention_fn: dot_product_attention or compatible function. Accepts query,
key, value, and returns output of shape `[bs, dim1, dim2, ..., dimN,,
num_heads, value_channels]``
use_extra_logit: whether to include a virtual extra logit equal to zero.
float32_logits: bool, if True then compute logits in float32 to avoid
numerical issues with bfloat16.
output_projection: Project the output of `attention_fn` to `out_features`.
If False, returns the output of `attention_fn` without a projection.
sow_intermediates: whether to track intermediates using Module.sow.
split_head_kernel: whether to store QKVO variables with a split head
dimension.
kernels_to_fuse: Which kernels to fuse, if any.
use_rotary_embedding: whether to use rotary embeddings.
"""
num_heads: int
use_bias: bool
dtype: DType = jnp.float32
qkv_features: Optional[int] = None
head_dim: Optional[int] = None
out_features: Optional[int] = None
broadcast_dropout: bool = True
dropout_rate: float = 0.
precision: Optional[lax.Precision] = None
kernel_init: Initializer = default_kernel_init # pytype: disable=annotation-type-mismatch # jax-types
qkv_kernel_init: Optional[Initializer] = None
kv_kernel_init: Optional[Initializer] = None
q_kernel_init: Optional[Initializer] = None
bias_init: Initializer = initializers.zeros
rescale_logits: bool = False
attention_fn: Callable[[Array, Array, Array], Array] = staticmethod(
dense_attention.dot_product_attention
)
use_extra_logit: bool = False
float32_logits: bool = False
output_projection: bool = True
# TODO: Remove out_features and output_projection.
sow_intermediates: bool = False
split_head_kernel: bool = False
kernels_to_fuse: Optional[str] = None
use_rotary_embedding: bool = False
rotary_embedding_max_timescale: float = 1e4
rotary_embedding_fraction_to_rotate: float = 1.0
# Whether to shard over the head dimension, setting this to False when the
# number of heads is not divisible your activation num_partitions
sharding_over_head_dimension: bool = True
q_conv: Optional[nn.Module] = None
k_conv: Optional[nn.Module] = None
v_conv: Optional[nn.Module] = None
  def update_cache_prefill(
      self, key: Array, value: Array, cached_key: variables.Variable,
      cached_value: variables.Variable, cache_index: variables.Variable,
      prefill_lengths: Array
  ) -> Tuple[Array, Array, Array, Array, Array, Array]:
    """Update the autoregressive cache for multiple timesteps at once.

    This is useful for things like a prefix-lm where the encoder section of the
    input is visible bidirectionally. The key and value for this section need to
    be computed in a single shot, as a step by step approach would result in
    causal attention.

    Args:
      key: The calculated key used in attention. [batch..., length, num_heads,
        features_per_head]
      value: The calculated value used in attention. [batch..., length,
        num_heads, features_per_head]
      cached_key: The cache of previous keys. [batch..., num_heads,
        features_per_head, length]
      cached_value: The cache of previous values. [batch..., num_heads,
        features_per_head, length]
      cache_index: The timestep that we are currently calculating the key and
        value for. [batch]
      prefill_lengths: The number of timesteps we should fill in the cache.
        [batch]

    Returns:
      The key, value, and the last timestep we just filled in the cache.
      We also return the new cache values for now because assigning to a
      variable inside of a method doesn't work. These returns will be removed
      eventually.
    """
    # Record each example's prefix length as its cache index.
    cache_index.value = prefill_lengths
    # Note: the cache index is now a vector of batch size, so that each
    # example can start just after its prefix, which can be a different
    # length for different examples.
    cur_index = cache_index.value
    # Move the sequence dimension to the end to match the cache shapes.
    key_cached = jnp.moveaxis(key, -3, -1)
    value_cached = jnp.moveaxis(value, -3, -1)
    # Reshape the index so the batch is at the beginning; default
    # broadcasting behavior is to add singleton dims to the front but
    # we need them at the end.
    batch_first_index = jnp.reshape(
        cur_index, (-1,) + tuple(1 for _ in range(cached_key.value.ndim - 1)))
    # Calculate a mask that will set any position past the prefix to zero
    # when applied to the key.
    key_mask = (
        lax.broadcasted_iota(jnp.int32, cached_key.value.shape,
                             cached_key.value.ndim - 1) < batch_first_index)
    value_mask = (
        lax.broadcasted_iota(jnp.int32, cached_value.value.shape,
                             cached_value.value.ndim - 1) < batch_first_index)
    # Set the caches with the calculated key and values but hide anything
    # past the prefix.
    cached_key_value = key_cached * key_mask
    cached_value_value = value_cached * value_mask
    return (key, value, cur_index, cached_key_value, cached_value_value,
            prefill_lengths)
  def update_cache_decode(
      self, key: Array, value: Array, cached_key: variables.Variable,
      cached_value: variables.Variable, cache_index: variables.Variable
  ) -> Tuple[Array, Array, Array, Array, Array, Array]:
    """Update the next timestep in the autoregressive cache.

    This is used during step by step decoding where each key and value we get
    are a single (the next) timestep.

    Args:
      key: The calculated key used in attention. [batch..., 1, num_heads,
        features_per_head]
      value: The calculated value used in attention. [batch..., 1, num_heads,
        features_per_head]
      cached_key: The cache of previous keys. [batch..., num_heads,
        features_per_head, length]
      cached_value: The cache of previous values. [batch..., num_heads,
        features_per_head, length]
      cache_index: The timestep that we are currently calculating the key and
        value for. [batch] if we are decoding after doing a prefill or [1] if we
        are starting with step-by-step decoding.

    Returns:
      The key, value, and the last timestep we just filled in the cache. Note:
      this index is the last timestep we just fill, the actual value of the
      `cache_index` is already increased to point to the next timestep to fill.
      We also return the new cache values for now because assigning to a
      variable inside of a method doesn't work. These returns will be removed
      eventually.
    """
    cache_length = cached_key.value.shape[-1]
    # Create a one-hot encoding (OHE) of the current index. NOTE: the index is
    # increased below.
    # We reshape the index into a column vector so that it works whether the
    # index is a scalar or a vector with different cache positions for
    # different elements in a batch (e.g. after a prefill).
    cur_index = jnp.reshape(cache_index.value, (-1,))
    one_hot_indices = jax.nn.one_hot(cur_index, cache_length, dtype=key.dtype)
    # In order to update the key, value caches with the current key and
    # value, we move the length axis to the back, similar to what we did
    # for the cached ones above.
    # Note these are currently the key and value of a single position,
    # since we feed one position at a time.
    one_token_key = jnp.moveaxis(key, -3, -1)
    one_token_value = jnp.moveaxis(value, -3, -1)
    # The one hot indices are now either [1, length] for a scalar index or
    # [batch size, length] for examples where there are different lengths
    # of prefixes. We need to add dims for num_heads and num_features as
    # broadcasting doesn't work for the batched version.
    one_hot_indices = jnp.expand_dims(
        jnp.expand_dims(one_hot_indices, axis=1), axis=1)
    # Update key, value caches with our new 1d spatial slices.
    # We implement an efficient scatter into the cache via one-hot
    # broadcast and addition: key/value have seq length 1 while one_hot has
    # seq length `length`, so key/value broadcast to every timestep and the
    # one-hot masks all but the correct one.
    key = cached_key.value + one_token_key * one_hot_indices
    value = cached_value.value + one_token_value * one_hot_indices
    cached_key_value = key
    cached_value_value = value
    cache_index_value = cache_index.value + 1
    # Move the keys and values back to their original shapes.
    key = jnp.moveaxis(key, -1, -3)
    value = jnp.moveaxis(value, -1, -3)
    return (key, value, cur_index, cached_key_value, cached_value_value,
            cache_index_value)
@nn.compact
def __call__(self,
inputs_q: Array,
inputs_kv: Array,
mask: Optional[Array] = None,
bias: Optional[Array] = None,
*,
precomputed_qkv: Optional[Array] = None,
decode: bool = False,
enable_dropout: bool = True,
prefill: bool = False,
prefill_lengths: Optional[Array] = None,
query_position_offset: Optional[Array] = None) -> Array:
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and project the results to an output vector.
There are two modes: decoding and non-decoding (e.g., training). The mode is
determined by `decode`.
During decoding mode, this method is called twice, by `init` and
`apply`. In the former, inputs_q: [batch..., length, qkv_features] and
inputs_kv: [batch..., length, qkv_features]
During apply, query, key and value all have the shape: [batch * beam, 1,
qkv_features] where the batch dimension is added to include multiple beams.
Note that the batch dimension is different during the init and apply calls.
This is because the cached variables are directly passed-in during `apply`
method. In other words, the cache variables such as `cached_key` are
initialized with `batch` dim, expanded by tiling in the beam search function
to `batch * beam` dimension, and passed to the `apply` method as part of a
variable dict.
Args:
inputs_q: input queries of shape `[batch_sizes..., q_length, q_features]`.
inputs_kv: key/values of shape `[batch_sizes..., kv_length, kv_features]`.
mask: attention mask of shape `[batch_sizes..., num_heads, q_length,
kv_length]`.
bias: attention bias of shape `[batch_sizes..., num_heads, q_length,
kv_length]`.
precomputed_qkv: when using fused implementations QKVO are defined outside
this module and we only use the module to run computations.
decode: Whether to prepare and use an autoregressive cache.
enable_dropout: Enables dropout if set to True.
prefill: Whether to run a partial sequence to prefill the cache.
prefill_lengths: The length of each partial sequence we are filling in the
cache, lengths are inferred from the mask if not provided.
query_position_offset: Optional query position offset to use when
calculating rotary encoding. Useful when the length of the queries is
different than the length of the keys and the query position does not
start at 0.
Returns:
If output_projection is True, then output of shape
`[batch_sizes..., length, out_features]`, where out_features is set to
features if not provided. If output_projection is False, then output of
shape `[batch_sizes..., length, num_heads, head_dim]`.
"""
dense_attention.validate_dense_attention_call_parameter_shapes(
inputs_q, inputs_kv, mask, bias, self.num_heads)
qkv_kernel_init = (
self.qkv_kernel_init
if self.qkv_kernel_init is not None else self.kernel_init)
kv_kernel_init = (
self.kv_kernel_init
if self.kv_kernel_init is not None else self.kernel_init)
q_kernel_init = (
self.q_kernel_init
if self.q_kernel_init is not None else self.kernel_init)
if precomputed_qkv is not None:
raise ValueError('Support for precomputed QKVO not implemented.')
rotary_index = None
features = self.out_features or inputs_q.shape[-1]
qkv_features = self.qkv_features or inputs_q.shape[-1]
if self.head_dim is None:
head_dim = qkv_features // self.num_heads
else:
head_dim = self.head_dim
if self.kernels_to_fuse and not self.split_head_kernel:
raise ValueError('Un-reshaped kernels are required when using QKV fused '
'kernel optimization.')
# Is attention logit rescaling explicit or folded into initializer?
if self.rescale_logits:
query_init = q_kernel_init
else:
if self.kernels_to_fuse:
raise ValueError('Cannot fold in logit normalization to query '
'initializer when using fused kernels.')
depth_scaling = jnp.sqrt(head_dim).astype(self.dtype)
query_init = lambda *args: q_kernel_init(*args) / depth_scaling
make_dense = functools.partial(
dense.DenseGeneral,
axis=-1,
bias_init=self.bias_init,
use_bias=self.use_bias,
dtype=self.dtype,
precision=self.precision,
reshape_kernel=not self.split_head_kernel,
)
# Project inputs_q to multi-headed q/k/v
# dimensions are then [batch..., length, num_heads, features_per_head]
if self.kernels_to_fuse is None:
query = make_dense(
kernel_init=query_init,
features=(self.num_heads, head_dim),
kernel_axis_names=['embed', 'heads', 'kv'],
name='query')(
inputs_q)
key = make_dense(
kernel_init=self.kernel_init,
features=(self.num_heads, head_dim),
kernel_axis_names=['embed', 'heads', 'kv'],
name='key')(
inputs_kv)
value = make_dense(
kernel_init=self.kernel_init,
features=(self.num_heads, head_dim),
kernel_axis_names=['embed', 'heads', 'kv'],
name='value')(
inputs_kv)
# TODO: should we fuse/slice along depth or head dim?
elif self.kernels_to_fuse == 'qkv':
if inputs_q is not inputs_kv:
raise ValueError('qkv fusion is only supported in self-attention mode '
'(when inputs_q is inputs_kv).')
# 'qkv' fusion mode implies self-attention
qkv = make_dense(
kernel_init=qkv_kernel_init,
features=(3, self.num_heads, head_dim),
kernel_axis_names=['embed', 'stack', 'heads', 'kv'],
name='qkv_fused')(
inputs_q)
query = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 0, 1, -3), -3)
key = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 1, 1, -3), -3)
value = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 2, 1, -3), -3)
elif self.kernels_to_fuse == 'kv':
query = make_dense(
kernel_init=query_init,
features=(self.num_heads, head_dim),
kernel_axis_names=['embed', 'heads', 'kv'],
name='query')(
inputs_q)
kv = make_dense(
kernel_init=kv_kernel_init,
features=(2, self.num_heads, head_dim),
kernel_axis_names=['embed', 'stack', 'heads', 'kv'],
name='kv_fused')(
inputs_kv)
key = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 0, 1, -3), -3)
value = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 1, 1, -3), -3)
else:
raise ValueError('Incorrect kernel fusion mode specified.')
# Multi Dconv Head Attention options:
if self.q_conv is not None:
query = self.q_conv( # pylint: disable=not-callable
query,
decode=decode,
prefill=prefill,
prefill_lengths=prefill_lengths)
if self.k_conv is not None:
key = self.k_conv( # pylint: disable=not-callable
key,
decode=decode,
prefill=prefill,
prefill_lengths=prefill_lengths)
if self.v_conv is not None:
value = self.v_conv( # pylint: disable=not-callable
value,
decode=decode,
prefill=prefill,
prefill_lengths=prefill_lengths)
if self.sharding_over_head_dimension:
# Note: We don't use `activation_partitioning.with_sharding_migration`
# here because we do often want this 2D sharded. However, if rules are
# valid, they should result in 2D sharding. We don't need to raise errors
# if both result in 2D sharding (which with_sharding_migration does).
if flax_partitioning.get_axis_rules():
query = flax_partitioning.with_sharding_constraint(
query, ('batch', 'length', 'heads', 'kv'))
key = flax_partitioning.with_sharding_constraint(
key, ('batch', 'length', 'heads', 'kv'))
value = flax_partitioning.with_sharding_constraint(
value, ('batch', 'length', 'heads', 'kv'))
else:
query = activation_partitioning.with_sharding(query, 2)
key = activation_partitioning.with_sharding(key, 2)
value = activation_partitioning.with_sharding(value, 2)
query: Array = query # hint to quiet pytype.
key: Array = key
value: Array = value
if prefill and decode:
raise ValueError('prefill and decode cannot both be true at the same'
'time. If you are using a prefix LM with bidirectional '
'attention on the inputs, please make a call with '
'prefill=True that includes an attention mask that '
'covers your inputs first and then make your decoding '
'calls.')
if prefill or decode:
# Detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable('cache', 'cached_key')
# The key and value have dimension
# [batch..., length, num_heads, features_per_head], but we cache them as
# [batch..., num_heads, features_per_head, length] as a TPU fusion
# optimization. This also enable the "scatter via one-hot broadcast"
# trick, which means we do a one-hot broadcast instead of a scatter/gather
# operations, which gives a 3-4x speedup in practice.
swap_dims = lambda x: x[:-3] + tuple(x[i] for i in [-2, -1, -3])
cached_key = self.variable('cache', 'cached_key', jnp.zeros,
swap_dims(key.shape), key.dtype)
cached_value = self.variable('cache', 'cached_value', jnp.zeros,
swap_dims(value.shape), value.dtype)
cache_index = self.variable('cache', 'cache_index',
lambda: jnp.array(0, dtype=jnp.int32))
rotary_index = cache_index.value
if is_initialized:
# Here we are in "apply()".
*batch_dims, num_heads, features_per_head, length = (
cached_key.value.shape)
if prefill:
if prefill_lengths is None:
# Figure out how far each element in the batch fills the cache based
# on the mask. We index each element in the batch, the first head
# dim (because this is always set to one), and the first query
# vector. If there is any prefix at all, the first element in the
# prefix would be part of it.
prefill_lengths = jnp.sum(
mask[:, 0, 0, :], axis=-1).astype(cache_index.value.dtype)
(key, value, cur_index, cached_key_value, cached_value_value,
cache_index_value) = self.update_cache_prefill(
key, value, cached_key, cached_value, cache_index,
prefill_lengths)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
elif decode:
# Check the shape of the cached key against the input query.
expected_shape = tuple(batch_dims) + (1, num_heads, features_per_head)
if expected_shape != query.shape:
raise ValueError('Autoregressive cache shape error, '
'expected query shape %s instead got %s.' %
(expected_shape, query.shape))
(key, value, cur_index, cached_key_value, cached_value_value,
cache_index_value) = self.update_cache_decode(
key, value, cached_key, cached_value, cache_index)
# Enforcing the Causal mask over previous positions and selecting only
# the bias value for the current index is only needed during decode
# mode where a single example is feed at a time. In prefill mode we
# uses these as provided, that same way it is done in a normal forward
# pass, like when computing logits during training.
# Causal mask for cached decoder self-attention: our single query
# position should only attend to those key positions that have already
# been generated and cached, not the remaining zero elements.
# (1, 1, length) represent (head dim, query length, key length)
# query length is 1 because during decoding we deal with one
# index.
# The same mask is applied to all batch elements and heads.
#
# Add trailing dims to the current index so it can either
# broadcast over the batch dim or it can just be batch size.
mask = dense_attention.combine_masks(
mask,
jnp.broadcast_to(
jnp.arange(length),
tuple(batch_dims) +
(1, 1, length)) <= jnp.reshape(cur_index, (-1, 1, 1, 1)))
# Grab the correct relative attention bias during decoding. This is
# only required during single step decoding.
if bias is not None:
# The bias is a full attention matrix, but during decoding we only
# have to take a slice of it.
# This is equivalent to bias[..., cur_index:cur_index+1, :].
# If we are doing prefix decoding where cur index is a vector the
# result will be [batch, heads, 1, :]. If cur_index is a scalar
# like in encdec decoding, the result will be [1, heads, 1, :].
# We use a one-hot einsum rather than a slice to avoid introducing
# a Gather op that is currently lowered poorly by SPMD passes,
# adding expensive all-reduce and all-gather operations.
bias = jnp.einsum(
'bq, bhqk->bhk',
common_utils.onehot(cur_index, num_classes=length), bias)
bias = jnp.expand_dims(bias, 2)
# Currently, updating a variable inside of a method is not handled
# in flax, so we return the actual values and assign them in the main
# compacted call for now.
# TODO: Move variable assignment inside of the
# cache update functions once variable references are tracked across
# transform boundaries.
cache_index.value = cache_index_value
cached_key.value = cached_key_value
cached_value.value = cached_value_value
# Convert the boolean attention mask to an attention bias.
if mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
mask > 0,
jnp.full(mask.shape, 0.).astype(self.dtype),
jnp.full(mask.shape, -1e10).astype(self.dtype))
else:
attention_bias = None
# Add provided bias term (e.g. relative position embedding).
if bias is not None:
attention_bias = dense_attention.combine_biases(attention_bias, bias)
dropout_rng = None
if enable_dropout and self.dropout_rate > 0.:
dropout_rng = self.make_rng('dropout')
if self.use_rotary_embedding:
# use rotary embeddings before attention
# https://arxiv.org/abs/2104.09864
# TODO: Put it in a new class
query, key = rotary_embedding.apply_rotary_embedding_to_subset(
query,
key,
max_timescale=self.rotary_embedding_max_timescale,
fraction_to_rotate=self.rotary_embedding_fraction_to_rotate,
decode=decode,
rotary_index=rotary_index,
query_position_offset=query_position_offset)
# Compute attention.
x = self.attention_fn(
query,
key,
value,
bias=attention_bias,
broadcast_dropout=self.broadcast_dropout,
rescale_logits=self.rescale_logits,
dropout_rng=dropout_rng,
dropout_rate=self.dropout_rate,
enable_dropout=enable_dropout,
dtype=self.dtype,
precision=self.precision,
use_extra_logit=self.use_extra_logit,
float32_logits=self.float32_logits,
) # pytype: disable=wrong-keyword-args
if not self.output_projection:
return x
# Back to the original inputs dimensions.
out = dense.DenseGeneral(
features=features,
axis=(-2, -1),
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
dtype=self.dtype,
precision=self.precision,
reshape_kernel=not self.split_head_kernel,
kernel_axis_names=['heads', 'kv', 'embed'],
name='out')( # pytype: disable=wrong-arg-types
x)
return out
class MultiQueryDotProductAttention(nn.Module, dense_attention.DenseAttention):
  """Multi-query dot-product attention.

  Forked from the main Flaxformer implementation to allow passing in query
  position offset information.

  This is a variant of the MultiHeadDotProductAttention. The key and the value
  have 1 head whereas query has 1 or more heads. This variant, called
  "multi-query" attention, was introduced in Shazeer 2019
  (https://arxiv.org/abs/1911.02150).

  Attributes:
    num_heads: number of attention heads for query. Features (i.e.
      inputs_q.shape[-1]) should be divisible by the number of heads.
    use_bias: bool: whether pointwise QKVO dense transforms use bias.
    dtype: the dtype of the computation (default: float32)
    qkv_features: dimension of the key, query, and value.
    head_dim: dimension of each head. If unspecified, it defaults to
      qkv_features // num_heads.
    out_features: dimension of the last projection
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rate: dropout rate
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_init: initializer for the kernel of the Dense layers.
    q_kernel_init: optional initializer for the query (q) kernel. If None,
      kernel_init will be used instead.
    bias_init: initializer for the bias of the Dense layers.
    rescale_logits: whether attention-logit rescaling is handled explicitly by
      the attention function; if False, 1/sqrt(head_dim) is folded into the
      query kernel initializer instead.
    attention_fn: dot_product_attention or compatible function. Accepts query,
      key, value, and returns output of shape `[bs, dim1, dim2, ..., dimN,
      num_heads, value_channels]`
    use_extra_logit: whether to use a virtual extra logit equal to zero.
    float32_logits: bool, if True then compute logits in float32 to avoid
      numerical issues with bfloat16.
    use_rotary_embedding: whether to use RoPE embeddings.
    rotary_embedding_max_timescale: maximum timescale used for the rotary
      embedding frequencies.
    rotary_embedding_fraction_to_rotate: fraction of the head dimension the
      rotary embedding is applied to (passed through to
      `apply_rotary_embedding_to_subset`).
    split_head_kernel: whether to store the projection kernels with an
      explicit heads dimension rather than reshaped into a 2D matrix.
    q_conv: optional module applied to the query projection (Multi DConv Head
      Attention).
    k_conv: optional module applied to the key projection.
    v_conv: optional module applied to the value projection.
    use_aqt: whether to use aqt quantization.
    weight_params: Parameters for weight quantization.
    act_params: Parameters for activation quantization.
    possibly_use_quantized_vars: forwarded to the AQT dense layers when
      quantization is enabled.
  """
  num_heads: int
  use_bias: bool
  dtype: DType = jnp.float32
  qkv_features: Optional[int] = None
  head_dim: Optional[int] = None
  out_features: Optional[int] = None
  broadcast_dropout: bool = True
  dropout_rate: float = 0.
  precision: Optional[lax.Precision] = None
  kernel_init: Initializer = default_kernel_init  # pytype: disable=annotation-type-mismatch  # jax-types
  q_kernel_init: Optional[Initializer] = None
  bias_init: Initializer = initializers.zeros
  rescale_logits: bool = False
  attention_fn: Callable[[Array, Array, Array], Array] = staticmethod(
      dense_attention.dot_product_attention_multiquery)
  use_extra_logit: bool = False
  float32_logits: bool = False
  use_rotary_embedding: bool = False
  rotary_embedding_max_timescale: float = 1e4
  rotary_embedding_fraction_to_rotate: float = 1.0
  split_head_kernel: bool = False
  q_conv: Optional[nn.Module] = None
  k_conv: Optional[nn.Module] = None
  v_conv: Optional[nn.Module] = None
  use_aqt: Optional[bool] = False
  weight_params: Optional[aqt.QuantOps.WeightParams] = None
  act_params: Optional[aqt.QuantOps.ActHParams] = None
  possibly_use_quantized_vars: bool = False
  def update_cache_prefill(
      self, key: Array, value: Array, cached_key: variables.Variable,
      cached_value: variables.Variable, cache_index: variables.Variable,
      prefill_lengths: Array
  ) -> Tuple[Array, Array, Array, variables.Variable, variables.Variable,
             variables.Variable]:
    """Update the autoregressive cache for multiple timesteps at once.

    This is useful for things like a prefix-lm where the encoder section of the
    input is visible bidirectionally. The key and value for this section need to
    be computed in a single shot, as a step by step approach would result in
    causal attention.

    Args:
      key: The calculated key used in attention. [batch..., length,
        features_per_head]
      value: The calculated value used in attention. [batch..., length,
        features_per_head]
      cached_key: The cache of previous keys. [batch..., features_per_head,
        length]
      cached_value: The cache of previous values. [batch..., features_per_head,
        length]
      cache_index: The timestep that we are currently calculating the key and
        value for. [batch]
      prefill_lengths: The number of timesteps we should fill in the cache.
        [batch]

    Returns:
      The key, value, and the last timestep we just filled in the cache.
    """
    # Point the per-example cache index just past each example's prefix.
    cache_index.value = prefill_lengths
    # Make a reference to the data underlying the variable for ease of
    # use.
    cur_index = cache_index.value
    # Move the sequence dimension to the end to match the cache shapes.
    key_cached = jnp.moveaxis(key, -2, -1)
    value_cached = jnp.moveaxis(value, -2, -1)
    # Reshape the index so the batch is at the beginning, default
    # broadcasting behavior is to add singleton dims to the front but
    # we need them at the end.
    batch_first_index = jnp.reshape(
        cur_index, (-1,) + tuple(1 for _ in range(cached_key.value.ndim - 1)))
    # Calculate a mask that will set any position past the prefix to zero
    # when applied to the key.
    key_mask = (
        lax.broadcasted_iota(jnp.int32, cached_key.value.shape,
                             cached_key.value.ndim - 1) < batch_first_index)
    value_mask = (
        lax.broadcasted_iota(jnp.int32, cached_value.value.shape,
                             cached_value.value.ndim - 1) < batch_first_index)
    # Set the caches with the calculated key and values but hide anything
    # past the prefix.
    cached_key_value = key_cached * key_mask
    cached_value_value = value_cached * value_mask
    return (key, value, cur_index, cached_key_value, cached_value_value,  # pytype: disable=bad-return-type  # jax-ndarray
            prefill_lengths)
  def update_cache_decode(
      self, key: Array, value: Array, cached_key: variables.Variable,
      cached_value: variables.Variable, cache_index: variables.Variable
  ) -> Tuple[Array, Array, Array, variables.Variable, variables.Variable,
             variables.Variable]:
    """Update the next timestep in the autoregressive cache.

    This is used during step by step decoding where each key and value we get
    are a single (the next) timestep.

    Args:
      key: The calculated key used in attention. [batch..., 1,
        features_per_head]
      value: The calculated value used in attention. [batch..., 1,
        features_per_head]
      cached_key: The cache of previous keys. [batch..., features_per_head,
        length]
      cached_value: The cache of previous values. [batch..., features_per_head,
        length]
      cache_index: The timestep that we are currently calculating the key and
        value for. [batch]

    Returns:
      The key, value, and the last timestep we just filled in the cache. Note:
      this index is the last timestep we just fill, the actual value of the
      `cache_index` is already increased to point to the next timestep to fill.
    """
    cache_length = cached_key.value.shape[-1]
    # Create a one-hot encoding (OHE) of the current index.
    # NOTE: the index is increased below.
    cur_index = jnp.reshape(cache_index.value, (-1,))
    one_hot_indices = jax.nn.one_hot(cur_index, cache_length, dtype=key.dtype)
    # In order to update the key, value caches with the current key and
    # value, we move the length axis to the back, similar to what we did
    # for the cached ones above.
    # Note these are currently the key and value of a single position,
    # since we feed one position at a time.
    # [batch..., length, features_per_head] -> [batch...,
    # features_per_head, length]
    one_token_key = jnp.moveaxis(key, -2, -1)
    one_token_value = jnp.moveaxis(value, -2, -1)
    # The one hot indices are now either [1, length] for a scalar index or
    # [batch size, length] for examples where there are different lengths
    # of prefixes. We need to add a singleton dim for features_per_head as
    # broadcasting doesn't work for the batched version.
    one_hot_indices = jnp.expand_dims(one_hot_indices, axis=1)
    # Update key, value caches with our new 1d spatial slices.
    # We implement an efficient scatter into the cache via one-hot
    # broadcast and addition.
    key = cached_key.value + one_token_key * one_hot_indices
    value = cached_value.value + one_token_value * one_hot_indices
    cached_key_value = key
    cached_value_value = value
    # Advance the index so the next step writes the next position.
    cache_index_value = cache_index.value + 1
    # Move the keys and values back to their original shapes.
    key = jnp.moveaxis(key, -1, -2)
    value = jnp.moveaxis(value, -1, -2)
    return (key, value, cur_index, cached_key_value, cached_value_value,
            cache_index_value)
@nn.compact
def __call__(self,
inputs_q: Array,
inputs_kv: Array,
mask: Optional[Array] = None,
bias: Optional[Array] = None,
*,
precomputed_qkv: Optional[Array] = None,
decode: bool = False,
enable_dropout: bool = True,
prefill: bool = False,
prefill_lengths: Optional[Array] = None,
query_position_offset: Optional[Array] = None) -> Array:
"""Applies multi-query dot product attention on the input data.
Projects the inputs into multi-headed query and single-headed key and value
vectors, applies dot-product attention and project the results to an output
vector.
Args:
inputs_q: input queries of shape `[batch_sizes..., q_length, q_features]`.
inputs_kv: key/values of shape `[batch_sizes..., kv_length, kv_features]`.
mask: attention mask of shape `[batch_sizes..., num_heads, q_length,
kv_length]`.
bias: attention bias of shape `[batch_sizes..., num_heads, q_length,
kv_length]`.
precomputed_qkv: 3-tuple of precomputed query, key, value arrays, only
used for parallel, fused-parameter optimizations.
decode: Whether to prepare and use an autoregressive cache.
enable_dropout: Enables dropout if set to True.
prefill: Whether to run a partial sequence to prefill the cache.
prefill_lengths: The length of each partial sequence we are filling in the
cache, lengths are inferred from the mask if not provided.
query_position_offset: Optional query position offset to use when
calculating rotary encoding. Useful when the length of the queries is
different than the length of the keys and the query position does not
start at 0.
Returns:
output of shape `[batch_sizes..., length, features]`.
"""
dense_attention.validate_dense_attention_call_parameter_shapes(
inputs_q, inputs_kv, mask, bias, self.num_heads)
q_kernel_init = (
self.q_kernel_init
if self.q_kernel_init is not None else self.kernel_init)
rotary_index = None
features = self.out_features or inputs_q.shape[-1]
qkv_features = self.qkv_features or inputs_q.shape[-1]
if self.head_dim is None:
head_dim = qkv_features // self.num_heads
else:
head_dim = self.head_dim
# Is attention logit rescaling explicit or folded into initializer?
if self.rescale_logits:
query_init = q_kernel_init
else:
depth_scaling = jnp.sqrt(head_dim).astype(self.dtype)
query_init = lambda *args: q_kernel_init(*args) / depth_scaling
def dense_output(
features,
axis,
kernel_init,
kernel_axis_names,
name,
inputs,
reshape_kernel=True,
):
if self.use_aqt:
if self.weight_params is None and self.act_params is None:
raise ValueError(
'If use_aqt is True, either of weights or acts quantization need '
'to be specified using arguments `weight_params` or `act_params`.'
)
# TODO: Push the "quantized vs not" decision down into
# the AQT library. Currently we make that decision here, because the AQT
# library doesn't support DenseGeneral.
aqt_context = aqt_config.DynamicContext(
update_bounds=False, collect_acts_stats=False)
weight_prec = self.weight_params.prec if self.weight_params else None
half_shift = self.weight_params.half_shift if self.weight_params else False
aqt_hparams = aqt_flax_layers.DenseAqt.HParams(
weight_prec=weight_prec,
weight_half_shift=half_shift,
quant_act=self.act_params, # currently supports fixed bounds only.
quant_type=aqt.QuantType.AQT,
weight_quant_granularity=aqt_config.QuantGranularity.PER_CHANNEL,
)
return aqt_flax_layers.DenseAqt(
features=features,
hparams=aqt_hparams,
train=enable_dropout,
dynamic_context=aqt_context,
paxis_name=None,
# No "cross-replica" reduction expressed in the XLA graph at this
# stage. Will be imposed later, automatically, by XLA SPMD.
use_bias=self.use_bias,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
dtype=self.dtype,
kernel_axis_names=kernel_axis_names,
# we do not have reshape kernel option here but we explicitly
# reshape kernel.
precision=self.precision,
possibly_use_quantized_vars=self.possibly_use_quantized_vars,
name=name,
)(inputs, padding_mask=None)
else:
return dense.DenseGeneral(
axis=axis,
features=features,
bias_init=self.bias_init,
use_bias=self.use_bias,
dtype=self.dtype,
kernel_init=kernel_init,
precision=self.precision,
kernel_axis_names=kernel_axis_names,
reshape_kernel=reshape_kernel,
name=name)(
inputs)
# Project inputs_q to multi-headed q and single-headed k and v
# query dimension is then [batch..., length, num_heads, features_per_head]
# key and value dimensions are [batch..., length, features_per_head].
if precomputed_qkv is None:
query = dense_output(
features=(self.num_heads, head_dim),
axis=-1,
kernel_init=query_init,
kernel_axis_names=['embed', 'heads', 'kv'],
name='query',
inputs=inputs_q,
reshape_kernel=not self.split_head_kernel,
)
key = dense_output(
features=head_dim,
axis=-1,
kernel_init=self.kernel_init,
kernel_axis_names=['embed', 'kv'],
name='key',
inputs=inputs_kv)
value = dense_output(
features=head_dim,
axis=-1,
kernel_init=self.kernel_init,
kernel_axis_names=['embed', 'kv'],
name='value',
inputs=inputs_kv)
else:
query, key, value = precomputed_qkv
# Multi Dconv Head Attention options:
if self.q_conv is not None:
query = self.q_conv( # pylint: disable=not-callable
query,
decode=decode,
prefill=prefill,
prefill_lengths=prefill_lengths)
if self.k_conv is not None:
key = self.k_conv( # pylint: disable=not-callable
key,
decode=decode,
prefill=prefill,
prefill_lengths=prefill_lengths)
if self.v_conv is not None:
value = self.v_conv( # pylint: disable=not-callable
value,
decode=decode,
prefill=prefill,
prefill_lengths=prefill_lengths)
sharding_prefix = 'attn_decode' if decode else 'attn_encode'
bias_sharding = (f'{sharding_prefix}_batch', f'{sharding_prefix}_heads',
f'{sharding_prefix}_q_length',
f'{sharding_prefix}_kv_length')
# Note: We don't use `activation_partitioning.with_sharding_migration` here
# because we do often want this 2D sharded. However, if rules are valid,
# they should result in 2D sharding. We don't need to raise errors if both
# result in 2D sharding (which with_sharding_migration does).
if flax_partitioning.get_axis_rules():
query = flax_partitioning.with_sharding_constraint(
query, ('batch', 'length', 'heads', 'kv'))
else:
query = activation_partitioning.with_sharding(query, 2)
if prefill and decode:
raise ValueError('prefill and decode cannot both be true at the same'
'time. If you are using a prefix LM with bidirectional '
'attention on the inputs, please make a call with '
'prefill=True that includes an attention mask that '
'covers your inputs first and then make your decoding '
'calls.')
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if prefill or decode:
# Detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable('cache', 'cached_key')
# The key and value have dimension
# [batch..., length, features_per_head], but we cache them as
# [batch..., features_per_head, length] as a TPU fusion
# optimization. This also enable the "scatter via one-hot broadcast"
# trick, which means we do a one-hot broadcast instead of a scatter/gather
# operations, which gives a 3-4x speedup in practice.
swap_dims = lambda x: x[:-2] + tuple(x[i] for i in [-1, -2])
cached_key = flax_partitioning.variable_with_axes(
'cache',
'cached_key',
jnp.zeros,
swap_dims(key.shape),
key.dtype,
axes=('cache_batch', 'cache_kv', 'cache_length'),
fallback=RulesFallback.NO_CONSTRAINT)
cached_value = flax_partitioning.variable_with_axes(
'cache',
'cached_value',
jnp.zeros,
swap_dims(value.shape),
value.dtype,
axes=('cache_batch', 'cache_kv', 'cache_length'),
fallback=RulesFallback.NO_CONSTRAINT)
cache_index = flax_partitioning.variable_with_axes(
'cache',
'cache_index',
jnp.zeros,
query.shape[0],
jnp.int32,
axes=('cache_batch',),
fallback=RulesFallback.NO_CONSTRAINT)
rotary_index = cache_index.value
if is_initialized:
# Here we are in "apply()".
*batch_dims, features_per_head, length = cached_key.value.shape
if prefill:
# Figure out how far each element in the batch fills the cache based
# on the mask. We index each element in the batch, the first head
# dim (because this is always set to one), and the first query
# vector. If there is any prefix at all, the first element in the
# prefix would be part of it. Note, the cache index is now a vector
# of batch size so that each example can start just after it's
# prefix which can be different lengths for different examples.
if prefill_lengths is None:
prefill_lengths = jnp.sum(
mask[:, 0, 0, :], axis=-1).astype(cache_index.value.dtype)
(key, value, cur_index, cached_key_value, cached_value_value,
cache_index_value) = self.update_cache_prefill(
key, value, cached_key, cached_value, cache_index,
prefill_lengths)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
elif decode:
# Check the shape of the cached key against the input query.
expected_query_shape = tuple(batch_dims) + (1, self.num_heads,
features_per_head)
if expected_query_shape != query.shape:
raise ValueError('Autoregressive cache shape error, '
'expected query shape %s instead got %s.' %
(expected_query_shape, query.shape))
expected_key_shape = tuple(batch_dims) + (1, features_per_head)
if expected_key_shape != key.shape:
raise ValueError('Autoregressive cache shape error, '
'expected key shape %s instead got %s.' %
(expected_key_shape, key.shape))
# value and key should have the same shape.
if expected_key_shape != value.shape:
raise ValueError('Autoregressive cache shape error, '
'expected value shape %s instead got %s.' %
(expected_key_shape, value.shape))
(key, value, cur_index, cached_key_value, cached_value_value,
cache_index_value) = self.update_cache_decode(
key, value, cached_key, cached_value, cache_index)
# Enforcing the Causal mask over previous positions and selecting only
# the bias value for the current index is only needed during decode
# mode where a single example is feed at a time. In prefill mode we
# uses these as provided, that same way it is done in a normal forward
# pass, like when computing logits during training.
# Causal mask for cached decoder self-attention: our single query
# position should only attend to those key positions that have already
# been generated and cached, not the remaining zero elements.
#
# (1, 1, length) represent (head dim, query length, key length)
# query length is 1 because during decoding we deal with one
# index.
# The same mask is applied to all batch elements and heads.
#
# Add trailing dims to the current index so it can either
# broadcast over the batch dim or it can just be batch size.
mask = dense_attention.combine_masks(
mask,
jnp.broadcast_to(
jnp.arange(length),
tuple(batch_dims) +
(1, 1, length)) <= jnp.reshape(cur_index, (-1, 1, 1, 1)))
mask = flax_partitioning.with_sharding_constraint(
mask, (f'{sharding_prefix}_batch', None, None, None),
fallback=RulesFallback.NO_CONSTRAINT)
# Grab the correct relative attention bias during decoding.
if bias is not None:
# The bias is a full attention matrix, but during decoding we only
# have to take a slice of it.
# This is equivalent to bias[..., cur_index:cur_index+1, :].
# If we are doing prefix decoding where cur index is a vector the
# result will be [batch, heads, 1, :]. If cur_index is a scalar
# like in encdec decoding, the result will be [1, heads, 1, :]
# We use a one-hot einsum rather than a slice to avoid introducing
# a Gather op that is currently lowered poorly by SPMD passes,
# adding expensive all-reduce and all-gather operations.
bias = jnp.einsum(
'bq, bhqk->bhk',
common_utils.onehot(cur_index, num_classes=length), bias)
bias = jnp.expand_dims(bias, 2)
bias = flax_partitioning.with_sharding_constraint(
bias, bias_sharding, fallback=RulesFallback.NO_CONSTRAINT)
# Currently, updating a variable inside of a method is not handled
# in flax, so we return the actual values and assign them in the main
# compacted call for now.
# TODO: Move variable assignment inside of the
# cache update functions once variable references are tracked across
# transform boundaries.
cache_index.value = cache_index_value
cached_key.value = cached_key_value
cached_value.value = cached_value_value
# Convert the boolean attention mask to an attention bias.
if mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
mask > 0,
jnp.full(mask.shape, 0.).astype(self.dtype),
jnp.full(mask.shape, -1e10).astype(self.dtype))
attention_bias = flax_partitioning.with_sharding_constraint(
attention_bias, bias_sharding, fallback=RulesFallback.NO_CONSTRAINT)
else:
attention_bias = None
# Add provided bias term (e.g. relative position embedding).
if bias is not None:
attention_bias = dense_attention.combine_biases(attention_bias, bias)
attention_bias = flax_partitioning.with_sharding_constraint(
attention_bias, bias_sharding, fallback=RulesFallback.NO_CONSTRAINT)
dropout_rng = None
if enable_dropout and self.dropout_rate > 0.:
dropout_rng = self.make_rng('dropout')
# During decode we typically want to reshard at this point from sharding by
# by head to sharding by batch. Give new names to the sharding axes to allow
# this reshard.
query = flax_partitioning.with_sharding_constraint(
query, (f'{sharding_prefix}_batch', f'{sharding_prefix}_q_length',
f'{sharding_prefix}_heads', 'kv'),
fallback=RulesFallback.NO_CONSTRAINT)
key = flax_partitioning.with_sharding_constraint(
key, (f'{sharding_prefix}_batch', f'{sharding_prefix}_kv_length', 'kv'),
fallback=RulesFallback.NO_CONSTRAINT)
value = flax_partitioning.with_sharding_constraint(
value,
(f'{sharding_prefix}_batch', f'{sharding_prefix}_kv_length', 'kv'),
fallback=RulesFallback.NO_CONSTRAINT)
if self.use_rotary_embedding:
# use rotary embeddings before attention
# https://arxiv.org/abs/2104.09864
# TODO: Figure out if this should be put in a new class.
query, key = rotary_embedding.apply_rotary_embedding_to_subset(
query,
key,
max_timescale=self.rotary_embedding_max_timescale,
fraction_to_rotate=self.rotary_embedding_fraction_to_rotate,
decode=decode,
rotary_index=rotary_index,
query_position_offset=query_position_offset)
# Apply attention.
x = self.attention_fn(
query,
key,
value,
bias=attention_bias,
broadcast_dropout=self.broadcast_dropout,
rescale_logits=self.rescale_logits,
dropout_rng=dropout_rng,
dropout_rate=self.dropout_rate,
enable_dropout=enable_dropout,
dtype=self.dtype,
precision=self.precision,
use_extra_logit=self.use_extra_logit,
float32_logits=self.float32_logits) # pytype: disable=wrong-keyword-args
# During decode we typically want to reshard at this point from sharding by
# batch to sharding by head. Return to the old names of the sharding axes to
# allow this reshard.
x = flax_partitioning.with_sharding_constraint(
x, (f'{sharding_prefix}_batch', f'{sharding_prefix}_q_length',
f'{sharding_prefix}_heads', 'kv'),
fallback=RulesFallback.NO_CONSTRAINT)
x = flax_partitioning.with_sharding_constraint(
x, ('batch', 'length', 'heads', 'kv'),
fallback=RulesFallback.NO_CONSTRAINT)
if precomputed_qkv is None:
kernel_axis_names = ['heads', 'kv', 'embed']
# TODO: activation quantization support is unimplemented
# here.
if self.use_aqt and self.weight_params is not None:
weight_prec = self.weight_params.prec if self.weight_params else None
half_shift = self.weight_params.half_shift if self.weight_params else False
aqt_hparams = aqt_flax_layers.DenseGeneralAqt.HParams(
weight_prec=weight_prec,
weight_half_shift=half_shift,
quant_act=None, # currently supports fixed bounds only.
weight_quant_granularity=aqt_config.QuantGranularity.PER_CHANNEL,
)
out = aqt_flax_layers.DenseGeneralAqt(
hparams=aqt_hparams,
train=enable_dropout,
possibly_use_quantized_vars=self.possibly_use_quantized_vars,
features=features,
axis=(-2, -1),
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
dtype=self.dtype,
precision=self.precision,
kernel_axis_names=kernel_axis_names,
reshape_kernel=not self.split_head_kernel,
name='out')( # pytype: disable=wrong-arg-types
x)
else:
# Back to the original inputs dimensions.
out = dense.DenseGeneral(
features=features,
axis=(-2, -1),
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
dtype=self.dtype,
precision=self.precision,
kernel_axis_names=kernel_axis_names,
reshape_kernel=not self.split_head_kernel,
name='out')( # pytype: disable=wrong-arg-types
x)
else:
# in fused parallel layer, fused outer dense operation is external
out = x
return out
| 57,763 | 44.41195 | 122 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/t5_models.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains "model" classes for T5 models."""
import enum
import functools
from typing import Any, Callable, Mapping, MutableMapping, Optional, Tuple
from absl import logging
import flax
from flax import linen as nn
from flax import traverse_util
from flax.training import common_utils
import jax
from jax import lax
import jax.numpy as jnp
import seqio
from t5x import decoding
from t5x import losses
from t5x import models
from t5x import optimizers
from flaxformer.architectures.perceiver_ar import slicing
PyTree = Any
def _crop_sequences(sequences: jnp.ndarray,
lengths: jnp.ndarray) -> jnp.ndarray:
"""Crop sequences by replacing positions beyond length with padding."""
return jnp.where(
jnp.arange(sequences.shape[-1])[jnp.newaxis, :] < lengths[:, jnp.newaxis],
sequences, 0)
class CroppingMethod(enum.Enum):
  """Perceiver AR training cropping methods.

  NONE: Cropping will be done in the data pipeline, so no online cropping
    is needed.

  FULL_LATENTS: Random placement of latents between the beginning and end
    of the sequence where loss is calculated. As many latents as possible are
    allocated positions.
    Advantage: Loss over as many tokens as possible, better use of compute.
    Disadvantage: May bias against learning to generate positions toward the
    beginning or end of sequences because they will be selected less
    frequently. For prefix tasks, does not match latent positions at inference
    time.

  FULL_LATENTS_WITH_PREFIX: Same as FULL_LATENTS, but allows the beginning
    of the window to a prefix where loss is not calculated, up to the point
    where only 1 position has loss. This matches inference behavior for a prefix
    task because (depending on the decoding_latent_reset_fill setting) the first
    inferred position can utilize all previous latents allocated to the prefix.
    Advantage: Loss over as many tokens as possible while still matching
    inference latent placement.
    Disadvantage: Prefix positions do not have loss calculated, so there are
    fewer positions with loss than with FULL_LATENTS. Also still has
    some of the bias issues fixed with EQUAL_POSITION_LIKELIHOOD.

  EQUAL_POSITION_LIKELIHOOD: Random placement of latents such that every
    sequence position within the loss mask is equally likely to have loss
    calculated on it. Achieved by letting the latent "window" extend beyond the
    edges of the sequence and then cropping/masking any invalid positions.
    Advantage: Every position is equally likely to be trained.
    Disadvantage: Loss over fewer positions, wasted compute. For example, with
    a sequence length of 8192 and 2048 latent positions, each training batch
    will be only 80% non-padding tokens.
  """
  NONE = 1  # Cropping (if any) already applied upstream in the data pipeline.
  FULL_LATENTS = 2  # Maximize positions with loss; may bias sequence edges.
  FULL_LATENTS_WITH_PREFIX = 3  # Like FULL_LATENTS; matches prefix inference.
  EQUAL_POSITION_LIKELIHOOD = 4  # Uniform per-position training likelihood.
def crop_train_batch(
    rng: Optional[jax.random.KeyArray],
    batch: Mapping[str, jnp.ndarray],
    cropping_method: CroppingMethod,
    num_latents: int,
) -> Mapping[str, jnp.ndarray]:
  """Applies random cropping to a training batch.

  Perceiver AR can consume a longer input sequence than it has latents (and
  therefore output positions for loss). To train the model to generate with a
  variety of context lengths, the input sequence is randomly cropped.

  Args:
    rng: PRNG key used to sample the crop placement.
    batch: T5X training batch.
    cropping_method: Which `CroppingMethod` to apply.
    num_latents: Number of latents in the Perceiver AR model.

  Returns:
    A cropped batch (unchanged if `cropping_method` is NONE).

  Raises:
    ValueError: If `cropping_method` is not a known method.
  """
  logging.info('Using cropping method "%s".', cropping_method)
  if cropping_method == CroppingMethod.NONE:
    return batch

  loss_mask = batch['decoder_loss_weights'] == 1
  seq_len = batch['decoder_loss_weights'].shape[-1]
  # First/last position (per example) where loss is calculated.
  loss_begin = jnp.argmax(loss_mask, axis=-1)
  loss_end = seq_len - 1 - jnp.argmax(jnp.flip(loss_mask, axis=-1), axis=-1)

  if cropping_method == CroppingMethod.FULL_LATENTS:
    # "Naive" crop selection: always results in a full batch.
    earliest_start = loss_begin
    latest_start = loss_end - num_latents + 1
  elif cropping_method == CroppingMethod.FULL_LATENTS_WITH_PREFIX:
    # FULL_LATENTS, but allows including all but 1 latent in the prefix.
    earliest_start = jnp.maximum(loss_begin - num_latents + 1, 0)
    latest_start = loss_end - num_latents + 1
  elif cropping_method == CroppingMethod.EQUAL_POSITION_LIKELIHOOD:
    # "Fair" crop selection: all positions equally likely.
    earliest_start = loss_begin - num_latents + 1
    latest_start = loss_end
  else:
    raise ValueError(f'Unknown cropping method: {cropping_method}')

  batch_size = batch['decoder_loss_weights'].shape[0]
  # Sample (possibly out-of-range) window starts, then clamp to the sequence.
  window_first = jax.random.randint(rng, [batch_size], earliest_start,
                                    latest_start + 1)
  crop_end = jnp.minimum(window_first + num_latents, loss_end + 1)
  crop_start = jnp.maximum(window_first, 0)

  batch = jax.tree_map(
      functools.partial(_crop_sequences, lengths=crop_end), batch)

  # Also mask out loss before crop_start so that every token position has an
  # equal likelihood of being counted in the loss. E.g. for a crop of
  # [8000:8192] over length 8192 with 2048 latents covering [6144:8192], loss
  # is only calculated on [8000:8192].
  position = jnp.arange(seq_len)[jnp.newaxis, :]
  batch['decoder_loss_weights'] = jnp.where(
      position >= crop_start[:, jnp.newaxis], batch['decoder_loss_weights'], 0)
  return batch
class PerceiverARModel(models.DecoderOnlyModel):
  """Model class for Perceiver AR decoder-only model.

  Implements Perceiver AR as described in https://arxiv.org/abs/2202.07765.
  Decouples input length from most of the compute requirements by utilizing
  an initial cross-attention layer over the inputs to a smaller number of
  latents for processing with the self-attention stack.
  """

  def __init__(
      self,
      module: nn.Module,
      vocabulary: seqio.Vocabulary,
      optimizer_def: optimizers.OptimizerDefType,
      num_latents: int,
      decoding_latent_reset_fill: Optional[int] = None,
      disable_fast_decoding_cache: bool = False,
      decode_fn: models.DecodeFnCallable = decoding.temperature_sample,
      inputs_bidirectional_attention: bool = False,
      feature_converter_cls: Optional[Callable[...,
                                               seqio.FeatureConverter]] = None,
      label_smoothing: float = 0.0,
      z_loss: float = 0.0,
      loss_normalizing_factor: Optional[float] = None,
      train_cropping_method: CroppingMethod = CroppingMethod.FULL_LATENTS,
  ):
    # Number of latent positions; also the number of output positions produced
    # by a single forward pass (see the assertion in loss_fn).
    self._num_latents = num_latents
    # If True, every decode step runs a full forward pass ("reset step").
    self._disable_fast_decoding_cache = disable_fast_decoding_cache
    # May be None; see get_decoding_latent_reset_fill for defaulting logic.
    self._configured_decoding_latent_reset_fill = decoding_latent_reset_fill
    self._train_cropping_method = train_cropping_method
    super().__init__(
        module=module,
        vocabulary=vocabulary,
        optimizer_def=optimizer_def,
        decode_fn=decode_fn,
        inputs_bidirectional_attention=inputs_bidirectional_attention,
        feature_converter_cls=feature_converter_cls,
        label_smoothing=label_smoothing,
        z_loss=z_loss,
        loss_normalizing_factor=loss_normalizing_factor,
    )

  def get_decoding_latent_reset_fill(self, input_length: int) -> int:
    """Returns how many latent positions to refill after a decoding cache reset.

    Args:
      input_length: Total decoding sequence length.

    Returns:
      Number of latent positions (in (0, num_latents]) to fill during a cache
      reset step; the remaining latent positions are left free for subsequent
      single-token decode steps.

    Raises:
      ValueError: If the resulting fill is <= 0 or > num_latents.
    """
    if self._configured_decoding_latent_reset_fill is not None:
      decoding_latent_reset_fill = self._configured_decoding_latent_reset_fill
    else:
      # If not specified, use some reasonable defaults that try to pick a good
      # balance between using as many latents as possible (more "compute" per
      # predicted token) and doing as few reset steps as possible (full forward
      # passes that are more expensive).
      # For large numbers of latents, fill all but the final 128 positions.
      # Example: 2048 latents, 1920 reset fill.
      # For small numbers of latents, just do half.
      decoding_latent_reset_fill = max(self._num_latents - 128,
                                       self._num_latents // 2, 1)
    # For shorter sequences, make sure we use the largest fill possible.
    # For example, if there are 2048 latents, the default reset fill from above
    # would be 1920. But if the sequence length is 2049, then we'll have to do
    # 1 reset step, so we might as well use the full 2048 latents and get the
    # maximum "compute" available.
    decoding_latent_reset_fill = max(
        decoding_latent_reset_fill,
        self._num_latents - max(0, input_length - self._num_latents - 1))
    if decoding_latent_reset_fill <= 0:
      raise ValueError(f'decoding_latent_reset_fill must be > 0, but got '
                       f'{decoding_latent_reset_fill}')
    if decoding_latent_reset_fill > self._num_latents:
      raise ValueError(
          f'decoding_latent_reset_fill must be <= num_latents '
          f'({self._num_latents}), but got {decoding_latent_reset_fill}')
    logging.info(
        'decoding_latent_reset_fill: for configured fill %r, num_latents %d, '
        'and input length %d, using fill of %d.',
        self._configured_decoding_latent_reset_fill, self._num_latents,
        input_length, decoding_latent_reset_fill)
    return decoding_latent_reset_fill

  def eval_fn(
      self,
      params: PyTree,
      batch: Mapping[str, jnp.ndarray],
  ) -> Tuple[jnp.ndarray, models.MetricsMap]:
    """Computes loss and metrics during the evaluation.

    Args:
      params: model parameters.
      batch: a batch of inputs.

    Returns:
      loss: the loss computed for the given inputs and parameters.
      aux:
        weight_sum: sum of the per-token weights applied to the loss.
        metrics: a mapping of metrics computed for this batch.
    """
    # dropout_rng=None signals eval mode to loss_fn (no dropout, no cropping).
    return self.loss_fn(
        params=params,
        batch=batch,
        dropout_rng=None,
        is_eval=True,
    )

  def loss_fn(
      self,
      params: PyTree,
      batch: Mapping[str, jnp.ndarray],
      dropout_rng: Optional[jax.random.KeyArray],
      is_eval: bool = False,
  ) -> Tuple[jnp.ndarray, models.MetricsMap]:
    """Loss function used for training with a cross-entropy loss."""
    if dropout_rng is None:
      # TODO: Add RNG ability to T5X during eval.
      # TODO: In addition to random crops during eval, perhaps also take
      # decoding_latent_reset_fill into account and only report eval loss on
      # the final positions since this will more closely match what will
      # happen during inference and scoring.
      if is_eval:
        logging.info(
            'Eval loss_fn: no RNG key present, so cropping method of "%s" '
            'will not occur.', self._train_cropping_method)
      else:
        raise ValueError('Required dropout_rng was not supplied.')
    else:
      crop_train_rng, dropout_rng = jax.random.split(dropout_rng)
      batch = crop_train_batch(
          crop_train_rng,
          batch=batch,
          cropping_method=self._train_cropping_method,
          num_latents=self._num_latents)
    logits = self._compute_logits(params, batch, dropout_rng)
    sequence_lengths = slicing.get_sequence_lengths(
        batch['decoder_target_tokens'])
    # The model emits exactly one logit row per latent position.
    assert self._num_latents == logits.shape[-2]
    # Slice targets/weights down to the num_latents positions the latents
    # cover (the final positions of each sequence) so they align with logits.
    targets = slicing.slice_sequences_vmap(
        batch['decoder_target_tokens'],
        sequence_lengths=sequence_lengths,
        num_latents=self._num_latents,
        axis_within_vmap=-1)
    weights = slicing.slice_sequences_vmap(
        batch['decoder_loss_weights'],
        sequence_lengths=sequence_lengths,
        num_latents=self._num_latents,
        axis_within_vmap=-1)
    loss_normalizing_factor, weights = losses.get_loss_normalizing_factor_and_weights(
        self._loss_normalizing_factor,
        batch={
            'decoder_target_tokens': targets,
            'decoder_loss_weights': weights
        })
    loss, z_loss, _ = losses.compute_weighted_cross_entropy(
        logits,
        targets=targets,
        weights=weights,
        label_smoothing=self._label_smoothing,
        z_loss=self._z_loss,
        loss_normalizing_factor=loss_normalizing_factor)
    metrics = self._compute_metrics(
        logits=logits, targets=targets, mask=weights, loss=loss, z_loss=z_loss)
    return loss, metrics

  def _compute_logits_from_slice(
      self,
      decoding_state: decoding.DecodingState,
      params: PyTree,
      decoder_causal_attention: jnp.ndarray,
      max_decode_length: int,
  ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:
    """Token slice to logits from decoder model.

    Returns next-token logits of shape [batch, vocab] and the updated cache.
    Chooses between a cheap single-token `regular_step` and a full-forward-pass
    `reset_step` depending on whether the self-attention KV cache is full.
    """
    # Implement a cache reset step as described in Appendix E.3 of the Perceiver
    # AR paper (https://arxiv.org/pdf/2202.07765.pdf)
    decoding_latent_reset_fill = self.get_decoding_latent_reset_fill(
        decoding_state.sequences.shape[1])

    def get_cache_by_layers(cache):
      # Flattens the cache pytree, keeping each layer's cache dict as a leaf.
      return traverse_util.flatten_dict(
          cache, is_leaf=lambda k, x: 'cache_index' in x)

    def tree_map_self_att_cache(map_fn, cache):
      """Map a function over just the self-attention cache layers."""
      cache_by_layers = get_cache_by_layers(cache)
      new_cache_by_layers = {}
      for layer_name, layer_cache in cache_by_layers.items():
        # Only modify params that have 'layer' in the name to avoid things like
        # position encodings.
        # The first layer is cross-attention, so don't modify it.
        if 'layer' in '/'.join(layer_name) and 'layers_0' not in layer_name:
          layer_cache = jax.tree_map(map_fn, layer_cache)
        new_cache_by_layers[layer_name] = layer_cache
      return flax.core.freeze(traverse_util.unflatten_dict(new_cache_by_layers))

    def reset_step():
      assert self._num_latents >= decoding_latent_reset_fill
      # Create a version of the kv cache that has
      # decoding_latent_reset_fill positions instead of self._num_latents
      # positions.
      def prepare_reset_cache(x):
        # Modify key and value, but not index.
        if x.ndim > 1 and x.shape[-1] == self._num_latents:
          return x[..., :decoding_latent_reset_fill] * 0
        else:
          return x

      reset_cache = tree_map_self_att_cache(prepare_reset_cache,
                                            decoding_state.cache)

      # Note that it's possible to reuse the cached activations for the
      # cross-attention layer, but that would be fairly difficult to do with
      # the current cache API.

      # To ensure masking is calculated correctly, construct target_ids by
      # shifting inputs left and adding a placeholder value for the current
      # position.
      target_ids = jnp.pad(decoding_state.sequences[:, 1:], [[0, 0], [0, 1]])
      target_ids = jax.vmap(lambda x, y: x.at[y].set(1))(
          target_ids, decoding_state.cur_index)

      # Do a full forward pass of the model to predict the next tokens, filling
      # in the partial cache with the smaller number of latents as we go.
      logits, new_vars = self.module.apply(
          {
              'params': params,
              'cache': reset_cache,
          },
          decoder_input_tokens=decoding_state.sequences,
          decoder_target_tokens=target_ids,
          enable_dropout=False,
          decoder_causal_attention=decoder_causal_attention,
          decode=False,
          max_decode_length=max_decode_length,
          prefill=True,
          prefill_lengths=decoding_state.cur_index + 1,
          mutable=['cache'],
          num_latents=decoding_latent_reset_fill)

      # Now expand the kv cache size back to self._num_latents.
      def expand_reset_cache(x):
        # Modify key and value, but not index.
        if x.ndim > 1 and x.shape[-1] == decoding_latent_reset_fill:
          padding = [(0, 0)] * x.ndim
          padding[-1] = (0, self._num_latents - decoding_latent_reset_fill)
          return jnp.pad(x, padding)
        else:
          return x

      new_cache = tree_map_self_att_cache(expand_reset_cache, new_vars['cache'])

      # Select the logits row for the current position (clamped to the number
      # of rows the model actually produced).
      logits_idx = jnp.minimum(logits.shape[-2] - 1, decoding_state.cur_index)
      flat_logits = jax.vmap(
          functools.partial(lax.dynamic_slice_in_dim, slice_size=1,
                            axis=-2))(logits, logits_idx)
      return flat_logits, new_cache

    def regular_step():
      # Fast path: feed only the current token with decode=True.
      flat_logits, new_vars = self.module.apply(
          {
              'params': params,
              'cache': decoding_state.cache
          },
          decoding_state.cur_token,
          decoding_state.cur_token,
          enable_dropout=False,
          decode=True,
          max_decode_length=max_decode_length,
          mutable=['cache'])
      return flat_logits, new_vars['cache']

    # Determine if a reset step is needed based on whether the kv cache in
    # a self-attention layer is full.
    needs_reset = False
    for layer_name, layer_cache in get_cache_by_layers(
        decoding_state.cache).items():
      # Ignore non-layer parameters like position encodings.
      if 'layer' not in '/'.join(layer_name):
        continue
      # Ignore the cross-attention layer since it never gets "full".
      if 'layers_0' in layer_name:
        continue
      needs_reset |= (layer_cache['cache_index'] >=
                      layer_cache['cached_key'].shape[-1]).any()

    if self._disable_fast_decoding_cache:
      logging.info(
          'Fast decoding is disabled, always using reset steps with a latent'
          'fill of %d positions', decoding_latent_reset_fill)
      flat_logits, new_flat_cache = reset_step()
    elif decoding_state.sequences.shape[-1] > self._num_latents:
      logging.info('Using a reset step latent fill of %d positions',
                   decoding_latent_reset_fill)
      flat_logits, new_flat_cache = lax.cond(needs_reset, reset_step,
                                             regular_step)
    elif decoding_state.sequences.shape[-1] == self._num_latents:
      # If num_latents is the same as sequence length, there's no need to
      # use or compile reset_step.
      logging.info('Using regular decoding without any reset steps.')
      flat_logits, new_flat_cache = regular_step()
    else:
      raise ValueError(
          f'Sequence length ({decoding_state.sequences.shape[-1]}) < '
          f'num_latents ({self._num_latents}) is not currently supported.')

    # Remove sequence length dimension since it's always 1 during decoding.
    flat_logits = jnp.squeeze(flat_logits, axis=1)

    return flat_logits, new_flat_cache

  def predict_batch_with_aux(
      self,
      params: PyTree,
      batch: Mapping[str, jnp.ndarray],
      rng: Optional[jax.random.KeyArray] = None,
      *,
      return_all_decodes: bool = False,
      num_decodes: int = 1,
      decoder_params: Optional[MutableMapping[str, Any]] = None,
  ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:
    """Predict with prefix.

    Mostly copied from DecoderOnlyModel with minor modifications for preparing
    the tokens_ids_to_logits function.

    `decoder_params` can be used to pass dynamic configurations to
    `self.decode_fn`. An example usage is to pass different random seed (i.e.,
    `jax.random.PRNGKey(seed)` with different `seed` value). This can be done by
    setting `decoder_params['decode_rng'] = jax.random.PRNGKey(seed)`.

    Although this method is short, there are a few subtle points worth noting.
    We use a running example to make these points clear.

    ```
    Example
      inputs = [9, 4, 6, 1]
      targets = [3, 9, 1]

      seqio.DecoderFeatureConverter will generate these set of features

         decoder_target_tokens = [9, 4, 6, 1, 3, 9, 1, 0, 0]
          decoder_input_tokens = [0, 9, 4, 6, 1, 3, 9, 1, 0]
      decoder_causal_attention = [1, 1, 1, 1, 1, 0, 0, 0, 0]

      The output of this function is (`a` through `e` are the sampled token
      ids):

             sampled_sequences = [9, 4, 6, 1, a, b, c, d, e].
    ```

    Given this set of features, we make a few important observations.

    1) When a decoder-only model is used for a supervised learning with "inputs"
       and "targets", one way to handle this is to concatenate the "inputs" and
       "targets". For training, we use teacher forcing for the entire
       concatenated sequence. For inference, on the other hand, we don't have
       the targets. This requires that we use teacher forcing on the "inputs"
       portion while using the generated token as the input token for the next
       decoding step. For evaluation, we do have "targets" but we only want to
       use them for computing metrics, i.e., by comparing to the sequence
       generated by the model.

       This function is currently used for evaluation mode, but by ignoring
       "targets", it can be extended for the inference mode.

    2) During evaluation mode, the targets portion is zeroed out and they are
       filled with the sampled token ids. The inputs portion is kept intact.

    3) Note that `decoder_causal_attention` has an additional 1 after the final
       "inputs" token. This is because the position where the last "inputs"
       token (in this case 1) is input and the output is the first "target"
       token (in this case 3) can be included in the non-causal attention
       region.

       This results in an alignment between `decoder_input_tokens` and
       `decoder_causal_attention` because the former is shifted to the right by
       one position. So we use `decoder_causal_attention` as a binary mask to
       zero out the target tokens in `decoder_input_tokens`.

    Note:
      In order to use a custom self._decode_fn with this model it must support:

      1) Decoding from a partially decoded state by accepting a vector of
         `initial_indices` that specify where in the input to start decoding
         from.
      2) Using a vector as the loop counter to support different examples being
         a different number of steps into their decoding loop.
      3) Be able to handle one batch element reaching `max_decode_length`
         before the others without it causing the model to prematurely stop
         decoding.

    Args:
      params: Model parameters.
      batch: Batch element with the model features specified in
        seqio.DecoderFeatureConverter.
      rng: An optional RNG key to use during prediction, which is passed as
        'decode_rng' to the decoding function.
      return_all_decodes: If True, will return all batch_size * num_decodes
        samples from the model as an array of shape [batch_size, num_decodes,
        sequence_length]. Otherwise returns only the most likely samples as an
        array of shape [batch_size, sequence_length].
      num_decodes: Number of decoded sequences to be returned.
      decoder_params: Additional (model-independent) parameters for the decoder.

    Returns:
      Sampled sequences, an array of shape [batch, max_decode_length].
    """
    if 'decoder_causal_attention' not in batch:
      raise ValueError(
          'Batch does not have the right format for text generation: probably '
          'because `task_feature_lengths` passed to the feature converter does '
          'not have both `inputs` and `targets`.'
      )

    # since decoder_input_tokens is shifted to the right and
    # `decoder_causal_attention` has one more 1 than the number of inputs
    # tokens, this masks out targets portion of the decoder_input_tokens.
    inputs = batch['decoder_input_tokens'] * batch['decoder_causal_attention']

    # TODO: Minor decoding performance improvement: Ideally
    # _compute_kv_cache would prefill the cache with enough space left over to
    # not immediately trigger a cache reset step if the sequence length is
    # already longer than self._num_latents.
    prefilled_cache, initial_index = self._compute_kv_cache(
        params, inputs, batch['decoder_causal_attention']
    )

    target_shape = batch['decoder_input_tokens'].shape
    max_decode_length = target_shape[1]

    # Note that the version of decoder_causal_attention to be passed to the
    # model during inference needs to be calculated by
    # _get_decoder_causal_attention, which will correctly set it to None if
    # inputs_bidirectional_attention is False.
    tokens_ids_to_logits = functools.partial(
        self._compute_logits_from_slice,
        params=params,
        decoder_causal_attention=self._get_decoder_causal_attention(batch),
        max_decode_length=max_decode_length)

    if decoder_params is None:
      decoder_params = {}
    if rng is not None:
      if decoder_params.get('decode_rng') is not None:
        raise ValueError(
            f'Got RNG both from the `rng` argument ({rng}) and '
            f"`decoder_params['decode_rng']` ({decoder_params['decode_rng']}). "
            'Please specify one or the other.')
      decoder_params['decode_rng'] = rng

    # Using the above-defined single-step decoder function, run temperature
    # sampling with the prefix.
    # [batch, max_decode_length]
    scanned = hasattr(self.module, 'scan_layers') and self.module.scan_layers

    if 'eos_id' not in decoder_params:
      decoder_params['eos_id'] = self.output_vocabulary.eos_id
    decoded_sequences, scores = self._decode_fn(
        inputs=inputs,
        cache=prefilled_cache,
        tokens_to_logits=tokens_ids_to_logits,
        num_decodes=num_decodes,
        initial_index=initial_index,
        cache_offset=1 if scanned else 0,
        **decoder_params,
    )

    if not return_all_decodes:
      # Search returns [n_batch, n_beam/decodes, n_length] with the beam/decode
      # dimension sorted in increasing order of log-probability.
      # `scores` is [batch, beam/decode_size]
      # We take the highest scoring sequence (-1) and its score
      decoded_sequences = decoded_sequences[:, -1, :]
      # `scores` is likewise sorted, so index -1 is the best score.
      aux = {'scores': scores[:, -1]}
    else:
      # We return all samples and scores, rather than just the top ones.
      aux = {'scores': scores}

    return models.remove_prefix(decoded_sequences, initial_index), aux

  def score_batch(
      self,
      params: PyTree,
      batch: Mapping[str, jnp.ndarray],
      return_intermediates: bool = False,
  ) -> jnp.ndarray:
    """Compute log likelihood score on a batch.

    Perceiver AR will return only num_latents outputs for a given forward pass,
    but we want scores for all inputs positions. This method does multiple
    forward passes, striding over the input as determined by
    decoding_latent_reset_fill. The results are combined into a single logits
    array and summed for the final score.

    Args:
      params: Model params.
      batch: Batch to score.
      return_intermediates: Whether to return model intermediates. Not currently
        supported for Perceiver AR.

    Returns:
      Sequence scores with shape [batch].
    """
    if return_intermediates:
      raise NotImplementedError('return_intermediates is not yet supported.')

    decoder_target_tokens = batch['decoder_target_tokens']
    weights = batch['decoder_loss_weights']
    input_length = decoder_target_tokens.shape[-1]

    sequence_lengths = slicing.get_sequence_lengths(
        decoder_target_tokens=decoder_target_tokens)

    def get_token_scores(logits):
      # Per-token log likelihood, masked by the loss weights.
      return -losses.cross_entropy_with_logits(
          logits,
          common_utils.onehot(
              decoder_target_tokens, logits.shape[-1], on_value=1, off_value=0),
          z_loss=0.0)[0] * weights

    # Calculate stride given decoding_latent_reset_fill.
    # For example, if decoding_latent_reset_fill=num_latents, then in decoding
    # we would use only the final latent position to predict the next token.
    # The equivalent behavior here is a stride of 1.
    decoding_latent_reset_fill = self.get_decoding_latent_reset_fill(
        input_length)
    stride = self._num_latents - decoding_latent_reset_fill + 1
    logging.info(
        'decoding_latent_reset_fill is %d and num_latents is %d, so using a '
        'stride of %d for scoring.', decoding_latent_reset_fill,
        self._num_latents, stride)

    # Loop forward using strides.
    def body(state):
      slice_end = jnp.maximum(state['slice_end'] + stride, self._num_latents)
      slice_end = jnp.minimum(slice_end, sequence_lengths)

      loop_batch = jax.tree_map(
          functools.partial(_crop_sequences, lengths=slice_end), batch)
      loop_logits = self._compute_logits(
          params=params, batch=loop_batch, dropout_rng=None)
      # Pad logits out to the full input length, then roll so each example's
      # num_latents logits land at their absolute sequence positions.
      loop_logits = jnp.pad(loop_logits, [(0, 0),
                                          (0, input_length - self._num_latents),
                                          (0, 0)])
      loop_logits_shift = jnp.maximum(slice_end - self._num_latents, 0)
      loop_logits = jax.vmap(functools.partial(jnp.roll,
                                               axis=0))(loop_logits,
                                                        loop_logits_shift)
      if 'logits' not in state:
        # Should happen only during the initialization pass so we can get the
        # dtype and vocabulary dimension from the actual model outputs.
        # During the lax.while_loop, we can't modify this.
        state['logits'] = jnp.zeros_like(loop_logits)

      # Keep previously computed logits; only fill positions >= slice_end.
      logits = jnp.where(
          jnp.arange(input_length)[jnp.newaxis, :, jnp.newaxis] >=
          state['slice_end'][:, jnp.newaxis, jnp.newaxis], loop_logits,
          state['logits'])

      new_state = {
          'logits': logits,
          'slice_end': slice_end,
      }
      return new_state

    def cond(state):
      done = state['slice_end'] >= sequence_lengths
      return jnp.any(~done)

    # Start where loss starts to be calculated.
    slice_end = jnp.argmax(weights > 0, axis=1)
    init_state = {
        'slice_end': slice_end,
    }
    # Run the first iteration outside the while_loop to initialize state dict
    # with logits that match the shape/dtype of the actual model outputs.
    init_state = body(init_state)
    final_state = lax.while_loop(
        cond_fun=cond, body_fun=body, init_val=init_state)

    logits = final_state['logits']

    token_scores = get_token_scores(logits)
    sequence_scores = token_scores.sum(-1)

    return sequence_scores
| 31,087 | 40.176159 | 86 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/perceiver_ar_architecture_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perceiver_ar_architecture."""
from absl.testing import absltest
from jax import random
import numpy as np
from flaxformer import testing_utils
from flaxformer.architectures.perceiver_ar import perceiver_ar_architecture_test_utils as perceiver_ar_test_utils
# Golden parameter-shape JSON files stored alongside this test.
expected_files = testing_utils.ExpectedJsonFiles(
    'flaxformer/architectures/perceiver_ar/'
    'testdata')
# Shape-only comparison against the golden files (values are not compared).
check_params = expected_files.check_params_shapes_only
class DecoderOnlyTest(absltest.TestCase):
  """Shape and save-format round-trip checks for Perceiver AR decoders."""

  def _init_and_roundtrip(self, decoder, expected_json):
    """Initializes `decoder`, checks param shapes, and round-trips params.

    Initializes the decoder on a fixed 2x3 token batch, verifies the parameter
    shapes against the golden file `expected_json` and the output shape, then
    converts the parameters to the save format and back and re-runs the model.

    Returns:
      A tuple of (original output, output after the save-format round trip).
    """
    token_batch = np.array(
        [
            # Batch 1.
            [183, 20, 75],
            # Batch 2.
            [392, 19, 7],
        ],
        dtype=np.int32)
    output, variables = decoder.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=token_batch,
        decoder_target_tokens=token_batch,  # used for mask generation
        enable_dropout=False,
    )
    reformatted = decoder.apply({}, variables['params'],
                                method=decoder.to_save_format)
    check_params(reformatted, expected_json)
    self.assertEqual(output.shape, (2, 2, 4))

    # Convert back to Flax module structure format and test again.
    restored_params = decoder.apply({}, reformatted,
                                    method=decoder.from_save_format)
    roundtrip_output = decoder.apply(
        {'params': restored_params},
        decoder_input_tokens=token_batch,
        decoder_target_tokens=token_batch,  # used for mask generation
        enable_dropout=False,
    )
    return output, roundtrip_output

  def test_decoder_shapes_per_layer(self):
    """Tests if the decoder parameters have the expected shapes."""
    decoder = perceiver_ar_test_utils.test_make_decoder_only1(
        num_latents=2, parallel=False)
    output, roundtrip_output = self._init_and_roundtrip(
        decoder, 'decoder_shapes_per_layer.json')
    np.testing.assert_allclose(output, roundtrip_output, rtol=1e-8)

  def test_parallel_decoder_shapes_per_layer(self):
    """Tests if the parallel decoder parameters have the expected shapes."""
    decoder = perceiver_ar_test_utils.test_make_decoder_only1(
        num_latents=2, parallel=True)
    output, roundtrip_output = self._init_and_roundtrip(
        decoder, 'parallel_decoder_shapes_per_layer.json')
    np.testing.assert_allclose(output, roundtrip_output, rtol=1e-8)

  def test_decoder_shapes_fused_parallel(self):
    """Tests if the fused parallel decoder parameters have expected shapes."""
    decoder = perceiver_ar_test_utils.make_parallel_fused_transformer_config(
        num_latents=2)
    output, roundtrip_output = self._init_and_roundtrip(
        decoder, 'decoder_shapes_fused_parallel.json')
    # Outputs may be in a reduced-precision dtype; compare in float32.
    np.testing.assert_allclose(
        output.astype(np.float32),
        roundtrip_output.astype(np.float32),
        rtol=1e-8)
# Run all tests when this file is executed as a script.
if __name__ == '__main__':
  absltest.main()
| 4,857 | 34.720588 | 113 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/attention.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver AR attention utilities."""
from typing import Optional
import jax.numpy as jnp
from flaxformer.architectures.perceiver_ar import slicing
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
from flaxformer.types import DType
def make_causal_mask(x: Array,
                     sequence_lengths: Array,
                     num_latents: int,
                     dtype: DType = jnp.float32) -> Array:
  """Builds the causal self-attention mask for Perceiver AR latents.

  The self-attention weights are `[batch, heads, num_latents, len]`, so the
  mask produced here is `[batch, 1, num_latents, len]`. Only shapes and
  `sequence_lengths` matter; padding values inside `x` are not treated
  specially.

  Args:
    x: Input array of shape `[batch, len]`.
    sequence_lengths: Per-example sequence lengths of shape `[batch]`.
    num_latents: Number of Perceiver AR latents.
    dtype: dtype of the returned mask.

  Returns:
    A `[batch, 1, num_latents, len]` causal mask.

  Raises:
    ValueError: If `x` is not rank 2.
  """
  if x.ndim != 2:
    raise ValueError(
        f'Inputs must have a shape of [batch, len], but got {x.shape}')
  batch_shape = x.shape[:-1]
  # Latents cover the tail of each sequence: the first latent sits at absolute
  # position max(0, length - num_latents). Shape: [batch, 1].
  offset = jnp.expand_dims(
      jnp.maximum(0, sequence_lengths - num_latents), axis=-1)
  # Absolute positions of the latent queries, shape [batch, num_latents].
  query_positions = offset + jnp.broadcast_to(
      jnp.arange(num_latents, dtype=jnp.int32), batch_shape + (num_latents,))
  # Absolute positions of the keys, shape [batch, input_length].
  key_positions = jnp.broadcast_to(
      jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape)
  # A query may attend to a key iff query_position >= key_position.
  return dense_attention.make_attention_mask(
      query_positions, key_positions, jnp.greater_equal, dtype=dtype)
def make_decoder_mask(
    decoder_target_tokens: Array,
    sequence_lengths: Array,
    num_latents: int,
    dtype: DType,
    decoder_causal_attention: Optional[Array] = None) -> Array:
  """Computes the Perceiver AR decoder self-attention mask.

  Mirrors dense_attention.make_decoder_mask, except that query-side inputs are
  sliced down to the `num_latents` latent positions so the resulting mask has
  the `[batch, 1, num_latents, length]` shape Perceiver AR attention expects.

  Args:
    decoder_target_tokens: decoder output tokens. [batch, length]
    sequence_lengths: Input sequence lengths.
    num_latents: Number of Perceiver AR latents.
    dtype: dtype of the output mask.
    decoder_causal_attention: a binary mask indicating which position should
      only attend to earlier positions in the sequence. Others will attend
      bidirectionally. [batch, length]

  Returns:
    the combined decoder mask.
  """

  def slice_to_latents(sequences: Array) -> Array:
    # [batch, length] -> [batch, num_latents]
    return slicing.slice_sequences_vmap(
        sequences,
        sequence_lengths=sequence_lengths,
        num_latents=num_latents,
        axis_within_vmap=-1)

  # The same mask is applied to all attention heads, so the head dimension is
  # 1 and broadcasts. [batch, 1, num_latents, length]
  attention_mask = make_causal_mask(
      decoder_target_tokens,
      num_latents=num_latents,
      sequence_lengths=sequence_lengths,
      dtype=dtype)

  # Positions marked 1 in `decoder_causal_attention` may attend
  # bidirectionally; union that region with the causal mask.
  if decoder_causal_attention is not None:
    # [batch, 1, num_latents, length]
    bidirectional_mask = dense_attention.make_attention_mask(
        query_input=slice_to_latents(decoder_causal_attention),
        key_input=decoder_causal_attention,
        pairwise_fn=jnp.logical_and,
        dtype=dtype)
    attention_mask = jnp.logical_or(attention_mask,
                                    bidirectional_mask).astype(dtype)

  # Padding mask: token id 0 is padding on both the query and key sides.
  padding_mask = dense_attention.make_attention_mask(
      query_input=slice_to_latents(decoder_target_tokens) > 0,
      key_input=decoder_target_tokens > 0,
      dtype=dtype)

  return dense_attention.combine_masks(attention_mask, padding_mask,
                                       dtype=dtype)
| 4,788 | 34.474074 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/decoder_layer.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains "architecture" classes for T5 models.
These are combinators which assemble components (LayerNorm, MLP, etc.) into
networks.
"""
from __future__ import annotations
from typing import Callable, Optional, Tuple
from flax import linen as nn
import jax.numpy as jnp
from flaxformer import activation_partitioning
from flaxformer.architectures.common import param_remapping
from flaxformer.architectures.perceiver_ar import perceiver_ar_architecture
from flaxformer.components import relative_position_biases
from flaxformer.components import rich_attention_position_scores
from flaxformer.types import Array
# pylint: disable=not-callable
# pytype: disable=not-callable
class DecoderLayer(nn.Module, param_remapping.ParameterRemappable):
  """Transformer decoder layer.

  Forked from the original to support Perceiver AR slicing: the residual
  stream and attention queries are reduced to `num_latents` positions via
  `perceiver_ar_architecture.create_residuals_and_queries`, while keys/values
  still span the full input length.

  Attributes:
    self_attention: An instance of a self-attention module.
    encoder_decoder_attention: Encoder-decoder attention module. This must be
      non-None if attending to encoded representations.
    mlp: The MLP module, applied after both attention modules.
    dropout_factory: A callable that returns a new dropout instance. This is
      applied after the attention module.
    layer_norm_factory: A callable that returns a new layer norm. This is
      applied before the attention module and before the MLP.
    relative_position_bias_factory: A callable that returns relative position
      bias instances. This should only be used for per-layer relative position
      biases; please use `shared_relative_position_bias` if they are shared
      among layers.
    shared_relative_position_bias: An instance of a shared relative position
      bias module, usually owned by the Decoder.
    activation_partitioning_dims: When set to 2, partitions intermediate
      variables containing the input and output of the decoder layer.
    parallel: whether to call attention and mlp in parallel
    sow_intermediates: whether to track intermediates using Module.sow.
    scanned: whether this layer runs under `nn.scan`, which requires the
      (carry, out) return signature.
    num_latents: Number of latents and outputs.
  """
  self_attention: nn.Module
  encoder_decoder_attention: Optional[nn.Module]
  mlp: nn.Module
  dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  relative_position_bias_factory: Optional[Callable[[], nn.Module]] = None
  shared_relative_position_bias: Optional[nn.Module] = None
  activation_partitioning_dims: int = 1
  parallel: bool = False
  sow_intermediates: bool = False
  scanned: bool = False
  # num_latents is actually required, but has to be marked as optional because
  # we don't yet require Python 3.10, which provides keyword-only dataclasses.
  num_latents: Optional[int] = None

  def setup(self):
    """Validates config and instantiates submodules from the factories."""
    if self.num_latents is None:
      raise ValueError('num_latents must be specified.')

    if (self.relative_position_bias_factory is not None and
        self.shared_relative_position_bias is not None):
      raise ValueError(
          'Please set at most one of relative_position_bias_factory and shared_relative_position_bias. '
          '(They can both be None however, e.g. for absolute position embeds.)')
    self.relpos_bias = (
        self.relative_position_bias_factory()
        if self.relative_position_bias_factory is not None else
        self.shared_relative_position_bias)

    # TODO: Support relative position bias.
    # Note: this means self.relpos_bias is always None past this point, so the
    # relpos branches in get_bias/__call__ are currently never taken.
    if self.relpos_bias is not None:
      raise NotImplementedError(
          'Relative position bias support not yet implemented for Perceiver AR.'
      )

    if self.parallel:
      # Parallel formulation shares one pre-layer-norm and one dropout across
      # attention and MLP.
      self.layer_norm = self.layer_norm_factory()
      self.dropout = self.dropout_factory()
    else:
      # Serial formulation: dedicated layer norm + dropout around each of the
      # self-attention, cross-attention, and MLP sub-blocks.
      self.pre_self_attention_layer_norm = self.layer_norm_factory()
      self.post_self_attention_dropout = self.dropout_factory()
      self.pre_cross_attention_layer_norm = self.layer_norm_factory()
      self.post_cross_attention_dropout = self.dropout_factory()
      self.pre_mlp_layer_norm = self.layer_norm_factory()
      self.post_mlp_dropout = self.dropout_factory()

  def get_bias(self, max_decode_length: Optional[int], decode: bool,
               layer_input: Array,
               encoded: Array) -> Tuple[Optional[Array], Optional[Array]]:
    """Computes (decoder_bias, encoder_decoder_bias) from the relpos module.

    Both results are None when no relative position bias module is configured
    (always the case today; see setup's NotImplementedError).
    """
    decoder_bias = None
    encoder_decoder_bias = None
    if self.relpos_bias:
      if isinstance(self.relpos_bias,
                    relative_position_biases.RelativeAttentionAPI):
        if max_decode_length:
          relpos_length = max_decode_length
        else:
          relpos_length = layer_input.shape[-2]

        # during decoding, the layer will be called with decode=True first to
        # initialize the decoder cache, including a cached relpos bias cache.
        # the prefill codepath will call this once again with decode=False,
        # which is slightly wasteful but generally harmless. During subsequent
        # decode steps, this will be called with decode=True and will reuse the
        # cached bias. this significantly improves performance during decoding
        # with many decode steps.
        decoder_bias = self.relpos_bias(
            relpos_length, relpos_length, False, decode=decode)
      elif isinstance(self.relpos_bias,
                      rich_attention_position_scores.RichAttentionApi):
        decoder_bias = self.relpos_bias(
            layer_input,
            layer_input,
            bidirectional=False,
            is_cross_attention=False)
        encoder_decoder_bias = self.relpos_bias(
            layer_input, encoded, bidirectional=False, is_cross_attention=True)
      else:
        raise TypeError(
            f'{type(self.relpos_bias)} is not a supported relative position '
            f'bias factory.\nInstance value: {self.relpos_bias}')
    return decoder_bias, encoder_decoder_bias

  def __call__(self,
               targets,
               encoded,
               decoder_mask=None,
               encoder_decoder_mask=None,
               *,
               logit_mask=None,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None,
               num_latents: Optional[int] = None,
               sequence_lengths: Optional[Array] = None) -> Array:
    """Applies EncoderDecoder1DBlock module.

    Args:
      targets: Input data for decoder with shape [batch_size,
        decoder_seq_length, decoder_hidden_size].
      encoded: Input data from encoder with shape [batch_size,
        encoder_seq_length, decoder_hidden_size]. If None, block is Decoder
        only.
      decoder_mask: decoder self-attention mask.
      encoder_decoder_mask: encoder-decoder attention mask with shape [
        batch_size, 1, decoder_seq_length, encoder_seq_length].
      logit_mask: a mask (e.g., padding logit mask) to be applied to the
        attention logits.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      num_latents: Used to override the number of output Perceiver AR latents
        during decoding.
      sequence_lengths: Lengths of all target sequences. Required for Perceiver
        AR operation.

    Returns:
      output after transformer encoder-decoder block.
    """
    if num_latents and num_latents > self.num_latents:
      raise ValueError(
          f'Overridden num_latents ({num_latents}) must be <= self.num_latents '
          f'({self.num_latents}).')
    num_latents = num_latents or self.num_latents

    layer_input = targets
    del targets

    # Decoder block.
    assert layer_input.ndim == 3
    layer_input = activation_partitioning.with_sharding_migration(
        layer_input,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))

    if prefill and prefill_lengths is None:
      # Figure out how far each element in the batch fills the cache based
      # on the mask. We index each element in the batch, the first head
      # dim (because this is always set to one), and the first query
      # vector. If there is any prefix at all, the first element in the
      # prefix would be part of it.
      prefill_lengths = jnp.sum(
          decoder_mask[:, 0, 0, :], axis=-1).astype(jnp.int32)

    if self.parallel:
      x = self.layer_norm(
          layer_input,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Slice the residual stream and queries down to the latent positions;
      # keys/values (x) keep the full input length.
      # NOTE(review): logit_mask_queries is unused in this parallel branch
      # (logit_mask is not applied here, unlike the serial branch) — confirm
      # whether that is intentional.
      layer_input_residual, x_queries, query_position_offset, logit_mask_queries = (
          perceiver_ar_architecture.create_residuals_and_queries(
              layer_input,
              x,
              logit_mask,
              num_latents=num_latents,
              sequence_lengths=sequence_lengths))

      # Shared relative position embedding attention biases.
      decoder_bias, encoder_decoder_bias = self.get_bias(
          max_decode_length, decode, layer_input=x, encoded=encoded)

      # Attention and MLP both read the same normalized input and their
      # outputs are summed (the "parallel" transformer formulation).
      y = (
          self.self_attention(
              x_queries,
              x,
              decoder_mask,
              decoder_bias,
              enable_dropout=enable_dropout,
              decode=decode,
              prefill=prefill,
              prefill_lengths=prefill_lengths,
              query_position_offset=query_position_offset) + self.mlp(
                  x_queries,
                  decode=decode,
                  prefill=prefill,
                  prefill_lengths=prefill_lengths,
                  enable_dropout=enable_dropout))
      if encoded is not None:
        y += self.encoder_decoder_attention(
            x,
            encoded,
            encoder_decoder_mask,
            encoder_decoder_bias,
            enable_dropout=enable_dropout)
      # Rescale by 1/sqrt(#summed branches) to keep variance comparable.
      y *= (3 if encoded is not None else 2)**-0.5
      z = layer_input_residual + self.dropout(
          y, deterministic=not enable_dropout)
    else:
      # layer_input is derived from decoder_input_tokens.
      x = self.pre_self_attention_layer_norm(
          layer_input,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      layer_input_residual, x_queries, query_position_offset, logit_mask_queries = (
          perceiver_ar_architecture.create_residuals_and_queries(
              layer_input,
              x,
              logit_mask,
              num_latents=num_latents,
              sequence_lengths=sequence_lengths))

      if logit_mask is not None:
        # When using QKV fusion, x and x_queries must be the exact same
        # Python object, so reuse the object if possible.
        if x is x_queries and logit_mask is logit_mask_queries:
          x = logit_mask * x
          x_queries = x
        else:
          x = logit_mask * x
          x_queries = logit_mask_queries * x_queries

      # Shared relative position embedding attention biases.
      decoder_bias, encoder_decoder_bias = self.get_bias(
          max_decode_length, decode, layer_input=x, encoded=encoded)

      # The first and second arguments to the attention are the same,
      # i.e., this is a self-attention layer.
      x = self.self_attention(
          x_queries,
          x,
          decoder_mask,
          decoder_bias,
          enable_dropout=enable_dropout,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths,
          query_position_offset=query_position_offset)
      x = layer_input_residual + self.post_self_attention_dropout(
          x, deterministic=not enable_dropout)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Encoder-Decoder block.
      if encoded is None:
        # If encoder outputs not provided, skip attending from decoder to
        # encoder. This results in a decoder only block.
        y = x
      else:
        if self.encoder_decoder_attention is None:
          raise ValueError('Expected encoder_decoder_attention to be populated '
                           'when called with `encoded` inputs.')
        y = self.pre_cross_attention_layer_norm(
            x, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths)
        y = activation_partitioning.with_sharding_migration(
            y,
            self.activation_partitioning_dims,
            logical_axis_names=('batch', 'length', 'embed'))

        if logit_mask is not None:
          y = logit_mask_queries * y

        y = self.encoder_decoder_attention(
            y,
            encoded,
            encoder_decoder_mask,
            encoder_decoder_bias,
            enable_dropout=enable_dropout)
        y = x + self.post_cross_attention_dropout(
            y, deterministic=not enable_dropout)
        y = activation_partitioning.with_sharding_migration(
            y,
            self.activation_partitioning_dims,
            logical_axis_names=('batch', 'length', 'embed'))

      # MLP block.
      z = self.pre_mlp_layer_norm(
          y, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths)
      z = activation_partitioning.with_sharding_migration(
          z,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      if logit_mask is not None:
        z = logit_mask_queries * z

      z = self.mlp(
          z,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths,
          enable_dropout=enable_dropout)
      z = y + self.post_mlp_dropout(z, deterministic=not enable_dropout)
    z = activation_partitioning.with_sharding_migration(
        z,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if self.sow_intermediates:
      self.sow('intermediates', 'activations', z)

    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    # TODO: automate this detail.
    if self.scanned:
      return z, None  # pytype: disable=bad-return-type  # jax-ndarray
    else:
      return z
| 15,474 | 39.4047 | 104 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/parallel_fused_decoder.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parallel Transformer decoder layer with fused parameters."""
import functools
from typing import Callable, Optional
from absl import logging
from aqt.jax_legacy.jax import flax_layers as aqt_flax_layers
from aqt.jax_legacy.jax import quant_config as aqt_config
from aqt.jax_legacy.jax import quantization as aqt
from flax import linen as nn
from jax import lax
import jax.numpy as jnp
from flaxformer import activation_partitioning
from flaxformer.architectures.common import param_remapping
from flaxformer.architectures.perceiver_ar import dense_attention
from flaxformer.architectures.perceiver_ar import perceiver_ar_architecture
from flaxformer.components import dense
from flaxformer.types import Array
# pylint: disable=not-callable
# pytype: disable=not-callable
class ParallelFusedDecoderLayer(nn.Module, param_remapping.ParameterRemappable):
  """Parallel Transformer decoder layer with fused parameters.

  Forked from the original to support Perceiver AR slicing. The Q projection
  is fused with the MLP input projections (q_wi_fused), K with V (kv_fused),
  and the attention output projection with the MLP output (o_wo_fused).

  Attributes:
    self_attention: An instance of a self-attention module.
    mlp: The MLP module, applied after both attention modules.
    dropout_factory: A callable that returns a new dropout instance. This is
      applied after the attention module.
    layer_norm_factory: A callable that returns a new layer norm. This is
      applied before the attention module and before the MLP.
    relative_position_bias_factory: A callable that returns relative position
      bias instances. This should only be used for per-layer relative position
      biases; please use `shared_relative_position_bias` if they are shared
      among layers.
    shared_relative_position_bias: An instance of a shared relative position
      bias module, usually owned by the Decoder.
    activation_partitioning_dims: When set to 2, partitions intermediate
      variables containing the input and output of the decoder layer.
    sow_intermediates: Whether to track intermediates using Module.sow.
    is_quant_finetune_mode: Whether the layer is loaded for quantization
      finetuning. It's only applied in the context of quantization.
    num_latents: Number of latents and outputs.
  """
  self_attention: nn.Module
  mlp: nn.Module
  dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  relative_position_bias_factory: Optional[Callable[[], nn.Module]] = None
  shared_relative_position_bias: Optional[nn.Module] = None
  activation_partitioning_dims: int = 1
  sow_intermediates: bool = False
  scanned: bool = False
  use_aqt: bool = False
  weight_params: Optional[aqt.QuantOps.WeightParams] = None
  act_params: Optional[aqt.QuantOps.ActHParams] = None
  possibly_use_quantized_vars: bool = False
  is_quant_finetune_mode: bool = False
  # num_latents is actually required, but has to be marked as optional because
  # we don't yet require Python 3.10, which provides keyword-only dataclasses.
  num_latents: Optional[int] = None

  def setup(self):
    """Validates config and builds fused-projection argument templates."""
    if self.num_latents is None:
      raise ValueError('num_latents must be specified.')

    if self.activation_partitioning_dims != 1:
      logging.warning('ParallelFusedDecoderLayer.activation_partitioning_dims '
                      'is deprecated and will soon be removed.')

    if (self.relative_position_bias_factory is not None and
        self.shared_relative_position_bias is not None):
      raise ValueError(
          'Please set at most one of relative_position_bias_factory and shared_relative_position_bias. '
          '(They can both be None however, e.g. for absolute position embeds.)')
    self.relpos_bias = (
        self.relative_position_bias_factory()
        if self.relative_position_bias_factory is not None else
        self.shared_relative_position_bias)

    # TODO: Support relative position bias.
    # Note: this means self.relpos_bias is always None past this point.
    if self.relpos_bias is not None:
      raise NotImplementedError(
          'Relative position bias support not yet implemented for Perceiver AR.'
      )

    self.layer_norm = self.layer_norm_factory()
    self.dropout = self.dropout_factory()

    # QKV fusion below assumes a single shared K/V head, so only multi-query
    # attention is supported.
    if not isinstance(self.self_attention,
                      dense_attention.MultiQueryDotProductAttention):
      raise TypeError('ParallelFusedDecoderLayer requires Multiquery '
                      'attention.')
    num_heads = self.self_attention.num_heads
    if self.self_attention.head_dim is not None:
      head_dim = self.self_attention.head_dim
    else:
      head_dim = self.self_attention.qkv_features // num_heads
    if self.self_attention.out_features is None:
      raise ValueError('ParallelFusedDecoderLayer requires self-attention'
                       'with manually specified out_features.')
    embed_dim = self.self_attention.out_features
    n_activations = len(self.mlp.activations)
    mlp_intermediate_dim = self.mlp.intermediate_dim
    if mlp_intermediate_dim % num_heads != 0:
      raise ValueError('num_heads must divide mlp intermediate dimension')
    # Per-head fused output: the MLP slice(s) plus the query head.
    fused_out_dims = (num_heads,
                      (mlp_intermediate_dim // num_heads) * n_activations +
                      head_dim)

    # TODO: move the AQT branching code complexity out to the
    # configuration system here and other places in Flaxformer.
    def make_dense(
        axis,
        features,
        use_bias,
        dtype,
        kernel_init,
        bias_init,
        reshape_kernel,
        kernel_axis_names,
        name,
    ):
      # Factory shared by all three fused projections; returns either a
      # quantized (AQT) dense or a plain DenseGeneral with equivalent config.
      if self.use_aqt:
        if self.weight_params is None and self.act_params is None:
          raise ValueError(
              'If use_aqt is True, either of weights or acts quantization need '
              'to be specified using arguments `weight_params` or `act_params`.'
          )
        aqt_context = aqt_config.DynamicContext(
            update_bounds=False, collect_acts_stats=False)
        weight_prec = self.weight_params.prec if self.weight_params else None
        half_shift = self.weight_params.half_shift if self.weight_params else False
        aqt_hparams = aqt_flax_layers.DenseAqt.HParams(
            weight_prec=weight_prec,
            weight_half_shift=half_shift,
            quant_act=self.act_params,  # currently supports fixed bounds only.
            quant_type=aqt.QuantType.AQT,
            weight_quant_granularity=aqt_config.QuantGranularity.PER_CHANNEL,
        )
        # DenseAqt has no multi-axis contraction, so the o_wo_fused case folds
        # (heads, o_wo_fused) into a single named axis.
        if kernel_axis_names == ('heads', 'o_wo_fused', 'embed'):
          assert axis == (-2, -1)
          kernel_axis_names = ('joined_o_wo_fused', 'embed')
        aqt_dense = aqt_flax_layers.DenseAqt(
            features=features,
            hparams=aqt_hparams,
            train=self.is_quant_finetune_mode,
            dynamic_context=aqt_context,
            paxis_name=None,
            # No "cross-replica" reduction expressed in the XLA graph at this
            # stage. Will be imposed later, automatically, by XLA SPMD.
            use_bias=use_bias,
            kernel_init=kernel_init,
            bias_init=bias_init,
            dtype=dtype,
            name=name,
            possibly_use_quantized_vars=self.possibly_use_quantized_vars,
            kernel_axis_names=kernel_axis_names)
        # we do not have reshape kernel option here but we explicitly
        # reshape kernel.
        return functools.partial(aqt_dense, padding_mask=None)
      else:
        return dense.DenseGeneral(
            axis=axis,
            features=features,
            use_bias=use_bias,
            dtype=dtype,
            kernel_init=kernel_init,
            bias_init=bias_init,
            reshape_kernel=reshape_kernel,
            name=name,
            kernel_axis_names=kernel_axis_names)

    self.make_dense = make_dense

    # Argument templates for the three fused projections; instantiated lazily
    # inside __call__ via self.make_dense(**args).
    self.q_wi_fused_args = dict(
        axis=-1,
        features=fused_out_dims,
        use_bias=self.self_attention.use_bias,
        dtype=self.self_attention.dtype,
        kernel_init=self.self_attention.kernel_init,
        bias_init=self.self_attention.bias_init,
        reshape_kernel=False,
        name='q_wi_fused',
        kernel_axis_names=('embed', 'heads', 'q_wi_fused'))
    self.kv_fused_args = dict(
        axis=-1,
        features=(1, 2 * head_dim),
        use_bias=self.self_attention.use_bias,
        dtype=self.self_attention.dtype,
        kernel_init=self.self_attention.kernel_init,
        bias_init=self.self_attention.bias_init,
        reshape_kernel=False,
        name='kv_fused',
        kernel_axis_names=('embed', 'multiquery_heads', 'kv_fused'))
    self.o_wo_fused_args = dict(
        axis=(-2, -1),
        features=embed_dim,
        use_bias=self.self_attention.use_bias,
        dtype=self.self_attention.dtype,
        kernel_init=self.self_attention.kernel_init,
        bias_init=self.self_attention.bias_init,
        reshape_kernel=False,
        name='o_wo_fused',
        # o_wo_fused = mlp//heads + head_dim
        kernel_axis_names=('heads', 'o_wo_fused', 'embed'))

  @nn.compact
  def __call__(self,
               targets,
               encoded,
               decoder_mask=None,
               encoder_decoder_mask=None,
               *,
               logit_mask=None,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None,
               num_latents: Optional[int] = None,
               sequence_lengths: Optional[Array] = None) -> Array:
    """Applies ParallelFusedDecoder1DBlock module.

    Args:
      targets: Input data for decoder with shape [batch_size,
        decoder_seq_length, decoder_hidden_size].
      encoded: required to be None, block is Decoder only, only kept for
        __call__ signature uniformity.
      decoder_mask: decoder self-attention mask.
      encoder_decoder_mask: required to be None, block is Decoder only, only
        kept for __call__ signature uniformity.
      logit_mask: a mask (e.g., padding logit mask) to be applied to the
        attention logits.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      num_latents: Used to override the number of output Perceiver AR latents
        during decoding.
      sequence_lengths: Lengths of all target sequences. Required for Perceiver
        AR operation.

    Returns:
      output after transformer encoder-decoder block.
    """
    if num_latents and num_latents > self.num_latents:
      raise ValueError(
          f'Overridden num_latents ({num_latents}) must be <= self.num_latents '
          f'({self.num_latents}).')
    num_latents = num_latents or self.num_latents

    assert encoded is None, 'only pure decoder layer supported.'
    assert encoder_decoder_mask is None, 'only pure decoder layer supported.'
    layer_input = targets
    del targets

    # Shared relative position embedding attention biases.
    # (self.relpos_bias is currently always None; see setup.)
    if self.relpos_bias:
      if decode and max_decode_length:
        decoder_bias = self.relpos_bias(max_decode_length, max_decode_length,
                                        False)
      else:
        decoder_bias = self.relpos_bias(layer_input.shape[-2],
                                        layer_input.shape[-2], False)
    else:
      decoder_bias = None

    # Decoder block.
    assert layer_input.ndim == 3
    layer_input = activation_partitioning.with_sharding_migration(
        layer_input,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if prefill and prefill_lengths is None:
      # Figure out how far each element in the batch fills the cache based
      # on the mask. We index each element in the batch, the first head
      # dim (because this is always set to one), and the first query
      # vector. If there is any prefix at all, the first element in the
      # prefix would be part of it.
      prefill_lengths = jnp.sum(
          decoder_mask[:, 0, 0, :], axis=-1).astype(jnp.int32)
    x = self.layer_norm(
        layer_input,
        decode=decode,
        prefill=prefill,
        prefill_lengths=prefill_lengths)
    x = activation_partitioning.with_sharding_migration(
        x,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    num_heads = self.self_attention.num_heads
    if self.self_attention.head_dim is not None:
      head_dim = self.self_attention.head_dim
    else:
      head_dim = self.self_attention.qkv_features // num_heads
    n_activations = len(self.mlp.activations)
    mlp_intermediate_dim = self.mlp.intermediate_dim

    # Slice the residual and queries down to the latent positions; keys/values
    # (x) keep the full input length. The logit mask for queries is discarded:
    # this fused layer does not apply logit_mask to activations.
    layer_input_residual, x_queries, query_position_offset, logit_mask_queries = (
        perceiver_ar_architecture.create_residuals_and_queries(
            layer_input,
            x,
            logit_mask,
            num_latents=num_latents,
            sequence_lengths=sequence_lengths))
    del logit_mask_queries

    # Use local fused Q + W_i to calculate fused results.
    # [batch, length, embed], [heads, mlp//heads * n_act + head_dim] ->
    # [batch, length, heads, mlp//heads * n_act + head_dim]
    q_wi = self.make_dense(**self.q_wi_fused_args)(x_queries)
    # Slice out query.
    query = lax.dynamic_slice_in_dim(q_wi, 0, head_dim, -1)
    # Slice out MLP inputs.
    int_size = mlp_intermediate_dim // num_heads
    # wi[i]: [batch, length, heads, mlp//heads]
    wi = [
        lax.dynamic_slice_in_dim(q_wi, head_dim + i * int_size, int_size, -1)
        for i in range(n_activations)
    ]
    # Use local fused K + V to calculate fused results.
    kv = self.make_dense(**self.kv_fused_args)(x)
    kv = activation_partitioning.with_sharding(kv, 1)
    # Slice out key.
    key = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 0, head_dim, -1), -2)
    # Slice out value.
    value = jnp.squeeze(
        lax.dynamic_slice_in_dim(kv, head_dim, head_dim, -1), -2)
    precomputed_qkv = (query, key, value)

    # y_att: [batch, length, heads, head_dim]
    y_att = self.self_attention(
        x_queries,
        x,
        mask=decoder_mask,
        bias=decoder_bias,
        precomputed_qkv=precomputed_qkv,
        enable_dropout=enable_dropout,
        decode=decode,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
        query_position_offset=query_position_offset)
    # y_mlp: [batch, length, heads, mlp//heads]
    y_mlp = self.mlp(
        wi,
        decode=decode,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
        enable_dropout=enable_dropout)
    # y_fused: [batch, length, heads, mlp//heads + head_dim]
    y_fused = jnp.concatenate([y_att, y_mlp], axis=-1)

    # Single fused output projection maps the concatenated attention + MLP
    # activations back to the embedding dimension.
    if self.use_aqt and self.weight_params is not None:
      weight_prec = self.weight_params.prec if self.weight_params else None
      half_shift = self.weight_params.half_shift if self.weight_params else False
      aqt_hparams = aqt_flax_layers.DenseGeneralAqt.HParams(
          weight_prec=weight_prec,
          weight_half_shift=half_shift,
          quant_act=None,  # currently supports fixed bounds only.
          weight_quant_granularity=aqt_config.QuantGranularity.PER_CHANNEL,
      )
      y_out = aqt_flax_layers.DenseGeneralAqt(
          **self.o_wo_fused_args,
          hparams=aqt_hparams,
          train=self.is_quant_finetune_mode,
          possibly_use_quantized_vars=self.possibly_use_quantized_vars)(
              y_fused)
    else:
      y_out = dense.DenseGeneral(**self.o_wo_fused_args)(y_fused)
    # y *= 2**-0.5
    # NOTE(review): the 1/sqrt(2) branch rescaling used by the non-fused
    # parallel layer is intentionally commented out here — confirm.
    z = layer_input_residual + self.dropout(
        y_out, deterministic=not enable_dropout)
    z = activation_partitioning.with_sharding_migration(
        z,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if self.sow_intermediates:
      self.sow('intermediates', 'activations', z)

    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    # TODO: automate this detail.
    if self.scanned:
      return z, None  # pytype: disable=bad-return-type  # jax-ndarray
    else:
      return z
| 17,040 | 40.563415 | 104 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.