id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
184,947 | import math
import sys
from typing import Iterable
import torch
import torch.nn as nn
import utils
def train_one_epoch(model: torch.nn.Module, d_vae: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    log_writer=None, lr_scheduler=None, start_steps=None,
                    lr_schedule_values=None, wd_schedule_values=None):
    """Run one epoch of BEiT-style masked-image-modeling pre-training.

    The frozen tokenizer `d_vae` maps each image to discrete codebook indices;
    `model` predicts the indices of the masked patches and is trained with
    cross-entropy under CUDA AMP via `loss_scaler`.

    Args:
        model: vision transformer being pre-trained (predicts visual tokens).
        d_vae: frozen discrete VAE used only to produce target token ids.
        data_loader: yields ((samples, images, bool_masked_pos), _) batches.
        optimizer: optimizer whose param groups carry an "lr_scale" entry.
        device: device the batch tensors are moved to.
        epoch: current epoch index (used for the log header only).
        loss_scaler: AMP scaler wrapper; also performs gradient clipping.
        max_norm: gradient clipping norm forwarded to loss_scaler (0 = off).
        log_writer: optional tensorboard-style writer with update()/set_step().
        lr_scheduler: optional per-step scheduler (step_update is called).
        start_steps: global step count at the start of this epoch.
        lr_schedule_values / wd_schedule_values: optional per-iteration
            schedules indexed by the global step.

    Returns:
        dict mapping meter name -> global average over the epoch.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    for step, (batch, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # assign learning rate & weight decay for each step
        it = start_steps + step  # global training iteration
        if lr_schedule_values is not None or wd_schedule_values is not None:
            for i, param_group in enumerate(optimizer.param_groups):
                if lr_schedule_values is not None:
                    param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
                if wd_schedule_values is not None and param_group["weight_decay"] > 0:
                    # groups configured with weight_decay == 0 stay decay-free
                    param_group["weight_decay"] = wd_schedule_values[it]
        samples, images, bool_masked_pos = batch
        images = images.to(device, non_blocking=True)
        samples = samples.to(device, non_blocking=True)
        bool_masked_pos = bool_masked_pos.to(device, non_blocking=True)
        with torch.no_grad():
            # tokenizer is frozen: image -> one codebook index per patch
            input_ids = d_vae.get_codebook_indices(images).flatten(1)
            bool_masked_pos = bool_masked_pos.flatten(1).to(torch.bool)
            # targets are the token ids of the masked patches only
            labels = input_ids[bool_masked_pos]
        with torch.cuda.amp.autocast():
            outputs = model(samples, bool_masked_pos=bool_masked_pos, return_all_tokens=False)
            loss = nn.CrossEntropyLoss()(input=outputs, target=labels)
        loss_value = loss.item()
        if not math.isfinite(loss_value):
            # abort on NaN/inf loss rather than corrupting the weights
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)
        optimizer.zero_grad()
        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
                                parameters=model.parameters(), create_graph=is_second_order)
        loss_scale_value = loss_scaler.state_dict()["scale"]
        torch.cuda.synchronize()
        # masked-patch token prediction accuracy for this batch
        mlm_acc = (outputs.max(-1)[1] == labels).float().mean().item()
        metric_logger.update(mlm_acc=mlm_acc)
        if log_writer is not None:
            log_writer.update(mlm_acc=mlm_acc, head="loss")
        metric_logger.update(loss=loss_value)
        metric_logger.update(loss_scale=loss_scale_value)
        min_lr = 10.
        max_lr = 0.
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group["lr"])
            max_lr = max(max_lr, group["lr"])
        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if group["weight_decay"] > 0:
                weight_decay_value = group["weight_decay"]
        metric_logger.update(weight_decay=weight_decay_value)
        metric_logger.update(grad_norm=grad_norm)
        if log_writer is not None:
            log_writer.update(loss=loss_value, head="loss")
            log_writer.update(loss_scale=loss_scale_value, head="opt")
            log_writer.update(lr=max_lr, head="opt")
            log_writer.update(min_lr=min_lr, head="opt")
            log_writer.update(weight_decay=weight_decay_value, head="opt")
            log_writer.update(grad_norm=grad_norm, head="opt")
            log_writer.set_step()
        if lr_scheduler is not None:
            lr_scheduler.step_update(start_steps + step)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
184,948 | from math import sqrt
import os
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dall_e import load_model
def top_k(logits, thres = 0.5):
    """Keep only the top-k logits along the last dimension.

    k is derived from `thres`: k = max(int((1 - thres) * num_logits), 1),
    so a higher threshold keeps fewer logits. Entries outside the top-k are
    set to -inf so a subsequent softmax assigns them zero probability.

    Args:
        logits: tensor of scores; filtering is applied over the last dim.
        thres: fraction of the vocabulary to drop (default 0.5).

    Returns:
        A new tensor of the same shape with non-top-k entries at -inf.
    """
    num_logits = logits.shape[-1]
    k = max(int((1 - thres) * num_logits), 1)
    val, ind = torch.topk(logits, k)
    probs = torch.full_like(logits, float('-inf'))
    # scatter along the last dim: torch.topk above operates on dim=-1, but the
    # original hard-coded dim=1 here, which only agreed with it for 2-D input
    probs.scatter_(-1, ind, val)
    return probs
184,949 | from math import sqrt
import os
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
def exists(val):
    """Return True when *val* is anything other than None."""
    if val is None:
        return False
    return True
from dall_e import load_model
def default(val, d):
    """Return *val*, falling back to *d* when *val* is None."""
    # inlined the `exists(val)` helper: exists(val) == (val is not None)
    if val is None:
        return d
    return val
184,950 | from math import sqrt
import os
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dall_e import load_model
def eval_decorator(fn):
    """Decorator: run *fn* with the model in eval mode, then restore the
    model's previous training/eval mode before returning fn's result.

    The wrapped callable must take the model as its first argument.
    """
    def inner(model, *args, **kwargs):
        previous_mode = model.training
        model.eval()
        result = fn(model, *args, **kwargs)
        # restore whatever mode the model was in before the call
        model.train(previous_mode)
        return result
    return inner
184,951 | import os
import argparse
import json
from pathlib import Path
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
import utils
import modeling_finetune
from timm.models import create_model
def load_model(model, checkpoint_file, model_key, model_prefix):
    """Load pre-trained weights from a local file or URL into *model*.

    Args:
        model: module whose parameters are populated.
        checkpoint_file: local path, or an https URL fetched via torch.hub.
        model_key: '|'-separated candidate keys (e.g. "model|module") under
            which the state dict may be stored inside the checkpoint.
        model_prefix: prefix forwarded to utils.load_state_dict.
    """
    if checkpoint_file.startswith('https'):
        checkpoint = torch.hub.load_state_dict_from_url(
            checkpoint_file, map_location='cpu', check_hash=True)
    else:
        checkpoint = torch.load(checkpoint_file, map_location='cpu')
    checkpoint_model = None
    # fix: iterate a distinct name instead of shadowing the `model_key`
    # parameter with the loop variable
    for candidate_key in model_key.split('|'):
        if candidate_key in checkpoint:
            checkpoint_model = checkpoint[candidate_key]
            print("Load state_dict by model_key = %s" % candidate_key)
            break
    if checkpoint_model is None:
        # no wrapper key matched: the checkpoint itself is the state dict
        checkpoint_model = checkpoint
    utils.load_state_dict(model, checkpoint_model, prefix=model_prefix)
def train(model, linear_classifier, optimizer, loader, epoch, avgpool, amp_forward):
    """Train the per-layer linear probes for one epoch on a frozen backbone.

    Features are extracted under torch.no_grad() (the backbone is never
    updated); only `linear_classifier` receives gradients. Each probed layer
    contributes its own cross-entropy term to the summed loss.

    Args:
        model: frozen backbone exposing get_intermediate_layers(inp).
        linear_classifier: LinearClassifier mapping a list of per-layer
            features to a list of per-layer logits.
        optimizer: optimizer over the linear classifier's parameters.
        loader: training data loader yielding (images, labels).
        epoch: epoch index, used for the log header.
        avgpool: must be truthy — this variant always concatenates the CLS
            token with mean-pooled patch tokens.
        amp_forward: run the backbone forward under CUDA AMP when True.

    Returns:
        dict mapping meter name -> global average for the epoch.
    """
    linear_classifier.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    assert avgpool
    for (inp, target) in metric_logger.log_every(loader, 20, header):
        # move to gpu
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # forward through the frozen backbone
        with torch.no_grad():
            if amp_forward:
                with torch.cuda.amp.autocast():
                    intermediate_output = model.get_intermediate_layers(inp)
            else:
                intermediate_output = model.get_intermediate_layers(inp)
            output = []
            for each_layer in intermediate_output:
                cls_rep = each_layer[:, 0]                       # CLS token
                mean_rep = torch.mean(each_layer[:, 1:], dim=1)  # mean of patch tokens
                output.append(torch.cat((cls_rep, mean_rep), dim=-1).float())
        # the classifier forward runs with grad enabled
        output = linear_classifier(output)
        # compute cross entropy loss
        loss = 0
        for each_output in output:
            # one loss term per probed layer
            loss += nn.CrossEntropyLoss()(each_output, target)
        # compute the gradients
        optimizer.zero_grad()
        loss.backward()
        # step
        optimizer.step()
        # log
        torch.cuda.synchronize()
        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def validate_network(val_loader, model, linear_classifier, avgpool, amp_forward):
    """Evaluate every per-layer linear probe on the validation set.

    Mirrors `train`'s feature extraction (CLS token concatenated with
    mean-pooled patch tokens), then scores each layer's classifier with
    cross-entropy loss and top-1 (and, when there are >= 5 classes, top-5)
    accuracy.

    Returns:
        dict keyed by 'layerN' -> {'loss': ..., 'acc1': ...[, 'acc5': ...]}
        global averages for each probed layer.
    """
    linear_classifier.eval()
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = 'Test:'
    assert avgpool
    # unwrap DDP, if used, to read the num_labels attribute
    module = linear_classifier.module if hasattr(linear_classifier, 'module') else linear_classifier
    for inp, target in metric_logger.log_every(val_loader, 20, header):
        # move to gpu
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # forward
        with torch.no_grad():
            if amp_forward:
                with torch.cuda.amp.autocast():
                    intermediate_output = model.get_intermediate_layers(inp)
            else:
                intermediate_output = model.get_intermediate_layers(inp)
            output = []
            for each_layer in intermediate_output:
                cls_rep = each_layer[:, 0]
                mean_rep = torch.mean(each_layer[:, 1:], dim=1)
                output.append(torch.cat((cls_rep, mean_rep), dim=-1).float())
            all_output = linear_classifier(output)
            for i, output in enumerate(all_output):
                loss = nn.CrossEntropyLoss()(output, target)
                # top-5 accuracy only makes sense with at least 5 classes
                if module.num_labels >= 5:
                    acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
                else:
                    acc1, = utils.accuracy(output, target, topk=(1,))
                batch_size = inp.shape[0]
                post_str = '_layer%d' % i
                metric_logger.update(loss=loss.item())
                metric_logger.meters['acc1' + post_str].update(acc1.item(), n=batch_size)
                if module.num_labels >= 5:
                    metric_logger.meters['acc5' + post_str].update(acc5.item(), n=batch_size)
    eval_results = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    updated_results = {}
    # regroup flat meter names like 'acc1_layer3' into per-layer dicts;
    # keys without '_' (the shared 'loss' meter) are intentionally skipped
    for key in eval_results:
        if '_' in key:
            this_key, classifier_idx = key.split('_')
            if classifier_idx not in updated_results:
                updated_results[classifier_idx] = {}
            updated_results[classifier_idx][this_key] = eval_results[key]
    print("Eval result = %s" % json.dumps(updated_results, indent=2))
    return updated_results
class LinearClassifier(nn.Module):
    """A stack of independent linear probes, one per backbone layer.

    Each probe is a single nn.Linear(dim, num_labels) whose weights are
    initialised from N(0, 0.01) with zero bias. forward() applies probe i
    to the i-th feature tensor and returns the list of logits.
    """

    def __init__(self, num_layers, dim, num_labels=1000):
        super(LinearClassifier, self).__init__()
        self.num_labels = num_labels
        self.num_classifier = num_layers
        self.linear = nn.ModuleList()
        for _ in range(num_layers):
            head = nn.Linear(dim, num_labels)
            # small-std normal init, zero bias (matches common probe setups)
            head.weight.data.normal_(mean=0.0, std=0.01)
            head.bias.data.zero_()
            self.linear.append(head)

    def forward(self, x_list):
        # probe i consumes feature tensor i; returns one logits tensor per probe
        return [head(x_list[i]) for i, head in enumerate(self.linear)]
def eval_linear(args):
    """Linear-probe evaluation: train linear classifiers on features from a
    frozen backbone and track the best top-1 accuracy on the val split.

    Builds ImageNet-style train/val loaders, instantiates the frozen backbone
    via timm's create_model, attaches one LinearClassifier head per
    transformer layer, optionally resumes from checkpoint.pth.tar in
    args.output_dir, then alternates training with (every args.val_freq
    epochs) validation, checkpointing after every epoch on the main process.
    """
    utils.init_distributed_mode(args)
    # print("git:\n {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    cudnn.benchmark = True
    # ImageNet statistics, or 0.5-centred stats for tokenizer-style models
    mean = (0.485, 0.456, 0.406) if args.imagenet_default_mean_and_std else (0.5, 0.5, 0.5)
    std = (0.229, 0.224, 0.225) if args.imagenet_default_mean_and_std else (0.5, 0.5, 0.5)
    # ============ preparing data ... ============
    train_transform = pth_transforms.Compose([
        pth_transforms.RandomResizedCrop(224),
        pth_transforms.RandomHorizontalFlip(),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize(mean, std),
    ])
    val_transform = pth_transforms.Compose([
        pth_transforms.Resize(256, interpolation=3),  # 3 == PIL bicubic
        pth_transforms.CenterCrop(224),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize(mean, std),
    ])
    print("train_transform = %s" % str(train_transform))
    print("val_transform = %s" % str(val_transform))
    dataset_train = datasets.ImageFolder(os.path.join(args.data_path, "train"), transform=train_transform)
    dataset_val = datasets.ImageFolder(os.path.join(args.data_path, "val"), transform=val_transform)
    global_rank = utils.get_rank()
    world_size = utils.get_world_size()
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset_train, num_replicas=world_size, rank=global_rank, shuffle=True)
    train_loader = torch.utils.data.DataLoader(
        dataset_train,
        sampler=sampler,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
    )
    val_loader = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
    )
    print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
    # ============ building network ... ============
    model = create_model(
        args.model, pretrained=False, num_classes=0, drop_rate=0, drop_path_rate=0,
        attn_drop_rate=0, drop_block_rate=None, use_mean_pooling=False,
        use_shared_rel_pos_bias=args.rel_pos_bias, use_abs_pos_emb=args.abs_pos_emb,
        init_values=args.layer_scale_init_value,
    )
    model.cuda()
    model.eval()  # backbone stays frozen; only the linear heads train
    print(f"Model {args.model} built.")
    # load weights to evaluate
    load_model(model=model, checkpoint_file=args.pretrained_weights, model_key=args.checkpoint_key, model_prefix="")
    # one linear head per layer; the feature is CLS + mean-pooled patch
    # tokens, hence the doubled dim when avgpool_patchtokens is set
    linear_classifier = LinearClassifier(
        dim=model.embed_dim * (1 + int(args.avgpool_patchtokens)),
        num_labels=args.num_labels, num_layers=model.get_num_layers())
    linear_classifier = linear_classifier.cuda()
    if world_size > 1:
        linear_classifier = nn.parallel.DistributedDataParallel(linear_classifier, device_ids=[args.gpu])
    print("Model = %s" % str(linear_classifier))
    # set optimizer
    learning_rate = args.lr or args.base_lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256
    # use absolute or linear scaled learning rate
    if args.optimizer.lower() == "sgd":
        optimizer = torch.optim.SGD(
            linear_classifier.parameters(), learning_rate, momentum=0.9,
            weight_decay=0,  # we do not apply weight decay
        )
    else:
        optimizer = torch.optim.AdamW(
            linear_classifier.parameters(), learning_rate, weight_decay=1e-4,
        )
    print(f"Optimizer = %s" % str(optimizer))
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0)
    # Optionally resume from a checkpoint
    to_restore = {"epoch": 0, "best_acc": 0.}
    utils.restart_from_checkpoint(
        os.path.join(args.output_dir, "checkpoint.pth.tar"),
        run_variables=to_restore,
        state_dict=linear_classifier,
        optimizer=optimizer,
        scheduler=scheduler,
    )
    start_epoch = to_restore["epoch"]
    best_acc = to_restore["best_acc"]
    for epoch in range(start_epoch, args.epochs):
        train_loader.sampler.set_epoch(epoch)  # reshuffle per epoch under DDP
        train_stats = train(
            model, linear_classifier, optimizer, train_loader, epoch, args.avgpool_patchtokens, args.amp_forward)
        scheduler.step()
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch}
        if epoch % args.val_freq == 0 or epoch == args.epochs - 1:
            test_stats = validate_network(
                val_loader, model, linear_classifier, args.avgpool_patchtokens, args.amp_forward)
            for classifier_key in test_stats:
                classifier = test_stats[classifier_key]
                print(f"Accuracy at epoch {epoch} of the network on the {len(dataset_val)} test images: {classifier['acc1']:.1f}%")
                # best accuracy across all epochs and all probed layers
                best_acc = max(best_acc, classifier["acc1"])
            print(f'Max accuracy so far: {best_acc:.2f}%')
            log_stats = {**{k: v for k, v in log_stats.items()},
                         **{f'test_{k}': v for k, v in test_stats.items()}}
        if utils.is_main_process():
            with (Path(args.output_dir) / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
            save_dict = {
                "epoch": epoch + 1,
                "state_dict": linear_classifier.state_dict(),
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
                "best_acc": best_acc,
            }
            torch.save(save_dict, os.path.join(args.output_dir, "checkpoint.pth.tar"))
    print("Training of the supervised linear classifier on frozen features completed.\n"
          "Top-1 test accuracy: {acc:.1f}".format(acc=best_acc))
184,952 | import os
import argparse
import json
from pathlib import Path
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
import utils
import modeling_finetune
from timm.models import create_model
The provided code snippet includes necessary dependencies for implementing the `bool_flag` function. Write a Python function `def bool_flag(s)` to solve the following problem:
Parse boolean arguments from the command line.
Here is the function:
def bool_flag(s):
    """
    Parse boolean arguments from the command line.
    """
    normalized = s.lower()
    if normalized in {"on", "true", "1"}:
        return True
    if normalized in {"off", "false", "0"}:
        return False
    # anything else is a user error on the command line
    raise argparse.ArgumentTypeError("invalid value for a boolean flag")
184,953 | import os
import copy
import pytorch_lightning as pl
from vlmo.config import ex
from vlmo.modules import VLMo
from vlmo.datamodules.multitask_datamodule import MTDataModule
from pytorch_lightning.plugins import environments as pl_env
from pytorch_lightning.utilities.distributed import rank_zero_info
class OMPIClusterEnvironment(pl_env.ClusterEnvironment):
    """Lightning cluster environment backed by OpenMPI's
    OMPI_COMM_WORLD_* environment variables (set by mpirun)."""

    def __init__(self):
        super().__init__()

    def creates_processes_externally(self):
        # mpirun launches every worker; Lightning must not spawn its own
        return True

    def world_size(self) -> int:
        return int(os.environ["OMPI_COMM_WORLD_SIZE"])

    def set_world_size(self, size: int):
        # fixed by the MPI launcher; deliberately a no-op
        pass

    def global_rank(self) -> int:
        return int(os.environ["OMPI_COMM_WORLD_RANK"])

    def set_global_rank(self, rank: int):
        # fixed by the MPI launcher; deliberately a no-op
        pass

    def local_rank(self) -> int:
        return int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])

    def node_rank(self) -> int:
        # fall back to node 0 when the launcher does not export NODE_RANK
        return int(os.environ.get("NODE_RANK", 0))

    def master_address(self) -> str:
        return os.environ["MASTER_ADDR"]

    def master_port(self) -> int:
        return int(os.environ["MASTER_PORT"])
def get_cluster_plugin(num_gpus=1, num_nodes=1):
    """Pick the Lightning cluster environment for this launch configuration.

    Multi-node runs, or single-node runs launched under mpirun (detected via
    OMPI_COMM_WORLD_SIZE), use the OpenMPI environment; otherwise any GPU run
    uses Lightning's default environment. Returns None for CPU-only setups.
    """
    launched_by_mpi = "OMPI_COMM_WORLD_SIZE" in os.environ
    if num_nodes > 1 or (num_nodes == 1 and launched_by_mpi):
        rank_zero_info("ClusterPlugin: using OMPI Cluster Environment")
        return OMPIClusterEnvironment()
    if num_gpus >= 1:
        rank_zero_info("ClusterPlugin: using Lightning Cluster Environment")
        return pl_env.LightningEnvironment()
    return None
184,954 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
import numpy as np
import vlmo.modules.multiway_transformer
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings
from vlmo.modules import heads, objectives, vlmo_utils
from pytorch_lightning.utilities.distributed import rank_zero_info
from scipy import interpolate
from timm.models import create_model
def convert_to_textpt_ckpt(state_dict, module):
    """Convert a text-pretraining checkpoint to the multiway layout.

    Keys are re-rooted under "transformer." with FFN weights routed to the
    text ("imag") branch: "mlp" -> "mlp_imag", "norm2" -> "norm2_imag".
    Per-layer relative_position_bias_table tensors are merged column-wise
    into one tensor (one op to gather all biases is faster at runtime) and
    written over the leading rows of *module*'s existing table.
    """
    converted = {}
    rel_pos_tables = {}
    for key, value in state_dict.items():
        if "relative_position_bias_table" in key:
            # e.g. transformer.blocks.0.attn.relative_position_bias_table
            layer_idx = int(key.split(".attn.")[0].split('.')[-1])
            rel_pos_tables[layer_idx] = value
        elif "mlp" in key:
            converted["transformer." + key.replace("mlp", "mlp_imag")] = value
        elif "norm2" in key:
            converted["transformer." + key.replace("norm2", "norm2_imag")] = value
        else:
            converted["transformer." + key] = value
    if rel_pos_tables:
        # concatenate per-layer tables in layer order, then overwrite the
        # leading rows of the module's (possibly larger) merged table
        stacked = torch.cat(
            [rel_pos_tables[idx] for idx in sorted(rel_pos_tables)], dim=1)
        num_distance, _ = stacked.shape
        merged = module.relative_position_bias_table.data.clone()
        merged[:num_distance, :] = stacked
        converted["relative_position_bias_table"] = merged
    return converted
184,955 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
import numpy as np
import vlmo.modules.multiway_transformer
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings
from vlmo.modules import heads, objectives, vlmo_utils
from pytorch_lightning.utilities.distributed import rank_zero_info
from scipy import interpolate
from timm.models import create_model
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    """Resize a checkpoint's position embedding to the encoder's patch grid.

    The extra (non-patch) tokens at the front — class/dist tokens — are kept
    as-is; the patch-position tokens are reshaped to a square grid and
    bicubically interpolated to the encoder's grid size. When the grids
    already match, the checkpoint tensor is returned unchanged.
    """
    embedding_size = pos_embed_checkpoint.shape[-1]
    num_patches = visual_encoder.patch_embed.num_patches
    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
    # side length (height == width) of the checkpoint's patch grid
    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    # side length of the target patch grid
    new_size = int(num_patches ** 0.5)
    if orig_size == new_size:
        return pos_embed_checkpoint
    # class_token and dist_token are kept unchanged
    extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
    # only the position tokens are interpolated
    pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
    grid = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
    grid = torch.nn.functional.interpolate(
        grid, size=(new_size, new_size), mode='bicubic', align_corners=False)
    pos_tokens = grid.permute(0, 2, 3, 1).flatten(1, 2)
    new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
    print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))
    return new_pos_embed
184,956 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
import numpy as np
import vlmo.modules.multiway_transformer
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings
from vlmo.modules import heads, objectives, vlmo_utils
from pytorch_lightning.utilities.distributed import rank_zero_info
from scipy import interpolate
from timm.models import create_model
def convert_deepspeed_ckpt(state_dict):
    """Strip DeepSpeed's leading "module." prefix from checkpoint keys.

    Keys without the prefix are copied through unchanged; insertion order
    is preserved. Only the first occurrence of the prefix is removed.
    """
    prefix = "module."
    return {
        (key[len(prefix):] if key.startswith(prefix) else key): value
        for key, value in state_dict.items()
    }
184,957 | from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from pytorch_lightning.utilities.distributed import rank_zero_info
class MultiWayTransformer(nn.Module):
    """ Vision Transformer

    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
    https://arxiv.org/abs/2010.11929
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=None,
        need_relative_position_embed=True,
        use_abs_pos_emb=False,
        layer_scale_init_values=0.1,
        vlffn_start_layer_index=10,
        config=None,
    ):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
            need_relative_position_embed (bool): enable relative position bias on self-attention
            use_abs_pos_emb (bool): enable abs pos emb
            layer_scale_init_values (float or None): layer scale init values, set None to disable
            vlffn_start_layer_index (int): vl-ffn start index
            config: (dict): other hyper from pytorch-lighting
        """
        super().__init__()
        # the lightning config, when present, overrides the keyword argument
        drop_path_rate = drop_path_rate if config is None else config["drop_path_rate"]
        rank_zero_info("drop path rate: {}".format(drop_path_rate))
        self.use_abs_pos_emb = use_abs_pos_emb
        self.need_relative_position_embed = need_relative_position_embed
        self.num_features = (
            self.embed_dim
        ) = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches
        self.patch_size = patch_size
        self.num_heads = num_heads
        self.vlffn_start_layer_index = vlffn_start_layer_index
        # NOTE(review): config is indexed unconditionally here, so config=None
        # would raise; presumably a config dict is always supplied — confirm.
        if config["loss_names"]["textmlm"] > 0:
            # text-only pretraining: push the VL-FFN start past the last layer
            # so no block instantiates the vision-language FFN
            self.vlffn_start_layer_index = depth
            rank_zero_info("Set vlffn_start_layer_index={} for text-only pretraining".format(self.vlffn_start_layer_index))
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if self.use_abs_pos_emb else None
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, depth)
        ]  # stochastic depth decay rule
        self.blocks = nn.ModuleList(
            [
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                    with_vlffn=(i >= self.vlffn_start_layer_index),
                    layer_scale_init_values=layer_scale_init_values,
                    max_text_len=config["max_text_len"],
                )
                for i in range(depth)
            ]
        )
        self.norm = norm_layer(embed_dim)
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=0.02)
        trunc_normal_(self.cls_token, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # truncated-normal init for linear layers; unit-scale LayerNorm
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def no_weight_decay(self):
        # parameter names the optimizer setup excludes from weight decay
        return {"pos_embed", "cls_token"}

    def visual_embed(self, _x):
        """Patchify an image batch and prepend the CLS token.

        Returns (tokens, mask); the mask is all ones since every visual
        token is valid. NOTE(review): the mask is created on the default
        device — presumably moved to the input's device by the caller.
        """
        x = self.patch_embed(_x)
        x = x.flatten(2).transpose(1, 2)
        B, L, _ = x.shape
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        x_mask = torch.ones(x.shape[0], x.shape[1])
        return x, x_mask
def vlmo_base_patch16(pretrained=False, **kwargs):
    """VLMo-Base/16: 768-dim, 12 layers, 12 heads, VL-FFN from layer 10."""
    img_size = kwargs.pop("img_size", 224)
    return MultiWayTransformer(
        img_size=img_size,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        vlffn_start_layer_index=10,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
184,958 | from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from pytorch_lightning.utilities.distributed import rank_zero_info
class MultiWayTransformer(nn.Module):
    """ Vision Transformer

    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
    https://arxiv.org/abs/2010.11929
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=None,
        need_relative_position_embed=True,
        use_abs_pos_emb=False,
        layer_scale_init_values=0.1,
        vlffn_start_layer_index=10,
        config=None,
    ):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
            need_relative_position_embed (bool): enable relative position bias on self-attention
            use_abs_pos_emb (bool): enable abs pos emb
            layer_scale_init_values (float or None): layer scale init values, set None to disable
            vlffn_start_layer_index (int): vl-ffn start index
            config: (dict): other hyper from pytorch-lighting
        """
        super().__init__()
        # the lightning config, when present, overrides the keyword argument
        drop_path_rate = drop_path_rate if config is None else config["drop_path_rate"]
        rank_zero_info("drop path rate: {}".format(drop_path_rate))
        self.use_abs_pos_emb = use_abs_pos_emb
        self.need_relative_position_embed = need_relative_position_embed
        self.num_features = (
            self.embed_dim
        ) = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches
        self.patch_size = patch_size
        self.num_heads = num_heads
        self.vlffn_start_layer_index = vlffn_start_layer_index
        # NOTE(review): config is indexed unconditionally here, so config=None
        # would raise; presumably a config dict is always supplied — confirm.
        if config["loss_names"]["textmlm"] > 0:
            # text-only pretraining: push the VL-FFN start past the last layer
            # so no block instantiates the vision-language FFN
            self.vlffn_start_layer_index = depth
            rank_zero_info("Set vlffn_start_layer_index={} for text-only pretraining".format(self.vlffn_start_layer_index))
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if self.use_abs_pos_emb else None
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, depth)
        ]  # stochastic depth decay rule
        self.blocks = nn.ModuleList(
            [
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                    with_vlffn=(i >= self.vlffn_start_layer_index),
                    layer_scale_init_values=layer_scale_init_values,
                    max_text_len=config["max_text_len"],
                )
                for i in range(depth)
            ]
        )
        self.norm = norm_layer(embed_dim)
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=0.02)
        trunc_normal_(self.cls_token, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # truncated-normal init for linear layers; unit-scale LayerNorm
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def no_weight_decay(self):
        # parameter names the optimizer setup excludes from weight decay
        return {"pos_embed", "cls_token"}

    def visual_embed(self, _x):
        """Patchify an image batch and prepend the CLS token.

        Returns (tokens, mask); the mask is all ones since every visual
        token is valid. NOTE(review): the mask is created on the default
        device — presumably moved to the input's device by the caller.
        """
        x = self.patch_embed(_x)
        x = x.flatten(2).transpose(1, 2)
        B, L, _ = x.shape
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        x_mask = torch.ones(x.shape[0], x.shape[1])
        return x, x_mask
def vlmo_large_patch16(pretrained=False, **kwargs):
    """VLMo-Large/16: 1024-dim, 24 layers, 16 heads, VL-FFN from layer 21."""
    img_size = kwargs.pop("img_size", 224)
    return MultiWayTransformer(
        img_size=img_size,
        patch_size=16,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4,
        qkv_bias=True,
        vlffn_start_layer_index=21,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
184,959 | from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from pytorch_lightning.utilities.distributed import rank_zero_info
class MultiWayTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
        https://arxiv.org/abs/2010.11929
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=None,
        need_relative_position_embed=True,
        use_abs_pos_emb=False,
        layer_scale_init_values=0.1,
        vlffn_start_layer_index=10,
        config=None,
    ):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
            need_relative_position_embed (bool): enable relative position bias on self-attention
            use_abs_pos_emb (bool): enable abs pos emb
            layer_scale_init_values (float or None): layer scale init values, set None to disable
            vlffn_start_layer_index (int): vl-ffn start index
            config: (dict): other hyper from pytorch-lighting

        NOTE(review): despite the config=None default, config is dereferenced
        unconditionally below (config["loss_names"], config["max_text_len"]),
        so in practice a config dict is required.
        """
        super().__init__()
        # config, when provided, overrides the drop_path_rate argument.
        drop_path_rate = drop_path_rate if config is None else config["drop_path_rate"]
        rank_zero_info("drop path rate: {}".format(drop_path_rate))
        self.use_abs_pos_emb = use_abs_pos_emb
        self.need_relative_position_embed = need_relative_position_embed

        self.num_features = (
            self.embed_dim
        ) = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)

        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches

        self.patch_size = patch_size
        self.num_heads = num_heads
        self.vlffn_start_layer_index = vlffn_start_layer_index
        # For text-only pretraining the vision-language FFN experts are never
        # used, so push the VL-FFN start index past the last layer.
        if config["loss_names"]["textmlm"] > 0:
            self.vlffn_start_layer_index = depth
            rank_zero_info("Set vlffn_start_layer_index={} for text-only pretraining".format(self.vlffn_start_layer_index))
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # Absolute position embedding covers the CLS token plus all patches.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if self.use_abs_pos_emb else None
        self.pos_drop = nn.Dropout(p=drop_rate)

        # Per-block drop-path probabilities increase linearly with depth
        # (stochastic depth decay rule).
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, depth)
        ]
        self.blocks = nn.ModuleList(
            [
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                    # Only blocks from vlffn_start_layer_index onward carry a
                    # vision-language expert FFN.
                    with_vlffn=(i >= self.vlffn_start_layer_index),
                    layer_scale_init_values=layer_scale_init_values,
                    max_text_len=config["max_text_len"],
                )
                for i in range(depth)
            ]
        )
        self.norm = norm_layer(embed_dim)

        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=0.02)
        trunc_normal_(self.cls_token, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal Linear weights, zero biases; unit-weight LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def no_weight_decay(self):
        # Parameter names the optimizer setup should exempt from weight decay.
        return {"pos_embed", "cls_token"}

    def visual_embed(self, _x):
        """Embed an image batch into CLS + patch tokens.

        Returns the token tensor (B, 1 + num_patches, embed_dim) and an
        all-ones attention mask of shape (B, 1 + num_patches).
        """
        x = self.patch_embed(_x)
        # Flatten spatial dims and move channels last: (B, L, embed_dim).
        x = x.flatten(2).transpose(1, 2)
        B, L, _ = x.shape
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        # NOTE(review): the mask is created on the default (CPU) device even
        # when x lives on GPU — presumably moved by the caller; verify.
        x_mask = torch.ones(x.shape[0], x.shape[1])
        return x, x_mask
def vlmo_base_plus_patch16(pretrained=False, **kwargs):
    """Factory for the base-plus MultiWay Transformer (24 layers, 544-dim,
    16 heads, absolute position embeddings, no relative position bias or
    layer scale, VL-FFN experts starting at layer 21)."""
    image_size = kwargs.pop("img_size", 224)
    return MultiWayTransformer(
        img_size=image_size,
        patch_size=16,
        embed_dim=544,
        depth=24,
        num_heads=16,
        mlp_ratio=4,
        qkv_bias=True,
        vlffn_start_layer_index=21,
        use_abs_pos_emb=True,
        need_relative_position_embed=False,
        layer_scale_init_values=None,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
184,960 | import torch
import random
import json
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from vlmo.modules.dist_utils import all_gather
from vlmo.modules.objectives import compute_irtr_recall, compute_irtr_recall_with_rerank
from vlmo.gadgets.my_metrics import Accuracy, VQAScore, Scalar
from pytorch_lightning.utilities.distributed import rank_zero_info
def set_metrics(pl_module):
    """Attach per-task metric trackers to a Lightning module.

    For every task in config["loss_names"] with weight >= 1, registers
    Accuracy/Scalar/VQAScore trackers for both the train and val splits.
    NLVR2 is special-cased: its eval side tracks "dev" and "test" instead
    of "val".
    """
    loss_names = pl_module.hparams.config["loss_names"]
    for split in ["train", "val"]:
        for task, weight in loss_names.items():
            if weight < 1:
                continue
            if task == "vqa":
                setattr(pl_module, f"{split}_vqa_score", VQAScore())
                setattr(pl_module, f"{split}_{task}_loss", Scalar())
            elif task == "nlvr2":
                prefixes = ["train"] if split == "train" else ["dev", "test"]
                for prefix in prefixes:
                    setattr(pl_module, f"{prefix}_{task}_accuracy", Accuracy())
                    setattr(pl_module, f"{prefix}_{task}_loss", Scalar())
            elif task in ("irtr", "itc"):
                # Both retrieval tasks track per-direction accuracy plus the
                # learned temperature; itc adds the VL-FFN head's metrics.
                setattr(pl_module, f"{split}_{task}_i2t_accuracy", Accuracy())
                setattr(pl_module, f"{split}_{task}_t2i_accuracy", Accuracy())
                setattr(pl_module, f"{split}_{task}_loss", Scalar())
                setattr(pl_module, f"{split}_{task}_logit_scale", Scalar())
                if task == "itc":
                    setattr(pl_module, f"{split}_{task}_vl_i2t_accuracy", Accuracy())
                    setattr(pl_module, f"{split}_{task}_vl_t2i_accuracy", Accuracy())
                    setattr(pl_module, f"{split}_{task}_vl_logit_scale", Scalar())
            else:
                # itm and any other classification-style task.
                setattr(pl_module, f"{split}_{task}_accuracy", Accuracy())
                setattr(pl_module, f"{split}_{task}_loss", Scalar())
184,961 | import torch
import random
import json
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from vlmo.modules.dist_utils import all_gather
from vlmo.modules.objectives import compute_irtr_recall, compute_irtr_recall_with_rerank
from vlmo.gadgets.my_metrics import Accuracy, VQAScore, Scalar
from pytorch_lightning.utilities.distributed import rank_zero_info
def epoch_wrapup(pl_module):
    """Compute, log, and reset epoch-level metrics for every active task.

    Optionally runs the expensive retrieval-recall evaluation (eval only,
    config-gated), then for each task with a positive loss weight logs its
    epoch metrics and folds a task-specific scalar into ``the_metric`` —
    the single model-selection score logged as "{phase}/the_metric".
    """
    phase = "train" if pl_module.training else "val"
    the_metric = 0

    # Retrieval recall only runs at eval time and only when requested.
    if pl_module.hparams.config["get_recall_metric"] and not pl_module.training:
        (val_ir_r1, val_ir_r5, val_ir_r10, val_tr_r1, val_tr_r5, val_tr_r10) = compute_irtr_recall(pl_module, split="val")
        val_avg = (val_ir_r1.item() + val_ir_r5.item() + val_ir_r10.item() + val_tr_r1.item() + val_tr_r5.item() + val_tr_r10.item()) / 6.0
        pl_module.logger.experiment.add_scalar(
            "recalls/val_avg", val_avg, pl_module.global_step
        )

        (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module, split="test")
        test_avg = (ir_r1.item() + ir_r5.item() + ir_r10.item() + tr_r1.item() + tr_r5.item() + tr_r10.item()) / 6.0
        pl_module.logger.experiment.add_scalar(
            "recalls/test_avg", test_avg, pl_module.global_step
        )

        print("val_avg:{}, test_avg:{}".format(val_avg, test_avg))
        print("test ir_r1:{}, ir_r5:{}, ir_r10:{}, tr_r1:{}, tr_r5:{}, tr_r10:{}".format(ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10))

        # Individual test recalls go to TensorBoard for inspection.
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r1", ir_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r5", ir_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r10", ir_r10, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r1", tr_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r5", tr_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r10", tr_r10, pl_module.global_step
        )
        # Model selection uses the validation recall average, not the test one.
        the_metric += val_avg

    for loss_name, v in pl_module.hparams.config["loss_names"].items():
        if v < 1:
            continue

        value = 0

        if loss_name == "vqa":
            value = getattr(pl_module, f"{phase}_{loss_name}_score").compute()
            pl_module.log(f"{loss_name}/{phase}/score_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_score").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "nlvr2":
            # NLVR2 eval logs dev and test separately; the dev accuracy is
            # what contributes to the model-selection metric.
            if phase == "train":
                value = getattr(pl_module, f"train_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/train/accuracy_epoch", value)
                getattr(pl_module, f"train_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/train/loss_epoch",
                    getattr(pl_module, f"train_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"train_{loss_name}_loss").reset()
            else:
                value_dev = getattr(pl_module, f"dev_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/dev/accuracy_epoch", value_dev)
                getattr(pl_module, f"dev_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/dev/loss_epoch",
                    getattr(pl_module, f"dev_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"dev_{loss_name}_loss").reset()

                value_test = getattr(pl_module, f"test_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/test/accuracy_epoch", value_test)
                getattr(pl_module, f"test_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/test/loss_epoch",
                    getattr(pl_module, f"test_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"test_{loss_name}_loss").reset()
                value = value_dev
        elif loss_name == "irtr":
            # Both retrieval directions contribute to the metric.
            value_i2t = getattr(pl_module, f"{phase}_{loss_name}_i2t_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/i2t_accuracy_epoch", value_i2t)
            getattr(pl_module, f"{phase}_{loss_name}_i2t_accuracy").reset()
            value_t2i = getattr(pl_module, f"{phase}_{loss_name}_t2i_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/t2i_accuracy_epoch", value_t2i)
            getattr(pl_module, f"{phase}_{loss_name}_t2i_accuracy").reset()

            value = value_i2t + value_t2i
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "itm":
            value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "itc":
            value_i2t = getattr(pl_module, f"{phase}_{loss_name}_i2t_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/i2t_accuracy_epoch", value_i2t)
            getattr(pl_module, f"{phase}_{loss_name}_i2t_accuracy").reset()
            value_t2i = getattr(pl_module, f"{phase}_{loss_name}_t2i_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/t2i_accuracy_epoch", value_t2i)
            getattr(pl_module, f"{phase}_{loss_name}_t2i_accuracy").reset()

            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()

            # The VL-FFN head's accuracies are logged but do NOT contribute
            # to the model-selection metric below.
            value_vl_i2t = getattr(pl_module, f"{phase}_{loss_name}_vl_i2t_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/vl_i2t_accuracy_epoch", value_vl_i2t)
            getattr(pl_module, f"{phase}_{loss_name}_vl_i2t_accuracy").reset()
            value_vl_t2i = getattr(pl_module, f"{phase}_{loss_name}_vl_t2i_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/vl_t2i_accuracy_epoch", value_vl_t2i)
            getattr(pl_module, f"{phase}_{loss_name}_vl_t2i_accuracy").reset()

            value = value_i2t + value_t2i
        else:
            value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()

        the_metric += value

    pl_module.log(f"{phase}/the_metric", the_metric)
184,962 | import torch
import random
import json
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from vlmo.modules.dist_utils import all_gather
from vlmo.modules.objectives import compute_irtr_recall, compute_irtr_recall_with_rerank
from vlmo.gadgets.my_metrics import Accuracy, VQAScore, Scalar
from pytorch_lightning.utilities.distributed import rank_zero_info
def check_non_acc_grad(pl_module):
    """Return True when the token-type embedding accumulated no gradient.

    True if the grad buffer is absent (None) or sums to exactly zero;
    otherwise False.
    """
    grad = pl_module.token_type_embeddings.weight.grad
    if grad is None:
        return True
    return (grad.sum() == 0).item()
184,963 | import torch
import random
import json
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from vlmo.modules.dist_utils import all_gather
from vlmo.modules.objectives import compute_irtr_recall, compute_irtr_recall_with_rerank
from vlmo.gadgets.my_metrics import Accuracy, VQAScore, Scalar
from pytorch_lightning.utilities.distributed import rank_zero_info
def set_task(pl_module):
    """Record on pl_module the names of all active tasks.

    A task is active when its weight in config["loss_names"] is >= 1;
    insertion order of the config dict is preserved.
    """
    active = []
    for name, weight in pl_module.hparams.config["loss_names"].items():
        if weight >= 1:
            active.append(name)
    pl_module.current_tasks = active
    return
184,964 | import torch
import random
import json
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from vlmo.modules.dist_utils import all_gather
from vlmo.modules.objectives import compute_irtr_recall, compute_irtr_recall_with_rerank
from vlmo.gadgets.my_metrics import Accuracy, VQAScore, Scalar
from pytorch_lightning.utilities.distributed import rank_zero_info
class AdamW(torch.optim.Optimizer):
    """ Implements Adam algorithm with weight decay fix (AdamW).

    Weight decay is decoupled from the gradient-based update: the weights are
    shrunk directly after the Adam step instead of adding wd * w to the
    gradient, which would interact with the moment estimates.

    Fixes over the previous version:
    - base class is torch.optim.Optimizer (the bare name `Optimizer` was
      never imported and raised NameError at class-creation time);
    - tensor ops use the keyword `alpha=`/`value=` forms, since the
      positional (scalar, tensor) overloads of add_/addcmul_/addcdiv_ were
      removed in modern PyTorch.

    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adams epsilon. Default: 1e-6
        weight_decay (float): Weight decay. Default: 0.0
        correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
        super().__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")

                state = self.state[p]

                # Lazy state initialization on first step for this parameter.
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]
                state["step"] += 1

                # Decay the first and second moment running average
                # coefficient (in-place).
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group["eps"])

                step_size = group["lr"]
                if group["correct_bias"]:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state["step"]
                    bias_correction2 = 1.0 - beta2 ** state["step"]
                    # sqrt via ** 0.5 avoids a dependency on the math module.
                    step_size = step_size * (bias_correction2 ** 0.5) / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

                # Decoupled weight decay, applied after the Adam update:
                # p <- p - lr * weight_decay * p.
                if group["weight_decay"] > 0.0:
                    p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])

        return loss
def set_schedule(pl_module):
    """Build the optimizer and LR scheduler for a Lightning module.

    Parameters are split into four groups — {decay, no-decay} x {backbone,
    task head} — with task heads trained at lr * lr_mult. Returns the
    ([optimizer], [scheduler-dict]) pair expected by configure_optimizers.
    """
    lr = pl_module.hparams.config["learning_rate"]
    wd = pl_module.hparams.config["weight_decay"]

    # Parameters whose name contains any of these substrings are exempt from
    # weight decay (biases and normalization parameters).
    no_decay = [
        "bias",
        "LayerNorm.bias",
        "LayerNorm.weight",
        "norm.bias",
        "norm.weight",
        "norm1.bias",
        "norm1.weight",
        "norm2.bias",
        "norm2.weight",
    ]
    # Task-specific heads get a scaled learning rate.
    head_names = ["vqa_classifier", "nlvr2_classifier"]
    lr_mult = pl_module.hparams.config["lr_mult"]
    end_lr = pl_module.hparams.config["end_lr"]
    decay_power = pl_module.hparams.config["decay_power"]
    optim_type = pl_module.hparams.config["optim_type"]

    # NOTE(review): `names` is never used below; kept for parity/debugging.
    names = [n for n, p in pl_module.named_parameters()]
    optimizer_grouped_parameters = [
        {
            # Backbone parameters with weight decay.
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if not any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
            ],
            "weight_decay": wd,
            "lr": lr,
        },
        {
            # Backbone parameters without weight decay.
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
            ],
            "weight_decay": 0.0,
            "lr": lr,
        },
        {
            # Head parameters with weight decay, scaled lr.
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if not any(nd in n for nd in no_decay)
                and any(bb in n for bb in head_names)
            ],
            "weight_decay": wd,
            "lr": lr * lr_mult,
        },
        {
            # Head parameters without weight decay, scaled lr.
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if any(nd in n for nd in no_decay) and any(bb in n for bb in head_names)
            ],
            "weight_decay": 0.0,
            "lr": lr * lr_mult,
        },
    ]

    # NOTE(review): optimizer is only assigned for adamw/adam/sgd; any other
    # optim_type would raise NameError when the scheduler is built below.
    if optim_type == "adamw":
        optimizer = AdamW(
            optimizer_grouped_parameters, lr=lr, eps=1e-8, betas=(0.9, 0.98)
        )
    elif optim_type == "adam":
        optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr)
    elif optim_type == "sgd":
        optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=lr, momentum=0.9)

    # Derive the total step budget from the dataloader when the trainer has
    # no explicit max_steps (None or the Lightning sentinel -1).
    if pl_module.trainer.max_steps is None or pl_module.trainer.max_steps==-1:
        max_steps = (
            len(pl_module.trainer.datamodule.train_dataloader())
            * pl_module.trainer.max_epochs
            // pl_module.trainer.accumulate_grad_batches
        )
    else:
        max_steps = pl_module.trainer.max_steps

    # A float warmup_steps is interpreted as a fraction of max_steps.
    warmup_steps = pl_module.hparams.config["warmup_steps"]
    if isinstance(pl_module.hparams.config["warmup_steps"], float):
        warmup_steps = int(max_steps * warmup_steps)
    rank_zero_info("Warmup_steps:{} \t Max_steps:{}".format(warmup_steps, max_steps))

    if decay_power == "cosine":
        scheduler = get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=max_steps,
        )
    else:
        # Any non-"cosine" decay_power doubles as the polynomial power.
        scheduler = get_polynomial_decay_schedule_with_warmup(
            optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=max_steps,
            lr_end=end_lr,
            power=decay_power,
        )

    # Step the scheduler every optimization step, not every epoch.
    sched = {"scheduler": scheduler, "interval": "step"}

    return (
        [optimizer],
        [sched],
    )
184,972 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_mlm(pl_module, batch):
    """Masked-language-modeling loss on a joint image-text batch.

    Runs the encoder with text masking enabled, scores text features with
    the MLM head, logs loss/accuracy, and returns the (0.25-weighted) loss
    plus logits, labels, and token ids.
    """
    infer = pl_module.infer(batch, mask_text=True, mask_image=False)
    logits = pl_module.mlm_score(infer["text_feats"])
    labels = infer["text_labels"]

    vocab_size = pl_module.hparams.config["vocab_size"]
    # Positions labeled -100 (unmasked tokens) are excluded from the loss.
    loss = F.cross_entropy(
        logits.view(-1, vocab_size),
        labels.view(-1),
        ignore_index=-100,
    )

    ret = {
        "mlm_loss": loss * 0.25,
        "mlm_logits": logits,
        "mlm_labels": labels,
        "mlm_ids": infer["text_ids"],
    }

    phase = "train" if pl_module.training else "val"
    tracked_loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
    tracked_acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
        ret["mlm_logits"], ret["mlm_labels"]
    )
    pl_module.log(f"mlm/{phase}/loss", tracked_loss)
    pl_module.log(f"mlm/{phase}/accuracy", tracked_acc)

    return ret
184,973 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_textonly_mlm(pl_module, batch):
    """Masked-language-modeling loss for text-only pretraining batches.

    Same shape as compute_mlm but uses the text-only encoder path and logs
    under the "textmlm" task name; the loss is unweighted here.
    """
    infer = pl_module.infer_text_mlm(batch, mask_text=True)
    logits = pl_module.mlm_score(infer["text_feats"])
    labels = infer["text_labels"]

    vocab_size = pl_module.hparams.config["vocab_size"]
    # Positions labeled -100 (unmasked tokens) are excluded from the loss.
    loss = F.cross_entropy(
        logits.view(-1, vocab_size),
        labels.view(-1),
        ignore_index=-100,
    )

    ret = {
        "mlm_loss": loss,
        "mlm_logits": logits,
        "mlm_labels": labels,
        "mlm_ids": infer["text_ids"],
    }

    phase = "train" if pl_module.training else "val"
    tracked_loss = getattr(pl_module, f"{phase}_textmlm_loss")(ret["mlm_loss"])
    tracked_acc = getattr(pl_module, f"{phase}_textmlm_accuracy")(
        ret["mlm_logits"], ret["mlm_labels"]
    )
    pl_module.log(f"textmlm/{phase}/loss", tracked_loss)
    pl_module.log(f"textmlm/{phase}/accuracy", tracked_acc)

    return ret
184,974 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_itm_hardneg(pl_module, batch, sim_i2t, sim_t2i):
    """Image-text matching loss with in-batch hard negative mining.

    For each positive pair, samples one hard-negative image and one
    hard-negative text from the cross-rank candidate pool, with sampling
    probability proportional to the ITC similarities, then classifies
    [positives, image-negatives, text-negatives] with the ITM head.

    Args:
        pl_module: module providing infer / itm_score / metric trackers.
        batch: positive image-text batch.
        sim_i2t, sim_t2i: ITC similarity matrices (local rows x global cols)
            used as hard-negative sampling weights.
    """
    pos_len = batch["text_ids"].size(0)
    neg_len = batch["text_ids"].size(0)
    bsz = batch["text_ids"].size(0)
    # Label layout matches the feature concatenation order below:
    # [positives (1), image-negatives (0), text-negatives (0)].
    itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len), torch.zeros(neg_len)]).to(
        pl_module.device
    )

    # Shallow copy so the caller's dict is not mutated.
    batch = {k: v for k, v in batch.items()}
    infer_pos = pl_module.infer(batch, mask_text=False, mask_image=False)

    batch_text_ids = infer_pos["text_ids"]
    batch_text_masks = infer_pos["text_masks"]
    batch_image = infer_pos["image"]

    with torch.no_grad():
        world_size = dist.get_world_size()
        rank = dist.get_rank()

        # We gather tensors from all gpus to get more hard negative candidates.
        gathered_text_ids = [
            torch.zeros_like(batch_text_ids) for _ in range(world_size)
        ]
        gathered_text_masks = [
            torch.zeros_like(batch_text_masks) for _ in range(world_size)
        ]
        gathered_image = [
            torch.zeros_like(batch_image) for _ in range(world_size)
        ]

        dist.all_gather(gathered_text_ids, batch_text_ids)
        dist.all_gather(gathered_text_masks, batch_text_masks)
        dist.all_gather(gathered_image, batch_image)

        # Local tensors go first so candidate index 0..bsz-1 maps to the
        # local batch, matching the fill_diagonal_(0) masking below.
        all_text_ids = torch.cat(
            [batch_text_ids]
            + gathered_text_ids[:rank]
            + gathered_text_ids[rank + 1 :]
        )
        all_text_masks = torch.cat(
            [batch_text_masks]
            + gathered_text_masks[:rank]
            + gathered_text_masks[rank + 1 :]
        )
        all_image = torch.cat(
            [batch_image]
            + gathered_image[:rank]
            + gathered_image[rank + 1 :]
        )

    with torch.no_grad():
        # Sampling weights over candidates; the diagonal is zeroed so a
        # positive is never drawn as its own negative.
        weights_i2t = F.softmax(sim_i2t[:bsz, :].float(), dim=1)
        weights_t2i = F.softmax(sim_t2i[:bsz, :].float(), dim=1)
        weights_i2t.fill_diagonal_(0)
        weights_t2i.fill_diagonal_(0)

    # Select a hard-negative image for each text.
    images_neg = []
    for b in range(bsz):
        neg_idx = torch.multinomial(weights_t2i[b], 1).item()
        images_neg.append(all_image[neg_idx])
    images_neg = torch.stack(images_neg, dim=0)

    # select a negative text for each image
    text_ids_neg = []
    text_masks_neg = []
    for b in range(bsz):
        neg_idx = torch.multinomial(weights_i2t[b], 1).item()
        text_ids_neg.append(all_text_ids[neg_idx])
        text_masks_neg.append(all_text_masks[neg_idx])

    text_ids_neg = torch.stack(text_ids_neg, dim=0)
    text_masks_neg = torch.stack(text_masks_neg, dim=0)

    # text_labels is not used in ITM loss
    batch_imgs_neg = {"image":[images_neg], "text_ids":batch["text_ids"], "text_labels":batch["text_labels"], "text_masks":batch["text_masks"]}
    infer_imags_neg = pl_module.infer(batch_imgs_neg, mask_text=False, mask_image=False)

    batch_text_neg = {"image":batch["image"], "text_ids":text_ids_neg, "text_labels":batch["text_labels"], "text_masks":text_masks_neg}
    infer_text_neg = pl_module.infer(batch_text_neg, mask_text=False, mask_image=False)

    # Order must match itm_labels: [pos, img-neg, text-neg].
    all_cls_feats = torch.cat([infer_pos["cls_feats"], infer_imags_neg["cls_feats"], infer_text_neg["cls_feats"]], dim=0)

    itm_logits = pl_module.itm_score(all_cls_feats)
    itm_loss = F.cross_entropy(itm_logits, itm_labels.long())

    ret = {
        "itm_loss": itm_loss,
        "itm_logits": itm_logits,
        "itm_labels": itm_labels,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"])
    acc = getattr(pl_module, f"{phase}_itm_accuracy")(
        ret["itm_logits"], ret["itm_labels"]
    )
    pl_module.log(f"itm/{phase}/loss", loss)
    pl_module.log(f"itm/{phase}/accuracy", acc)

    return ret
184,975 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_itc(pl_module, batch, aggregate=True):
    """Image-text contrastive (ITC) loss with an auxiliary VL-FFN head.

    Encodes images and texts separately, computes InfoNCE losses over both
    the standard CLS features and the VL-FFN CLS features, averages the two
    losses, and logs losses/accuracies/logit scales.

    Fixes: the aggregate=False path previously never assigned
    logits_per_vlffn_image / logits_per_vlffn_text, raising NameError at the
    VL-FFN loss; they are now computed locally as well. Also uses
    tensor.device instead of tensor.get_device() so CPU execution works.

    Args:
        pl_module: module providing infer_image / infer_text, the two logit
            scales, metric trackers, and logging.
        batch: input batch dict for the encoders.
        aggregate: when True, all-gather features across distributed ranks
            so every rank contrasts against the global batch.

    Returns:
        dict with the combined loss, per-direction logits, labels, and both
        logit scales.
    """
    # pl_module.logit_scale.data = torch.clamp(pl_module.logit_scale.data, 0, 4.6052)
    infer_imag = pl_module.infer_image(batch, mask_image=False)
    infer_text = pl_module.infer_text(batch, mask_text=False)

    image_features = infer_imag["cls_feats"]
    text_features = infer_text["cls_feats"]
    logit_scale = pl_module.logit_scale.exp().mean()

    image_vlffn_features = infer_imag["cls_vlffn_feats"]
    text_vlffn_features = infer_text["cls_vlffn_feats"]
    logit_vl_scale = pl_module.logit_vl_scale.exp().mean()

    if aggregate:
        world_size = dist.get_world_size()
        rank = dist.get_rank()

        # We gather tensors from all gpus to get more negatives to contrast with.
        gathered_image_features = [
            torch.zeros_like(image_features) for _ in range(world_size)
        ]
        gathered_text_features = [
            torch.zeros_like(text_features) for _ in range(world_size)
        ]
        dist.all_gather(gathered_image_features, image_features)
        dist.all_gather(gathered_text_features, text_features)

        # Put the local (grad-carrying) tensors first so gradients flow back
        # through this rank's features.
        all_image_features = torch.cat(
            [image_features]
            + gathered_image_features[:rank]
            + gathered_image_features[rank + 1 :]
        )
        all_text_features = torch.cat(
            [text_features]
            + gathered_text_features[:rank]
            + gathered_text_features[rank + 1 :]
        )

        logits_per_image = logit_scale * all_image_features @ all_text_features.t()
        logits_per_text = logits_per_image.t()

        gathered_image_vlffn_features = [
            torch.zeros_like(image_vlffn_features) for _ in range(world_size)
        ]
        gathered_text_vlffn_features = [
            torch.zeros_like(text_vlffn_features) for _ in range(world_size)
        ]
        dist.all_gather(gathered_image_vlffn_features, image_vlffn_features)
        dist.all_gather(gathered_text_vlffn_features, text_vlffn_features)

        all_image_vlffn_features = torch.cat(
            [image_vlffn_features]
            + gathered_image_vlffn_features[:rank]
            + gathered_image_vlffn_features[rank + 1 :]
        )
        all_text_vlffn_features = torch.cat(
            [text_vlffn_features]
            + gathered_text_vlffn_features[:rank]
            + gathered_text_vlffn_features[rank + 1 :]
        )

        logits_per_vlffn_image = logit_vl_scale * all_image_vlffn_features @ all_text_vlffn_features.t()
        logits_per_vlffn_text = logits_per_vlffn_image.t()
    else:
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()
        # Fix: compute the VL-FFN logits on the local-only path too.
        logits_per_vlffn_image = logit_vl_scale * image_vlffn_features @ text_vlffn_features.t()
        logits_per_vlffn_text = logit_vl_scale * text_vlffn_features @ image_vlffn_features.t()

    # Matching pairs sit on the diagonal of the similarity matrix.
    ground_truth = torch.arange(len(logits_per_image)).long().to(device=logits_per_image.device)
    itc_loss = (
        F.cross_entropy(logits_per_image.float(), ground_truth)
        + F.cross_entropy(logits_per_text.float(), ground_truth)
    ) / 2
    itc_vlffn_loss = (
        F.cross_entropy(logits_per_vlffn_image.float(), ground_truth)
        + F.cross_entropy(logits_per_vlffn_text.float(), ground_truth)
    ) / 2
    # Equal-weight average of the standard and VL-FFN contrastive losses.
    itc_total_loss = (itc_loss + itc_vlffn_loss) * 0.5

    ret = {
        "itc_loss": itc_total_loss,
        "itc_i2t_logits": logits_per_image,
        "itc_t2i_logits": logits_per_text,
        "itc_labels": ground_truth,
        "itc_logit_scale": logit_scale,
        "itc_logit_vl_scale": logit_vl_scale,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_itc_loss")(ret["itc_loss"])
    scale = getattr(pl_module, f"{phase}_itc_logit_scale")(ret["itc_logit_scale"])
    i2t_acc = getattr(pl_module, f"{phase}_itc_i2t_accuracy")(
        ret["itc_i2t_logits"], ret["itc_labels"]
    )
    t2i_acc = getattr(pl_module, f"{phase}_itc_t2i_accuracy")(
        ret["itc_t2i_logits"], ret["itc_labels"]
    )
    pl_module.log(f"itc/{phase}/loss", loss)
    pl_module.log(f"itc/{phase}/logit_scale", scale)
    pl_module.log(f"itc/{phase}/i2t_accuracy", i2t_acc)
    pl_module.log(f"itc/{phase}/t2i_accuracy", t2i_acc)

    vl_scale = getattr(pl_module, f"{phase}_itc_vl_logit_scale")(ret["itc_logit_vl_scale"])
    vl_i2t_acc = getattr(pl_module, f"{phase}_itc_vl_i2t_accuracy")(
        logits_per_vlffn_image, ret["itc_labels"]
    )
    vl_t2i_acc = getattr(pl_module, f"{phase}_itc_vl_t2i_accuracy")(
        logits_per_vlffn_text, ret["itc_labels"]
    )
    pl_module.log(f"itc/{phase}/vl_logit_scale", vl_scale)
    pl_module.log(f"itc/{phase}/vl_i2t_accuracy", vl_i2t_acc)
    pl_module.log(f"itc/{phase}/vl_t2i_accuracy", vl_t2i_acc)

    return ret
184,976 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_irtr(pl_module, batch, aggregate=True):
    """InfoNCE image-text contrastive loss for retrieval fine-tuning.

    Encodes images and texts with the fine-tuning encoders, optionally
    gathers features from all distributed ranks for extra negatives, then
    computes symmetric cross-entropy over the similarity matrix and logs
    loss/accuracy/logit scale.
    """
    # pl_module.logit_scale.data = torch.clamp(pl_module.logit_scale.data, 0, 4.6052)
    image_out = pl_module.infer_image_ft(batch, mask_image=False)
    text_out = pl_module.infer_text_ft(batch, mask_text=False)
    image_features = image_out["cls_feats"]
    text_features = text_out["cls_feats"]
    logit_scale = pl_module.logit_scale.exp().mean()

    if aggregate:
        world_size = dist.get_world_size()
        rank = dist.get_rank()

        def _gather_with_local_grad(local):
            # All-gather across ranks for extra negatives, then place the
            # local (grad-carrying) tensor first so gradients flow back
            # through this rank's features.
            buckets = [torch.zeros_like(local) for _ in range(world_size)]
            dist.all_gather(buckets, local)
            return torch.cat([local] + buckets[:rank] + buckets[rank + 1 :])

        all_image_features = _gather_with_local_grad(image_features)
        all_text_features = _gather_with_local_grad(text_features)

        logits_per_image = logit_scale * all_image_features @ all_text_features.t()
        logits_per_text = logits_per_image.t()
    else:
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()

    # Matching pairs sit on the diagonal of the similarity matrix.
    ground_truth = torch.arange(len(logits_per_image)).long().to(device=logits_per_image.get_device())
    irtr_loss = (
        F.cross_entropy(logits_per_image.float(), ground_truth)
        + F.cross_entropy(logits_per_text.float(), ground_truth)
    ) / 2

    ret = {
        "irtr_loss": irtr_loss,
        "irtr_i2t_logits": logits_per_image,
        "irtr_t2i_logits": logits_per_text,
        "irtr_labels": ground_truth,
        "irtr_logit_scale": logit_scale,
    }

    phase = "train" if pl_module.training else "val"
    tracked_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])
    tracked_scale = getattr(pl_module, f"{phase}_irtr_logit_scale")(ret["irtr_logit_scale"])
    i2t_acc = getattr(pl_module, f"{phase}_irtr_i2t_accuracy")(
        ret["irtr_i2t_logits"], ret["irtr_labels"]
    )
    t2i_acc = getattr(pl_module, f"{phase}_irtr_t2i_accuracy")(
        ret["irtr_t2i_logits"], ret["irtr_labels"]
    )
    pl_module.log(f"irtr/{phase}/loss", tracked_loss)
    pl_module.log(f"irtr/{phase}/logit_scale", tracked_scale)
    pl_module.log(f"irtr/{phase}/i2t_accuracy", i2t_acc)
    pl_module.log(f"irtr/{phase}/t2i_accuracy", t2i_acc)

    return ret
184,977 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_vqa(pl_module, batch):
    """Run a VQA forward pass, compute the soft-target BCE loss, log metrics.

    Builds a dense (batch, vqav2_label_size) soft-target matrix from the
    sparse per-sample answer labels/scores in ``batch``, scores the fused
    cls features with the VQA classifier head, and updates/logs the
    phase-specific loss and score metrics on ``pl_module``.

    Returns a dict with the loss, logits, targets and the raw labels/scores.
    """
    infer = pl_module.infer(batch, mask_text=False, mask_image=False)
    logits = pl_module.vqa_classifier(infer["cls_feats"])

    num_answers = pl_module.hparams.config["vqav2_label_size"]
    targets = torch.zeros(len(logits), num_answers).to(pl_module.device)

    labels = batch["vqa_labels"]
    scores = batch["vqa_scores"]
    # Scatter each sample's (answer id, score) pairs into its target row.
    for row, (sample_labels, sample_scores) in enumerate(zip(labels, scores)):
        for answer_id, answer_score in zip(sample_labels, sample_scores):
            targets[row, answer_id] = answer_score

    # Scaling by the number of answer classes follows
    # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
    loss_value = (
        F.binary_cross_entropy_with_logits(logits, targets) * targets.shape[1]
    )

    ret = {
        "vqa_loss": loss_value,
        "vqa_logits": logits,
        "vqa_targets": targets,
        "vqa_labels": labels,
        "vqa_scores": scores,
    }

    phase = "train" if pl_module.training else "val"
    tracked_loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
    tracked_score = getattr(pl_module, f"{phase}_vqa_score")(
        ret["vqa_logits"], ret["vqa_targets"]
    )
    pl_module.log(f"vqa/{phase}/loss", tracked_loss)
    pl_module.log(f"vqa/{phase}/score", tracked_score)

    return ret
184,978 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_nlvr2(pl_module, batch):
    """Compute the NLVR2 two-image classification loss and metrics.

    Each example has two images, encoded separately (distinguished by
    image_token_type_idx 1 and 2); their cls features are concatenated and
    classified by the NLVR2 head. In training, metrics are logged under the
    "train" phase; in validation, the batch is split by table name into
    "dev" and "test" subsets and each subset is logged separately.
    """
    # Encode the pair once per image slot.
    infer1 = pl_module.infer(
        batch, mask_text=False, mask_image=False, image_token_type_idx=1
    )
    infer2 = pl_module.infer(
        batch, mask_text=False, mask_image=False, image_token_type_idx=2
    )
    cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
    nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)
    nlvr2_labels = batch["answers"]
    nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long()
    nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels)
    ret = {
        "nlvr2_loss": nlvr2_loss,
        "nlvr2_logits": nlvr2_logits,
        "nlvr2_labels": nlvr2_labels,
    }
    phase = "train" if pl_module.training else "val"
    if phase == "train":
        loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"])
        acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")(
            ret["nlvr2_logits"], ret["nlvr2_labels"]
        )
        pl_module.log(f"nlvr2/{phase}/loss", loss)
        pl_module.log(f"nlvr2/{phase}/accuracy", acc)
    else:
        # Validation batches can mix NLVR2 "dev" and "test" rows; split by
        # the substring in each row's table name so every split gets its own
        # loss/accuracy metrics.
        dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
        test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]
        if dev_batches:
            dev_loss = getattr(pl_module, f"dev_nlvr2_loss")(
                F.cross_entropy(
                    ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
                )
            )
            dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")(
                ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
            )
            pl_module.log(f"nlvr2/dev/loss", dev_loss)
            pl_module.log(f"nlvr2/dev/accuracy", dev_acc)
        if test_batches:
            test_loss = getattr(pl_module, f"test_nlvr2_loss")(
                F.cross_entropy(
                    ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
                )
            )
            test_acc = getattr(pl_module, f"test_nlvr2_accuracy")(
                ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
            )
            pl_module.log(f"nlvr2/test/loss", test_loss)
            pl_module.log(f"nlvr2/test/accuracy", test_acc)
    return ret
184,979 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_irtr_recall(pl_module, split="test"):
    """Evaluate image-text retrieval recall (R@1/5/10) on the val/test split.

    Encodes every text and every image of the split with the unimodal
    encoders, scores all image-text pairs by dot product of the cls
    features, and derives text-retrieval (tr_*) and image-retrieval (ir_*)
    recall from the top-k matches against the ground-truth image indices.

    Returns:
        Tuple (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10).
    """
    # NOTE(review): world_size/rank are computed but never used here;
    # kept as-is since dist.get_* would also raise if dist is uninitialized.
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    if split == "val":
        rank_zero_info("Use val set...")
        text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
    else:
        rank_zero_info("Use test set...")
        text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_test_dset()
    text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size=32,
        num_workers=2, #pl_module.hparams.config["num_workers"],
        pin_memory=True,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    # Image-only view of the same split.
    if split == "val":
        image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
            image_only=True
        )
    else:
        image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_test_dset(
            image_only=True
        )
    image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    image_loader = torch.utils.data.DataLoader(
        image_dset,
        batch_size=32,
        num_workers=2, #pl_module.hparams.config["num_workers"],
        pin_memory=True,
        collate_fn=functools.partial(
            image_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    # Prefetch all text batches onto the module's device.
    text_preload = list()
    for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
        text_preload.append(
            {
                "text_ids": _b["text_ids"].to(pl_module.device),
                "text_masks": _b["text_masks"].to(pl_module.device),
                "text_labels": _b["text_labels"].to(pl_module.device),
                "img_index": _b["img_index"],
            }
        )
    # tiids[j] = ground-truth image index of text j.
    tiids = list()
    for pre in text_preload:
        tiids += pre["img_index"]
    tiids = torch.tensor(tiids)
    rank_zero_info("len(tiids): {}".format(len(tiids)))
    image_preload = list()
    for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
        image_preload.append(
            {
                "image": [_b["image"][0].to(pl_module.device)],
                "img_index": _b["img_index"],
            }
        )
    # iids[i] = image index of image i.
    iids = list()
    for pre in image_preload:
        iids += pre["img_index"]
    iids = torch.tensor(iids)
    rank_zero_info("len(iids): {}".format(len(iids)))
    # Encode texts and images separately under autocast.
    txt_cls_feats = list()
    for txt_batch in text_preload:
        with torch.cuda.amp.autocast():
            cls_feats = pl_module.infer_text_ft(
                {
                    "text_ids": txt_batch["text_ids"],
                    "text_masks": txt_batch["text_masks"],
                    "text_labels": txt_batch["text_labels"],
                }
            )["cls_feats"]
        txt_cls_feats.append(cls_feats)
    img_cls_feats = list()
    for img_batch in image_preload:
        with torch.cuda.amp.autocast():
            cls_feats = pl_module.infer_image_ft(
                {
                    "image": img_batch["image"],
                }
            )["cls_feats"]
        img_cls_feats.append(cls_feats)
    txt_cls_feats = torch.cat(txt_cls_feats)
    img_cls_feats = torch.cat(img_cls_feats)
    rank_zero_info("txt_cls_feats.size(): {}\t{}".format(txt_cls_feats.size(), split))
    rank_zero_info("img_cls_feats.size(): {}\t{}".format(img_cls_feats.size(), split))
    # scores[i, j] = similarity of image i and text j.
    scores = img_cls_feats @ txt_cls_feats.t()
    rank_zero_info("scores.size(): {}".format(scores.size(), split))
    # Text retrieval: for each image (rows), rank texts.
    topk10 = scores.topk(10, dim=1)
    topk5 = scores.topk(5, dim=1)
    topk1 = scores.topk(1, dim=1)
    topk10_iids = tiids[topk10.indices.to(tiids.device)]
    topk5_iids = tiids[topk5.indices.to(tiids.device)]
    topk1_iids = tiids[topk1.indices.to(tiids.device)]
    tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
    tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
    tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
    # Image retrieval: for each text (columns), rank images.
    topk10 = scores.topk(10, dim=0)
    topk5 = scores.topk(5, dim=0)
    topk1 = scores.topk(1, dim=0)
    topk10_iids = iids[topk10.indices.to(iids.device)]
    topk5_iids = iids[topk5.indices.to(iids.device)]
    topk1_iids = iids[topk1.indices.to(iids.device)]
    ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
    ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
    ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
    return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
184,980 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_irtr_recall_with_rerank(pl_module, split="test"):
    """Evaluate retrieval recall with ITM re-ranking of top-k candidates.

    First scores all image-text pairs with the dual-encoder dot product,
    then for each query re-scores its ``k_test`` best candidates with the
    cross-modal ITM head. The rerank work is sharded across ranks (each
    rank processes a contiguous slice of rows) and merged with all_reduce;
    final rankings use the ITM score plus the original similarity
    (non-candidates keep the -100 fill plus similarity).

    Returns:
        Tuple (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10).
    """
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    if split == "val":
        rank_zero_info("Use val set...")
        text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
    else:
        rank_zero_info("Use test set...")
        text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_test_dset()
    text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size=32,
        num_workers=2, #pl_module.hparams.config["num_workers"],
        pin_memory=True,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    if split == "val":
        image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
            image_only=True
        )
    else:
        image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_test_dset(
            image_only=True
        )
    image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    image_loader = torch.utils.data.DataLoader(
        image_dset,
        batch_size=32,
        num_workers=2, #pl_module.hparams.config["num_workers"],
        pin_memory=True,
        collate_fn=functools.partial(
            image_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    # Prefetch texts to device; also keep flat copies of ids/masks/labels
    # so rerank batches can be assembled by index later.
    text_preload = list()
    for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
        text_preload.append(
            {
                "text_ids": _b["text_ids"].to(pl_module.device),
                "text_masks": _b["text_masks"].to(pl_module.device),
                "text_labels": _b["text_labels"].to(pl_module.device),
                "img_index": _b["img_index"],
            }
        )
    tiids = list()
    text_ids_list = list()
    text_masks_list = list()
    text_labels_list = list()
    for pre in text_preload:
        tiids += pre["img_index"]
        text_ids_list.append(pre["text_ids"])
        text_masks_list.append(pre["text_masks"])
        text_labels_list.append(pre["text_labels"])
    tiids = torch.tensor(tiids)
    all_text_ids = torch.cat(text_ids_list)
    all_text_masks = torch.cat(text_masks_list)
    all_text_labels = torch.cat(text_labels_list)
    # Prefetch images; keep a flat image tensor for rerank batches.
    image_preload = list()
    for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
        image_preload.append(
            {
                "image": [_b["image"][0].to(pl_module.device)],
                "img_index": _b["img_index"],
            }
        )
    iids = list()
    image_list = list()
    for pre in image_preload:
        iids += pre["img_index"]
        image_list.append(pre["image"][0])
    iids = torch.tensor(iids)
    all_image = torch.cat(image_list)
    # Stage 1: dual-encoder similarity of every image (rows) vs text (cols).
    txt_cls_feats = list()
    for txt_batch in text_preload:
        with torch.cuda.amp.autocast():
            cls_feats = pl_module.infer_text_ft(
                {
                    "text_ids": txt_batch["text_ids"],
                    "text_masks": txt_batch["text_masks"],
                    "text_labels": txt_batch["text_labels"],
                }
            )["cls_feats"]
        txt_cls_feats.append(cls_feats)
    img_cls_feats = list()
    for img_batch in image_preload:
        with torch.cuda.amp.autocast():
            cls_feats = pl_module.infer_image_ft(
                {
                    "image": img_batch["image"],
                }
            )["cls_feats"]
        img_cls_feats.append(cls_feats)
    txt_cls_feats = torch.cat(txt_cls_feats)
    img_cls_feats = torch.cat(img_cls_feats)
    scores = img_cls_feats @ txt_cls_feats.t()
    rank_zero_info("scores.size(): {}".format(scores.size(), split))
    # Stage 2a: text-retrieval rerank. -100 fill keeps non-candidates last.
    scores_i2t = torch.full((len(iids), len(tiids)), -100.0).to(pl_module.device)
    k_test = pl_module.hparams.config["k_test"]
    num_tasks = world_size
    step = scores.size(0) // num_tasks + 1
    start = rank * step
    end = min(scores.size(0), start+step)
    for i, sims in enumerate(scores[start:end]):
        if i%100 == 0:
            rank_zero_info("TR Rerank: {}".format(i))
        topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
        # Pair the query image with its k_test candidate texts.
        cur_images = all_image[start+i].repeat(k_test, 1, 1, 1)
        cur_text_ids = all_text_ids[topk_idx]
        cur_text_masks = all_text_masks[topk_idx]
        cur_text_labels = all_text_labels[topk_idx]
        cur_rerank_batch = {"image":[cur_images], "text_ids":cur_text_ids, "text_labels":cur_text_labels, "text_masks":cur_text_masks}
        infer_rerank = pl_module.infer(cur_rerank_batch, mask_text=False, mask_image=False)
        itm_logits = pl_module.itm_score(infer_rerank["cls_feats"])
        itm_scores = itm_logits[:,1]
        scores_i2t[start+i,topk_idx] = itm_scores
    # Stage 2b: image-retrieval rerank on the transposed score matrix.
    scores = scores.t()
    scores_t2i = torch.full((len(tiids), len(iids)), -100.0).to(pl_module.device)
    step = scores.size(0) // num_tasks + 1
    start = rank * step
    end = min(scores.size(0), start+step)
    for i,sims in enumerate(scores[start:end]):
        topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
        cur_images = all_image[topk_idx]
        cur_text_ids = all_text_ids[start+i].repeat(k_test, 1)
        cur_text_masks = all_text_masks[start+i].repeat(k_test, 1)
        cur_text_labels = all_text_labels[start+i].repeat(k_test, 1)
        cur_rerank_batch = {"image":[cur_images], "text_ids":cur_text_ids, "text_labels":cur_text_labels, "text_masks":cur_text_masks}
        infer_rerank = pl_module.infer(cur_rerank_batch, mask_text=False, mask_image=False)
        itm_logits = pl_module.itm_score(infer_rerank["cls_feats"])
        itm_scores = itm_logits[:,1]
        scores_t2i[start+i, topk_idx] = itm_scores
    # Merge the per-rank shards; each rank filled a disjoint row slice.
    dist.barrier()
    torch.distributed.all_reduce(scores_i2t, op=torch.distributed.ReduceOp.SUM)
    torch.distributed.all_reduce(scores_t2i, op=torch.distributed.ReduceOp.SUM)
    # Final ranking score = ITM score (or -100) + dual-encoder similarity.
    # (scores is transposed at this point, so scores.t() is image x text.)
    scores_t2i = scores_t2i + scores
    scores_i2t = scores_i2t + scores.t()
    # NOTE(review): unlike compute_irtr_recall, the topk indices are used
    # without .to(tiids.device) — tiids/iids appear to be CPU tensors while
    # scores_* live on pl_module.device; confirm this indexing works on GPU.
    topk10 = scores_i2t.topk(10, dim=1)
    topk5 = scores_i2t.topk(5, dim=1)
    topk1 = scores_i2t.topk(1, dim=1)
    topk10_iids = tiids[topk10.indices]
    topk5_iids = tiids[topk5.indices]
    topk1_iids = tiids[topk1.indices]
    tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
    tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
    tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
    topk10 = scores_t2i.topk(10, dim=1)
    topk5 = scores_t2i.topk(5, dim=1)
    topk1 = scores_t2i.topk(1, dim=1)
    topk10_iids = iids[topk10.indices]
    topk5_iids = iids[topk5.indices]
    topk1_iids = iids[topk1.indices]
    ir_r10 = (tiids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
    ir_r5 = (tiids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
    ir_r1 = (tiids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
    return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
184,981 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def init_weights(module):
    """BERT-style parameter initialization.

    Linear/Embedding weights ~ N(0, 0.02); LayerNorm gets unit weight and
    zero bias; Linear biases (when present) are zeroed.
    """
    is_linear = isinstance(module, nn.Linear)
    if is_linear or isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=0.02)
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)
    if is_linear and module.bias is not None:
        module.bias.data.zero_()
184,982 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def vqa_test_step(pl_module, batch, output):
    """Convert VQA logits for one test batch into answer strings.

    Looks up the id->answer vocabulary from the datamodule (preferring
    "vqa_trainval" when available, otherwise "vqa"), takes the argmax
    prediction per sample and returns it with the question ids, ready for
    submission writing in ``vqa_test_wrapup``.

    Fixes: removed the unused local ``questions`` (``batch["text"]`` was
    read but never used) and the repeated long attribute chain.
    """
    dm_dicts = pl_module.trainer.datamodule.dm_dicts
    id2answer = (
        dm_dicts["vqa_trainval"].id2answer
        if "vqa_trainval" in dm_dicts
        else dm_dicts["vqa"].id2answer
    )
    vqa_logits = output["vqa_logits"]
    vqa_preds = vqa_logits.argmax(dim=-1)
    vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
    qids = batch["qid"]
    return {"qids": qids, "preds": vqa_preds}
184,983 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def vqa_test_wrapup(outs, model_name, output_dir):
    """Gather per-rank VQA predictions into a single submission JSON.

    Every rank dumps its {question_id, answer} records to a temporary
    per-rank file on the shared filesystem; rank 0 then merges all of
    them into ``{output_dir}/vqa_submit_{model_name}.json``. Barriers
    keep the ranks in sync around the handoff, and each rank removes
    its own temporary file at the end.
    """
    rank = torch.distributed.get_rank()

    records = [
        {"question_id": qid, "answer": pred}
        for out in outs
        for qid, pred in zip(out["qids"], out["preds"])
    ]
    with open(f"vqa_submit_{rank}.json", "w") as fp:
        json.dump(records, fp, indent=4)

    torch.distributed.barrier()

    if rank == 0:
        merged = list()
        for path in glob.glob("vqa_submit_*.json"):
            with open(path, "r") as fp:
                merged += json.load(fp)
        os.makedirs(output_dir, exist_ok=True)
        with open(f"{output_dir}/vqa_submit_{model_name}.json", "w") as fp:
            json.dump(merged, fp, indent=4)

    torch.distributed.barrier()
    os.remove(f"vqa_submit_{rank}.json")
184,985 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def config():
    """Base sacred configuration with VLMo pretraining defaults.

    Every local variable below becomes a sacred config entry; named
    configs (task_* / step* functions) override subsets of these values.
    """
    exp_name = "vlmo"
    seed = 1
    datasets = ["coco", "vg", "sbu", "gcc"]
    loss_names = _loss_names({"itm": 1, "itc": 1, "mlm": 1})
    batch_size = 1024  # this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller.

    # Image setting
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    image_size = 224
    draw_false_image = 0
    image_only = False
    text_only = False

    # Text Setting
    vqav2_label_size = 3129
    max_text_len = 40
    max_text_len_of_initckpt = 196
    tokenizer = "bert-base-uncased"
    vocab_size = 30522
    whole_word_masking = False
    mlm_prob = 0.15
    draw_false_text = 0

    # Transformer Setting
    model_arch = "vlmo_base_patch16"
    drop_path_rate = 0.1

    # Optimizer Setting
    optim_type = "adamw"
    learning_rate = 1e-4
    weight_decay = 0.01
    decay_power = 1
    max_epoch = 100
    max_steps = 200000
    warmup_steps = 0.1
    end_lr = 0
    lr_mult = 1  # multiply lr for downstream heads

    # Downstream Setting
    get_recall_metric = False
    get_recall_rerank_metric = False
    k_test = 32

    # PL Trainer Setting
    resume_from = None
    fast_dev_run = False
    val_check_interval = 1.0
    test_only = False
    use_sharded_training = False
    resume_during_training = False

    # below params varies with the environment
    data_root = ""
    log_dir = "result"
    per_gpu_batchsize = 4  # you should define this manually with per_gpu_batch_size=#
    num_gpus = 1
    num_nodes = 1
    load_path = ""
    num_workers = 8
    precision = 16
184,986 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_textmlm_base():
    """Named config: text-only MLM pretraining of the base model on WikiBk."""
    exp_name = "textmlm_base"
    datasets = ["wikibk"]
    loss_names = _loss_names({"textmlm": 1})
    batch_size = 1024
    max_text_len = 196
    learning_rate = 2e-4
    whole_word_masking = True
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    model_arch = "vlmo_base_patch16"
184,987 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_textmlm_base_plus():
    """Named config: text-only MLM pretraining of the base-plus model on WikiBk."""
    exp_name = "textmlm_base_plus"
    datasets = ["wikibk"]
    loss_names = _loss_names({"textmlm": 1})
    batch_size = 1024
    max_text_len = 196
    learning_rate = 2e-4
    whole_word_masking = True
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    model_arch = "vlmo_base_plus_patch16"
184,988 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_mlm_itm_itc_base():
    """Named config: MLM+ITM+ITC vision-language pretraining, base model."""
    exp_name = "mlm_itm_itc_base"
    datasets = ["coco", "vg", "sbu", "gcc"]
    loss_names = _loss_names({"itm": 1, "mlm": 1, "itc": 1})
    batch_size = 1024
    whole_word_masking = True
    learning_rate = 2e-4
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    model_arch = "vlmo_base_patch16"
184,989 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_mlm_itm_itc_base_plus():
    """Named config: MLM+ITM+ITC vision-language pretraining, base-plus model."""
    exp_name = "mlm_itm_itc_base_plus"
    datasets = ["coco", "vg", "sbu", "gcc"]
    loss_names = _loss_names({"itm": 1, "mlm": 1, "itc": 1})
    batch_size = 1024
    whole_word_masking = True
    learning_rate = 1e-4
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    model_arch = "vlmo_base_plus_patch16"
184,990 | from sacred import Experiment
def _loss_names(d):
def task_mlm_itm_itc_large():
    """Named config: MLM+ITM+ITC vision-language pretraining, large model."""
    exp_name = "mlm_itm_itc_large"
    datasets = ["coco", "vg", "sbu", "gcc"]
    loss_names = _loss_names({"itm": 1, "mlm": 1, "itc": 1})
    batch_size = 1024
    whole_word_masking = True
    learning_rate = 5e-5
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    model_arch = "vit_large_patch16_224"
184,991 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_nlvr2_base():
    """Named config: NLVR2 fine-tuning, base model, default image size."""
    exp_name = "finetune_nlvr2_base"
    datasets = ["nlvr2"]
    train_transform_keys = ["square_transform_randaug"]
    loss_names = _loss_names({"nlvr2": 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    learning_rate = 5e-5
    val_transform_keys = ["square_transform"]
    use_sharded_training=False
    model_arch = "vlmo_base_patch16"
184,992 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_nlvr2_base_plus():
    """Named config: NLVR2 fine-tuning, base-plus model, default image size."""
    exp_name = "finetune_nlvr2_base_plus"
    datasets = ["nlvr2"]
    train_transform_keys = ["square_transform_randaug"]
    loss_names = _loss_names({"nlvr2": 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    learning_rate = 3e-5
    drop_path_rate = 0.2
    val_transform_keys = ["square_transform"]
    use_sharded_training=False
    model_arch = "vlmo_base_plus_patch16"
184,993 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_nlvr2_base_image384():
    """Named config: NLVR2 fine-tuning, base model at 384x384 input."""
    exp_name = "finetune_nlvr2_base_image384"
    datasets = ["nlvr2"]
    train_transform_keys = ["square_transform_randaug"]
    loss_names = _loss_names({"nlvr2": 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    learning_rate = 5e-5
    val_transform_keys = ["square_transform"]
    image_size = 384
    use_sharded_training=False
    model_arch = "vlmo_base_patch16"
184,994 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_nlvr2_base_plus_image384():
    """Named config: NLVR2 fine-tuning, base-plus model at 384x384 input."""
    exp_name = "finetune_nlvr2_base_plus_image384"
    datasets = ["nlvr2"]
    train_transform_keys = ["square_transform_randaug"]
    loss_names = _loss_names({"nlvr2": 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    learning_rate = 3e-5
    drop_path_rate = 0.2
    val_transform_keys = ["square_transform"]
    image_size = 384
    use_sharded_training=False
    model_arch = "vlmo_base_plus_patch16"
184,995 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_nlvr2_large():
    """Named config: NLVR2 fine-tuning, large model, default image size."""
    exp_name = "finetune_nlvr2_large"
    datasets = ["nlvr2"]
    train_transform_keys = ["square_transform_randaug"]
    loss_names = _loss_names({"nlvr2": 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    learning_rate = 3e-5
    drop_path_rate = 0.15
    val_transform_keys = ["square_transform"]
    use_sharded_training=False
    model_arch = "vlmo_large_patch16"
184,996 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_nlvr2_large_image384():
    """Named config: NLVR2 fine-tuning, large model at 384x384 input."""
    exp_name = "finetune_nlvr2_large_image384"
    datasets = ["nlvr2"]
    train_transform_keys = ["square_transform_randaug"]
    loss_names = _loss_names({"nlvr2": 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    learning_rate = 3e-5
    drop_path_rate = 0.15
    val_transform_keys = ["square_transform"]
    image_size = 384
    use_sharded_training=False
    model_arch = "vlmo_large_patch16"
184,997 | from sacred import Experiment
def _loss_names(d):
def task_finetune_vqa_base_image480():
    """Named config: VQAv2 fine-tuning, base model at 480x480 input."""
    exp_name = "finetune_vqa_base_image480"
    datasets = ["vqa"]
    train_transform_keys = ["square_transform_randaug"]
    loss_names = _loss_names({"vqa": 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    learning_rate = 3e-5
    drop_path_rate = 0.15
    val_transform_keys = ["square_transform"]
    lr_mult = 20
    image_size = 480
    use_sharded_training=False
    model_arch = "vlmo_base_patch16"
184,998 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_vqa_base_plus_image480():
    """Named config: VQAv2 fine-tuning, base-plus model at 480x480 input."""
    exp_name = "finetune_vqa_base_plus_image480"
    datasets = ["vqa"]
    train_transform_keys = ["square_transform_randaug"]
    loss_names = _loss_names({"vqa": 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    learning_rate = 3e-5
    drop_path_rate = 0.15
    val_transform_keys = ["square_transform"]
    lr_mult = 20
    image_size = 480
    use_sharded_training=False
    model_arch = "vlmo_base_plus_patch16"
184,999 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_vqa_large_image480():
    """Named config: VQAv2 fine-tuning, large model at 480x480 input."""
    exp_name = "finetune_vqa_large_image480"
    datasets = ["vqa"]
    train_transform_keys = ["square_transform_randaug"]
    loss_names = _loss_names({"vqa": 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    learning_rate = 1.5e-5
    drop_path_rate = 0.15
    val_transform_keys = ["square_transform"]
    lr_mult = 20
    image_size = 480
    use_sharded_training=False
    model_arch = "vlmo_large_patch16"
185,000 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_irtr_f30k_base():
    """Named config: Flickr30k retrieval fine-tuning, base model."""
    exp_name = "finetune_irtr_f30k_base"
    datasets = ["f30k"]
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    loss_names = _loss_names({"irtr": 1.0})
    batch_size = 3072
    max_epoch = 50
    max_steps = 1500
    warmup_steps = 150
    get_recall_metric = True
    learning_rate = 3e-5
    drop_path_rate = 0.15
    use_sharded_training=False
    model_arch = "vlmo_base_patch16"
185,001 | from sacred import Experiment
def _loss_names(d):
def task_finetune_irtr_f30k_base_image384():
    """Named config: Flickr30k retrieval fine-tuning, base model at 384x384."""
    exp_name = "finetune_irtr_f30k_base_image384"
    datasets = ["f30k"]
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    loss_names = _loss_names({"irtr": 1.0})
    batch_size = 3072
    max_epoch = 50
    max_steps = 1500
    warmup_steps = 150
    get_recall_metric = True
    learning_rate = 3e-5
    drop_path_rate = 0.15
    image_size = 384
    use_sharded_training=False
    model_arch = "vlmo_base_patch16"
185,002 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_irtr_f30k_base_plus_image384():
    """Named config: Flickr30k retrieval fine-tuning, base-plus model at 384x384."""
    exp_name = "finetune_irtr_f30k_base_plus_image384"
    datasets = ["f30k"]
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    loss_names = _loss_names({"irtr": 1.0})
    batch_size = 3072
    max_epoch = 50
    max_steps = 1500
    warmup_steps = 150
    get_recall_metric = True
    learning_rate = 3e-5
    drop_path_rate = 0.2
    image_size = 384
    use_sharded_training=False
    model_arch = "vlmo_base_plus_patch16"
185,003 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_irtr_f30k_large_image384():
    """Named config: Flickr30k retrieval fine-tuning, large model at 384x384."""
    exp_name = "finetune_irtr_f30k_large_image384"
    datasets = ["f30k"]
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    loss_names = _loss_names({"irtr": 1.0})
    batch_size = 3072
    max_epoch = 50
    max_steps = 1500
    warmup_steps = 150
    get_recall_metric = True
    learning_rate = 2e-5
    drop_path_rate = 0.2
    image_size = 384
    use_sharded_training=False
    model_arch = "vlmo_large_patch16"
185,004 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_irtr_coco_base_image384():
    """Named config: COCO retrieval fine-tuning, base model at 384x384."""
    exp_name = "finetune_irtr_coco_base_image384"
    datasets = ["coco"]
    train_transform_keys = ["square_transform_randaug"]
    val_transform_keys = ["square_transform"]
    loss_names = _loss_names({"irtr": 1.0})
    batch_size = 3072
    max_epoch = 50
    max_steps = 3000
    warmup_steps = 300
    get_recall_metric = True
    learning_rate = 3e-5
    drop_path_rate = 0.2
    image_size = 384
    use_sharded_training=False
    model_arch = "vlmo_base_patch16"
185,005 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_irtr_coco_base_plus_image384():
    # Config preset: fine-tune VLMo-Base+ for image-text retrieval (IRTR)
    # on COCO at 384x384. Sacred-style config: locals become config keys.
    exp_name = "finetune_irtr_coco_base_plus_image384"
    datasets = ["coco"]
    train_transform_keys = ["square_transform_randaug"]  # augmented train transform
    val_transform_keys = ["square_transform"]            # deterministic eval transform
    loss_names = _loss_names({"irtr": 1.0})  # retrieval loss only
    batch_size = 3072
    max_epoch = 50
    max_steps = 3000
    warmup_steps = 300
    get_recall_metric = True  # report recall@K retrieval metrics
    learning_rate = 3e-5
    drop_path_rate = 0.2
    image_size = 384
    use_sharded_training=False
    model_arch = "vlmo_base_plus_patch16"
185,006 | from sacred import Experiment
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
def task_finetune_irtr_coco_large_image384():
    # Config preset: fine-tune VLMo-Large for image-text retrieval (IRTR)
    # on COCO at 384x384. Sacred-style config: locals become config keys.
    # Note the lower LR (2e-5) vs the base variants (3e-5).
    exp_name = "finetune_irtr_coco_large_image384"
    datasets = ["coco"]
    train_transform_keys = ["square_transform_randaug"]  # augmented train transform
    val_transform_keys = ["square_transform"]            # deterministic eval transform
    loss_names = _loss_names({"irtr": 1.0})  # retrieval loss only
    batch_size = 3072
    max_epoch = 50
    max_steps = 3000
    warmup_steps = 300
    get_recall_metric = True  # report recall@K retrieval metrics
    learning_rate = 2e-5
    drop_path_rate = 0.2
    image_size = 384
    use_sharded_training=False
    model_arch = "vlmo_large_patch16"
185,007 | from sacred import Experiment
def step1_5k():
    # Schedule preset: 1.5k optimizer steps with 150 linear-warmup steps (10%).
    # NOTE(review): max_epoch looks like a loose ceiling; max_steps presumably
    # ends training first — confirm against the trainer.
    max_epoch = 100
    warmup_steps = 150
    max_steps = 1500
185,008 | from sacred import Experiment
def step3k():
    # Schedule preset: 3k optimizer steps with 300 warmup steps (10%).
    max_epoch = 100
    warmup_steps = 300
    max_steps = 3000
185,009 | from sacred import Experiment
def step200k():
    # Schedule preset: 200k optimizer steps with 2.5k warmup steps (1.25%).
    max_epoch = 200
    warmup_steps = 2500
    max_steps = 200000
185,010 | from sacred import Experiment
def step500k():
    # Schedule preset: 500k optimizer steps with 2.5k warmup steps (0.5%).
    max_epoch = 500
    warmup_steps = 2500
    max_steps = 500000
185,018 | import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v):
def ShearY(img, v):
def TranslateXabs(img, v):
def TranslateYabs(img, v):
def Rotate(img, v):
def AutoContrast(img, _):
def Equalize(img, _):
def Solarize(img, v):
def SolarizeAdd(img, addition=0, threshold=128):
def Posterize(img, v):
def Contrast(img, v):
def Color(img, v):
def Brightness(img, v):
def Sharpness(img, v):
def augment_list():  # augmentation operations and their (min, max) magnitude ranges
    """Return the list of (op_fn, min_level, max_level) augmentation triples.

    Follows the EfficientNet autoaugment op set; entries commented out below
    were considered but excluded.
    """
    # https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57
    # l = [
    #     (Identity, 0., 1.0),
    #     (ShearX, 0., 0.3),  # 0
    #     (ShearY, 0., 0.3),  # 1
    #     (TranslateX, 0., 0.33),  # 2
    #     (TranslateY, 0., 0.33),  # 3
    #     (Rotate, 0, 30),  # 4
    #     (AutoContrast, 0, 1),  # 5
    #     (Invert, 0, 1),  # 6
    #     (Equalize, 0, 1),  # 7
    #     (Solarize, 0, 110),  # 8
    #     (Posterize, 4, 8),  # 9
    #     # (Contrast, 0.1, 1.9),  # 10
    #     (Color, 0.1, 1.9),  # 11
    #     (Brightness, 0.1, 1.9),  # 12
    #     (Sharpness, 0.1, 1.9),  # 13
    #     # (Cutout, 0, 0.2),  # 14
    #     # (SamplePairing(imgs), 0, 0.4),  # 15
    # ]
    # https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
    l = [
        (AutoContrast, 0, 1),
        (Equalize, 0, 1),
        # (Invert, 0, 1),
        (Rotate, 0, 30),
        (Posterize, 0, 4),
        (Solarize, 0, 256),
        (SolarizeAdd, 0, 110),
        (Color, 0.1, 1.9),
        (Contrast, 0.1, 1.9),
        (Brightness, 0.1, 1.9),
        (Sharpness, 0.1, 1.9),
        (ShearX, 0.0, 0.3),
        (ShearY, 0.0, 0.3),
        # (CutoutAbs, 0, 40),
        (TranslateXabs, 0.0, 100),
        (TranslateYabs, 0.0, 100),
    ]
    return l
185,044 | from .utils import (
inception_normalize,
)
from torchvision import transforms
from .randaugment import RandomAugment
from PIL import Image
inception_normalize = transforms.Compose(
[transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
def square_transform(size=224):
    """Deterministic eval pipeline: bicubic square resize, tensor, inception norm."""
    steps = [
        transforms.Resize((size, size), interpolation=Image.BICUBIC),
        transforms.ToTensor(),
        inception_normalize,
    ]
    return transforms.Compose(steps)
185,045 | from .utils import (
inception_normalize,
)
from torchvision import transforms
from .randaugment import RandomAugment
from PIL import Image
inception_normalize = transforms.Compose(
[transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
class RandomAugment(object):
    """Apply N randomly sampled augmentation ops, each with probability 0.5.

    Each selected op is applied at magnitude level M. Op names are resolved
    through the module-level ``arg_dict`` / ``func_dict`` tables.
    """

    def __init__(self, N=2, M=10, isPIL=False, augs=None):
        # Fix: the original used a mutable default `augs=[]`, which is shared
        # across all instances (classic Python pitfall). `None` is falsy just
        # like `[]`, so the fallback behavior is unchanged.
        self.N = N          # number of ops sampled per image
        self.M = M          # magnitude level forwarded to every op
        self.isPIL = isPIL  # convert PIL input to ndarray before augmenting
        if augs:
            self.augs = augs
        else:
            self.augs = list(arg_dict.keys())

    def get_random_ops(self):
        """Sample N op names (with replacement) as (name, prob, level) triples."""
        sampled_ops = np.random.choice(self.augs, self.N)
        return [(op, 0.5, self.M) for op in sampled_ops]

    def __call__(self, img):
        if self.isPIL:
            img = np.array(img)
        ops = self.get_random_ops()
        for name, prob, level in ops:
            # each op fires independently with probability `prob`
            if np.random.random() > prob:
                continue
            args = arg_dict[name](level)
            img = func_dict[name](img, *args)
        return img
def square_transform_randaug(size=224):
    # Training pipeline: random resized crop keeping >=50% of the area,
    # horizontal flip, 2 RandAugment ops at magnitude 7 drawn from a
    # geometry/photometric subset, then tensor conversion + inception norm.
    return transforms.Compose(
        [
            transforms.RandomResizedCrop(size, scale=(0.5, 1.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',
                                              'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
            transforms.ToTensor(),
            inception_normalize,
        ]
    )
185,047 | import json
import pandas as pd
import pyarrow as pa
import random
import os
from tqdm import tqdm
from glob import glob
from collections import defaultdict
def path2rest(path, iid2captions, iid2split):
def make_arrow(root, dataset_root):
    """Convert Flickr30k images + Karpathy-split captions into Arrow files.

    Reads {root}/karpathy/dataset_flickr30k.json and images under
    {root}/flickr30k-images, and writes one
    {dataset_root}/f30k_caption_karpathy_{split}.arrow per split.
    Row layout (via path2rest): [image, caption, image_id, split].
    """
    with open(f"{root}/karpathy/dataset_flickr30k.json", "r") as fp:
        captions = json.load(fp)
    captions = captions["images"]
    # map each image filename to its caption sentences and its split
    iid2captions = defaultdict(list)
    iid2split = dict()
    for cap in tqdm(captions):
        filename = cap["filename"]
        iid2split[filename] = cap["split"]
        for c in cap["sentences"]:
            iid2captions[filename].append(c["raw"])
    paths = list(glob(f"{root}/flickr30k-images/*.jpg"))
    random.shuffle(paths)
    # keep only images that actually have caption annotations
    caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
    if len(paths) == len(caption_paths):
        print("all images have caption annotations")
    else:
        print("not all images have caption annotations")
    print(
        len(paths), len(caption_paths), len(iid2captions),
    )
    bs = [path2rest(path, iid2captions, iid2split) for path in tqdm(caption_paths)]
    # write one arrow file per split; b[-1] is the split column
    for split in ["train", "val", "test"]:
        batches = [b for b in bs if b[-1] == split]
        dataframe = pd.DataFrame(
            batches, columns=["image", "caption", "image_id", "split"],
        )
        table = pa.Table.from_pandas(dataframe)
        os.makedirs(dataset_root, exist_ok=True)
        with pa.OSFile(
            f"{dataset_root}/f30k_caption_karpathy_{split}.arrow", "wb"
        ) as sink:
            with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                writer.write_table(table)
185,049 | import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from glob import glob
def path2rest(line):
    """Wrap one raw text line in the (image, caption, source, split) row format.

    Text-only corpus rows carry the literal string "None" as the image column.
    """
    return ["None", [line], "wikibk", "train"]
def make_arrow(root, dataset_root):
    """Convert 50 wikibk text shards into Arrow files, one per shard.

    Reads {root}/wikibk.{index}.txt line by line and writes
    {dataset_root}/wikibk_train_{index}.arrow with columns
    [image, caption, source, split] (image is the placeholder "None").
    """
    for index in range(0, 50):
        file_path = f"{root}/wikibk.{index}.txt"
        all_sents = []
        with open(file_path, "r", encoding="utf-8") as fp:
            for line in fp:
                all_sents.append(line.strip())
        print(file_path)
        print("Number of sentences: {}".format(len(all_sents)))
        bs = [path2rest(line) for line in tqdm(all_sents)]
        dataframe = pd.DataFrame(bs, columns=["image", "caption", "source", "split"],)
        table = pa.Table.from_pandas(dataframe)
        os.makedirs(dataset_root, exist_ok=True)
        with pa.OSFile(f"{dataset_root}/wikibk_train_{index}.arrow", "wb") as sink:
            with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                writer.write_table(table)
        # free shard-sized intermediates before loading the next shard
        del dataframe
        del table
        del bs
        gc.collect()
185,052 | import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from glob import glob
def path2rest(path, iid2captions):
    """Read the image at *path* and bundle it with its captions as a table row.

    Expects paths shaped like .../images_<split>/<shard>/<filename>; the split
    is recovered from the directory name and the filename is the caption key.
    Returns [binary, captions, image_id, split].
    """
    split_dir, _, name = path.split("/")[-3:]
    split = split_dir.split("_")[-1]
    with open(path, "rb") as handle:
        binary = handle.read()
    return [binary, iid2captions[name], name, split]
def make_arrow(root, dataset_root):
    """Convert SBU images + {root}/annot.json captions into sharded Arrow files.

    Writes {dataset_root}/sbu_{sub}.arrow in 100k-row shards with columns
    [image, caption, image_id, split].
    """
    with open(f"{root}/annot.json", "r") as fp:
        captions = json.load(fp)
    # annot entries look like [path, caption]; key by the bare filename
    iid2captions = dict()
    for cap in tqdm(captions):
        iid = cap[0].split("/")[-1]
        iid2captions[iid] = [cap[1]]
    paths = list(glob(f"{root}/images_train/*/*"))
    random.shuffle(paths)
    # keep only images that have a caption entry
    caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
    if len(paths) == len(caption_paths):
        print("all images have caption annotations")
    else:
        print("not all images have caption annotations")
    print(
        len(paths), len(caption_paths), len(iid2captions),
    )
    # shard into chunks of 100k rows to bound memory per Arrow file
    sub_len = int(len(caption_paths) // 100000)
    subs = list(range(sub_len + 1))
    for sub in subs:
        sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000]
        bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)]
        dataframe = pd.DataFrame(bs, columns=["image", "caption", "image_id", "split"],)
        table = pa.Table.from_pandas(dataframe)
        os.makedirs(dataset_root, exist_ok=True)
        with pa.OSFile(f"{dataset_root}/sbu_{sub}.arrow", "wb") as sink:
            with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                writer.write_table(table)
        # free shard-sized intermediates before the next chunk
        del dataframe
        del table
        del bs
        gc.collect()
185,053 | import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from glob import glob
def path2rest(path, iid2captions):
    """Pack one image file plus its captions into a [binary, captions, id, split] row.

    The split name is parsed out of the grandparent directory
    (.../images_<split>/<shard>/<filename>); the filename is the lookup key.
    """
    split_dir, _, name = path.split("/")[-3:]
    split = split_dir.split("_")[-1]
    with open(path, "rb") as handle:
        binary = handle.read()
    return [binary, iid2captions[name], name, split]
def make_arrow(root, dataset_root):
    """Convert Conceptual Captions val/train images + annotations into Arrow shards.

    For each split, reads {root}/{split}_annot.json and the images under
    {root}/images_{split}, and writes 100k-row shards named
    {dataset_root}/conceptual_caption_{split}_{sub}.arrow with columns
    [image, caption, image_id, split].
    """
    for split in ["val", "train"]:
        with open(f"{root}/{split}_annot.json", "r") as fp:
            captions = json.load(fp)
        # annot entries look like [path, caption]; key by the bare filename
        iid2captions = dict()
        for cap in tqdm(captions):
            iid = cap[0].split("/")[-1]
            iid2captions[iid] = [cap[1]]
        paths = list(glob(f"{root}/images_{split}/*/*"))
        random.shuffle(paths)
        # keep only images that have a caption entry
        caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
        if len(paths) == len(caption_paths):
            print("all images have caption annotations")
        else:
            print("not all images have caption annotations")
        print(
            len(paths), len(caption_paths), len(iid2captions),
        )
        # shard into chunks of 100k rows to bound memory per Arrow file
        sub_len = int(len(caption_paths) // 100000)
        subs = list(range(sub_len + 1))
        for sub in subs:
            sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000]
            bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)]
            dataframe = pd.DataFrame(
                bs, columns=["image", "caption", "image_id", "split"],
            )
            table = pa.Table.from_pandas(dataframe)
            os.makedirs(dataset_root, exist_ok=True)
            with pa.OSFile(
                f"{dataset_root}/conceptual_caption_{split}_{sub}.arrow", "wb"
            ) as sink:
                with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                    writer.write_table(table)
            # free shard-sized intermediates before the next chunk
            del dataframe
            del table
            del bs
            gc.collect()
185,054 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import math
import os
import pickle
import random
from time import sleep
import numpy as np
import torch
from nltk.translate.bleu_score import sentence_bleu
from tqdm import tqdm
from transformers import \
BertTokenizer, RobertaTokenizer
from transformers.tokenization_bert import whitespace_tokenize
import s2s_ft.s2s_loader as seq2seq_loader
from s2s_ft.modeling_decoding import LayoutlmForSeq2SeqDecoder, BertConfig
from s2s_ft.tokenization_minilm import MinilmTokenizer
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.utils import load_and_cache_layoutlm_examples, convert_src_layout_inputs_to_tokens, \
get_tokens_from_src_and_index, convert_tgt_layout_inputs_to_tokens
def detokenize(tk_list):
    """Merge WordPiece '##' continuation tokens back into whole words.

    A token starting with '##' is appended (without the marker) to the
    previous word; a leading '##' token with nothing before it is kept as-is.
    """
    words = []
    for token in tk_list:
        if words and token.startswith('##'):
            words[-1] = words[-1] + token[2:]
        else:
            words.append(token)
    return words
185,055 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import math
import os
import pickle
import random
from time import sleep
import numpy as np
import torch
from nltk.translate.bleu_score import sentence_bleu
from tqdm import tqdm
from transformers import \
BertTokenizer, RobertaTokenizer
from transformers.tokenization_bert import whitespace_tokenize
import s2s_ft.s2s_loader as seq2seq_loader
from s2s_ft.modeling_decoding import LayoutlmForSeq2SeqDecoder, BertConfig
from s2s_ft.tokenization_minilm import MinilmTokenizer
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.utils import load_and_cache_layoutlm_examples, convert_src_layout_inputs_to_tokens, \
get_tokens_from_src_and_index, convert_tgt_layout_inputs_to_tokens
def ascii_print(text):
    """Print *text* with all non-ASCII characters dropped.

    Note the bytes object itself is printed (its repr, e.g. b'...'),
    matching the original behavior.
    """
    print(text.encode("ascii", "ignore"))
185,056 | from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
import tqdm
from s2s_ft.modeling import LayoutlmForSequenceToSequence, LayoutlmConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import \
RobertaConfig, BertConfig, \
BertTokenizer, RobertaTokenizer, \
XLMRobertaConfig, XLMRobertaTokenizer
from s2s_ft.configuration_unilm import UnilmConfig
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.configuration_minilm import MinilmConfig
from s2s_ft.tokenization_minilm import MinilmTokenizer
from s2s_ft import utils
from s2s_ft.config import BertForSeq2SeqConfig
logger = logging.getLogger(__name__)
def prepare_for_training(args, model, checkpoint_state_dict, amp):
    """Build the optimizer and wrap the model for fp16 / multi-GPU / DDP.

    Creates AdamW with weight decay disabled on bias and LayerNorm weights,
    runs apex ``amp.initialize`` when *amp* is provided, restores optimizer /
    model / amp state from *checkpoint_state_dict* if given, and finally wraps
    the model in DataParallel or DistributedDataParallel as configured.
    Returns ``(model, optimizer)``.
    """
    # parameters matching these substrings get no weight decay
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    if amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        if checkpoint_state_dict:
            amp.load_state_dict(checkpoint_state_dict['amp'])
    if checkpoint_state_dict:
        optimizer.load_state_dict(checkpoint_state_dict['optimizer'])
        model.load_state_dict(checkpoint_state_dict['model'])
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    return model, optimizer
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, training_features, model, tokenizer)` to solve the following problem:
Train the model
Here is the function:
def train(args, training_features, model, tokenizer):
    """Train the model.

    Sets up TensorBoard (rank 0 only) and optional apex fp16, builds the
    optimizer and linear-warmup scheduler, wraps the pre-shuffled features in
    a sequential/distributed loader, and runs the gradient-accumulated
    training loop with periodic logging and checkpointing.
    """
    if args.local_rank in [-1, 0] and args.log_dir:
        tb_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        tb_writer = None
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
    else:
        amp = None
    # model recover
    recover_step = utils.get_max_epoch_model(args.output_dir)
    # NOTE(review): checkpoint_state_dict is hard-coded None, so optimizer /
    # scheduler / amp state is NOT restored even when recover_step is found —
    # only the global step (and dataset offset) resume. Confirm intended.
    checkpoint_state_dict = None
    model.to(args.device)
    model, optimizer = prepare_for_training(args, model, checkpoint_state_dict, amp=amp)
    if args.n_gpu == 0 or args.no_cuda:
        per_node_train_batch_size = args.per_gpu_train_batch_size * args.gradient_accumulation_steps
    else:
        per_node_train_batch_size = args.per_gpu_train_batch_size * args.n_gpu * args.gradient_accumulation_steps
    train_batch_size = per_node_train_batch_size * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)
    global_step = recover_step if recover_step else 0
    if args.num_training_steps == -1:
        # derive step budget from epochs when not given explicitly
        args.num_training_steps = int(args.num_training_epochs * len(training_features) / train_batch_size)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.num_training_steps, last_epoch=-1)
    if checkpoint_state_dict:
        scheduler.load_state_dict(checkpoint_state_dict["lr_scheduler"])
    # the dataset yields exactly num_training_steps * batch_size instances,
    # starting at the recovered offset
    train_dataset = utils.Seq2seqDatasetForLayoutlm(
        features=training_features, max_source_len=args.max_source_seq_length,
        max_target_len=args.max_target_seq_length, vocab_size=tokenizer.vocab_size,
        cls_id=tokenizer.cls_token_id, sep_id=tokenizer.sep_token_id, pad_id=tokenizer.pad_token_id,
        mask_id=tokenizer.mask_token_id, random_prob=args.random_prob, keep_prob=args.keep_prob,
        offset=train_batch_size * global_step, num_training_instances=train_batch_size * args.num_training_steps,
        layout_flag=args.model_type == 'layoutlm'
    )
    # sanity-log a few decoded instances; layoutlm items carry (id, x, y, ...)
    # tuples per token, plain text items carry bare ids (hence the TypeError path)
    logger.info("Check dataset:")
    for i in range(5):
        source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, target_index = train_dataset.__getitem__(
            i)
        logger.info("Instance-%d" % i)
        try:
            src = [sid[0] for sid in source_ids]
            tgt = [tid[0] for tid in target_ids]
        except TypeError:
            src = source_ids
            tgt = target_ids
        logger.info("Source tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(src)))
        logger.info("Target tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(tgt)))
    logger.info("Mode = %s" % str(model))
    # Train!
    logger.info(" ***** Running training ***** *")
    logger.info(" Num examples = %d", len(training_features))
    logger.info(" Num Epochs = %.2f", len(train_dataset) / len(training_features))
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(" Batch size per node = %d", per_node_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", train_batch_size)
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", args.num_training_steps)
    if args.num_training_steps <= global_step:
        logger.info("Training is done. Please use a new dir or clean this dir!")
    else:
        # The training features are shuffled
        train_sampler = SequentialSampler(train_dataset) \
            if args.local_rank == -1 else DistributedSampler(train_dataset, shuffle=False)
        train_dataloader = DataLoader(
            train_dataset, sampler=train_sampler,
            batch_size=per_node_train_batch_size // args.gradient_accumulation_steps,
            collate_fn=utils.batch_list_to_batch_tensors)
        train_iterator = tqdm.tqdm(
            train_dataloader, initial=global_step,
            desc="Iter (loss=X.XXX, lr=X.XXXXXXX)", disable=args.local_rank not in [-1, 0])
        model.train()
        model.zero_grad()
        # NOTE(review): tr_loss is never read after this; logging_loss is reset
        # but not otherwise used in the TB branch below.
        tr_loss, logging_loss = 0.0, 0.0
        for step, batch in enumerate(train_iterator):
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {'source_idxys': batch[0],
                      'target_idxys': batch[1],
                      'pseudo_idxys': batch[2],
                      'num_source_tokens': batch[3],
                      'num_target_tokens': batch[4],
                      'target_index': batch[-1]}
            loss = model(**inputs)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel (not distributed) training
            train_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            logging_loss += loss.item()
            # optimizer step only once per accumulation window
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and global_step % args.logging_steps == 0 and tb_writer is not None:
                    logging_loss = 0.0
                    tb_writer.add_scalar('train/lr', scheduler.get_lr()[0], global_step=global_step)
                    tb_writer.add_scalar('train/loss', loss.item(), global_step=global_step)
                if args.local_rank in [-1, 0] and args.save_steps > 0 and \
                        (global_step % args.save_steps == 0 or global_step == args.num_training_steps):
                    # checkpoint the (unwrapped) model plus optimizer/scheduler state
                    save_path = os.path.join(args.output_dir, "ckpt-%d" % global_step)
                    os.makedirs(save_path, exist_ok=True)
                    model_to_save = model.module if hasattr(model, "module") else model
                    model_to_save.save_pretrained(save_path)
                    optim_to_save = {
                        "optimizer": optimizer.state_dict(),
                        "lr_scheduler": scheduler.state_dict(),
                    }
                    if args.fp16:
                        optim_to_save["amp"] = amp.state_dict()
                    torch.save(
                        optim_to_save, os.path.join(args.output_dir, 'optim.{}.bin'.format(global_step)))
                    logger.info("Saving model checkpoint %d into %s", global_step, save_path)
    if args.local_rank in [-1, 0] and tb_writer:
        tb_writer.close()
185,057 | from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
import tqdm
from s2s_ft.modeling import LayoutlmForSequenceToSequence, LayoutlmConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import \
RobertaConfig, BertConfig, \
BertTokenizer, RobertaTokenizer, \
XLMRobertaConfig, XLMRobertaTokenizer
from s2s_ft.configuration_unilm import UnilmConfig
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.configuration_minilm import MinilmConfig
from s2s_ft.tokenization_minilm import MinilmTokenizer
from s2s_ft import utils
from s2s_ft.config import BertForSeq2SeqConfig
MODEL_CLASSES = {
'bert': (BertConfig, BertTokenizer),
'minilm': (MinilmConfig, MinilmTokenizer),
'roberta': (RobertaConfig, RobertaTokenizer),
'xlm-roberta': (XLMRobertaConfig, XLMRobertaTokenizer),
'unilm': (UnilmConfig, UnilmTokenizer),
'layoutlm': (LayoutlmConfig, BertTokenizer),
}
def get_args():
    """Build and parse the command-line arguments for seq2seq fine-tuning.

    Groups: data/model selection (required), sequence-length and training
    hyperparameters, fp16/distributed settings, logging/checkpoint cadence.
    Returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_file", default=None, type=str,
                        help="Training data (json format) for training. Keys: source and target")
    parser.add_argument("--train_folder", default=None, type=str,
                        help="Training data folder for training. Keys: source and target")
    parser.add_argument("--sentence_shuffle_rate", default=0, type=float)
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--layoutlm_only_layout", action='store_true')
    parser.add_argument("--layout_only_dataset", action='store_true')
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list:")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model checkpoints and predictions will be written.")
    parser.add_argument("--log_dir", default=None, type=str,
                        help="The output directory where the log will be written.")
    ## Other parameters
    parser.add_argument("--config_name", default=None, type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default=None, type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default=None, type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_source_seq_length", default=464, type=int,
                        help="The maximum total source sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--max_target_seq_length", default=48, type=int,
                        help="The maximum total target sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--cached_train_features_file", default=None, type=str,
                        help="Cached training features file")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--weight_decay", default=0.01, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--label_smoothing", default=0.1, type=float,
                        help="Label smoothing.")
    parser.add_argument("--num_training_steps", default=-1, type=int,
                        help="set total number of training steps to perform")
    parser.add_argument("--num_training_epochs", default=10, type=int,
                        help="set total number of training epochs to perform (--num_training_steps has higher priority)")
    parser.add_argument("--num_warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--random_prob", default=0.1, type=float,
                        help="prob to random replace a masked token")
    parser.add_argument("--keep_prob", default=0.1, type=float,
                        help="prob to keep no change for a masked token")
    parser.add_argument('--logging_steps', type=int, default=500,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=1500,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
    return args
185,058 | from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
import tqdm
from s2s_ft.modeling import LayoutlmForSequenceToSequence, LayoutlmConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import \
RobertaConfig, BertConfig, \
BertTokenizer, RobertaTokenizer, \
XLMRobertaConfig, XLMRobertaTokenizer
from s2s_ft.configuration_unilm import UnilmConfig
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.configuration_minilm import MinilmConfig
from s2s_ft.tokenization_minilm import MinilmTokenizer
from s2s_ft import utils
from s2s_ft.config import BertForSeq2SeqConfig
logger = logging.getLogger(__name__)
def prepare(args):
    """One-time environment setup before training.

    Handles optional remote debugging, dumps the run options to
    ``train_opt.json``, initializes CUDA / NCCL distributed state, configures
    logging, seeds all RNGs, and registers ``torch.einsum`` with apex amp
    when fp16 is requested. Mutates ``args`` (sets ``n_gpu`` and ``device``).
    """
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    os.makedirs(args.output_dir, exist_ok=True)
    json.dump(args.__dict__, open(os.path.join(
        args.output_dir, 'train_opt.json'), 'w'), sort_keys=True, indent=2)
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
    # Set seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    logger.info("Training/evaluation parameters %s", args)
    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
    # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
    # remove the need for this code, but it is still valid.
    if args.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, 'einsum')
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
185,059 | from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
import tqdm
from s2s_ft.modeling import LayoutlmForSequenceToSequence, LayoutlmConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import \
RobertaConfig, BertConfig, \
BertTokenizer, RobertaTokenizer, \
XLMRobertaConfig, XLMRobertaTokenizer
from s2s_ft.configuration_unilm import UnilmConfig
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.configuration_minilm import MinilmConfig
from s2s_ft.tokenization_minilm import MinilmTokenizer
from s2s_ft import utils
from s2s_ft.config import BertForSeq2SeqConfig
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'bert': (BertConfig, BertTokenizer),
'minilm': (MinilmConfig, MinilmTokenizer),
'roberta': (RobertaConfig, RobertaTokenizer),
'xlm-roberta': (XLMRobertaConfig, XLMRobertaTokenizer),
'unilm': (UnilmConfig, UnilmTokenizer),
'layoutlm': (LayoutlmConfig, BertTokenizer),
}
class LayoutlmForSequenceToSequence(BertPreTrainedForSeq2SeqModel):
    def __init__(self, config):
        """Seq2seq model: LayoutLM or BERT encoder plus a span MLM head.

        The loss criterion is chosen once here: label-smoothed loss when
        ``config.label_smoothing > 0``, otherwise plain cross-entropy
        (both with ``ignore_index=0`` and per-token reduction).
        """
        super(LayoutlmForSequenceToSequence, self).__init__(config)
        # backbone is selected by config; layoutlm variant presumably adds
        # 2-D layout embeddings — confirm in LayoutlmModel
        if config.base_model_type == 'layoutlm':
            self.bert = LayoutlmModel(config)
        else:
            self.bert = BertModel(config)
        self.cls = LayoutlmSPOnlyMLMHead(config, src_len=config.max_source_length)
        self.init_weights()
        self.log_softmax = nn.LogSoftmax()
        # setattr(config, 'label_smoothing', 0.1)
        self.source_type_id = config.source_type_id
        self.target_type_id = config.target_type_id
        if config.label_smoothing > 0:
            self.crit_mask_lm_smoothed = LabelSmoothingLoss(
                config.label_smoothing, config.max_source_length, ignore_index=0, reduction='none')
            self.crit_mask_lm = None
        else:
            self.crit_mask_lm_smoothed = None
            self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none', ignore_index=0)
    def create_mask_and_position_ids(num_tokens, max_len, offset=None):
        """Build a 0/1 validity mask and position ids for padded sequences.

        ``num_tokens`` holds per-example valid lengths; positions past the
        length are masked to 0. ``offset`` (if given) shifts positions per
        example, e.g. so target positions continue after the source.

        NOTE(review): defined without ``self`` yet called as a method in
        ``forward`` — presumably decorated ``@staticmethod`` upstream; confirm.
        """
        base_position_matrix = torch.arange(
            0, max_len, dtype=num_tokens.dtype, device=num_tokens.device).view(1, -1)
        # mask[i, j] = 1 iff j < num_tokens[i]
        mask = (base_position_matrix < num_tokens.view(-1, 1)).type_as(num_tokens)
        if offset is not None:
            base_position_matrix = base_position_matrix + offset.view(-1, 1)
        # zero out positions beyond each example's length
        position_ids = base_position_matrix * mask
        return mask, position_ids
def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids, -target_span_ids), dim=1)
from_weight = weight.unsqueeze(-1)
to_weight = weight.unsqueeze(1)
true_tokens = (0 <= to_weight) & (torch.cat((source_mask, target_mask, target_mask), dim=1) == 1).unsqueeze(1)
true_tokens_mask = (from_weight >= 0) & true_tokens & (to_weight <= from_weight)
pseudo_tokens_mask = (from_weight < 0) & true_tokens & (-to_weight > from_weight)
pseudo_tokens_mask = pseudo_tokens_mask | ((from_weight < 0) & (to_weight == from_weight))
return (true_tokens_mask | pseudo_tokens_mask).type_as(source_mask)
def forward(self, source_idxys, target_idxys, target_index, pseudo_idxys, num_source_tokens, num_target_tokens,
target_span_ids=None):
source_len = source_idxys.size(1)
target_len = target_idxys.size(1)
pseudo_len = pseudo_idxys.size(1)
assert target_len == pseudo_len
assert source_len > 0 and target_len > 0
split_lengths = (source_len, target_len, pseudo_len)
if self.config.base_model_type == 'layoutlm':
source_xys = source_idxys[:, :, 1:]
target_xys = target_idxys[:, :, 1:]
pseudo_xys = pseudo_idxys[:, :, 1:]
input_xys = torch.cat((source_xys, target_xys, pseudo_xys), dim=1)
source_ids = source_idxys[:, :, 0]
target_ids = target_idxys[:, :, 0]
pseudo_ids = pseudo_idxys[:, :, 0]
else:
source_ids = source_idxys
target_ids = target_idxys
pseudo_ids = pseudo_idxys
input_xys = None
input_ids = torch.cat((source_ids, target_ids, pseudo_ids), dim=1)
token_type_ids = torch.cat(
(torch.ones_like(source_ids) * self.source_type_id,
torch.ones_like(target_ids) * self.target_type_id,
torch.ones_like(pseudo_ids) * self.target_type_id), dim=1)
source_mask, source_position_ids = \
self.create_mask_and_position_ids(num_source_tokens, source_len)
target_mask, target_position_ids = \
self.create_mask_and_position_ids(num_target_tokens, target_len, offset=num_source_tokens)
position_ids = torch.cat((source_position_ids, target_position_ids, target_position_ids), dim=1)
if target_span_ids is None:
target_span_ids = target_position_ids
attention_mask = self.create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids)
if self.config.base_model_type == 'layoutlm':
outputs = self.bert(
input_ids, input_xys, attention_mask=attention_mask, token_type_ids=token_type_ids,
position_ids=position_ids, split_lengths=split_lengths, return_emb=True)
else:
outputs = self.bert(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
position_ids=position_ids, split_lengths=split_lengths, return_emb=True)
sequence_output = outputs[0]
pseudo_sequence_output = sequence_output[:, source_len + target_len:, ]
sequence_embedding = outputs[-1]
source_embedding = sequence_embedding[:, :source_len, :]
def loss_mask_and_normalize(loss, mask):
mask = mask.type_as(loss)
loss = loss * mask
denominator = torch.sum(mask) + 1e-5
return (loss / denominator).sum()
# TODO: do we need to mask the impossible pos with the real input length
prediction_scores_masked = self.cls(pseudo_sequence_output, source_embedding)
if self.crit_mask_lm_smoothed:
masked_lm_loss = self.crit_mask_lm_smoothed(
F.log_softmax(prediction_scores_masked.float(), dim=-1), target_index)
else:
masked_lm_loss = self.crit_mask_lm(
prediction_scores_masked.transpose(1, 2).float(), target_index)
pseudo_lm_loss = loss_mask_and_normalize(
masked_lm_loss.float(), target_mask)
return pseudo_lm_loss
class BertForSeq2SeqConfig(BertConfig):
    """BertConfig extended with the seq2seq-specific fields used here.

    NOTE(review): this shadows the ``BertForSeq2SeqConfig`` imported from
    ``s2s_ft.config`` earlier in the file — presumably intentional; verify.
    """

    def __init__(self, label_smoothing=0.1, source_type_id=0, target_type_id=1,
                 rel_pos_bins=0, max_rel_pos=0, fix_word_embedding=False, **kwargs):
        super(BertForSeq2SeqConfig, self).__init__(**kwargs)
        self.label_smoothing = label_smoothing
        self.source_type_id = source_type_id
        self.target_type_id = target_type_id
        self.max_rel_pos = max_rel_pos
        self.rel_pos_bins = rel_pos_bins
        self.fix_word_embedding = fix_word_embedding

    @classmethod
    def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None,
                          fix_word_embedding=False, **extra_config_kwargs):
        """Build a seq2seq config from an existing backbone config.

        FIX: this alternate constructor uses ``cls`` but was missing the
        ``@classmethod`` decorator, so calling it on the class passed the
        class object as ``config``.  Extra keyword arguments (e.g.
        ``max_source_length``, ``base_model_type`` as passed by
        ``get_model_and_tokenizer``) are now forwarded to the constructor,
        which is backward-compatible since the base config accepts and
        stores unknown kwargs as attributes.
        """
        required_keys = [
            "vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads",
            "hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob",
            "max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps",
        ]
        kwargs = {}
        for key in required_keys:
            assert hasattr(config, key)
            kwargs[key] = getattr(config, key)
        # Legacy alias kept for older transformers versions.
        kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"]
        if isinstance(config, RobertaConfig):
            # RoBERTa reserves two extra position rows and has no token types.
            kwargs["type_vocab_size"] = 0
            kwargs["max_position_embeddings"] = kwargs["max_position_embeddings"] - 2

        additional_keys = [
            "source_type_id", "target_type_id", "rel_pos_bins", "max_rel_pos",
        ]
        for key in additional_keys:
            if hasattr(config, key):
                kwargs[key] = getattr(config, key)

        if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings:
            kwargs["max_position_embeddings"] = max_position_embeddings
            logger.info(" ** Change max position embeddings to %d ** " % max_position_embeddings)

        kwargs.update(extra_config_kwargs)
        return cls(label_smoothing=label_smoothing, fix_word_embedding=fix_word_embedding, **kwargs)
def get_model_and_tokenizer(args):
    """Instantiate the seq2seq model and its matching tokenizer from CLI args."""
    config_cls, tokenizer_cls = MODEL_CLASSES[args.model_type]
    cache_dir = args.cache_dir if args.cache_dir else None

    backbone_config = config_cls.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        cache_dir=cache_dir)
    config = BertForSeq2SeqConfig.from_exist_config(
        config=backbone_config, label_smoothing=args.label_smoothing,
        max_position_embeddings=args.max_source_seq_length + args.max_target_seq_length,
        max_source_length=args.max_source_seq_length,
        base_model_type=args.model_type,
        layoutlm_only_layout_flag=args.layoutlm_only_layout,
    )
    logger.info("Model config for seq2seq: %s", str(config))

    if args.model_type == 'layoutlm':
        # LayoutLM reuses a BERT tokenizer; when no tokenizer name is given,
        # derive one by swapping the 'layoutlm' checkpoint prefix for 'bert'.
        if args.tokenizer_name is not None:
            tokenizer_name = args.tokenizer_name
        else:
            tokenizer_name = 'bert' + args.model_name_or_path[8:]
        tokenizer = tokenizer_cls.from_pretrained(
            tokenizer_name, do_lower_case=args.do_lower_case, cache_dir=cache_dir)
    else:
        tokenizer = tokenizer_cls.from_pretrained(
            args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
            do_lower_case=args.do_lower_case, cache_dir=cache_dir)

    model = LayoutlmForSequenceToSequence.from_pretrained(
        args.model_name_or_path, config=config, model_type=args.model_type,
        reuse_position_embedding=True,
        cache_dir=cache_dir,
    )
    return model, tokenizer
import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
logger = logging.getLogger(__name__)


def hf_distilbert_to_hf_bert(state_dict):
    """Rename RoBERTa-style checkpoint keys to the Huggingface BERT layout.

    Transformations applied:
      * the two leading padding rows of the position embeddings are dropped;
      * the token-type embedding table is dropped entirely;
      * ``roberta.*`` keys become ``bert.*``;
      * ``lm_head.*`` keys move under ``cls.predictions[.transform].*``;
      * every ``layer_norm`` substring is renamed to ``LayerNorm``.
    """
    logger.info(" * Convert Huggingface DistilBERT format to Huggingface BERT format * ")
    converted = {}
    for name, weight in state_dict.items():
        if name == 'roberta.embeddings.position_embeddings.weight':
            # Positions 0 and 1 are padding-reserved in this layout.
            weight = weight[2:]
        if name == 'roberta.embeddings.token_type_embeddings.weight':
            continue
        if name.startswith('roberta'):
            name = 'bert.' + name[8:]
        elif name.startswith('lm_head'):
            # dense / layer_norm belong to the prediction-head transform.
            if 'layer_norm' in name or 'dense' in name:
                name = 'cls.predictions.transform.' + name[8:]
            else:
                name = 'cls.predictions.' + name[8:]
        converted[name.replace('layer_norm', 'LayerNorm')] = weight
    return converted
import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
def hf_bert_to_hf_bert(state_dict):
    """Pass a BERT checkpoint through unchanged, dropping every ``cls.*`` key.

    The head in this project predicts source-token indices, so the
    pretrained LM-head ('cls') weights are intentionally discarded.
    """
    return {name: weight for name, weight in state_dict.items()
            if not name.startswith('cls')}
import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
logger = logging.getLogger(__name__)


def hf_layoutlm_to_hf_bert(state_dict):
    """Rename ``layoutlm.*`` checkpoint keys to ``bert.*`` and drop the LM head.

    ``cls.*`` entries are discarded because this project's head predicts
    source-token indices, not vocabulary tokens.
    """
    logger.info(" * Convert Huggingface LayoutLM format to Huggingface BERT format * ")
    converted = {}
    for name, weight in state_dict.items():
        if name.startswith('layoutlm'):
            converted['bert.' + name[9:]] = weight
        elif not name.startswith('cls'):
            converted[name] = weight
    return converted
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
def batch_list_to_batch_tensors(batch):
    """Collate a list of per-example tuples into per-field batch tensors.

    Tensor fields are stacked along a new leading batch dimension; any
    other field is converted with ``torch.tensor(..., dtype=torch.long)``.
    """
    def collate_field(values):
        if isinstance(values[0], torch.Tensor):
            return torch.stack(values)
        return torch.tensor(values, dtype=torch.long)

    return [collate_field(values) for values in zip(*batch)]
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
def get_max_epoch_model(output_dir):
    """Return the highest epoch with a complete checkpoint pair, else None.

    A complete pair means both ``model.<epoch>.bin`` and
    ``optim.<epoch>.bin`` exist in ``output_dir``.

    FIX: removed a dead no-op statement (``os.path.basename(output_dir)``
    whose result was discarded).
    """
    fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin"))
    fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin"))
    if (not fn_model_list) or (not fn_optim_list):
        return None

    def _epochs(filenames):
        # "model.<epoch>.bin" -> <epoch>
        return set(int(os.path.basename(fn).split('.')[1]) for fn in filenames)

    # Only epochs with BOTH files can be resumed from.
    both_set = _epochs(fn_model_list) & _epochs(fn_optim_list)
    return max(both_set) if both_set else None
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
logger = logging.getLogger(__name__)
def load_and_cache_examples(
        example_file, tokenizer, local_rank, cached_features_file, shuffle=True):
    """Read jsonl src/tgt examples, tokenize them, and cache the features.

    Each line of ``example_file`` is a JSON object with "src" and "tgt"
    fields, either raw strings (tokenized here) or pre-tokenized lists.
    Returns a list of dicts with "source_ids" and "target_ids".
    In distributed runs only rank 0 builds/saves the cache; other ranks
    wait at a barrier and then load it.
    """
    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()

    if cached_features_file is not None and os.path.exists(cached_features_file):
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", example_file)

        examples = []
        with open(example_file, mode="r", encoding="utf-8") as reader:
            for i, line in enumerate(reader):
                # NOTE(review): caps the dataset at 100 examples — looks like
                # a leftover debugging limit; confirm before production use.
                if i == 100:
                    break
                examples.append(json.loads(line))
        features = []

        for example in tqdm.tqdm(examples):
            if isinstance(example["src"], list):
                # Already tokenized upstream.
                source_tokens = example["src"]
                target_tokens = example["tgt"]
            else:
                source_tokens = tokenizer.tokenize(example["src"])
                target_tokens = tokenizer.tokenize(example["tgt"])
            features.append({
                "source_ids": tokenizer.convert_tokens_to_ids(source_tokens),
                "target_ids": tokenizer.convert_tokens_to_ids(target_tokens),
            })

        if shuffle:
            random.shuffle(features)

        if local_rank in [-1, 0] and cached_features_file is not None:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank == 0:
        torch.distributed.barrier()

    return features
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
logger = logging.getLogger(__name__)
def load_and_cache_line_order_examples(
        example_path, tokenizer, local_rank, cached_features_file, max_src_length=1024,
        layout_flag=True, shuffle=True,
        src_shuffle_rate=0,
        file_info_flag=False,
):
    """Load line-ordering examples from a jsonl file and build features.

    Each input line is a JSON object with 'src', 'tgt', 'tgt_index' and
    'bleu' fields.  Every src/tgt line gets its (stringified) position's
    first token id prepended to its layout features.  Returns a list of
    feature dicts (source_ids, target_ids, target_index, bleu[, file_info]).
    """
    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()

    # NOTE(review): the trailing `and False` disables cache loading entirely,
    # so features are always rebuilt — looks like a leftover debug switch.
    if cached_features_file is not None and os.path.exists(cached_features_file) and False:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset at %s", example_path)

        examples = []

        with open(example_path, 'r') as layout_reader:
            logger.info(f'Start loading {example_path}')
            for i, line in enumerate(layout_reader):
                examples.append(json.loads(line))

        features = []

        for layout in tqdm.tqdm(examples):
            bleu = layout['bleu']
            # With probability `src_shuffle_rate`, randomly permute the source
            # lines and remap the target indices accordingly (augmentation).
            if random.random() < src_shuffle_rate:
                # print('Random!!!')
                # DONE: the random src! here has bug! index also need shuffle
                src_layout = layout['src']
                tgt_index = layout['tgt_index']

                source_length = len(src_layout)
                shuffle_index = list(range(source_length))
                random.shuffle(shuffle_index)

                shuffle_layout = ['' for _ in range(source_length)]
                for i, j in enumerate(shuffle_index):
                    # NOTE: map i-th token to j-th token
                    shuffle_layout[j] = src_layout[i]

                shuffle_target_index = [shuffle_index[i] for i in tgt_index]

                layout['tgt_index'] = shuffle_target_index
                layout['src'] = shuffle_layout

            # NOTE(review): `mask` is computed but never used below.
            mask = tokenizer.mask_token_id
            # Prepend the first sub-token id of the stringified line index
            # to each line's layout features.
            src_ids = [tokenizer.convert_tokens_to_ids([str(tmp_i)])[:1] + src_layout for tmp_i, src_layout in enumerate(layout['src'])]
            tgt_ids = [tokenizer.convert_tokens_to_ids([str(tmp_i)])[:1] + tgt_layout for tmp_i, tgt_layout in enumerate(layout['tgt'])]
            tgt_index = layout['tgt_index']

            feature = {
                "source_ids": src_ids,
                "target_ids": tgt_ids,
                "target_index": tgt_index,
                'bleu': bleu
            }

            if file_info_flag:
                file_info = {'original_filename': layout['filename'], 'filename': layout['filename'],
                             'page_idx': 0}
                feature['file_info'] = file_info

            features.append(feature)

        if shuffle:
            random.shuffle(features)

        if local_rank in [-1, 0] and cached_features_file is not None:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank == 0:
        torch.distributed.barrier()

    return features
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
logger = logging.getLogger(__name__)
def load_and_cache_layoutlm_examples(
        example_path, tokenizer, local_rank, cached_features_file, max_src_length=1024,
        layout_flag=True, shuffle=True,
        src_shuffle_rate=0,
        file_info_flag=False
):
    """Load paired text/layout jsonl files and build LayoutLM seq2seq features.

    ``example_path`` may be a directory (pairs every ``*text*.json`` with the
    corresponding ``*layout*.json``) or a single text file whose layout file
    is derived by substituting 'text'/'txt' with 'layout'.  Words are
    sub-tokenized; with ``layout_flag`` each token becomes
    ``[token_id, x0, y0, x1, y1]``.  Target indices are remapped onto the
    sub-token index space.  Returns a list of feature dicts
    (source_ids, target_ids, target_index, bleu[, file_info]).
    """
    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()

    if cached_features_file is not None and os.path.exists(cached_features_file):
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset at %s", example_path)

        examples = []

        if os.path.isdir(example_path):
            text_files = glob.glob(f'{example_path}/*text*.json')
            # Derive each layout file name from its text file name.
            layout_files = [re.sub('text|txt', 'layout', x, 1) for x in text_files]
        else:
            text_files = [example_path]
            layout_files = [re.sub('text|txt', 'layout', example_path, 1)]

        for text_file, layout_file in zip(text_files, layout_files):
            with open(text_file, mode='r', encoding='utf-8') as text_reader, \
                    open(layout_file, mode='r', encoding='utf-8') as layout_reader:
                logger.info(f'Start loading {text_file}')
                for i, (text_line, layout_line) in enumerate(zip(text_reader, layout_reader)):
                    if (i + 1) % 10000 == 0:
                        logger.info(f'{i + 1} lines ...')
                    examples.append((json.loads(text_line), json.loads(layout_line)))

        features = []

        def tokenize_text_and_layout_src(_text, _layout, _layout_flag):
            # Sub-tokenize source words, attach boxes, and record how each
            # word index splits into sub-token indices (for target remapping).
            # Words with degenerate boxes (x1 < x0 or y1 < y0) are skipped.
            ret = []
            index_split = {}
            words = _text.split()
            # note: (OLD) the index should start from 1: 0-the cls token in src
            # note: (NEW) we need to remove the src embedding's CLS SEP token so we can still start from 0
            # note: (NEWER) we need to at least one blank pos for ignore index in loss function (we use sep's index)
            # NOTE: (NEWER-ER) 1 for all padding tgt index
            new_token_index = 1  # first ordinary index
            for i, (word, box) in enumerate(zip(words, _layout)):
                if (not box[2] >= box[0]) or (not box[3] >= box[1]):
                    continue
                tokens = tokenizer.tokenize(word)
                tokens = tokenizer.convert_tokens_to_ids(tokens)
                new_token_ids = []
                for token in tokens:
                    if _layout_flag:
                        ret.append([token] + box)
                    else:
                        ret.append(token)
                    new_token_ids.append(new_token_index)
                    new_token_index += 1
                index_split[i] = new_token_ids
            return ret, index_split

        def tokenize_text_and_layout_tgt(_text, _layout, _index, _index_split, _layout_flag):
            # Sub-tokenize target words and map each sub-token to its source
            # sub-token index via _index_split; indices are clamped to
            # max_src_length - 1 to stay inside the source embedding range.
            ret = []
            ret_index = []
            words = _text.split()
            for word, box, i in zip(words, _layout, _index):
                if (not box[2] >= box[0]) or (not box[3] >= box[1]):
                    continue
                tokens = tokenizer.tokenize(word)
                tokens = tokenizer.convert_tokens_to_ids(tokens)
                for token, ii in zip(tokens, _index_split[i]):
                    if _layout_flag:
                        ret.append([token] + box)
                    else:
                        ret.append(token)
                    ii = min(ii, max_src_length - 1)
                    ret_index.append(ii)
            return ret, ret_index

        for text, layout in tqdm.tqdm(examples):
            if 'bleu' in text:
                bleu = text['bleu']
            else:
                bleu = 0
            # With probability `src_shuffle_rate`, permute the source words
            # and remap the target indices accordingly (augmentation).
            if random.random() < src_shuffle_rate:
                # print('Random!!!')
                # DONE: the random src! here has bug! index also need shuffle
                src_text = text['src']
                src_layout = layout['src']
                tgt_index = text['tgt_index']

                src_text = src_text.split()
                source_length = len(src_text)
                shuffle_index = list(range(source_length))
                random.shuffle(shuffle_index)

                shuffle_text = ['' for _ in range(source_length)]
                shuffle_layout = ['' for _ in range(source_length)]
                for i, j in enumerate(shuffle_index):
                    # NOTE: map i-th token to j-th token
                    shuffle_text[j] = src_text[i]
                    shuffle_layout[j] = src_layout[i]

                shuffle_target_index = [shuffle_index[i] for i in tgt_index]

                text['src'] = ' '.join(shuffle_text)
                text['tgt_index'] = shuffle_target_index
                layout['src'] = shuffle_layout

            src_ids, src_index_split = tokenize_text_and_layout_src(text['src'], layout['src'],
                                                                    _layout_flag=layout_flag)
            tgt_ids, tgt_index = tokenize_text_and_layout_tgt(text['tgt'], layout['tgt'], text['tgt_index'],
                                                              src_index_split, _layout_flag=layout_flag)
            feature = {
                "source_ids": src_ids,
                "target_ids": tgt_ids,
                "target_index": tgt_index,
                'bleu': bleu
            }
            if file_info_flag:
                file_info = {'original_filename': text['original_filename'], 'filename': text['filename'], 'page_idx': text['page_idx']}
                feature['file_info'] = file_info
            features.append(feature)

        if shuffle:
            random.shuffle(features)

        if local_rank in [-1, 0] and cached_features_file is not None:
            if not os.path.exists(os.path.dirname(cached_features_file)):
                os.makedirs(os.path.dirname(cached_features_file))
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank == 0:
        torch.distributed.barrier()

    return features
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
def convert_src_layout_inputs_to_tokens(inputs, converter, max_src_length, layout_flag=True):
    """Convert each example's source ids back to tokens, truncated to
    ``max_src_length``.

    With ``layout_flag`` every source entry is a ``[token_id, x0, y0, x1, y1]``
    row: the id is replaced by its token string and the box is preserved.
    Without it the entries are plain ids converted directly.
    """
    results = []
    for example in inputs:
        if layout_flag:
            rows = example['source_ids']
            tokens = converter([row[0] for row in rows])
            merged = [[tok] + row[1:] for tok, row in zip(tokens, rows)]
            results.append(merged[: max_src_length])
        else:
            results.append(converter(example["source_ids"])[: max_src_length])
    return results
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
def convert_tgt_layout_inputs_to_tokens(inputs, converter, max_tgt_length, layout_flag=True):
    """Convert each example's target ids to tokens, truncated to
    ``max_tgt_length``.

    With ``layout_flag`` each target entry is a ``[token_id, ...box]`` row;
    only the id column is converted and the boxes are discarded.
    """
    results = []
    for example in inputs:
        ids = example["target_ids"]
        if layout_flag:
            ids = [row[0] for row in ids]
        results.append(converter(ids)[: max_tgt_length])
    return results
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
def get_tokens_from_src_and_index(src, index, modifier=None):
    """Map predicted source-position indices back to source tokens.

    Args:
        src: source sequence; entries are either bare tokens or
            ``[token, x0, y0, x1, y1]`` rows (token taken from column 0).
        index: iterable of raw predicted positions into ``src``.
        modifier: optional callable applied to each raw index (e.g. an
            offset correction) before lookup.  FIX: the previous code
            called ``modifier(i)`` unconditionally, crashing for the
            default ``modifier=None``; ``None`` now means identity.

    Returns:
        List of tokens; out-of-range indices are clamped to the last entry.
    """
    tokens = []
    for raw in index:
        pos = modifier(raw) if modifier is not None else raw
        pos = min(pos, len(src) - 1)
        entry = src[pos]
        tokens.append(entry[0] if isinstance(entry, list) else entry)
    return tokens
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
def get_layout_from_src_and_index(src, index, modifier=None):
    """Collect the distinct layout boxes referenced by ``index``.

    Args:
        src: source rows of the form ``[token, x0, y0, x1, y1]``; the
            layout is everything after column 0.
        index: iterable of raw positions into ``src``.
        modifier: optional callable applied to each raw index before
            lookup.  FIX: previously ``modifier(i)`` was called even for
            the default ``modifier=None``, which crashed; ``None`` is now
            treated as identity.

    Returns:
        Layout boxes, deduplicated while preserving first-seen order;
        out-of-range indices are clamped to the last source row.
    """
    result = []
    seen = set()
    for raw in index:
        pos = modifier(raw) if modifier is not None else raw
        pos = min(pos, len(src) - 1)
        layout = src[pos][1:]
        key = repr(layout)
        if key not in seen:
            seen.add(key)
            result.append(layout)
    return result
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
def get_everything_from_src_and_index(src, index, modifier=None):
    """Return the full source entries (token + layout) selected by ``index``.

    Args:
        src: source sequence.
        index: iterable of raw positions into ``src``.
        modifier: optional callable applied to each raw index before
            lookup.  FIX: previously called unconditionally, so the
            default ``modifier=None`` crashed; ``None`` now means identity.

    Out-of-range indices are clamped to the last source entry.
    """
    result = []
    for raw in index:
        pos = modifier(raw) if modifier is not None else raw
        pos = min(pos, len(src) - 1)
        result.append(src[pos])
    return result
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import logging
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.modules.loss import _Loss
# The imports above provide the dependencies for the `gelu` activation
# implemented below. For reference, OpenAI GPT's gelu uses the tanh
# approximation and gives slightly different results:
# 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def gelu(x):
    """Gaussian Error Linear Unit activation (exact erf formulation).

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import logging
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.modules.loss import _Loss
def swish(x):
    """Swish / SiLU activation: ``x * sigmoid(x)``."""
    return torch.sigmoid(x) * x
import math
import numpy as np
from typing import Dict, Optional, Tuple
import torch
from torch import Tensor, nn
import torch.nn.functional as F
from torch.nn import LayerNorm, Parameter
from modules import (
GradMultiply,
SamePad,
get_activation_fn,
GLU_Linear,
quant_noise,
)
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
has_relative_attention_bias=False,
num_buckets=32,
max_distance=128,
gru_rel_pos=False,
rescale_init=False,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = nn.Dropout(dropout)
self.has_relative_attention_bias = has_relative_attention_bias
self.num_buckets = num_buckets
self.max_distance = max_distance
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(num_buckets, num_heads)
self.head_dim = embed_dim // num_heads
self.q_head_dim = self.head_dim
self.k_head_dim = self.head_dim
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
k_bias = True
if rescale_init:
k_bias = False
k_embed_dim = embed_dim
q_embed_dim = embed_dim
self.k_proj = quant_noise(
nn.Linear(self.kdim, k_embed_dim, bias=k_bias), q_noise, qn_block_size
)
self.v_proj = quant_noise(
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.q_proj = quant_noise(
nn.Linear(embed_dim, q_embed_dim, bias=bias), q_noise, qn_block_size
)
self.out_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.gru_rel_pos = gru_rel_pos
if self.gru_rel_pos:
self.grep_linear = nn.Linear(self.q_head_dim, 8)
self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1))
self.reset_parameters()
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
if self.has_relative_attention_bias:
nn.init.xavier_normal_(self.relative_attention_bias.weight)
def _relative_positions_bucket(self, relative_positions, bidirectional=True):
num_buckets = self.num_buckets
max_distance = self.max_distance
relative_buckets = 0
if bidirectional:
num_buckets = num_buckets // 2
relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets
relative_positions = torch.abs(relative_positions)
else:
relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions))
max_exact = num_buckets // 2
is_small = relative_positions < max_exact
relative_postion_if_large = max_exact + (
torch.log(relative_positions.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_postion_if_large = torch.min(
relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length):
context_position = torch.arange(query_length, dtype=torch.long)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
relative_position = memory_position - context_position
relative_position_bucket = self._relative_positions_bucket(
relative_position,
bidirectional=True
)
relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(relative_position_bucket)
values = values.permute([2, 0, 1])
return values
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
position_bias: Optional[Tensor] = None
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
assert src_len, bsz == value.shape[:2]
if self.has_relative_attention_bias and position_bias is None:
position_bias = self.compute_bias(tgt_len, src_len)
position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
alpha = 32
q *= 1 / alpha
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.q_head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.k_head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = (attn_weights - attn_weights.max(dim=-1, keepdim=True)[0]) * alpha
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v, position_bias
if position_bias is not None:
attn_mask_rel_pos = position_bias
if self.gru_rel_pos == 1:
query_layer = q.view(bsz, self.num_heads, tgt_len, self.q_head_dim) * alpha / self.scaling
_B, _H, _L, __ = query_layer.size()
gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view(
_B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1)
gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0
attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, tgt_len, 1) * position_bias
attn_mask_rel_pos = attn_mask_rel_pos.view(attn_weights.size())
attn_weights = attn_weights + attn_mask_rel_pos
attn_weights_float = F.softmax(
attn_weights, dim=-1
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights, position_bias
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
if src_len > prev_key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask.float()
elif key_padding_mask is not None:
if src_len > key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = key_padding_mask.float()
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
def _get_input_buffer(
    self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
    """Fetch the cached "attn_state" buffer, or an empty dict if absent."""
    buffer = self.get_incremental_state(incremental_state, "attn_state")
    if buffer is None:
        # explicit typed empty dict (keeps TorchScript happy)
        empty: Dict[str, Optional[Tensor]] = {}
        return empty
    return buffer
def _set_input_buffer(
    self,
    incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
    buffer: Dict[str, Optional[Tensor]],
):
    """Store *buffer* as the cached "attn_state" for incremental decoding."""
    return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
    """Hook for sparse-attention subclasses; the base implementation is a no-op."""
    return attn_weights
The provided code snippet includes necessary dependencies for implementing the `init_bert_params` function. Write a Python function `def init_bert_params(module)` to solve the following problem:
Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then the weights of the linear layer will be initialized using the normal distribution and the bias will be set to the specified value. 2. If normal_init_embed_weights is set then the weights of the embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then the weights of in_project_weight for MultiHeadAttention are initialized using the normal distribution (to be validated).
Here is the function:
def init_bert_params(module):
    """Initialize module weights following the BERT recipe.

    Overrides the default initialization for the given module:
      1. ``nn.Linear``: weight ~ N(0, 0.02); bias zeroed if present.
      2. ``nn.Embedding``: weight ~ N(0, 0.02); the padding row is zeroed.
      3. ``MultiheadAttention``: q/k/v projection weights ~ N(0, 0.02)
         (to be validated).
    """

    def _bert_normal(tensor):
        # With FSDP, module params may already live on CUDA; sample on CPU so
        # the RNG stream is consistent with and without FSDP, then copy back.
        tensor.copy_(tensor.cpu().normal_(mean=0.0, std=0.02).to(tensor.device))

    if isinstance(module, nn.Linear):
        _bert_normal(module.weight.data)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        _bert_normal(module.weight.data)
        pad = module.padding_idx
        if pad is not None:
            module.weight.data[pad].zero_()
    if isinstance(module, MultiheadAttention):
        for proj in (module.q_proj, module.k_proj, module.v_proj):
            _bert_normal(proj.weight.data)
185,082 | import math
import warnings
import torch
from torch import Tensor, nn
import torch.nn.functional as F
def gelu_accurate(x):
    """Tanh approximation of GELU (Hendrycks & Gimpel, 2016)."""
    if not hasattr(gelu_accurate, "_a"):
        # cache sqrt(2/pi) on the function object so it is computed once
        gelu_accurate._a = math.sqrt(2 / math.pi)
    inner = gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
def gelu(x: torch.Tensor) -> torch.Tensor:
    """Exact (erf-based) GELU, computed in fp32 then cast back to x's dtype."""
    y = torch.nn.functional.gelu(x.float())
    return y.type_as(x)
The provided code snippet includes necessary dependencies for implementing the `get_activation_fn` function. Write a Python function `def get_activation_fn(activation: str)` to solve the following problem:
Returns the activation function corresponding to `activation`
Here is the function:
def get_activation_fn(activation: str):
    """Return the activation callable registered under *activation*.

    Raises RuntimeError for unknown names.  "linear" and "glu" both map to
    the identity here; for "glu" the gating is presumably applied by the
    calling layer — confirm against callers.
    """
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return gelu
    if activation == "gelu_fast":
        # old alias kept for backward compatibility
        warnings.warn(
            "--activation-fn=gelu_fast has been renamed to gelu_accurate"
        )
        return gelu_accurate
    if activation == "gelu_accurate":
        return gelu_accurate
    if activation == "tanh":
        return torch.tanh
    if activation in ("linear", "glu"):
        return lambda x: x
    raise RuntimeError("--activation-fn {} not supported".format(activation))
185,083 | import math
import warnings
import torch
from torch import Tensor, nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `quant_noise` function. Write a Python function `def quant_noise(module, p, block_size)` to solve the following problem:
Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks
Here is the function:
def quant_noise(module, p, block_size):
    """
    Wraps modules and applies quantization noise to the weights for
    subsequent quantization with Iterative Product Quantization as
    described in "Training with Quantization Noise for Extreme Model Compression"

    Args:
        - module: nn.Module
        - p: amount of Quantization Noise
        - block_size: size of the blocks for subsequent quantization with iPQ

    Remarks:
        - Module weights must have the right sizes wrt the block size
        - Only Linear, Embedding and Conv2d modules are supported for the moment
        - For more detail on how to quantize by blocks with convolutional weights,
          see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
        - We implement the simplest form of noise here as stated in the paper
          which consists in randomly dropping blocks
    """

    # if no quantization noise, don't register hook
    if p <= 0:
        return module

    # supported modules
    assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))

    # test whether module.weight has the right sizes wrt block_size
    is_conv = module.weight.ndim == 4

    # 2D matrix
    if not is_conv:
        assert (
            module.weight.size(1) % block_size == 0
        ), "Input features must be a multiple of block sizes"

    # 4D matrix
    else:
        # 1x1 convolutions: treated like a 2D matrix over channels
        if module.kernel_size == (1, 1):
            assert (
                module.in_channels % block_size == 0
            ), "Input channels must be a multiple of block sizes"
        # regular convolutions: blocks tile the k*k kernel dimension
        else:
            k = module.kernel_size[0] * module.kernel_size[1]
            assert k % block_size == 0, "Kernel size must be a multiple of block size"

    def _forward_pre_hook(mod, input):
        # no noise for evaluation
        if mod.training:
            if not is_conv:
                # gather weight and sizes
                weight = mod.weight
                in_features = weight.size(1)
                out_features = weight.size(0)

                # split weight matrix into blocks and randomly drop selected blocks:
                # one Bernoulli(p) draw per (row, column-block); repeat_interleave
                # expands each draw to cover its `block_size` columns
                mask = torch.zeros(
                    in_features // block_size * out_features, device=weight.device
                )
                mask.bernoulli_(p)
                mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)

            else:
                # gather weight and sizes
                weight = mod.weight
                in_channels = mod.in_channels
                out_channels = mod.out_channels

                # split weight matrix into blocks and randomly drop selected blocks
                if mod.kernel_size == (1, 1):
                    mask = torch.zeros(
                        int(in_channels // block_size * out_channels),
                        device=weight.device,
                    )
                    mask.bernoulli_(p)
                    mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
                else:
                    # one draw per (out_channel, in_channel): the whole k x k
                    # kernel of a dropped channel pair is zeroed together
                    mask = torch.zeros(
                        weight.size(0), weight.size(1), device=weight.device
                    )
                    mask.bernoulli_(p)
                    mask = (
                        mask.unsqueeze(2)
                        .unsqueeze(3)
                        .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
                    )

            # scale weights and apply mask
            mask = mask.to(
                torch.bool
            )  # x.bool() is not currently supported in TorchScript
            # 1/(1-p) rescaling keeps the expected weight magnitude unchanged,
            # mirroring inverted dropout
            s = 1 / (1 - p)
            mod.weight.data = s * weight.masked_fill(mask, 0)

    module.register_forward_pre_hook(_forward_pre_hook)
    return module
185,084 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as distributed
def ema_inplace(moving_avg, new, decay):
    """In-place EMA update: moving_avg <- decay * moving_avg + (1 - decay) * new."""
    moving_avg.data.mul_(decay)
    moving_avg.data.add_(new, alpha=1 - decay)
185,085 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as distributed
def l2norm(t):
def sample_vectors(samples, num):
def kmeans(samples, num_clusters, num_iters=10, use_cosine_sim=False):
    """Run k-means (Lloyd's algorithm) over `samples`.

    Args:
        samples: tensor of shape (n, d).
        num_clusters: number of centroids to fit.
        num_iters: number of assignment/update iterations.
        use_cosine_sim: if True, assign by dot-product similarity and keep
            centroids L2-normalized (presumably assumes the samples are
            already unit-norm — TODO confirm at call sites).

    Returns:
        (means, bins): centroids of shape (num_clusters, d) and the final
        per-cluster assignment counts.

    NOTE(review): `rearrange`/`repeat` presumably come from einops and
    `sample_vectors`/`l2norm` are helpers defined elsewhere in this module;
    their imports/bodies are not visible in this chunk.
    """
    dim, dtype, device = samples.shape[-1], samples.dtype, samples.device

    # initialize centroids by sampling from the data points
    means = sample_vectors(samples, num_clusters)

    for _ in range(num_iters):
        if use_cosine_sim:
            # similarity scores: higher means closer
            dists = samples @ means.t()
        else:
            # negative squared Euclidean distance, shape (n, num_clusters)
            diffs = rearrange(samples, 'n d -> n () d') \
                - rearrange(means, 'c d -> () c d')
            dists = -(diffs ** 2).sum(dim=-1)

        # hard-assign each sample to its best cluster
        buckets = dists.max(dim=-1).indices
        bins = torch.bincount(buckets, minlength=num_clusters)
        zero_mask = bins == 0
        # avoid division by zero for empty clusters
        bins_min_clamped = bins.masked_fill(zero_mask, 1)

        # sum the samples of each cluster, then divide by the counts
        new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype)
        new_means.scatter_add_(0, repeat(buckets, 'n -> n d', d=dim), samples)
        new_means = new_means / bins_min_clamped[..., None]

        if use_cosine_sim:
            new_means = l2norm(new_means)

        # clusters that received no samples keep their previous centroid
        means = torch.where(zero_mask[..., None], means, new_means)

    return means, bins
185,086 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as distributed
def l2norm(t):
    """Normalize *t* to unit L2 norm along its last dimension."""
    normalized = F.normalize(t, p=2, dim=-1)
    return normalized
def norm_ema_inplace(moving_avg, new, decay):
    """In-place EMA update followed by re-projection onto the unit sphere."""
    moving_avg.data.mul_(decay)
    moving_avg.data.add_(new, alpha=1 - decay)
    # renormalize so the running average stays unit-norm
    moving_avg.data.copy_(l2norm(moving_avg.data))
185,087 | import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def load_audio(manifest_path, max_keep, min_keep):
    """Load an audio manifest tsv, filtering entries by frame count.

    The first line of the manifest is the root directory; each following
    line is "<relative_path>\t<num_frames>[\t...]".  Entries with fewer
    than *min_keep* or more than *max_keep* frames are skipped (pass None
    to disable either bound).

    Returns:
        (root, names, inds, tot, sizes): kept paths, their 0-based line
        indices, the total number of manifest entries, and the kept sizes.
    """
    # same logger object as the module-level one (getLogger is a singleton
    # per name), but resolved locally so the function is self-contained
    logger = logging.getLogger(__name__)
    n_long, n_short, tot = 0, 0, 0
    names, inds, sizes = [], [], []
    with open(manifest_path) as f:
        root = f.readline().strip()
        for ind, line in enumerate(f):
            tot = ind + 1
            items = line.strip().split("\t")
            assert len(items) >= 2, line
            sz = int(items[1])
            if min_keep is not None and sz < min_keep:
                n_short += 1
            elif max_keep is not None and sz > max_keep:
                n_long += 1
            else:
                names.append(items[0])
                inds.append(ind)
                sizes.append(sz)
    # fix: the original crashed on an empty manifest (`ind` unbound) and on
    # all-filtered manifests (max()/min() of an empty sequence)
    longest = max(sizes) if sizes else "n/a"
    shortest = min(sizes) if sizes else "n/a"
    logger.info(
        f"max_keep={max_keep}, min_keep={min_keep}, "
        f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
        f"longest-loaded={longest}, shortest-loaded={shortest}"
    )
    return root, names, inds, tot, sizes
185,088 | import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
def load_label(label_path, inds, tot):
    """Read one label per line from *label_path* and keep only rows at *inds*.

    Asserts that the file has exactly *tot* lines (matching the manifest).
    """
    with open(label_path) as f:
        all_labels = [line.rstrip() for line in f]
    assert (
        len(all_labels) == tot
    ), f"number of labels does not match ({len(all_labels)} != {tot})"
    return [all_labels[i] for i in inds]
185,089 | import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
def load_label_offset(label_path, inds, tot):
    """Compute (start, end) byte offsets of each kept label line.

    Offsets are cumulative UTF-8 byte lengths (newlines included), suitable
    for seeking directly into the label file.  Asserts that the file has
    exactly *tot* lines.
    """
    with open(label_path) as f:
        byte_lens = [len(line.encode("utf-8")) for line in f]
    assert (
        len(byte_lens) == tot
    ), f"number of labels does not match ({len(byte_lens)} != {tot})"
    starts = list(itertools.accumulate([0] + byte_lens))
    return [(starts[i], starts[i + 1]) for i in inds]
185,090 | import logging
import os
from typing import Any, List, Optional
import librosa
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data.fairseq_dataset import FairseqDataset
The provided code snippet includes necessary dependencies for implementing the `_collate_frames` function. Write a Python function `def _collate_frames( frames: List[torch.Tensor], is_audio_input: bool = False )` to solve the following problem:
Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
Here is the function:
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
):
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out | Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] |
185,091 | import logging
import os
from typing import Any, List, Optional
import librosa
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_audio` function. Write a Python function `def load_audio(manifest_path, max_keep, min_keep)` to solve the following problem:
manifest tsv: src_wav, src_nframe, tgt_wav, tgt_nframe, tgt_spkemb
Here is the function:
def load_audio(manifest_path, max_keep, min_keep):
    """Load a speech-to-speech manifest tsv, filtering by source frame count.

    Manifest columns: src_wav, src_nframe, tgt_wav, tgt_nframe, tgt_spkemb;
    the first line is the root directory.  Entries with fewer than
    *min_keep* or more than *max_keep* source frames are skipped (None
    disables either bound).

    Returns:
        (root, src_names, inds, tot, sizes, tgt_names, tgt_sizes, spk_embeds)
        where *inds* are the 0-based line indices of kept entries and *tot*
        is the total number of manifest entries.
    """
    # same logger object as the module-level one (getLogger is a singleton
    # per name), but resolved locally so the function is self-contained
    logger = logging.getLogger(__name__)
    n_long, n_short, tot = 0, 0, 0
    src_names, tgt_names, inds, sizes, tgt_sizes, spk_embeds = [], [], [], [], [], []
    with open(manifest_path) as f:
        root = f.readline().strip()
        for ind, line in enumerate(f):
            tot = ind + 1
            items = line.strip().split("\t")
            # fix: the loop reads columns 0-4, so require all five fields
            # (the original asserted >= 2 and raised IndexError instead)
            assert len(items) >= 5, line
            sz = int(items[1])
            if min_keep is not None and sz < min_keep:
                n_short += 1
            elif max_keep is not None and sz > max_keep:
                n_long += 1
            else:
                src_names.append(items[0])
                tgt_names.append(items[2])
                # NOTE(review): kept as raw str (original behavior) —
                # callers presumably cast to int; confirm before changing
                tgt_sizes.append(items[3])
                spk_embeds.append(items[4])
                inds.append(ind)
                sizes.append(sz)
    # fix: the original crashed on an empty manifest (`ind` unbound) and on
    # all-filtered manifests (max()/min() of an empty sequence)
    longest = max(sizes) if sizes else "n/a"
    shortest = min(sizes) if sizes else "n/a"
    logger.info(
        f"max_keep={max_keep}, min_keep={min_keep}, "
        f"loaded {len(src_names)}, skipped {n_short} short and {n_long} long, "
        f"longest-loaded={longest}, shortest-loaded={shortest}"
    )
    return root, src_names, inds, tot, sizes, tgt_names, tgt_sizes, spk_embeds
185,092 | import logging
import os
from typing import Any, List, Optional
import librosa
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data.fairseq_dataset import FairseqDataset
The provided code snippet includes necessary dependencies for implementing the `logmelfilterbank` function. Write a Python function `def logmelfilterbank( audio, sampling_rate, fft_size=1024, hop_size=256, win_length=None, window="hann", num_mels=80, fmin=80, fmax=7600, eps=1e-10, )` to solve the following problem:
Compute log-Mel filterbank feature. (https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py) Args: audio (ndarray): Audio signal (T,). sampling_rate (int): Sampling rate. fft_size (int): FFT size. hop_size (int): Hop size. win_length (int): Window length. If set to None, it will be the same as fft_size. window (str): Window function type. num_mels (int): Number of mel basis. fmin (int): Minimum frequency in mel basis calculation. fmax (int): Maximum frequency in mel basis calculation. eps (float): Epsilon value to avoid inf in log calculation. Returns: ndarray: Log Mel filterbank feature (#frames, num_mels).
Here is the function:
def logmelfilterbank(
    audio,
    sampling_rate,
    fft_size=1024,
    hop_size=256,
    win_length=None,
    window="hann",
    num_mels=80,
    fmin=80,
    fmax=7600,
    eps=1e-10,
):
    """Compute a log10 Mel filterbank feature matrix.

    (Ported from ParallelWaveGAN:
    https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py)

    Args:
        audio (ndarray): Audio signal (T,).
        sampling_rate (int): Sampling rate.
        fft_size (int): FFT size.
        hop_size (int): Hop size.
        win_length (int): Window length; defaults to fft_size when None.
        window (str): Window function type.
        num_mels (int): Number of mel basis filters.
        fmin (int): Minimum frequency for the mel basis (0 when None).
        fmax (int): Maximum frequency for the mel basis (Nyquist when None).
        eps (float): Floor value to avoid -inf in the log.

    Returns:
        ndarray: Log Mel filterbank feature of shape (#frames, num_mels).
    """
    spectrum = librosa.stft(
        audio,
        n_fft=fft_size,
        hop_length=hop_size,
        win_length=win_length,
        window=window,
        pad_mode="reflect",
    )
    magnitude = np.abs(spectrum).T  # (#frames, #bins)
    low = 0 if fmin is None else fmin
    high = sampling_rate / 2 if fmax is None else fmax
    mel_basis = librosa.filters.mel(
        sr=sampling_rate, n_fft=fft_size, n_mels=num_mels, fmin=low, fmax=high
    )
    mel_energies = np.dot(magnitude, mel_basis.T)
    return np.log10(np.maximum(eps, mel_energies))
185,093 | import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
The provided code snippet includes necessary dependencies for implementing the `_collate_frames` function. Write a Python function `def _collate_frames( frames: List[torch.Tensor], is_audio_input: bool = False )` to solve the following problem:
Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
Here is the function:
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
):
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out | Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] |
185,094 | import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
def add_first_frame_and_remove_last_frame(ys):
    """Shift frames right by one: prepend a zero frame and drop the last.

    Standard teacher-forcing input shift for autoregressive decoders;
    *ys* has shape (batch, frames, dim) and the result has the same shape.
    """
    zero_frame = ys.new_zeros((ys.shape[0], 1, ys.shape[2]))
    return torch.cat([zero_frame, ys[:, :-1]], dim=1)
185,095 | import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def load_audio(manifest_path, max_keep, min_keep):
    """Load a TTS manifest tsv with columns: wav, nframe, spk_embed path.

    The first line is the root directory; entries with fewer than
    *min_keep* or more than *max_keep* frames are skipped (None disables
    either bound).  Returns (root, names, inds, tot, sizes, spk_embeds).
    """
    # getLogger returns the same singleton as the module-level logger
    log = logging.getLogger(__name__)
    n_long = n_short = 0
    names, inds, sizes, spk_embeds = [], [], [], []
    with open(manifest_path) as f:
        root = f.readline().strip()
        for ind, line in enumerate(f):
            items = line.strip().split("\t")
            assert len(items) == 3, line
            sz = int(items[1])
            if min_keep is not None and sz < min_keep:
                n_short += 1
            elif max_keep is not None and sz > max_keep:
                n_long += 1
            else:
                names.append(items[0])
                spk_embeds.append(items[2])
                inds.append(ind)
                sizes.append(sz)
    tot = ind + 1
    log.info(
        f"max_keep={max_keep}, min_keep={min_keep}, "
        f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
        f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
    )
    return root, names, inds, tot, sizes, spk_embeds
185,096 | import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
def load_label(label_path, inds, tot):
    """Read one label per line and return only the rows selected by *inds*.

    Asserts the file has exactly *tot* lines so labels stay aligned with
    the audio manifest.
    """
    with open(label_path) as f:
        raw = f.read().splitlines()
    assert (
        len(raw) == tot
    ), f"number of labels does not match ({len(raw)} != {tot})"
    return [raw[i] for i in inds]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.