# NOTE: dataset-viewer table-header residue removed (non-code extraction artifact).
output = torch.cat([output, padding.expand(output.size(0), slen-output.size(1), output.size(2))], dim=1)
|
if self.return_last:
|
outputs.append(hidden.permute(1, 0, 2).contiguous().view(bsz, -1))
|
else:
|
outputs.append(output)
|
if self.concat:
|
return torch.cat(outputs, dim=2)
|
return outputs[-1]
|
class BiAttention(nn.Module):
    """BiDAF-style bi-directional attention between an input sequence and a memory.

    Produces, for each input position, a concatenation of the input itself,
    the memory summary attended by the input, and two elementwise-gated
    interaction terms — yielding a 4*input_size feature per position.
    """

    def __init__(self, input_size, dropout):
        super().__init__()
        self.dropout = LockedDropout(dropout)
        # Per-token scalar scores for the additive part of the attention logits.
        self.input_linear = nn.Linear(input_size, 1, bias=False)
        self.memory_linear = nn.Linear(input_size, 1, bias=False)
        # Learned per-dimension scaling for the bilinear term,
        # initialised uniformly in [1/sqrt(input_size), 1].
        self.dot_scale = nn.Parameter(torch.Tensor(input_size).uniform_(1.0 / (input_size ** 0.5)))

    def forward(self, input, memory, mask):
        """Attend `input` over `memory`.

        NOTE(review): assumes input is (bsz, input_len, d), memory is
        (bsz, memory_len, d) and mask is (bsz, memory_len) with 1 for valid
        memory positions — confirm against callers.
        """
        bsz = input.size(0)
        n_in = input.size(1)
        n_mem = memory.size(1)

        # Keep the two dropout calls in this exact order so the RNG stream
        # (and therefore training behavior) matches the original.
        input = self.dropout(input)
        memory = self.dropout(memory)

        score_in = self.input_linear(input)                        # (bsz, n_in, 1)
        score_mem = self.memory_linear(memory).view(bsz, 1, n_mem)  # (bsz, 1, n_mem)
        score_cross = torch.bmm(input * self.dot_scale, memory.permute(0, 2, 1).contiguous())
        att = score_in + score_mem + score_cross

        # Push padded memory positions to -inf (approximately) before softmax.
        att = att - 1e30 * (1 - mask[:, None])

        # Input-to-memory attention: each input token summarizes the memory.
        w_one = F.softmax(att, dim=-1)
        out_one = torch.bmm(w_one, memory)
        # Memory-to-input attention: one global summary of the input.
        w_two = F.softmax(att.max(dim=-1)[0], dim=-1).view(bsz, 1, n_in)
        out_two = torch.bmm(w_two, input)

        return torch.cat([input, out_one, input * out_one, out_two * out_one], dim=-1)
|
class GateLayer(nn.Module):
    """Gated linear unit: ``linear(x) * sigmoid(gate(x))``.

    Both projections map ``d_input`` features to ``d_output`` features; the
    gate branch modulates the linear branch elementwise.
    """

    def __init__(self, d_input, d_output):
        super(GateLayer, self).__init__()
        self.linear = nn.Linear(d_input, d_output)
        self.gate = nn.Linear(d_input, d_output)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        projected = self.linear(input)
        gate_values = self.sigmoid(self.gate(input))
        return projected * gate_values
|
# <FILESEP>
|
import os
|
import argparse
|
import time
|
from tqdm import tqdm
|
from data import fix_legacy_dict
|
import numpy as np
|
# np.random.seed(0)
|
import torch
|
import torch.nn as nn
|
# torch.manual_seed(0)
|
import torchvision
|
import torchvision.transforms as transforms
|
from torchvision.utils import save_image, make_grid
|
from model import Model
|
from config import diffusion_config
|
def _map_gpu(gpu):
|
if gpu == 'cuda':
|
return lambda x: x.cuda()
|
else:
|
return lambda x: x.to(torch.device('cuda:'+gpu))
|
def rescale(X, batch=True):
    """Linearly map tensor values from [-1, 1] into [0, 1] on a detached copy.

    The input tensor is never modified. The ``batch`` flag is accepted for
    interface compatibility but unused.
    """
    copy = X.detach().clone()
    return (copy + 1) / 2
|
def std_normal(size):
    """Sample standard-normal noise of shape ``size`` and move it to the GPU.

    NOTE(review): relies on a global ``map_gpu`` that is presumably assigned
    elsewhere (e.g. ``map_gpu = _map_gpu(args.gpu)`` in the entry point) —
    confirm before reuse.
    """
    noise = torch.normal(0, 1, size=size)
    return map_gpu(noise)
|
def print_size(net):
    """Print the number of trainable parameters of a network, in millions.

    Silently does nothing when ``net`` is ``None`` or not an ``nn.Module``.
    Returns ``None``.
    """
    if net is None or not isinstance(net, torch.nn.Module):
        return
    trainable = (p for p in net.parameters() if p.requires_grad)
    n_params = sum(np.prod(p.size()) for p in trainable)
    print("{} Parameters: {:.6f}M".format(
        net.__class__.__name__, n_params / 1e6), flush=True)
|
def calc_diffusion_hyperparams(T, beta_0, beta_T):
|
"""
|
Compute diffusion process hyperparameters
|
Parameters:
|
T (int): number of diffusion steps
|
beta_0 and beta_T (float): beta schedule start/end value,
|
where any beta_t in the middle is linearly interpolated
|
# NOTE: excerpt truncated here — the body of calc_diffusion_hyperparams continues
# beyond this chunk; trailing dataset-viewer page text removed (non-code artifact).