import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils import rnn


# Fragment: the source begins mid-way through the forward pass of a multi-hop
# reading-comprehension model with supporting-fact prediction. The enclosing
# class and the earlier embedding/attention layers are elided; this signature
# is reconstructed from the variables used below and is an assumption, not
# the original.
def forward(self, output, context_lens, context_mask,
            start_mapping, end_mapping, all_mapping, return_yp=False):
    # second context RNN + self-attention, merged through a residual add
    output_t = self.rnn_2(output, context_lens)
    output_t = self.self_att(output_t, output_t, context_mask)
    output_t = self.linear_2(output_t)
    output = output + output_t

    # supporting-fact head: pool each sentence from the backward-direction
    # state at its start token and the forward-direction state at its end token
    sp_output = self.rnn_sp(output, context_lens)
    start_output = torch.matmul(start_mapping.permute(0, 2, 1).contiguous(), sp_output[:, :, self.hidden:])
    end_output = torch.matmul(end_mapping.permute(0, 2, 1).contiguous(), sp_output[:, :, :self.hidden])
    sp_output = torch.cat([start_output, end_output], dim=-1)

    sp_output_t = self.linear_sp(sp_output)
    # prepend a fixed zero logit as the "not a supporting fact" class
    sp_output_aux = Variable(sp_output_t.data.new(sp_output_t.size(0), sp_output_t.size(1), 1).zero_())
    predict_support = torch.cat([sp_output_aux, sp_output_t], dim=-1).contiguous()

    # scatter sentence-level features back onto token positions
    sp_output = torch.matmul(all_mapping, sp_output)
    output_start = torch.cat([output, sp_output], dim=-1)

    # span start/end logits; padded positions are pushed down to -1e30
    output_start = self.rnn_start(output_start, context_lens)
    logit1 = self.linear_start(output_start).squeeze(2) - 1e30 * (1 - context_mask)
    output_end = torch.cat([output, output_start], dim=2)
    output_end = self.rnn_end(output_end, context_lens)
    logit2 = self.linear_end(output_end).squeeze(2) - 1e30 * (1 - context_mask)

    # answer-type head from a max-pooled sequence representation
    output_type = torch.cat([output, output_end], dim=2)
    output_type = torch.max(self.rnn_type(output_type, context_lens), 1)[0]
    predict_type = self.linear_type(output_type)

    if not return_yp:
        return logit1, logit2, predict_type, predict_support

    # joint span decoding: outer sum of start and end logits, masked so the
    # predicted end cannot precede the start
    outer = logit1[:, :, None] + logit2[:, None]
    outer_mask = self.get_output_mask(outer)
    outer = outer - 1e30 * (1 - outer_mask[None].expand_as(outer))
    yp1 = outer.max(dim=2)[0].max(dim=1)[1]
    yp2 = outer.max(dim=1)[0].max(dim=1)[1]
    return logit1, logit2, predict_type, predict_support, yp1, yp2
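

# A minimal sketch (not from the source) of how the outer-sum decoding above
# picks the best answer span. get_output_mask is not defined in this file, so
# a plain upper-triangular mask stands in for it here.
def _span_decoding_demo():
    logit1 = torch.tensor([[0.1, 2.0, 0.3]])       # start logits, shape (1, 3)
    logit2 = torch.tensor([[0.0, 0.5, 3.0]])       # end logits, shape (1, 3)
    outer = logit1[:, :, None] + logit2[:, None]   # (1, 3, 3): start x end
    mask = torch.triu(torch.ones(3, 3))            # allow only end >= start
    outer = outer - 1e30 * (1 - mask)[None]
    yp1 = outer.max(dim=2)[0].max(dim=1)[1]        # best start index
    yp2 = outer.max(dim=1)[0].max(dim=1)[1]        # best end index
    assert yp1.item() == 1 and yp2.item() == 2     # span (1, 2) scores 5.0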


class LockedDropout(nn.Module):
    """Variational ("locked") dropout: one mask per sequence, reused at every
    time step, instead of an independent mask per position."""

    def __init__(self, dropout):
        super().__init__()
        self.dropout = dropout

    def forward(self, x):
        dropout = self.dropout
        if not self.training:
            return x
        # sample a (batch, 1, features) keep-mask, rescale by 1 / keep_prob,
        # then broadcast it across the time dimension
        m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - dropout)
        mask = Variable(m.div_(1 - dropout), requires_grad=False)
        mask = mask.expand_as(x)
        return mask * x
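

# A small check (a sketch, not part of the source) that the mask really is
# shared across time: with a constant input, every time step comes out equal.
def _locked_dropout_demo():
    drop = LockedDropout(0.5)
    drop.train()
    x = torch.ones(2, 4, 8)    # (batch, time, features)
    y = drop(x)
    # each feature channel is either zeroed or scaled by 2 for the whole
    # sequence, so all time steps agree
    assert torch.equal(y[:, 0], y[:, 3])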


class EncoderRNN(nn.Module):
    """A stack of single-layer GRUs with locked dropout between layers and a
    learned initial hidden state per layer."""

    def __init__(self, input_size, num_units, nlayers, concat, bidir, dropout, return_last):
        super().__init__()
        self.rnns = []
        for i in range(nlayers):
            if i == 0:
                input_size_ = input_size
                output_size_ = num_units
            else:
                # later layers consume the previous layer's output, which is
                # twice as wide when the GRUs are bidirectional
                input_size_ = num_units if not bidir else num_units * 2
                output_size_ = num_units
            self.rnns.append(nn.GRU(input_size_, output_size_, 1, bidirectional=bidir, batch_first=True))
        self.rnns = nn.ModuleList(self.rnns)
        self.init_hidden = nn.ParameterList([nn.Parameter(torch.Tensor(2 if bidir else 1, 1, num_units).zero_()) for _ in range(nlayers)])
        self.dropout = LockedDropout(dropout)
        self.concat = concat
        self.nlayers = nlayers
        self.return_last = return_last

        # self.reset_parameters()

    def reset_parameters(self):
        # loop variable renamed from `rnn` so it does not shadow the
        # torch.nn.utils.rnn module imported above
        for gru in self.rnns:
            for name, p in gru.named_parameters():
                if 'weight' in name:
                    p.data.normal_(std=0.1)
                else:
                    p.data.zero_()

    def get_init(self, bsz, i):
        # broadcast the learned initial hidden state over the batch
        return self.init_hidden[i].expand(-1, bsz, -1).contiguous()

    def forward(self, input, input_lengths=None):
        bsz, slen = input.size(0), input.size(1)
        output = input
        outputs = []
        if input_lengths is not None:
            lens = input_lengths.data.cpu().numpy()
        for i in range(self.nlayers):
            hidden = self.get_init(bsz, i)
            output = self.dropout(output)
            if input_lengths is not None:
                # pack so the GRU skips padded positions
                output = rnn.pack_padded_sequence(output, lens, batch_first=True)
            output, hidden = self.rnns[i](output, hidden)
            if input_lengths is not None:
                output, _ = rnn.pad_packed_sequence(output, batch_first=True)
                if output.size(1) < slen:  # used for parallel
                    # pad_packed_sequence trims to the longest sequence in
                    # this chunk, so pad back up to slen. The source file is
                    # truncated at this point; the remainder of the method is
                    # a sketch of the standard completion of this pattern.
                    padding = Variable(output.data.new(1, 1, 1).zero_())
                    output = torch.cat([output, padding.expand(output.size(0), slen - output.size(1), output.size(2))], dim=1)
            if self.return_last:
                outputs.append(hidden.permute(1, 0, 2).contiguous().view(bsz, -1))
            else:
                outputs.append(output)
        if self.concat:
            return torch.cat(outputs, dim=2)
        return outputs[-1]
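

# Usage sketch (shapes and hyper-parameters are assumptions, not from the
# source). Lengths are sorted in descending order because pack_padded_sequence
# expects that by default.
def _encoder_rnn_demo():
    enc = EncoderRNN(input_size=300, num_units=80, nlayers=3,
                     concat=True, bidir=True, dropout=0.2, return_last=False)
    x = torch.randn(4, 50, 300)              # (batch, seq_len, input_size)
    lens = torch.tensor([50, 48, 40, 33])    # valid tokens per sequence
    out = enc(x, lens)
    # concat=True stacks all three layers: 3 layers * 2 directions * 80 units
    assert out.shape == (4, 50, 3 * 2 * 80)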