import logging

import torch

logger = logging.getLogger(__name__)


def apply_to_sample(f, sample):
    """Recursively apply ``f`` to every tensor in a (possibly nested) sample."""
    if len(sample) == 0:
        return {}

    def _apply(x):
        if torch.is_tensor(x):
            return f(x)
        elif isinstance(x, dict):
            return {key: _apply(value) for key, value in x.items()}
        elif isinstance(x, list):
            return [_apply(x) for x in x]
        elif isinstance(x, tuple):
            return tuple(_apply(x) for x in x)
        elif isinstance(x, set):
            return {_apply(x) for x in x}
        else:
            return x

    return _apply(sample)


def move_to_cuda(sample):
    def _move_to_cuda(tensor):
        return tensor.cuda()

    return apply_to_sample(_move_to_cuda, sample)


def move_to_cpu(sample):
    def _move_to_cpu(tensor):
        # PyTorch has poor support for half tensors (float16) on CPU;
        # upcast them to float32 before moving off the GPU.
        if tensor.dtype == torch.float16:
            tensor = tensor.to(dtype=torch.float32)
        return tensor.cpu()

    return apply_to_sample(_move_to_cpu, sample)

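# Illustrative usage (not part of the original module): a batch here is a
# nested dict of tensors, so a single call moves the whole sample between
# devices.
#
#   sample = {"net_input": {"src_tokens": torch.zeros(2, 5, dtype=torch.long)}}
#   sample = move_to_cuda(sample)  # every tensor is now on the GPU
#   sample = move_to_cpu(sample)   # back on the CPU, fp16 upcast to fp32
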
def get_incremental_state(module, incremental_state, key):
    """Helper for getting incremental state for an nn.Module."""
    return module.get_incremental_state(incremental_state, key)


def set_incremental_state(module, incremental_state, key, value):
    """Helper for setting incremental state for an nn.Module."""
    if incremental_state is not None:
        result = module.set_incremental_state(incremental_state, key, value)
        if result is not None:
            incremental_state = result
    return incremental_state

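# Illustrative usage (not part of the original module): an incremental decoder
# caches per-timestep state (e.g. attention keys/values) under a string key;
# these helpers read and write that cache on the module's behalf.
#
#   state = get_incremental_state(module, incremental_state, "attn_state")
#   # ... update state for the current timestep ...
#   incremental_state = set_incremental_state(
#       module, incremental_state, "attn_state", state
#   )
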
def load_align_dict(replace_unk):
    if replace_unk is None:
        align_dict = None
    elif isinstance(replace_unk, str) and len(replace_unk) > 0:
        # Load an alignment dictionary for unknown-word replacement
        # (Luong et al., 2015). Each line maps a source word to its
        # preferred translation: "src_word tgt_word".
        align_dict = {}
        with open(replace_unk, "r") as f:
            for line in f:
                cols = line.split()
                align_dict[cols[0]] = cols[1]
    else:
        # No alignment dictionary provided, but we still want to perform
        # unknown-word replacement: copy the original source word.
        align_dict = {}
    return align_dict

def print_embed_overlap(embed_dict, vocab_dict):
    embed_keys = set(embed_dict.keys())
    vocab_keys = set(vocab_dict.symbols)
    overlap = len(embed_keys & vocab_keys)
    logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))

def parse_embedding(embed_path):
    """Parse an embedding text file into a dictionary of word -> tensor.

    The first line gives the vocabulary size and dimension; each following
    line holds a word and its embedding weights, separated by spaces, e.g.:

        2 5
        the -0.0230 -0.0264  0.0287  0.0171  0.1403
        at -0.0395 -0.1286  0.0275  0.0254 -0.0932
    """
    embed_dict = {}
    with open(embed_path) as f_embed:
        next(f_embed)  # skip the header line
        for line in f_embed:
            pieces = line.rstrip().split(" ")
            embed_dict[pieces[0]] = torch.Tensor(
                [float(weight) for weight in pieces[1:]]
            )
    return embed_dict

def load_embedding(embed_dict, vocab, embedding):
    # Copy pre-trained vectors into the embedding table for every vocabulary
    # entry that appears in the embedding file; leave the rest untouched.
    for idx in range(len(vocab)):
        token = vocab[idx]
        if token in embed_dict:
            embedding.weight.data[idx] = embed_dict[token]
    return embedding

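# Illustrative usage (not part of the original module): copying pre-trained
# word vectors into a model's embedding table. The file name and dimension
# are made up for the example.
#
#   embed_dict = parse_embedding("embeddings.txt")  # word -> 1-D tensor
#   embedding = torch.nn.Embedding(len(vocab), 5)
#   embedding = load_embedding(embed_dict, vocab, embedding)
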
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
    from fairseq import tokenizer  # imported here to avoid a circular import

    # Tokens are strings at this point.
    hypo_tokens = tokenizer.tokenize_line(hypo_str)
    # TODO: rare cases where the replacement is '<eos>' should be handled gracefully
    src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
    for i, ht in enumerate(hypo_tokens):
        if ht == unk:
            src_token = src_tokens[alignment[i]]
            # Either take the entry from the alignment dictionary or just
            # copy the original source word.
            hypo_tokens[i] = align_dict.get(src_token, src_token)
    return " ".join(hypo_tokens)

def post_process_prediction(
    hypo_tokens, src_str, alignment, align_dict, tgt_dict,
    remove_bpe=None, extra_symbols_to_ignore=None,
):
    hypo_str = tgt_dict.string(
        hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore
    )
    if align_dict is not None:
        hypo_str = replace_unk(
            hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()
        )
    if align_dict is not None or remove_bpe is not None:
        # Convert back to tokens for evaluating with unk replacement and/or
        # without BPE. Note the dictionary can be modified inside encode_line.
        hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
    return hypo_tokens, hypo_str, alignment

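# Illustrative usage (not part of the original module): during generation each
# hypothesis is detokenized, optionally unk-replaced via the alignment
# dictionary, and re-encoded for scoring. The hypo dict keys are assumptions.
#
#   hypo_tokens, hypo_str, alignment = post_process_prediction(
#       hypo["tokens"], src_str, hypo["alignment"], align_dict, tgt_dict,
#       remove_bpe="@@ ",
#   )
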
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    """
    # The series of casts and type conversions here are carefully balanced
    # to work with both ONNX export and XLA: XLA prefers ints, cumsum
    # defaults to long, and ONNX can't handle cumsum's dtype kwarg.
    mask = tensor.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx

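# Worked example (not part of the original module), with padding_idx = 1:
#
#   make_positions(torch.tensor([[5, 6, 1], [7, 1, 1]]), padding_idx=1)
#   # -> tensor([[2, 3, 1], [2, 1, 1]])
#
# Real tokens are numbered from padding_idx + 1; padding slots keep padding_idx.
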
def strip_pad(tensor, pad):
    return tensor[tensor.ne(pad)]

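# Worked example (not part of the original module):
#
#   strip_pad(torch.tensor([7, 8, 9, 1, 1]), pad=1)  # -> tensor([7, 8, 9])
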
def buffered_arange(max):
    # Cache a single growing LongTensor on the function object so repeated
    # calls can return a slice of the same buffer instead of reallocating.
    if not hasattr(buffered_arange, "buf"):
        buffered_arange.buf = torch.LongTensor()
    if max > buffered_arange.buf.numel():
        buffered_arange.buf.resize_(max)
        torch.arange(max, out=buffered_arange.buf)
    return buffered_arange.buf[:max]

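# Note (not part of the original module): the buffer is shared across calls,
# so callers should treat the returned view as read-only.
#
#   buffered_arange(5)  # -> tensor([0, 1, 2, 3, 4])
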
def convert_padding_direction(
    src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
    assert right_to_left ^ left_to_right
    pad_mask = src_tokens.eq(padding_idx)
    if not pad_mask.any():
        # no padding, return early
        return src_tokens
    if left_to_right and not pad_mask[:, 0].any():
        # already right padded
        return src_tokens
    if right_to_left and not pad_mask[:, -1].any():
        # already left padded
        return src_tokens
    max_len = src_tokens.size(1)
    buffered = torch.empty(0).long()
    if max_len > 0:
        torch.arange(max_len, out=buffered)
    range = buffered.type_as(src_tokens).expand_as(src_tokens)
    num_pads = pad_mask.long().sum(dim=1, keepdim=True)
    # Rotate each row by its pad count so the padding ends up on the other side.
    if right_to_left:
        index = torch.remainder(range - num_pads, max_len)
    else:
        index = torch.remainder(range + num_pads, max_len)
    return src_tokens.gather(1, index)

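# Worked example (not part of the original module), with padding_idx = 1:
#
#   tokens = torch.tensor([[1, 1, 7, 8], [1, 5, 6, 7]])  # left-padded
#   convert_padding_direction(tokens, padding_idx=1, left_to_right=True)
#   # -> tensor([[7, 8, 1, 1], [5, 6, 7, 1]])            # right-padded
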
def item(tensor):
    if hasattr(tensor, "item"):
        return tensor.item()
    if hasattr(tensor, "__getitem__"):
        return tensor[0]
    return tensor

try:
    from amp_C import multi_tensor_l2norm

    multi_tensor_l2norm_available = True
except ImportError:
    multi_tensor_l2norm_available = False


def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
    # Bucket gradients per device so each CUDA bucket can be reduced with a
    # single fused apex multi_tensor_l2norm kernel.
    per_device_grads = {}
    norms = []
    for grad in grads:
        device = grad.device
        cur_device_grads = per_device_grads.get(device)
        if cur_device_grads is None:
            cur_device_grads = []
            per_device_grads[device] = cur_device_grads
        cur_device_grads.append(grad)
    for device in per_device_grads.keys():
        cur_device_grads = per_device_grads[device]
        if device.type == "cuda":
            # TODO(msb) return has_inf
            has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
            with torch.cuda.device(device):
                norm = multi_tensor_l2norm(
                    chunk_size, has_inf, [cur_device_grads], False
                )
            norms.append(norm[0])
        else:
            norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
    total_norm = torch.norm(torch.stack(norms))
    return total_norm

def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
    if isinstance(params, torch.Tensor):
        params = [params]
    params = list(params)
    grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]
    if len(grads) == 0:
        if len(params) > 0:
            return params[0].new_tensor(0.)
        else:
            return torch.tensor(0.)

    if multi_tensor_l2norm_available:
        total_norm = multi_tensor_total_norm(grads)
    else:
        total_norm = torch.norm(
            torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in grads])
        )

    if aggregate_norm_fn is not None:
        total_norm = aggregate_norm_fn(total_norm)

    if max_norm > 0:
        max_norm = float(max_norm)
        clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
        for g in grads:
            g.mul_(clip_coef)
    return total_norm

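# Illustrative usage (not part of the original module): clip after backward()
# and before the optimizer step, mirroring torch.nn.utils.clip_grad_norm_.
#
#   loss.backward()
#   grad_norm = clip_grad_norm_(model.parameters(), max_norm=1.0)
#   optimizer.step()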