```python
import random
import logging
from itertools import chain

import numpy as np
import torch
from torch import optim
from tensorboardX import SummaryWriter

# Project-local modules; the exact module paths are assumptions inferred
# from the identifiers used below. build_vocab and the PartitionPtr model
# class are defined elsewhere in the project.
from dataset.cdtb import CDTB
from structure.nodes import EDU, TEXT
from treebuilder.partptr.parser import PartitionPtrParser
from util.eval import parse_eval


def gen_decoder_data(root, edu2ids):
    # Reconstruction of a truncated listing: flatten a discourse subtree
    # into the top-down split decisions the pointer-network decoder is
    # trained on. Each split is (start, split, end, nuc, rel): the EDU
    # span [start, end) is cut at `split` with nuclearity `nuc` and
    # relation label `rel`.
    splits = []
    child_edus = []

    if isinstance(root, EDU):
        child_edus.append(root)
    else:
        children = [gen_decoder_data(child, edu2ids) for child in root]
        if len(children) == 1:
            child_edus, splits = children[0]
        else:
            # assumes binarized trees: one split decision per internal node
            left_child_edus, left_child_splits = children.pop(0)
            right_child_edus, right_child_splits = children.pop(0)
            start = edu2ids[left_child_edus[0]]
            split = edu2ids[right_child_edus[0]]
            end = edu2ids[right_child_edus[-1]] + 1
            # the `nuclear`/`ftype` attribute names are assumptions, chosen
            # to match the nuc_label/rel_label vocabularies used below
            nuc, rel = root.nuclear, root.ftype
            splits.append((start, split, end, nuc, rel))
            child_edus.extend(left_child_edus)
            splits.extend(left_child_splits)
            child_edus.extend(right_child_edus)
            splits.extend(right_child_splits)
    return child_edus, splits
```
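For intuition, here is the supervision a small tree yields. The values below are made up for illustration, not corpus data: a four-EDU paragraph grouped as ((e0 e1) (e2 e3)) produces one split tuple per internal node, parent before children:

```python
# hypothetical labels; each tuple reads: span [start, end) is cut at `split`
expected_splits = [
    (0, 2, 4, "NS", "causality"),     # root: cut [0, 4) at 2
    (0, 1, 2, "NN", "coordination"),  # left subtree: cut [0, 2) at 1
    (2, 3, 4, "NN", "coordination"),  # right subtree: cut [2, 4) at 3
]
```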
```python
def numericalize(dataset, word_vocab, pos_vocab, nuc_label, rel_label):
    # turn parsed paragraphs into index sequences for the encoder/decoder
    instances = []
    # keep only paragraphs that actually carry a discourse tree
    paragraphs = filter(lambda d: d.root_relation(), chain(*dataset))
    for paragraph in paragraphs:
        encoder_inputs = []
        decoder_inputs = []
        pred_splits = []
        pred_nucs = []
        pred_rels = []
        edus = list(paragraph.edus())
        for edu in edus:
            edu_word_ids = [word_vocab[word] for word in edu.words]
            edu_pos_ids = [pos_vocab[pos] for pos in edu.tags]
            encoder_inputs.append((edu_word_ids, edu_pos_ids))
        edu2ids = {edu: i for i, edu in enumerate(edus)}
        _, splits = gen_decoder_data(paragraph.root_relation(), edu2ids)
        for start, split, end, nuc, rel in splits:
            decoder_inputs.append((start, end))
            pred_splits.append(split)
            pred_nucs.append(nuc_label[nuc])
            pred_rels.append(rel_label[rel])
        instances.append((encoder_inputs, decoder_inputs,
                          pred_splits, pred_nucs, pred_rels))
    return instances
```
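Concretely, a single numericalized instance has this shape (all ids below are made-up examples):

```python
# one two-EDU paragraph after numericalize (hypothetical ids)
encoder_inputs = [([4, 17, 3], [2, 5, 5]),  # EDU 0: word ids, POS-tag ids
                  ([9, 2], [7, 5])]         # EDU 1
decoder_inputs = [(0, 2)]                   # span presented to the decoder
pred_splits = [1]                           # gold split point inside (0, 2)
pred_nucs = [0]                             # nuclearity label id
pred_rels = [3]                             # relation label id
instance = (encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels)
```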
```python
def gen_batch_iter(instances, batch_size, use_gpu=False):
    # shuffle by index: the instances are ragged, so permuting indices
    # avoids building a ragged numpy object array
    permutation = np.random.permutation(len(instances))
    num_instances = len(instances)
    offset = 0

    while offset < num_instances:
        batch_ids = permutation[offset: min(num_instances, offset + batch_size)]
        batch = [instances[i] for i in batch_ids]
        num_batch = len(batch)

        # batch-level padding sizes
        max_edu_seqlen = 0
        max_word_seqlen = 0
        for encoder_inputs, _, _, _, _ in batch:
            max_edu_seqlen = max(max_edu_seqlen, len(encoder_inputs))
            for edu_word_ids, _ in encoder_inputs:
                max_word_seqlen = max(max_word_seqlen, len(edu_word_ids))

        # encoder buffers (the original np.long is spelled np.int64 today)
        e_input_words = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.int64)
        e_input_poses = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.int64)
        e_masks = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.uint8)

        # decoder buffers: a tree over n EDUs makes n-1 split decisions
        d_inputs = np.zeros([num_batch, max_edu_seqlen - 1, 2], dtype=np.int64)
        d_outputs = np.zeros([num_batch, max_edu_seqlen - 1], dtype=np.int64)
        d_output_nucs = np.zeros([num_batch, max_edu_seqlen - 1], dtype=np.int64)
        d_output_rels = np.zeros([num_batch, max_edu_seqlen - 1], dtype=np.int64)
        d_masks = np.zeros([num_batch, max_edu_seqlen - 1, max_edu_seqlen + 1], dtype=np.uint8)

        for batchi, (encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels) in enumerate(batch):
            for edui, (edu_word_ids, edu_pos_ids) in enumerate(encoder_inputs):
                word_seqlen = len(edu_word_ids)
                e_input_words[batchi, edui, :word_seqlen] = edu_word_ids
                e_input_poses[batchi, edui, :word_seqlen] = edu_pos_ids
                e_masks[batchi, edui, :word_seqlen] = 1
            for di, (start, end) in enumerate(decoder_inputs):
                d_inputs[batchi, di] = (start, end)
                # mask semantics assumed: valid split points fall strictly
                # inside the span handed to the decoder
                d_masks[batchi, di, start + 1: end] = 1
            d_outputs[batchi, :len(pred_splits)] = pred_splits
            d_output_nucs[batchi, :len(pred_nucs)] = pred_nucs
            d_output_rels[batchi, :len(pred_rels)] = pred_rels

        # numpy -> torch
        e_input_words = torch.from_numpy(e_input_words).long()
        e_input_poses = torch.from_numpy(e_input_poses).long()
        e_masks = torch.from_numpy(e_masks).byte()
        d_inputs = torch.from_numpy(d_inputs).long()
        d_outputs = torch.from_numpy(d_outputs).long()
        d_output_nucs = torch.from_numpy(d_output_nucs).long()
        d_output_rels = torch.from_numpy(d_output_rels).long()
        d_masks = torch.from_numpy(d_masks).byte()

        if use_gpu:
            e_input_words = e_input_words.cuda()
            e_input_poses = e_input_poses.cuda()
            e_masks = e_masks.cuda()
            d_inputs = d_inputs.cuda()
            d_outputs = d_outputs.cuda()
            d_output_nucs = d_output_nucs.cuda()
            d_output_rels = d_output_rels.cuda()
            d_masks = d_masks.cuda()

        yield (e_input_words, e_input_poses, e_masks), \
              (d_inputs, d_outputs, d_output_nucs, d_output_rels, d_masks)
        offset += batch_size
```
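A usage sketch: pull one padded batch from the iterator and inspect the tensor shapes (`trainset` as produced by `numericalize` above; the batch size is arbitrary):

```python
batch_iter = gen_batch_iter(trainset, batch_size=16)
(e_words, e_poses, e_masks), (d_in, d_out, d_nucs, d_rels, d_masks) = next(batch_iter)
print(e_words.shape)  # (batch, max_edu_seqlen, max_word_seqlen)
print(d_in.shape)     # (batch, max_edu_seqlen - 1, 2)
print(d_masks.shape)  # (batch, max_edu_seqlen - 1, max_edu_seqlen + 1)
```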
```python
def parse_and_eval(dataset, model):
    # parse bare EDU sequences and score them against the gold trees
    model.eval()
    parser = PartitionPtrParser(model)
    golds = list(filter(lambda d: d.root_relation(), chain(*dataset)))
    num_instances = len(golds)

    strips = []
    for paragraph in golds:
        edus = []
        for edu in paragraph.edus():
            # strip gold structure: keep only the surface text plus the
            # tokenization already attached to the gold EDU
            edu_copy = EDU([TEXT(edu.text)])
            setattr(edu_copy, "words", edu.words)
            setattr(edu_copy, "tags", edu.tags)
            edus.append(edu_copy)
        strips.append(edus)

    parses = []
    for edus in strips:
        parse = parser.parse(edus)
        parses.append(parse)
    # returning the instance count alongside the scores is an assumption
    return num_instances, parse_eval(parses, golds)


def model_score(scores):
    # parse_eval is assumed to return (precision, recall, f1) triples for
    # span, nuclearity and relation; sum the F1s into one selection score
    eval_score = sum(score[2] for score in scores)
    return eval_score
```
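A sketch of how these two functions would combine for validation-based model selection; the `cdtb.validate` split and the checkpoint path are assumptions:

```python
best_score = 0.0
num_val, val_scores = parse_and_eval(cdtb.validate, model)
if model_score(val_scores) > best_score:
    best_score = model_score(val_scores)
    torch.save(model, "best_model.pt")  # persistence scheme assumed
logging.info("validated on %d paragraphs, score %.4f" % (num_val, best_score))
```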
```python
def main(args):
    # seed everything for reproducibility
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # load the CDTB corpus (with Chinese Treebank files for preprocessing)
    cdtb = CDTB(args.data, "TRAIN", "VALIDATE", "TEST",
                ctb_dir=args.ctb_dir, preprocess=True, cache_dir=args.cache_dir)

    # build vocabularies and numericalize the training set
    word_vocab, pos_vocab, nuc_label, rel_label = build_vocab(cdtb.train)
    trainset = numericalize(cdtb.train, word_vocab, pos_vocab, nuc_label, rel_label)
    logging.info("num of instances trainset: %d" % len(trainset))
    logging.info("args: %s" % str(args))

    # hypothetical constructor: model construction is missing from this
    # listing; assume a partition-pointer network compatible with
    # PartitionPtrParser
    model = PartitionPtr(word_vocab, pos_vocab, nuc_label, rel_label, args)
    if args.use_gpu:
        model.cuda()
    logging.info("model:\n%s" % str(model))

    # training setup (the Adam hyper-parameter args are assumptions)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
    writer = SummaryWriter(args.log_dir)
    logging.info("hint: run 'tensorboard --logdir %s' to observe training status" % args.log_dir)

    for nepoch in range(1, args.epoch + 1):
        batch_iter = gen_batch_iter(trainset, args.batch_size, args.use_gpu)
        for nbatch, (e_inputs, d_inputs) in enumerate(batch_iter, start=1):
            model.train()
            optimizer.zero_grad()
```
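The listing stops at `optimizer.zero_grad()`. A minimal sketch of the usual remainder of the step, assuming the model exposes a `loss` method over the batch tensors (the method name, its signature, and the loss decomposition are all assumptions):

```python
            # assumed interface: the three decoder losses for the batch
            splits_loss, nucs_loss, rels_loss = model.loss(e_inputs, d_inputs)
            loss = splits_loss + nucs_loss + rels_loss
            loss.backward()
            optimizer.step()
            writer.add_scalar("train/loss", loss.item(), nbatch)
```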