theachyuttiwari committed
Commit 9b0c357 · 1 Parent(s): 4eeb7ed

Upload kilt_create_dpr_support_docs.py

Files changed (1)
  1. kilt_create_dpr_support_docs.py +109 -0
kilt_create_dpr_support_docs.py ADDED
@@ -0,0 +1,109 @@
+ import argparse
+ import json
+ import os
+
+ import faiss
+ import torch
+ from datasets import load_dataset, Dataset
+ from tqdm.auto import tqdm
+ from transformers import AutoTokenizer, DPRQuestionEncoder, DPRContextEncoder
+
+ from common import articles_to_paragraphs, embed_questions, embed_passages, create_kilt_datapoint, \
+     kilt_wikipedia_columns
+ from common import kilt_wikipedia_paragraph_columns as columns
+
+
+ def generate_support_docs(args):
+     dims = 128
+     min_chars_per_passage = 200
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     lfqa = load_dataset("vblagoje/lfqa")
+
+     ctx_tokenizer = AutoTokenizer.from_pretrained(args.ctx_encoder_name)
+     ctx_model = DPRContextEncoder.from_pretrained(args.ctx_encoder_name).to(device)
+     _ = ctx_model.eval()
+
+     question_tokenizer = AutoTokenizer.from_pretrained(args.question_encoder_name)
+     question_model = DPRQuestionEncoder.from_pretrained(args.question_encoder_name).to(device)
+     _ = question_model.eval()
+
+     kilt_wikipedia = load_dataset("kilt_wikipedia", split="full")
+
+     kilt_wikipedia_paragraphs = kilt_wikipedia.map(articles_to_paragraphs, batched=True,
+                                                    remove_columns=kilt_wikipedia_columns,
+                                                    batch_size=512,
+                                                    cache_file_name="../data/wiki_kilt_paragraphs_full.arrow",
+                                                    desc="Expanding wiki articles into paragraphs")
+
+     # use paragraphs that are not simple fragments or very short sentences;
+     # the Wikipedia Faiss index needs to fit into a 16 GB GPU
+     kilt_wikipedia_paragraphs = kilt_wikipedia_paragraphs.filter(
+         lambda x: (x["end_character"] - x["start_character"]) > min_chars_per_passage)
+
+     def query_index(question, topk=7):
+         topk = topk * 3  # grab 3x results and filter for word count
+         question_embedding = embed_questions(question_model, question_tokenizer, [question])
+         scores, wiki_passages = kilt_wikipedia_paragraphs.get_nearest_examples("embeddings", question_embedding, k=topk)
+
+         # get_nearest_examples returns a dict of column name -> list; reassemble one dict per passage
+         retrieved_examples = []
+         for i in range(topk):
+             retrieved_examples.append({k: wiki_passages[k][i] for k in columns})
+
+         return retrieved_examples
+
+     def create_support_doc(dataset: Dataset, output_filename: str):
+         progress_bar = tqdm(range(len(dataset)), desc="Creating supporting docs")
+
+         with open(output_filename, "w") as fp:
+             for example in dataset:
+                 wiki_passages = query_index(example["title"])
+                 kilt_dp = create_kilt_datapoint(example, columns, wiki_passages)
+                 json.dump(kilt_dp, fp)
+                 fp.write("\n")
+                 progress_bar.update(1)
+
+     if not os.path.isfile(args.index_file_name):
+         def embed_passages_for_retrieval(examples):
+             return embed_passages(ctx_model, ctx_tokenizer, examples, max_length=128)
+
+         paragraphs_embeddings = kilt_wikipedia_paragraphs.map(embed_passages_for_retrieval,
+                                                               batched=True, batch_size=512,
+                                                               cache_file_name=args.encoded_kilt_file_name,
+                                                               desc="Creating faiss index")
+
+         paragraphs_embeddings.add_faiss_index(column="embeddings", custom_index=faiss.IndexFlatIP(dims))
+         paragraphs_embeddings.save_faiss_index("embeddings", args.index_file_name)
+
+     kilt_wikipedia_paragraphs.load_faiss_index("embeddings", args.index_file_name, device=0)
+     create_support_doc(lfqa["train"], "lfqa_dpr_train_precomputed_dense_docs.json")
+     create_support_doc(lfqa["validation"], "lfqa_dpr_validation_precomputed_dense_docs.json")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Creates support docs for seq2seq model training")
+     parser.add_argument(
+         "--ctx_encoder_name",
+         default="vblagoje/dpr-ctx_encoder-single-lfqa-base",
+         help="Context encoder to use",
+     )
+     parser.add_argument(
+         "--question_encoder_name",
+         default="vblagoje/dpr-question_encoder-single-lfqa-base",
+         help="Question encoder to use",
+     )
+
+     parser.add_argument(
+         "--index_file_name",
+         default="../data/kilt_dpr_wikipedia_first.faiss",
+         help="Faiss index with passage embeddings",
+     )
+
+     parser.add_argument(
+         "--encoded_kilt_file_name",
+         default="../data/kilt_embedded.arrow",
+         help="Encoded KILT file name",
+     )
+
+     main_args, _ = parser.parse_known_args()
+     generate_support_docs(main_args)