mailychee committed on
Commit 61fead0 · verified · 1 Parent(s): 2aada5b

Upload 2 files

Files changed (2):
  1. Evaluate_RAG.ipynb +399 -0
  2. KG_CQR_CoT.ipynb +796 -0
Evaluate_RAG.ipynb ADDED
@@ -0,0 +1,399 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1492,
6
+ "id": "5ff255f7-7ecf-409c-b45e-2b0ee45308ff",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import pandas as pd\n",
11
+ "from tqdm.notebook import tqdm\n",
12
+ "from statistics import mean\n",
13
+ "import string"
14
+ ]
15
+ },
16
+ {
17
+ "cell_type": "markdown",
18
+ "id": "b52cc255-409c-4f4b-9d86-e95bdc887da2",
19
+ "metadata": {},
20
+ "source": [
21
+ "# Load data"
22
+ ]
23
+ },
24
+ {
25
+ "cell_type": "code",
26
+ "execution_count": null,
27
+ "id": "c4125362-08c9-454b-b3f0-eb991c94359a",
28
+ "metadata": {},
29
+ "outputs": [],
30
+ "source": [
31
+ "res = pd.read_excel(\"result-here\")\n",
32
+ "res.head()"
33
+ ]
34
+ },
35
+ {
36
+ "cell_type": "code",
37
+ "execution_count": 1724,
38
+ "id": "651aec9e-1141-4818-9a6a-60c7b4c17df2",
39
+ "metadata": {},
40
+ "outputs": [],
41
+ "source": [
42
+ "answers = res[\"Answer\"].tolist()\n",
43
+ "labels = res[\"Label\"].tolist()\n",
44
+ "n_thought = res[\"n_CoT\"].tolist()"
45
+ ]
46
+ },
47
+ {
48
+ "cell_type": "markdown",
49
+ "id": "c2cc7168-91ab-4942-b178-80c69d9d71f7",
50
+ "metadata": {},
51
+ "source": [
52
+ "# Evaluate"
53
+ ]
54
+ },
55
+ {
56
+ "cell_type": "markdown",
57
+ "id": "b439a16d-6401-418e-9f12-63d692c06b31",
58
+ "metadata": {},
59
+ "source": [
60
+ "## F1_score"
61
+ ]
62
+ },
63
+ {
64
+ "cell_type": "code",
65
+ "execution_count": 1725,
66
+ "id": "8a88165d-ae20-47d1-b6ef-f6c3b2013993",
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "def precision(answer, label):\n",
71
+ " answer_tokens = set(answer.lower().split())\n",
72
+ " label_tokens = set(label.lower().split())\n",
73
+ "\n",
74
+ " intersection = answer_tokens & label_tokens\n",
75
+ " precision = len(intersection) / len(answer_tokens) if len(answer_tokens) > 0 else 0\n",
76
+ "\n",
77
+ " return precision\n",
78
+ "\n",
79
+ "def recall(answer, label):\n",
80
+ " answer_tokens = set(answer.lower().split())\n",
81
+ " label_tokens = set(label.lower().split())\n",
82
+ "\n",
83
+ " intersection = answer_tokens & label_tokens\n",
84
+ " recall = len(intersection) / len(label_tokens) if len(label_tokens) > 0 else 0\n",
85
+ "\n",
86
+ " return recall\n",
87
+ "\n",
88
+ "def f1(answer, label):\n",
89
+ " prec = precision(answer, label)\n",
90
+ " rec = recall(answer, label)\n",
91
+ "\n",
92
+ " if prec == 0 and rec == 0: return 0\n",
93
+ " return 2*prec*rec/(prec+rec)\n",
94
+ "\n",
95
+ "def evaluate_f1(answers, labels):\n",
96
+ " avg_f1 = []\n",
97
+ " for i in range(len(answers)):\n",
98
+ " f1_score = f1(answers[i], labels[i])\n",
99
+ " avg_f1.append(f1_score)\n",
100
+ " return mean(avg_f1)"
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "code",
105
+ "execution_count": null,
106
+ "id": "a82a603f-480f-419c-bcbd-408578ee4bc5",
107
+ "metadata": {},
108
+ "outputs": [],
109
+ "source": [
110
+ "p, r = [], []\n",
111
+ "for i in range(len(answers)):\n",
112
+ " ans, lab = answers[i], labels[i]\n",
113
+ " p.append(precision(ans, lab))\n",
114
+ " r.append(recall(ans, lab))\n",
115
+ "print(f\"Precision: {mean(p)}\")\n",
116
+ "print(f\"Recall: {mean(r)}\")"
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "code",
121
+ "execution_count": null,
122
+ "id": "1a48ff64-2514-45f0-91e2-8698cf12e623",
123
+ "metadata": {},
124
+ "outputs": [],
125
+ "source": [
126
+ "evaluate_f1(answers, labels)"
127
+ ]
128
+ },
129
+ {
130
+ "cell_type": "code",
131
+ "execution_count": null,
132
+ "id": "b63bfe40-35ac-440d-8a07-8cd11f0e8abf",
133
+ "metadata": {},
134
+ "outputs": [],
135
+ "source": [
136
+ "mean(n_thought)"
137
+ ]
138
+ },
139
+ {
140
+ "cell_type": "markdown",
141
+ "id": "a6f5e95f-2622-4e0f-9088-5ab5b66583ce",
142
+ "metadata": {},
143
+ "source": [
144
+ "## GPT Score\n"
145
+ ]
146
+ },
147
+ {
148
+ "cell_type": "markdown",
149
+ "id": "0c76b795-e4ff-4f3f-9707-dc690aafc75f",
150
+ "metadata": {},
151
+ "source": [
152
+ "### Initialize LLM"
153
+ ]
154
+ },
155
+ {
156
+ "cell_type": "code",
157
+ "execution_count": 1730,
158
+ "id": "2a131281-51d9-41b6-ad30-309454d93a8e",
159
+ "metadata": {},
160
+ "outputs": [],
161
+ "source": [
162
+ "from transformers import AutoTokenizer\n",
163
+ "from langchain_community.llms import VLLMOpenAI\n",
164
+ "from langchain_openai import ChatOpenAI\n",
+ "from langchain.prompts import PromptTemplate\n",
+ "from langchain_core.pydantic_v1 import BaseModel, Field  # used by the metric schemas and judge prompts below"
165
+ ]
166
+ },
167
+ {
168
+ "cell_type": "code",
169
+ "execution_count": 1731,
170
+ "id": "fd700efa-2e90-49e1-934c-e14e9f7357b4",
171
+ "metadata": {},
172
+ "outputs": [],
173
+ "source": [
174
+ "inference_server_url = \"your_inference_server_url\"\n",
175
+ "tokenizer = AutoTokenizer.from_pretrained(\"Qwen2.5-7B-Instruct\")\n",
176
+ "\n",
177
+ "### For Chat OpenAI template\n",
178
+ "llm = ChatOpenAI(\n",
179
+ " model=\"Qwen2.5-7B-Instruct\",\n",
180
+ " openai_api_key=\"test\",\n",
181
+ " openai_api_base=inference_server_url,\n",
182
+ " temperature=0,\n",
183
+ " max_tokens=100,\n",
184
+ " streaming= False\n",
185
+ ")"
186
+ ]
187
+ },
188
+ {
189
+ "cell_type": "markdown",
190
+ "id": "97fba228-9846-486f-8d3f-8648afd42b27",
191
+ "metadata": {},
192
+ "source": [
193
+ "### Metrics implementation"
194
+ ]
195
+ },
196
+ {
197
+ "cell_type": "code",
198
+ "execution_count": 1733,
199
+ "id": "5f6dc8cd-9d93-4a05-a9ed-ec40d74b3097",
200
+ "metadata": {},
201
+ "outputs": [],
202
+ "source": [
203
+ "class Correctness(BaseModel):\n",
204
+ " \"\"\"Correctness score ranges from 1-5 to evaluate whether the generated answer aligns with the reference answer\"\"\"\n",
205
+ " correctness_score: int = Field(\n",
206
+ " description=\"The correctness of the generated answer compared to the reference, score ranges from 1-5\"\n",
207
+ " )\n",
208
+ " \n",
209
+ "class Faithfulness(BaseModel):\n",
210
+ " \"\"\"Faithfulness score ranges from 1-5 to check whether the generated answer remains true to the given context\"\"\"\n",
211
+ " faithfulness_score: int = Field(\n",
212
+ " description=\"The generated answer remains true to the given context, score ranges from 1-5\"\n",
213
+ " )\n",
214
+ "\n",
215
+ "class Relevancy(BaseModel):\n",
216
+ " \"\"\"Relevancy score ranges from 1-5 to check whether the retrieved context and the generated answer are relevant to the query\"\"\"\n",
217
+ " relevancy_score: int = Field(\n",
218
+ " description=\"The retrieved context and the generated answer are relevant to the query, score ranges from 1-5\"\n",
219
+ " )\n"
220
+ ]
221
+ },
222
+ {
223
+ "cell_type": "code",
224
+ "execution_count": 1734,
225
+ "id": "da8d2984-2d1c-473e-8e8e-13e5dda6c6aa",
226
+ "metadata": {},
227
+ "outputs": [],
228
+ "source": [
229
+ "def correctness_evaluation(query, answer, label):\n",
230
+ " system_prompt = (\n",
231
+ " \"You are a judge. Your task is to evaluate whether the provided answer aligns with the label, given the query, \"\n",
232
+ " \"by assigning a score strictly based on the following rubric (score must be 1, 2, 3, 4, or 5):\\n\\n\"\n",
233
+ " \"Score Rubric:\\n\"\n",
234
+ " \"1: If the generated answer is not relevant to the user query and reference label.\\n\"\n",
235
+ " \"2: If the generated answer aligns with the reference label but is not relevant to the user query.\\n\"\n",
236
+ " \"3: If the generated answer is relevant to the user query and reference label but contains mistakes.\\n\"\n",
237
+ " \"4: If the generated answer is relevant to the user query and has the exact same metrics as the reference label, \"\n",
238
+ " \"but it is not as concise.\\n\"\n",
239
+ " \"5: If the generated answer is relevant to the user query and fully correct according to the reference label.\\n\\n\"\n",
240
+ " \"Important Notes:\\n\"\n",
241
+ " \"- Only evaluate based on commonalities between the answer and the label.\\n\"\n",
242
+ " \"- Do not penalize for elements present in the label but missing in the answer.\\n\"\n",
243
+ " \"\\n\"\n",
244
+ " \"Only return the score (1, 2, 3, 4, or 5). Do not generate any other text, such as explanations or openings/closings.\"\n",
245
+ " )\n",
246
+ " chat_template_contextual = tokenizer.apply_chat_template(\n",
247
+ " [\n",
248
+ " {\"role\": \"system\", \"content\": system_prompt},\n",
249
+ " {\"role\": \"user\", \"content\": f\"\\nQuery: {query}\\nAnswer: {answer}\\nLabel: {label}\"}\n",
250
+ " ],\n",
251
+ " tokenize=False,\n",
252
+ " add_generation_prompt=True\n",
253
+ " )\n",
254
+ " prompt_gen_answer = PromptTemplate(\n",
255
+ " template=chat_template_contextual, \n",
256
+ " input_variables=[\"system_prompt\", \"query\", \"answer\", \"label\"]\n",
257
+ " )\n",
258
+ " \n",
259
+ " structured_check_content = llm.with_structured_output(Correctness)\n",
260
+ " chain_gen_answer = prompt_gen_answer | structured_check_content\n",
261
+ " final_score = chain_gen_answer.invoke({\n",
262
+ " \"system_prompt\": system_prompt, \n",
263
+ " \"query\": query, \n",
264
+ " \"answer\": answer, \n",
265
+ " \"label\": label\n",
266
+ " }).correctness_score\n",
267
+ " \n",
268
+ " return final_score\n",
269
+ "\n",
270
+ "def faithfulness_evaluation(answer, context):\n",
271
+ " system_prompt = (\n",
272
+ " \"You are a judge. Your task is to evaluate whether the provided answer remains true and faithful \"\n",
273
+ " \"to the given context by assigning a score strictly based on the following rubric:\\n\\n\"\n",
274
+ " \"Score Rubric:\\n\"\n",
275
+ " \"- Score 1: The answer is completely unfaithful and contradicts the context.\\n\"\n",
276
+ " \"- Score 2: The answer contains mostly false information or is unsupported by the context, with only minor overlaps.\\n\"\n",
277
+ " \"- Score 3: The answer is partially faithful, with some alignment to the context but contains notable inaccuracies.\\n\"\n",
278
+ " \"- Score 4: The answer is mostly faithful to the context but may have minor inaccuracies or omissions.\\n\"\n",
279
+ " \"- Score 5: The answer is completely faithful and aligns fully with the context.\\n\\n\"\n",
280
+ " \"Important Notes:\\n\"\n",
281
+ " \"- Only evaluate based on common elements between the answer and the context.\\n\"\n",
282
+ " \"- Do not penalize the answer for missing elements that are present in the context but not in the answer.\\n\\n\"\n",
283
+ " \"Only return the score (1, 2, 3, 4, or 5). Do not generate any additional text, such as explanations or openings/closings.\"\n",
284
+ " )\n",
285
+ " chat_template_contextual = tokenizer.apply_chat_template(\n",
286
+ " [\n",
287
+ " {\"role\": \"system\", \"content\": system_prompt},\n",
288
+ " {\"role\": \"user\", \"content\": f\"Answer: {answer}\\nContext: {context}\"}\n",
289
+ " ],\n",
290
+ " tokenize=False,\n",
291
+ " add_generation_prompt=True\n",
292
+ " )\n",
293
+ " prompt_gen_answer = PromptTemplate(\n",
294
+ " template=chat_template_contextual,\n",
295
+ " input_variables=[\"system_prompt\", \"answer\", \"context\"]\n",
296
+ " )\n",
297
+ " structured_check_content = llm.with_structured_output(Faithfulness)\n",
298
+ " chain_gen_answer = prompt_gen_answer | structured_check_content\n",
299
+ " evaluation_score = chain_gen_answer.invoke({\n",
300
+ " \"system_prompt\": system_prompt,\n",
301
+ " \"answer\": answer,\n",
302
+ " \"context\": context\n",
303
+ " }).faithfulness_score\n",
304
+ " \n",
305
+ " return evaluation_score\n",
306
+ "\n",
307
+ "\n",
308
+ "\n",
309
+ "def relevancy_score(query, context, answer):\n",
310
+ " system_prompt = (\n",
311
+ " \"You are a judge. Your task is to evaluate the relevance of the retrieved context and the generated answer \"\n",
312
+ " \"to the given query. Your evaluation must strictly follow the score rubric below:\\n\\n\"\n",
313
+ " \"Score Rubric:\\n\"\n",
314
+ " \"- Score 1: Both the retrieved context and generated answer are completely irrelevant to the query.\\n\"\n",
315
+ " \"- Score 2: The retrieved context is somewhat related, but the generated answer is irrelevant to the query.\\n\"\n",
316
+ " \"- Score 3: Both the retrieved context and generated answer are somewhat relevant to the query, but not precise.\\n\"\n",
317
+ " \"- Score 4: The retrieved context and generated answer are mostly relevant to the query, with minor inaccuracies.\\n\"\n",
318
+ " \"- Score 5: Both the retrieved context and generated answer are fully relevant and precisely aligned with the query.\\n\\n\"\n",
319
+ " \"Important Notes:\\n\"\n",
320
+ " \"- Only return the score (1, 2, 3, 4, or 5). Do not provide any additional text such as explanations, openings, or closings.\"\n",
321
+ " )\n",
322
+ " chat_template_contextual = tokenizer.apply_chat_template(\n",
323
+ " [\n",
324
+ " {\"role\": \"system\", \"content\": system_prompt},\n",
325
+ " {\"role\": \"user\", \"content\": f\"Query: {query}\\nContext: {context}\\nAnswer: {answer}\"}\n",
326
+ " ],\n",
327
+ " tokenize=False,\n",
328
+ " add_generation_prompt=True\n",
329
+ " )\n",
330
+ " prompt_gen_answer = PromptTemplate(\n",
331
+ " template=chat_template_contextual,\n",
332
+ " input_variables=[\"system_prompt\", \"query\", \"context\", \"answer\"]\n",
333
+ " )\n",
334
+ " structured_check_content = llm.with_structured_output(Relevancy)\n",
335
+ " chain_gen_answer = prompt_gen_answer | structured_check_content\n",
336
+ " relevancy_result = chain_gen_answer.invoke({\n",
337
+ " \"system_prompt\": system_prompt,\n",
338
+ " \"query\": query,\n",
339
+ " \"context\": context,\n",
340
+ " \"answer\": answer\n",
341
+ " }).relevancy_score\n",
342
+ " \n",
343
+ " return relevancy_result\n"
344
+ ]
345
+ },
346
+ {
347
+ "cell_type": "markdown",
348
+ "id": "7926b5c1-2342-476f-8bcd-99c1c2353128",
349
+ "metadata": {},
350
+ "source": [
351
+ "### Execution"
352
+ ]
353
+ },
354
+ {
355
+ "cell_type": "code",
356
+ "execution_count": 1783,
357
+ "id": "b3bdd014-d366-4d2d-bd7c-85d8459c48a4",
358
+ "metadata": {},
359
+ "outputs": [],
360
+ "source": [
361
+ "def run_evaluate(tasks):\n",
362
+ " query, label, answer, context = tasks[0], tasks[1], tasks[2], tasks[3]\n",
363
+ " try:\n",
364
+ " corr = correctness_evaluation(query, answer, label)\n",
365
+ " faith = faithfulness_evaluation(answer, context)\n",
366
+ " rele = relevancy_score(query, context, answer)\n",
367
+ " result = {\"Correctness\": corr, \"Faithfulness\": faith, \"Relevancy\":rele}\n",
368
+ "\n",
369
+ " return result\n",
370
+ " except Exception as e:\n",
371
+ " print(f\"Error occurred during processing question '{query}': {e}\")\n",
372
+ " return None\n",
373
+ " \n",
374
+ " "
375
+ ]
376
+ },
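+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "run-evaluate-usage-sketch",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative sketch, not part of the original upload: one way run_evaluate could be applied\n",
+ "# row by row to the loaded results. It assumes `res` also carries \"Question\" and \"Context\"\n",
+ "# columns (matching the dictionaries produced by the IRCoT notebook); adjust names as needed.\n",
+ "scores = []\n",
+ "for _, row in tqdm(res.iterrows(), total=len(res)):\n",
+ "    result = run_evaluate((row[\"Question\"], row[\"Label\"], row[\"Answer\"], row[\"Context\"]))\n",
+ "    if result is not None:\n",
+ "        scores.append(result)\n",
+ "\n",
+ "# average each LLM-judged metric over the rows that were scored successfully\n",
+ "for metric in [\"Correctness\", \"Faithfulness\", \"Relevancy\"]:\n",
+ "    print(metric, mean(s[metric] for s in scores))"
+ ]
+ }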
377
+ ],
378
+ "metadata": {
379
+ "kernelspec": {
380
+ "display_name": "Python 3 (ipykernel)",
381
+ "language": "python",
382
+ "name": "python3"
383
+ },
384
+ "language_info": {
385
+ "codemirror_mode": {
386
+ "name": "ipython",
387
+ "version": 3
388
+ },
389
+ "file_extension": ".py",
390
+ "mimetype": "text/x-python",
391
+ "name": "python",
392
+ "nbconvert_exporter": "python",
393
+ "pygments_lexer": "ipython3",
394
+ "version": "3.10.6"
395
+ }
396
+ },
397
+ "nbformat": 4,
398
+ "nbformat_minor": 5
399
+ }
KG_CQR_CoT.ipynb ADDED
@@ -0,0 +1,796 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "8ffa66cb",
6
+ "metadata": {},
7
+ "source": [
8
+ "## Import libraries"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": null,
14
+ "id": "431c0fdb",
15
+ "metadata": {},
16
+ "outputs": [],
17
+ "source": [
18
+ "import os\n",
19
+ "import copy\n",
20
+ "import numpy as np\n",
21
+ "import pickle\n",
22
+ "import pandas as pd\n",
23
+ "import faiss\n",
24
+ "import traceback, time\n",
25
+ "\n",
26
+ "import json\n",
27
+ "import requests\n",
28
+ "from typing import List\n",
29
+ "from langchain_core.embeddings import Embeddings\n",
30
+ "from tqdm.notebook import tqdm\n",
31
+ "\n",
32
+ "from sklearn.metrics.pairwise import cosine_similarity\n",
33
+ "from langchain.prompts import PromptTemplate\n",
34
+ "from typing import Literal\n",
35
+ "import multiprocessing\n",
36
+ "\n",
37
+ "from langchain_core.prompts import ChatPromptTemplate\n",
38
+ "from langchain_core.pydantic_v1 import BaseModel, Field\n",
39
+ "from langchain_openai import ChatOpenAI\n",
40
+ "\n",
41
+ "from rank_bm25 import BM25Okapi\n",
42
+ "from langchain_core.output_parsers import StrOutputParser,JsonOutputParser\n",
43
+ "from multiprocessing import Pool, Manager\n"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "markdown",
48
+ "id": "cb97885e",
49
+ "metadata": {},
50
+ "source": [
51
+ "## CALL API ENDPOINTS (LLM, EMBEDDING)"
52
+ ]
53
+ },
54
+ {
55
+ "cell_type": "code",
56
+ "execution_count": 1,
57
+ "id": "89a5966f-cda1-4e3f-9f89-ecbe9e4127b8",
58
+ "metadata": {},
59
+ "outputs": [],
60
+ "source": [
61
+ "os.environ['CUDA_VISIBLE_DEVICES'] = \"5\"\n",
62
+ "os.environ[\"OPENAI_API_KEY\"] = \"YOUR_OPENAI_API_KEY\"\n",
63
+ "os.environ[\"http_proxy\"] = \"\"\n",
64
+ "os.environ[\"https_proxy\"] = \"\""
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "markdown",
69
+ "id": "bec9c145",
70
+ "metadata": {},
71
+ "source": [
72
+ "### CALL LLM"
73
+ ]
74
+ },
75
+ {
76
+ "cell_type": "code",
77
+ "execution_count": 4,
78
+ "id": "d3423138-d290-42bb-b838-17abcbfde695",
79
+ "metadata": {},
80
+ "outputs": [],
81
+ "source": [
82
+ "from transformers import AutoTokenizer\n",
83
+ "from langchain_community.llms import VLLMOpenAI\n",
84
+ "from langchain_openai import ChatOpenAI\n",
85
+ "\n",
86
+ "\n",
87
+ "inference_server_url = \"your_inference_server_url\"\n",
88
+ "tokenizer = AutoTokenizer.from_pretrained(\"your_tokenizer\")\n",
89
+ "\n",
90
+ "### For Chat OpenAI template\n",
91
+ "llm = ChatOpenAI(\n",
92
+ " model=\"your_model\",\n",
93
+ " openai_api_key=\"test\",\n",
94
+ " openai_api_base=inference_server_url,\n",
95
+ " temperature=0,\n",
96
+ " max_tokens=256,\n",
97
+ " streaming= False\n",
98
+ ")"
99
+ ]
100
+ },
101
+ {
102
+ "cell_type": "markdown",
103
+ "id": "205f37b4",
104
+ "metadata": {},
105
+ "source": [
106
+ "### Embedding\n"
107
+ ]
108
+ },
109
+ {
110
+ "cell_type": "code",
111
+ "execution_count": 6,
112
+ "id": "a0ae425c-e4f9-4b7d-a0d0-6614fc00fb1f",
113
+ "metadata": {},
114
+ "outputs": [],
115
+ "source": [
116
+ "class CustomAPIEmbeddings(Embeddings):\n",
117
+ " def __init__(self, api_url: str, show_progress:bool): \n",
118
+ " self.api_url = api_url\n",
119
+ " self.show_progress = show_progress\n",
120
+ "\n",
121
+ " def embed_documents(self, texts: List[str]) -> List[List[float]]:\n",
122
+ " lst_embedding = []\n",
123
+ " if self.show_progress: # for tqdm embedding\n",
124
+ " for query in tqdm(texts):\n",
125
+ " payload = json.dumps({\n",
126
+ " \"query\": query\n",
127
+ " })\n",
128
+ " headers = {\n",
129
+ " 'Content-Type': 'application/json'\n",
130
+ " }\n",
131
+ " try:\n",
132
+ " response = json.loads(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)['embedding']\n",
133
+ " except Exception:\n",
+ " # log the raw API response, then re-raise so a failed call is not appended as a stale embedding\n",
+ " print(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)\n",
+ " raise\n",
135
+ " lst_embedding.append(response)\n",
136
+ " else:\n",
137
+ " for query in texts:\n",
138
+ " payload = json.dumps({\n",
139
+ " \"query\": query\n",
140
+ " })\n",
141
+ " headers = {\n",
142
+ " 'Content-Type': 'application/json'\n",
143
+ " }\n",
144
+ " try:\n",
145
+ " response = json.loads(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)['embedding']\n",
146
+ " except Exception:\n",
+ " # log the raw API response, then re-raise so a failed call is not appended as a stale embedding\n",
+ " print(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)\n",
+ " raise\n",
148
+ " lst_embedding.append(response)\n",
149
+ " \n",
150
+ " return lst_embedding # Adjust this based on the response format of your API\n",
151
+ "\n",
152
+ " def embed_query(self, text: str) -> List[float]:\n",
153
+ " return self.embed_documents([text])[0]\n",
154
+ "embeddings = CustomAPIEmbeddings(api_url='your_api_url', show_progress=False)\n",
155
+ "\n"
156
+ ]
157
+ },
158
+ {
159
+ "cell_type": "markdown",
160
+ "id": "82420213-5a13-44ae-90bd-6844c572bea1",
161
+ "metadata": {},
162
+ "source": [
163
+ "### 1. Load Graph Data"
164
+ ]
165
+ },
166
+ {
167
+ "cell_type": "markdown",
168
+ "id": "b16aa1b1-4bb8-4336-949a-26a6a015c274",
169
+ "metadata": {},
170
+ "source": [
171
+ "#### Load Data (Triplets, Triplets Relation Embeddings)"
172
+ ]
173
+ },
174
+ {
175
+ "cell_type": "code",
176
+ "execution_count": 8,
177
+ "id": "63915c9b-798e-4ab5-a6ed-c5256d676836",
178
+ "metadata": {
179
+ "scrolled": true
180
+ },
181
+ "outputs": [],
182
+ "source": [
183
+ "with open(\"your-triplets\",'rb') as f:\n",
184
+ " dct_mapping_triplet = pickle.load(f)\n",
185
+ "\n",
186
+ "with open(\"your-triplet-embeddings\",'rb') as f:\n",
187
+ " lst_embedding = pickle.load(f)\n",
188
+ "\n",
189
+ "lst_embedding = np.array(lst_embedding)"
190
+ ]
191
+ },
192
+ {
193
+ "cell_type": "code",
194
+ "execution_count": 12,
195
+ "id": "c3382c9a-ac7a-4eb1-80cd-ce6e3931f36c",
196
+ "metadata": {},
197
+ "outputs": [],
198
+ "source": [
199
+ "df_test = pd.read_csv(\"final_data.csv\")\n",
200
+ "test_data = df_test['question'].tolist()\n",
201
+ "import ast\n",
+ "df_test['documents'] = df_test['documents'].map(ast.literal_eval)  # safer than eval for parsing the stored document lists"
202
+ ]
203
+ },
204
+ {
205
+ "cell_type": "code",
206
+ "execution_count": 14,
207
+ "id": "4694d341-f160-4528-baf6-5f19871e47eb",
208
+ "metadata": {},
209
+ "outputs": [],
210
+ "source": [
211
+ "faiss_embeddings = lst_embedding.astype('float32')\n",
212
+ "d = faiss_embeddings.shape[1]\n",
213
+ "index = faiss.IndexFlatIP(d)\n",
214
+ "index.add(faiss_embeddings)"
215
+ ]
216
+ },
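+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "faiss-cosine-normalization-check",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative sketch, not part of the original upload: IndexFlatIP ranks by raw inner product,\n",
+ "# which equals cosine similarity only if the embeddings are unit length. This check verifies that\n",
+ "# assumption; if it fails, calling faiss.normalize_L2 on faiss_embeddings (before index.add) and\n",
+ "# on query vectors would keep the FAISS scores consistent with the cosine_similarity used below.\n",
+ "norms = np.linalg.norm(faiss_embeddings, axis=1)\n",
+ "print(\"embeddings already unit length:\", bool(np.allclose(norms, 1.0, atol=1e-3)))"
+ ]
+ },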
217
+ {
218
+ "cell_type": "markdown",
219
+ "id": "b9d21a29-19d1-4db9-9043-7311c364f0b3",
220
+ "metadata": {},
221
+ "source": [
222
+ "### 2. Contextual Question Retrieval (CQR)"
223
+ ]
224
+ },
225
+ {
226
+ "cell_type": "code",
227
+ "execution_count": null,
228
+ "id": "b0a33bc8-4b12-4191-96b8-d311f5c1cdcc",
229
+ "metadata": {},
230
+ "outputs": [],
231
+ "source": [
232
+ "def faiss_cosine(query_vector, k=10):\n",
233
+ " query_vector = query_vector.astype('float32')\n",
234
+ " distances, indices = index.search(query_vector, k)\n",
235
+ " return indices.flatten()\n",
236
+ "\n",
237
+ "def compute_cosine_similarity_chunk(inp):\n",
238
+ " return cosine_similarity(inp['chunk'], inp['vector'])\n",
239
+ "\n",
240
+ "def parallel_cosine_similarity(matrix, vector, n_jobs=128):\n",
241
+ " num_rows = matrix.shape[0]\n",
242
+ " chunk_size = num_rows // n_jobs\n",
243
+ " chunks = [{\"vector\": vector, \"chunk\":matrix[i * chunk_size:(i + 1) * chunk_size]} for i in range(n_jobs - 1)]\n",
244
+ " chunks.append({\"vector\": vector, \"chunk\":matrix[(n_jobs - 1) * chunk_size:]})\n",
245
+ " with multiprocessing.Pool(n_jobs) as pool:\n",
246
+ " results = list(pool.imap(compute_cosine_similarity_chunk, chunks))\n",
247
+ " cosine_similarities = np.vstack(results)\n",
248
+ " return cosine_similarities\n",
249
+ "\n",
250
+ "def query_triplet_topk(query, k=10):\n",
251
+ " query_emb = np.array(embeddings.embed_query(query)).reshape(1,-1)\n",
252
+ " topk_indices_sorted = faiss_cosine(query_emb).tolist()\n",
253
+ " return [dct_mapping_triplet[x] for x in topk_indices_sorted]\n",
254
+ "\n",
255
+ "def query_triplet_threshold(query, threshold=0.8):\n",
256
+ " query_emb = np.array(embeddings.embed_query(query)).reshape(1,-1)\n",
257
+ " similarities = cosine_similarity(query_emb, lst_embedding).flatten()\n",
258
+ " topk_indices = np.where(similarities > threshold)[0]\n",
259
+ " topk_indices_sorted = topk_indices[np.argsort(-similarities[topk_indices])].tolist()\n",
260
+ " return [dct_mapping_triplet[x] for x in topk_indices_sorted]\n",
261
+ "\n",
262
+ "\n",
263
+ "class GradeRelation(BaseModel):\n",
264
+ " \"\"\"Binary score for relevance check on retrieved text.\"\"\"\n",
265
+ " binary_score: str = Field(\n",
266
+ " description=\"The Text is relevant to the question, 'yes' or 'no'\"\n",
267
+ " )\n",
268
+ "\n",
269
+ "class GradeRelationList(BaseModel):\n",
270
+ " \"\"\"List passage index check on retrieved text.\"\"\"\n",
271
+ " passage_idx: str = Field(\n",
272
+ " description=\"The passage index of relevant chunks, separated by a comma\"\n",
273
+ " )\n",
274
+ "\n",
275
+ "def check_grade(question, text):\n",
276
+ " prompt_text_grader = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assessing relevance \n",
277
+ " of a retrieved text to a user question. The goal is to filter out erroneous retrievals. \\n\n",
278
+ " Give a binary score 'yes' or 'no' to indicate whether the text is relevant to the question. \\n\n",
279
+ " Provide the binary score as a JSON with a single key 'score' and no preamble or explanation.\n",
280
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
281
+ " Here is the retrieved text: \\n\\n {text} \\n\\n\n",
282
+ " Here is the user question: {question} \\n <|eot_id|><|start_header_id|>assistant<|end_header_id|>\n",
283
+ " \"\"\",\n",
284
+ " input_variables=[\"question\", \"text\"]\n",
285
+ " )\n",
286
+ " structured_llm_grader = llm.with_structured_output(GradeRelation)\n",
287
+ " relation_grader = prompt_text_grader | structured_llm_grader \n",
288
+ " result = relation_grader.invoke({\"question\": question, \"text\": text})\n",
289
+ " return result\n",
290
+ "\n",
291
+ "def check_grade_lst(question, text):\n",
292
+ " prompt_text_grader = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assessing relevance \n",
293
+ " of a list of retrieved passages to a user question. The goal is to filter out erroneous retrievals. \\n\n",
294
+ " Return only the indices of the passages that are relevant to the question. \\n\n",
295
+ " Provide the output as a JSON with the passage indices separated by a comma and no preamble or explanation.\n",
296
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
297
+ " Here is the list of retrieved text: \\n\\n {text} \\n\\n\n",
298
+ " Here is the user question: {question} \\n <|eot_id|><|start_header_id|>assistant<|end_header_id|>\n",
299
+ " \"\"\",\n",
300
+ " input_variables=[\"question\", \"text\"]\n",
301
+ " )\n",
302
+ " structured_llm_grader = llm.with_structured_output(GradeRelationList)\n",
303
+ " relation_grader = prompt_text_grader | structured_llm_grader \n",
304
+ " result = relation_grader.invoke({\"question\": question, \"text\": text})\n",
305
+ " return result\n",
306
+ "\n",
307
+ "\n",
308
+ "def check_relations(question, relations):\n",
309
+ " result = []\n",
310
+ " for rel in relations:\n",
311
+ " check = check_grade(question, rel['r.summary'])\n",
312
+ " if check.binary_score == \"yes\":\n",
313
+ " result.append(rel)\n",
314
+ " return result\n",
315
+ "\n",
316
+ "def format_relations(relations):\n",
317
+ " result = []\n",
318
+ " for rel in relations:\n",
319
+ " formatted_relation = f\"{rel['n']['id']} - {rel['r'][1]} -> {rel['m']['id']}\"\n",
320
+ " result.append(formatted_relation)\n",
321
+ " return result"
322
+ ]
323
+ },
324
+ {
325
+ "cell_type": "code",
326
+ "execution_count": 16,
327
+ "id": "411cb9c3-501f-4e7e-8775-d2910183ad6c",
328
+ "metadata": {},
329
+ "outputs": [],
330
+ "source": [
331
+ "def format_claim(relations):\n",
332
+ " return \"\\n\\n\".join(f\"{idx+1}. {rel['r.summary']}\" for idx, rel in enumerate(relations))\n",
333
+ "\n",
334
+ "def format_triplet(relations):\n",
335
+ " return \"\\n\\n\".join(f\"{idx+1}. ({rel['r'][0]['id']}, {rel['r'][1]}, {rel['r'][2]['id']})\" for idx, rel in enumerate(relations))\n",
336
+ "\n",
337
+ "\n",
338
+ "class contextual_output(BaseModel):\n",
339
+ " \"\"\"contextual summarization for the input question.\"\"\"\n",
340
+ " summary: str = Field(\n",
341
+ " description=\"Concise summary of contextual information for the input question\"\n",
342
+ " )\n",
343
+ "\n",
344
+ "class contextual_triplets(BaseModel):\n",
345
+ " \"\"\"contextual generation of knowledge subgraph.\"\"\"\n",
346
+ " context: str = Field(\n",
347
+ " description=\"generate concise contextual information based on list of triplets\"\n",
348
+ " )\n",
349
+ " \n",
350
+ "\n",
351
+ "def contextual_question_retrieval(claims):\n",
352
+ " prompt_summary_contextual = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a helpful assistant\n",
353
+ " responsible for generating a comprehensive summary of the data provided below.\\n\n",
354
+ " Given a list of claims that may relate to each other, please write a concise summary of the claims that provides contextual information.\\n\n",
355
+ " The output should contain just the concise summary, without any explanation.\\n\n",
356
+ " Please note that if the provided claims are contradictory, resolve the contradictions and provide a single, coherent summary (no 'Here is' preamble).\n",
357
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
358
+ " Here is the list of claims: \\n\\n {claims} \\n\\n\n",
359
+ " \"\"\",\n",
360
+ " input_variables=[\"claims\"]\n",
361
+ " )\n",
362
+ " \n",
363
+ " structured_summary_contextual = llm.with_structured_output(contextual_output)\n",
364
+ " contextual_chain = prompt_summary_contextual | structured_summary_contextual \n",
365
+ " results = contextual_chain.invoke({\"claims\": claims})\n",
366
+ " return results\n",
367
+ "\n",
368
+ "def quick_contextual_question_retrieval(question, claims):\n",
369
+ " prompt_summary_contextual = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a helpful assistant\n",
370
+ " responsible for generating a comprehensive summary of the data provided below.\\n\n",
371
+ " Given the question and a list of claims that may relate to each other, you have to decide which claims are relevant to the question.\\n\n",
372
+ " Please write a concise summary of the relevant claims that provides contextual information (IT MUST CONTAIN ONLY RELEVANT CLAIMS).\\n\n",
373
+ " The output should contain just the concise summary, without any explanation and without restating the question.\\n\n",
374
+ " Please note that if the provided claims are contradictory, resolve the contradictions and provide a single, coherent summary (no 'Here is' preamble).\n",
375
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
376
+ " Here is the question: \\n\\n {question} \\n\\n\n",
377
+ " Here is the list of claims: \\n\\n {claims} \\n\\n\n",
378
+ " \"\"\",\n",
379
+ " input_variables=[\"question\", \"claims\"]\n",
380
+ " )\n",
381
+ " structured_summary_contextual = llm.with_structured_output(contextual_output)\n",
382
+ " contextual_chain = prompt_summary_contextual | structured_summary_contextual \n",
383
+ " results = contextual_chain.invoke({\"question\":question, \"claims\": claims})\n",
384
+ " return results\n",
385
+ "\n",
386
+ "def contextual_question_retrieval_triplet(triplet):\n",
387
+ " prompt_summary_contextual = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a helpful assistant\n",
388
+ " responsible for generating contextual information based on the list of triplets of a given knowledge graph.\\n\n",
389
+ " Given a knowledge graph containing a list of triplets (entity 1, relation, entity 2), please generate contextual information; the objective is to represent the triplet information of the knowledge graph as plain text.\n",
390
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
391
+ " Here is the list of triplets: \\n\\n {triplet} \\n\\n\n",
392
+ " \"\"\",\n",
393
+ " input_variables=[\"triplet\"]\n",
394
+ " )\n",
395
+ " structured_summary_contextual = llm.with_structured_output(contextual_triplets)\n",
396
+ " contextual_chain = prompt_summary_contextual | structured_summary_contextual \n",
397
+ " results = contextual_chain.invoke({\"triplet\": triplet})\n",
398
+ " return results\n",
399
+ "\n",
400
+ "def contextual_question_retrieval_triplet_descriptions_mixed(triplet):\n",
401
+ " prompt_summary_contextual = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a helpful assistant\n",
402
+ " responsible for generating contextual information based on the list of triplets of a given knowledge graph.\\n\n",
403
+ " Given a knowledge graph containing a list of triplets and their descriptions in the following format: {{(entity 1, relation, entity 2): text description}}\\n\n",
404
+ " Please generate contextual information; the objective is to represent the triplet information of the knowledge graph as plain text.\\n\n",
405
+ " Note that the output MUST contain only the contextual information, without any explanation or opening sentence.\n",
406
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
407
+ " Here are the list of triplets and descriptions: \\n\\n {triplet} \\n\\n\n",
408
+ " \"\"\",\n",
409
+ " input_variables=[\"triplet\"]\n",
410
+ " )\n",
411
+ " \n",
412
+ " structured_summary_contextual = llm.with_structured_output(contextual_triplets)\n",
413
+ " contextual_chain = prompt_summary_contextual | structured_summary_contextual \n",
414
+ " results = contextual_chain.invoke({\"triplet\": triplet})\n",
415
+ " return results\n",
416
+ "\n",
417
+ "\n",
418
+ "def add_context_to_question(question, check_relate=False):\n",
419
+ " global cnt_err \n",
420
+ " relations = query_triplet_topk(question)\n",
421
+ " if check_relate:\n",
422
+ " check_rels = check_relations(question, relations)\n",
423
+ " if check_rels:\n",
424
+ " contextual_summary = contextual_question_retrieval(format_claim(check_rels)).summary\n",
425
+ " else:\n",
426
+ " contextual_summary = \"\"\n",
427
+ " else:\n",
428
+ " try:\n",
429
+ " context = check_grade_lst(question, format_claim(relations)).passage_idx\n",
430
+ " context = [int(x) for x in context.split(\",\")]\n",
431
+ " check_rels = [relations[x-1] for x in context]\n",
432
+ " contextual_summary = contextual_question_retrieval(format_claim(check_rels)).summary\n",
433
+ " except:\n",
434
+ " cnt_err += 1\n",
435
+ " contextual_summary = \"\"\n",
436
+ " question = question + \" with some extra data: \" + contextual_summary\n",
437
+ " return question\n",
438
+ "\n",
439
+ "\n",
440
+ "def format_triplet_mixed(relations):\n",
441
+ " return \"\\n\".join(f\"({rel['n']['id']}, {rel['r'][1]}, {rel['m']['id']}): {rel['r.summary']}\" for idx, rel in enumerate(relations))\n",
442
+ "\n",
443
+ "def add_triplet_context_to_question(question, check_relate=False):\n",
444
+ " global cnt_err\n",
445
+ " global map_triplet\n",
446
+ " relations = query_triplet_topk(question)\n",
447
+ " if check_relate: \n",
448
+ " check_rels = check_relations(question, relations)\n",
449
+ " print(len(check_rels))\n",
450
+ " if check_rels:\n",
451
+ " contextual_summary = contextual_question_retrieval_triplet(format_triplet(check_rels)).context\n",
452
+ " else:\n",
453
+ " contextual_summary = \"\"\n",
454
+ " else: \n",
455
+ " try:\n",
456
+ " a = time.time()\n",
457
+ " context = check_grade_lst(question, format_claim(relations)).passage_idx\n",
458
+ " b = time.time()\n",
459
+ " if context is not None:\n",
460
+ " context = [int(x) for x in context.split(\",\")]\n",
461
+ " check_rels = [relations[x-1] for x in context]\n",
462
+ " else:\n",
463
+ " check_rels = []\n",
464
+ " if check_rels == []:\n",
465
+ " contextual_summary = \"\"\n",
466
+ " else:\n",
467
+ " contextual_summary = contextual_question_retrieval_triplet(format_triplet_mixed(check_rels)).context\n",
468
+ " c = time.time()\n",
469
+ " except Exception as e:\n",
470
+ " print(e)\n",
471
+ " cnt_err += 1\n",
472
+ " contextual_summary = \"\"\n",
473
+ " if contextual_summary != \"\":\n",
474
+ " question = question + \" with some extra data: \" + contextual_summary\n",
475
+ " return question"
476
+ ]
477
+ },
478
+ {
479
+ "cell_type": "code",
480
+ "execution_count": 18,
481
+ "id": "9813e40f-26ba-49ca-a9b3-96a00b7ac1d9",
482
+ "metadata": {},
483
+ "outputs": [],
484
+ "source": [
485
+ "lst_triplet_top_k_cos = []\n",
486
+ "for i in tqdm(test_data):\n",
487
+ " lst_triplet_top_k_cos.append(query_triplet_topk(i))\n",
488
+ "map_triplet = {}\n",
489
+ "for i,j in zip(lst_triplet_top_k_cos, test_data):\n",
490
+ " map_triplet[j] = i\n",
491
+ "\n"
492
+ ]
493
+ },
494
+ {
495
+ "cell_type": "markdown",
496
+ "id": "8bdb4e5b-a82b-4f35-b6c1-b468a6783b5b",
497
+ "metadata": {},
498
+ "source": [
499
+ "### 3. CQR for Multi-Step Questions"
500
+ ]
501
+ },
502
+ {
503
+ "cell_type": "markdown",
504
+ "id": "c8e6ab8b-8030-49ab-928a-743a6cc4e7a2",
505
+ "metadata": {},
506
+ "source": [
507
+ "#### 3.1 Loading Data"
508
+ ]
509
+ },
510
+ {
511
+ "cell_type": "code",
512
+ "execution_count": null,
513
+ "id": "888d725d-51c7-43a7-b546-56a17d131274",
514
+ "metadata": {},
515
+ "outputs": [],
516
+ "source": [
517
+ "# BM25\n",
518
+ "with open(\"passages.txt\",\"r\") as f:\n",
519
+ " lst_chunks = f.read().split(\"<endofpassage>\")[:-1]\n",
520
+ "print(len(list(set(lst_chunks))))\n",
521
+ "mapping_chunks = {j:i for i,j in enumerate(list(set(lst_chunks)))}\n",
522
+ "lst_chunks = list(set(lst_chunks))"
523
+ ]
524
+ },
525
+ {
526
+ "cell_type": "markdown",
527
+ "id": "c9351926-e8ec-4da7-bb19-581ee19256eb",
528
+ "metadata": {},
529
+ "source": [
530
+ "#### 3.2 Executing Baseline - IRCoT\n",
531
+ "ref: https://github.com/stonybrooknlp/ircot"
532
+ ]
533
+ },
534
+ {
535
+ "cell_type": "markdown",
536
+ "id": "d1c635b8-65cd-408e-83d0-33b5e7c30b85",
537
+ "metadata": {},
538
+ "source": [
539
+ "##### 3.2.1 Retrieval Module"
540
+ ]
541
+ },
542
+ {
543
+ "cell_type": "code",
544
+ "execution_count": 28,
545
+ "id": "1593312f-a726-4be3-b019-dd627794995b",
546
+ "metadata": {},
547
+ "outputs": [],
548
+ "source": [
549
+ "tokenized_corpus = [doc.split(\" \") for doc in lst_chunks]\n",
550
+ "bm25 = BM25Okapi(tokenized_corpus)"
551
+ ]
552
+ },
553
+ {
554
+ "cell_type": "code",
555
+ "execution_count": 30,
556
+ "id": "70fbc603-05e5-4127-8912-8407c64c4b7c",
557
+ "metadata": {},
558
+ "outputs": [],
559
+ "source": [
560
+ "def retrieval_bm25(question, k):\n",
561
+ " tokenized_query = question.split(\" \")\n",
562
+ " lst_retrieval = bm25.get_top_n(tokenized_query, lst_chunks, n=k)\n",
563
+ " return lst_retrieval"
564
+ ]
565
+ },
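+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bm25-retrieval-check-sketch",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative sketch, not part of the original upload: a quick sanity check of the BM25\n",
+ "# retriever on the first test question, printing the start of each retrieved passage.\n",
+ "for passage in retrieval_bm25(test_data[0], 3):\n",
+ "    print(passage[:200].replace(\"\\n\", \" \"), \"...\")"
+ ]
+ },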
566
+ {
567
+ "cell_type": "markdown",
568
+ "id": "58e7b420-b61b-45c2-b3f7-101d852a6ee3",
569
+ "metadata": {},
570
+ "source": [
571
+ "##### 3.2.2 Interleaving Retrieval with Chain-of-Thought Reasoning"
572
+ ]
573
+ },
574
+ {
575
+ "cell_type": "code",
576
+ "execution_count": null,
577
+ "id": "bf623c44-1d2f-47ca-8f3c-78178a73014f",
578
+ "metadata": {
579
+ "scrolled": true
580
+ },
581
+ "outputs": [],
582
+ "source": [
583
+ "def format_docs(docs):\n",
584
+ " return \"\\n\\n\".join(f\"{doc}\" for doc in docs)\n",
585
+ "\n",
586
+ "class GradeResponse(BaseModel):\n",
587
+ " \"\"\"Binary score to determine if the passages provide sufficient information to answer the question directly.\"\"\"\n",
588
+ " binary_score: bool = Field(\n",
589
+ " description=\"Whether the relevant passages provide sufficient information to answer the question directly, True or False\"\n",
590
+ " )\n",
591
+ "\n",
592
+ "class gen_query(BaseModel):\n",
593
+ " \"\"\"Generate chain-of-thought query for further research and exploration.\"\"\"\n",
594
+ " new_query: str = Field(\n",
595
+ " description=\"Generate new chain-of-thought query for further research and exploration\"\n",
596
+ " )\n",
597
+ "\n",
598
+ "def check_response(question, context):\n",
599
+ " prompt_check_response = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an advanced AI assistant skilled in analyzing textual data.\\n\n",
600
+ " Below is a question and relevant passages that may contain information to answer it.\\n\n",
601
+ " Your task is to determine if the provided passages contain enough relevant information to answer the question, even if not directly stated.\\n\n",
602
+ " Consider both direct answers and implied or partially inferred information.\\n\n",
603
+ " Return a binary score: 'True' if the context provides sufficient information to answer the question; 'False' if it does not.\\n\n",
604
+ " Provide only the binary score in JSON format with a single key 'score'. Do not include explanations.\\n\n",
605
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
606
+ " Here is the question: \\n\\n {question} \\n\\n\n",
607
+ " Here are the relevant passages: \\n\\n {context} \\n\\n\n",
608
+ " \n",
609
+ " \"\"\",\n",
610
+ " input_variables=[\"question\", \"context\"]\n",
611
+ " )\n",
612
+ " structured_check_content = llm.with_structured_output(GradeResponse)\n",
613
+ " check_response_chain = prompt_check_response | structured_check_content \n",
614
+ " results = check_response_chain.invoke({\"question\": question ,\"context\": context})\n",
615
+ " return results\n",
616
+ "\n",
617
+ "def gen_question(question, context, previous_though):\n",
618
+ " prompt_gen_answer = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an advanced AI skilled in generating a concise insightful chain-of-thought query to guide further research and exploration.\\n\n",
619
+ " Below is a question and relevant context information and previous failed queries.\\n\n",
620
+ " Your task is to:\\n\n",
621
+ " 1. Analyze the input question to understand its intent and identify gaps in the provided context that prevent a complete answer.\\n\n",
622
+ " 2. Generate a new chain-of-thought query that is based on the input question, incorporating logical steps or deeper aspects of the topic.\\n\n",
623
+ " This new query should be designed to guide further search or inquiry, aiming to bridge the identified gaps and refine the search for an answer.\\n\n",
624
+ " 3. Avoid repeating or rephrasing any of the previous failed queries. Instead, aim to expand the scope or explore different facets of the topic that have not been addressed yet.\\n\n",
625
+ " All JSON MUST in correct format. DO NOT get information from 'Relevant context information' to create new input variables.\n",
626
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
627
+ " Here is the question: \\n\\n {question} \\n\\n\n",
628
+ " Here is the relevant context information: \\n\\n {context} \\n\\n\n",
629
+ " Here are the previous failed queries: \\n\\n {previous_though} \\n\\n\n",
630
+ " \n",
631
+ " \"\"\",\n",
632
+ " input_variables=[\"question\", \"context\", \"previous_though\"]\n",
633
+ " )\n",
634
+ " structured_check_content = llm.with_structured_output(gen_query)\n",
635
+ " chain_gen_answer = prompt_gen_answer | structured_check_content\n",
636
+ " answer = chain_gen_answer.invoke({\"question\": question, \"context\": context, \"previous_though\": previous_though})\n",
637
+ "\n",
638
+ " return answer\n",
639
+ "\n",
640
+ "\n",
641
+ "def final_answer(question, context):\n",
642
+ " prompt_gen_answer = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an expert AI designed to analyze information from a retrieval-augmented generation system.\\n\n",
643
+ " Your task is to answer questions based on the input context. Below is a question along with the input context.\\n\n",
644
+ " Make sure your response is concise, clear, and directly answers the question in 2-3 sentences WITHOUT any explanation.\\n\n",
645
+ " DO NOT use any external knowledge.\\n\n",
646
+ " If the answer is not directly found in the given context, try to infer the best possible answer based on the given context in 2-3 sentences.\n",
647
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
648
+ " Here is the question: \\n\\n {question} \\n\\n\n",
649
+ " Here is the input context: \\n\\n {context} \\n\\n\n",
650
+ " \n",
651
+ " \"\"\",\n",
652
+ " input_variables=[\"question\", \"context\"]\n",
653
+ " )\n",
654
+ " chain_gen_answer = prompt_gen_answer | llm | StrOutputParser()\n",
655
+ " answer = chain_gen_answer.invoke({\"question\":question, \"context\": context}).strip()\n",
656
+ " return answer\n",
657
+ "\n",
658
+ "def max_length_context(context,threshold=512):\n",
659
+ " res = []\n",
660
+ " for i in context:\n",
661
+ " if len(i.split(\" \")) > threshold:\n",
662
+ " tmp = \" \".join(x for x in i.split(\" \")[:threshold])\n",
663
+ " res.append(tmp)\n",
664
+ " else:\n",
665
+ " res.append(i)\n",
666
+ " return res\n",
667
+ "\n",
668
+ " "
669
+ ]
670
+ },
671
+ {
672
+ "cell_type": "markdown",
673
+ "id": "bb0a38e3-4ca8-4b96-a9b4-8cd45534f2da",
674
+ "metadata": {},
675
+ "source": [
676
+ "# IRCoT Baseline"
677
+ ]
678
+ },
679
+ {
680
+ "cell_type": "code",
681
+ "execution_count": null,
682
+ "id": "76b13928-9551-49a0-a763-cba30eab7815",
683
+ "metadata": {
684
+ "scrolled": true
685
+ },
686
+ "outputs": [],
687
+ "source": [
688
+ "def process_question(tasks):\n",
689
+ " \"\"\"Process a single question.\"\"\"\n",
690
+ " question, label, k, n_loop = tasks[0], tasks[1], tasks[2], tasks[3]\n",
691
+ " try:\n",
692
+ " i = 0\n",
693
+ " thought_q = \"\"\n",
694
+ " pt = []\n",
695
+ " context = max_length_context(retrieval_bm25(question, k))\n",
696
+ " while i < n_loop:\n",
697
+ " check = check_response(question, format_docs(context)).binary_score\n",
698
+ " if check or (not check and i == n_loop - 1):\n",
699
+ " gen_answer = final_answer(question, format_docs(context))\n",
700
+ " break\n",
701
+ " else: \n",
702
+ " new_CoT_query = gen_question(question, format_docs(context), \"\\n\".join(pt)).new_query\n",
703
+ " pt.append(new_CoT_query)\n",
704
+ " thought_q += \"\\n\" + str(i) + \"-\" + new_CoT_query\n",
705
+ " new_context = max_length_context(retrieval_bm25(new_CoT_query, k))\n",
706
+ " context = context + new_context\n",
707
+ " context = list(set(context)) \n",
708
+ " i += 1\n",
709
+ " return {\n",
710
+ " \"Question\": question,\n",
711
+ " \"Answer\": gen_answer,\n",
712
+ " \"Label\": label,\n",
713
+ " \"Context\": context,\n",
714
+ " \"CoT\": thought_q,\n",
715
+ " \"n_CoT\": int(i+1),\n",
716
+ " }\n",
717
+ " except Exception as e:\n",
718
+ " print(f\"Error occurred during processing question '{question}': {e}\")\n",
719
+ " return None\n"
720
+ ]
721
+ },
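+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ircot-baseline-run-sketch",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative sketch, not part of the original upload: one way to run the IRCoT baseline over\n",
+ "# the test set and save results in the layout Evaluate_RAG.ipynb reads back. The ground-truth\n",
+ "# column name 'answer' and the k / n_loop settings are assumptions; adjust them to final_data.csv.\n",
+ "# The imported multiprocessing Pool could map process_question in parallel instead, if the\n",
+ "# LLM endpoint tolerates concurrent calls.\n",
+ "tasks = [(q, lab, 5, 3) for q, lab in zip(df_test['question'], df_test['answer'])]\n",
+ "results = []\n",
+ "for task in tqdm(tasks):\n",
+ "    out = process_question(task)\n",
+ "    if out is not None:\n",
+ "        results.append(out)\n",
+ "pd.DataFrame(results).to_excel(\"ircot_baseline_results.xlsx\", index=False)"
+ ]
+ },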
722
+ {
723
+ "cell_type": "markdown",
724
+ "id": "b48bb3c5-c57a-452d-a9e3-9341ad87c7ae",
725
+ "metadata": {},
726
+ "source": [
727
+ "# IRCoT + KG"
728
+ ]
729
+ },
730
+ {
731
+ "cell_type": "code",
732
+ "execution_count": null,
733
+ "id": "351838d5-2e6d-42eb-a7c4-fd638c917fd2",
734
+ "metadata": {
735
+ "scrolled": true
736
+ },
737
+ "outputs": [],
738
+ "source": [
739
+ "def process_question_KG(tasks):\n",
740
+ " question, label, k, n_loop= tasks[0], tasks[1], tasks[2], tasks[3] # Unpack the arguments\n",
741
+ " \n",
742
+ " try:\n",
743
+ " i = 0\n",
744
+ " thought_q = \"\"\n",
745
+ " pt = []\n",
746
+ " context = max_length_context(retrieval_bm25(add_triplet_context_to_question(question), k))\n",
747
+ " while i < n_loop:\n",
748
+ " check = check_response(question, format_docs(context)).binary_score\n",
749
+ " if check or (not check and i == n_loop - 1):\n",
750
+ " gen_answer = final_answer(question, format_docs(context))\n",
751
+ " break\n",
752
+ " else:\n",
753
+ " new_CoT_query = gen_question(question, format_docs(context), \"\\n\".join(pt)).new_query\n",
754
+ " pt.append(new_CoT_query)\n",
755
+ " thought_q += \"\\n\" + str(i) + \"-\" + new_CoT_query\n",
756
+ " new_context = max_length_context(retrieval_bm25(add_triplet_context_to_question(new_CoT_query), k))\n",
757
+ " context = context + new_context\n",
758
+ " context = list(set(context))\n",
759
+ " i += 1\n",
760
+ " return {\n",
761
+ " \"Question\": question,\n",
762
+ " \"Answer\": gen_answer,\n",
763
+ " \"Label\": label,\n",
764
+ " \"Context\": context,\n",
765
+ " \"CoT\": thought_q,\n",
766
+ " \"n_CoT\": int(i+1),\n",
767
+ " }\n",
768
+ " except Exception as e:\n",
769
+ " print(f\"Error occurred during processing question '{question}': {e}\")\n",
770
+ " return None\n",
771
+ "\n"
772
+ ]
773
+ }
774
+ ],
775
+ "metadata": {
776
+ "kernelspec": {
777
+ "display_name": "Python 3 (ipykernel)",
778
+ "language": "python",
779
+ "name": "python3"
780
+ },
781
+ "language_info": {
782
+ "codemirror_mode": {
783
+ "name": "ipython",
784
+ "version": 3
785
+ },
786
+ "file_extension": ".py",
787
+ "mimetype": "text/x-python",
788
+ "name": "python",
789
+ "nbconvert_exporter": "python",
790
+ "pygments_lexer": "ipython3",
791
+ "version": "3.10.6"
792
+ }
793
+ },
794
+ "nbformat": 4,
795
+ "nbformat_minor": 5
796
+ }