mailychee committed on
Commit
039fb3a
·
verified ·
1 Parent(s): 4ef2dfc

Upload sgcp (1).ipynb

Browse files
Files changed (1) hide show
  1. sgcp (1).ipynb +849 -0
sgcp (1).ipynb ADDED
@@ -0,0 +1,849 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "c456b0f8-4c68-495d-b2e1-7cffc10728e2",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "from collections import defaultdict, deque\n",
11
+ "import numpy as np\n",
12
+ "import pandas as pd\n",
13
+ "from tqdm.notebook import tqdm\n",
14
+ "from sklearn.metrics.pairwise import cosine_similarity\n",
15
+ "from sentence_transformers import SentenceTransformer\n",
16
+ "from langchain.prompts import PromptTemplate\n",
17
+ "from langchain_core.pydantic_v1 import BaseModel, Field\n",
18
+ "from multiprocessing import Pool\n",
19
+ "import pickle\n",
20
+ "import faiss\n",
21
+ "import warnings\n",
22
+ "warnings.filterwarnings(\"ignore\")"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "markdown",
27
+ "id": "7ca0f819-3674-4add-ba3b-c793b79e6ed2",
28
+ "metadata": {},
29
+ "source": [
30
+ "# LLM"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "execution_count": null,
36
+ "id": "3eacd9d7-6978-4ac4-92f2-ed64689483ff",
37
+ "metadata": {},
38
+ "outputs": [],
39
+ "source": [
40
+ "from transformers import AutoTokenizer\n",
41
+ "from langchain_community.llms import VLLMOpenAI\n",
42
+ "from langchain_openai import ChatOpenAI\n",
43
+ "\n",
44
+ "\n",
45
+ "inference_server_url = \"http://localhost:8100/v1\"\n",
46
+ "tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Meta-Llama-3.1-8B-Instruct\")\n",
47
+ "\n",
48
+ "### For Chat OpenAI template\n",
49
+ "llm = ChatOpenAI(\n",
50
+ " model=\"Llama-3.1-8B-Instruct\",\n",
51
+ " openai_api_key=\"EMPTY\",\n",
52
+ " openai_api_base=inference_server_url,\n",
53
+ " temperature=0,\n",
54
+ " streaming= False\n",
55
+ ")"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "code",
60
+ "execution_count": null,
61
+ "id": "454db217-6968-4373-8b7c-76e7ee164031",
62
+ "metadata": {
63
+ "scrolled": true
64
+ },
65
+ "outputs": [],
66
+ "source": [
67
+ "llm.invoke(\"Hello\")"
68
+ ]
69
+ },
70
+ {
71
+ "cell_type": "markdown",
72
+ "id": "f32f7238-f817-4fad-b261-0d99bc752365",
73
+ "metadata": {},
74
+ "source": [
75
+ "# Embedding"
76
+ ]
77
+ },
78
+ {
79
+ "cell_type": "code",
80
+ "execution_count": null,
81
+ "id": "22c0f930-a871-4f59-a22a-19da9e0c9d4e",
82
+ "metadata": {},
83
+ "outputs": [],
84
+ "source": [
85
+ "import json\n",
86
+ "import requests\n",
87
+ "from typing import List\n",
88
+ "from langchain_core.embeddings import Embeddings\n",
89
+ "from tqdm.notebook import tqdm\n",
90
+ "\n",
91
+ "class CustomAPIEmbeddings(Embeddings):\n",
92
+ " def __init__(self, api_url: str, show_progress: bool):\n",
93
+ " self.api_url = api_url\n",
94
+ " self.show_progress = show_progress\n",
95
+ "\n",
96
+ " def embed_documents(self, texts: List[str]) -> List[List[float]]:\n",
97
+ " lst_embedding = []\n",
98
+ " if self.show_progress: # for tqdm embedding\n",
99
+ " for query in tqdm(texts):\n",
100
+ " payload = json.dumps({\n",
101
+ " \"inputs\": query\n",
102
+ " })\n",
103
+ " headers = {\n",
104
+ " 'Content-Type': 'application/json'\n",
105
+ " }\n",
106
+ " try:\n",
107
+ " response = json.loads(\n",
108
+ " requests.request(\"POST\", self.api_url, headers=headers, data=payload).text\n",
109
+ " )\n",
110
+ " lst_embedding.append(response)\n",
111
+ " except Exception as e:\n",
112
+ " print(e)\n",
113
+ " print(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)\n",
114
+ " else:\n",
115
+ " for query in texts:\n",
116
+ " payload = json.dumps({\n",
117
+ " \"inputs\": query\n",
118
+ " })\n",
119
+ " headers = {\n",
120
+ " 'Content-Type': 'application/json'\n",
121
+ " }\n",
122
+ " try:\n",
123
+ " response = json.loads(\n",
124
+ " requests.request(\"POST\", self.api_url, headers=headers, data=payload).text\n",
125
+ " )\n",
126
+ " lst_embedding.append(response)\n",
127
+ " except Exception as e:\n",
128
+ " print(e)\n",
129
+ " # print(requests.request(\"POST\", self.api_url, headers=headers, data=payload).text)\n",
130
+ "\n",
131
+ " return lst_embedding\n",
132
+ "\n",
133
+ " def embed_query(self, text: str) -> List[float]:\n",
134
+ " return self.embed_documents([text])[0]\n",
135
+ "\n",
136
+ "# Instantiate\n",
137
+ "embeddings = CustomAPIEmbeddings(api_url='http://localhost:8081/embed', show_progress=False)\n"
138
+ ]
139
+ },
140
+ {
141
+ "cell_type": "code",
142
+ "execution_count": null,
143
+ "id": "4d521cc3-1289-4989-829e-f27cd6bcde05",
144
+ "metadata": {},
145
+ "outputs": [],
146
+ "source": [
147
+ "from transformers import AutoTokenizer\n",
148
+ "\n",
149
+ "tokenizer = AutoTokenizer.from_pretrained(\"BAAI/bge-large-en-v1.5\")"
150
+ ]
151
+ },
152
+ {
153
+ "cell_type": "markdown",
154
+ "id": "f5269674-4496-45ad-81f5-5fcfb7665362",
155
+ "metadata": {},
156
+ "source": [
157
+ "# Load data"
158
+ ]
159
+ },
160
+ {
161
+ "cell_type": "code",
162
+ "execution_count": null,
163
+ "id": "27ab0473-ad8e-4070-a972-ef4eb003f55c",
164
+ "metadata": {},
165
+ "outputs": [],
166
+ "source": [
167
+ "df = pd.read_csv(\"final_data.csv\")\n",
168
+ "questions = df[\"question\"].to_list()"
169
+ ]
170
+ },
171
+ {
172
+ "cell_type": "code",
173
+ "execution_count": null,
174
+ "id": "80852f40-af74-42fd-af20-de3aa4bca33c",
175
+ "metadata": {},
176
+ "outputs": [],
177
+ "source": [
178
+ "df['question'].duplicated().sum()\n"
179
+ ]
180
+ },
181
+ {
182
+ "cell_type": "code",
183
+ "execution_count": null,
184
+ "id": "adc6c054-5370-4aee-821d-f615c7acc791",
185
+ "metadata": {
186
+ "scrolled": true
187
+ },
188
+ "outputs": [],
189
+ "source": [
190
+ "df[df['question'].duplicated()]['question'].value_counts()\n",
191
+ "df[df[\"question\"]==\"What does the table show?\"]\n"
192
+ ]
193
+ },
194
+ {
195
+ "cell_type": "code",
196
+ "execution_count": null,
197
+ "id": "3675ffaa-1606-4f51-86b3-19baa58b797e",
198
+ "metadata": {},
199
+ "outputs": [],
200
+ "source": [
201
+ "with open(\"map_triplet_rb.pkl\",'rb') as f:\n",
202
+ " dct_mapping_triplet = pickle.load(f)\n",
203
+ "with open(\"embedded_ragbench_clean.pkl\",'rb') as f:\n",
204
+ " lst_embedding = pickle.load(f)"
205
+ ]
206
+ },
207
+ {
208
+ "cell_type": "code",
209
+ "execution_count": null,
210
+ "id": "23313f6c-300f-4e12-b883-fc6df677a4ca",
211
+ "metadata": {},
212
+ "outputs": [],
213
+ "source": [
214
+ "lst_embedding.shape"
215
+ ]
216
+ },
217
+ {
218
+ "cell_type": "code",
219
+ "execution_count": null,
220
+ "id": "1c867ac2-14a4-4eb8-8af9-4cfdbf4c42fe",
221
+ "metadata": {},
222
+ "outputs": [],
223
+ "source": [
224
+ "faiss_embeddings = lst_embedding.astype('float32')\n",
225
+ "d = faiss_embeddings.shape[1] # dimension\n",
226
+ "index = faiss.IndexFlatL2(d) # L2 distance index\n",
227
+ "index.add(faiss_embeddings) # add embeddings"
228
+ ]
229
+ },
230
+ {
231
+ "cell_type": "markdown",
232
+ "id": "d38bd5e9-bed9-4213-9b84-d36f44df6064",
233
+ "metadata": {},
234
+ "source": [
235
+ "# Functions"
236
+ ]
237
+ },
238
+ {
239
+ "cell_type": "code",
240
+ "execution_count": null,
241
+ "id": "4b251e36-6567-40f7-bb32-334e5b342317",
242
+ "metadata": {},
243
+ "outputs": [],
244
+ "source": [
245
+ "def faiss_cosine(query_vector, k=10):\n",
246
+ " query_vector = query_vector.astype('float32')\n",
247
+ " distances, indices = index.search(query_vector, k)\n",
248
+ " return indices.flatten()\n",
249
+ "\t\n",
250
+ "def query_triplet_topk(query, k=10):\n",
251
+ "\tt = tokenizer.encode(query)\n",
252
+ "\tif len(t) > 512:\n",
253
+ "\t\tt = t[:500]\n",
254
+ "\t\tquery = tokenizer.decode(t)\n",
255
+ "\tquery_emb = np.array(embeddings.embed_query(query)).reshape(1,-1)\n",
256
+ "\ttopk_indices_sorted = faiss_cosine(query_emb).tolist()\n",
257
+ "\treturn [dct_mapping_triplet[x] for x in topk_indices_sorted]\n",
258
+ "\n",
259
+ "def format_claim(relations):\n",
260
+ " for rel in relations:\n",
261
+ " rel['r.summary'] = rel['r.summary'].split(\"\\n\\n\")[-1]\n",
262
+ " # return \"\\n\\n\".join(f\"[{i+1}] {doc.page_content}\" for i, doc in enumerate(docs))\n",
263
+ " return \"\\n\\n\".join(f\"{idx+1}. {rel['r.summary']}\" for idx, rel in enumerate(relations))\n",
264
+ "\n",
265
+ "class GradeRelationList(BaseModel):\n",
266
+ " \"\"\"List passage index check on retrieved text.\"\"\"\n",
267
+ " passage_idx: str = Field(\n",
268
+ " description=\"The passage index of relevant chunks, seperated by a comma\"\n",
269
+ " )\n",
270
+ "\t\n",
271
+ "def check_grade_lst(question, text):\n",
272
+ " prompt_text_grader = PromptTemplate(template=\"\"\"<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assessing relevance \n",
273
+ " of a list of retrieved passages to a user question. The goal is to filter out erroneous retrievals. \\n\n",
274
+ " Return only the passage index whether the passage is relevant to the question. \\n\n",
275
+ " Provide the output as a JSON with passage index seperated by a comma and no premable or explaination.\n",
276
+ " <|eot_id|><|start_header_id|>user<|end_header_id|>\n",
277
+ " Here is the list of retrieved text: \\n\\n {text} \\n\\n\n",
278
+ " Here is the user question: {question} \\n <|eot_id|><|start_header_id|>assistant<|end_header_id|>\n",
279
+ " \"\"\",\n",
280
+ " input_variables=[\"question\", \"text\"]\n",
281
+ " )\n",
282
+ " structured_llm_grader = llm.with_structured_output(GradeRelationList)\n",
283
+ " relation_grader = prompt_text_grader | structured_llm_grader \n",
284
+ " result = relation_grader.invoke({\"question\": question, \"text\": text})\n",
285
+ " # print(result)\n",
286
+ " return result"
287
+ ]
288
+ },
289
+ {
290
+ "cell_type": "code",
291
+ "execution_count": null,
292
+ "id": "82f27027-7a59-4321-a2ec-40fb20304476",
293
+ "metadata": {
294
+ "scrolled": true
295
+ },
296
+ "outputs": [],
297
+ "source": [
298
+ "query_emb = np.array(embeddings.embed_query(questions[1])).reshape(1,-1)\n",
299
+ "topk_indices_sorted = faiss_cosine(query_emb).tolist()\n",
300
+ "topk_indices_sorted"
301
+ ]
302
+ },
303
+ {
304
+ "cell_type": "code",
305
+ "execution_count": null,
306
+ "id": "521d7e93-71df-48e2-9fbc-387053845ab2",
307
+ "metadata": {},
308
+ "outputs": [],
309
+ "source": [
310
+ "dct_mapping_triplet[36542]"
311
+ ]
312
+ },
313
+ {
314
+ "cell_type": "code",
315
+ "execution_count": null,
316
+ "id": "9a9ce274-24af-4b33-aca5-58b3d9c5d905",
317
+ "metadata": {},
318
+ "outputs": [],
319
+ "source": [
320
+ "# Query top 10 triplet\n",
321
+ "# lst_triplet_top_k_cos = []\n",
322
+ "# for i in tqdm(questions):\n",
323
+ "# lst_triplet_top_k_cos.append(query_triplet_topk(i))"
324
+ ]
325
+ },
326
+ {
327
+ "cell_type": "code",
328
+ "execution_count": null,
329
+ "id": "cceb5693-5c83-4175-8362-1b7a2ab442f7",
330
+ "metadata": {},
331
+ "outputs": [],
332
+ "source": [
333
+ "# with open(\"top10_rb.pkl\", \"wb\") as f:\n",
334
+ "# \tpickle.dump(lst_triplet_top_k_cos, f)"
335
+ ]
336
+ },
337
+ {
338
+ "cell_type": "code",
339
+ "execution_count": null,
340
+ "id": "34a972ea-a23b-41c1-a80b-8be47247fac0",
341
+ "metadata": {},
342
+ "outputs": [],
343
+ "source": [
344
+ "with open(\"top10_rb.pkl\", \"rb\") as f:\n",
345
+ "\tlst_triplet_top_k_cos = pickle.load(f)"
346
+ ]
347
+ },
348
+ {
349
+ "cell_type": "code",
350
+ "execution_count": null,
351
+ "id": "0d5651de-27a3-430c-865b-a0c8d54dfdbb",
352
+ "metadata": {},
353
+ "outputs": [],
354
+ "source": [
355
+ "map_triplet = {}\n",
356
+ "for i,j in zip(lst_triplet_top_k_cos, questions):\n",
357
+ " map_triplet[j] = i"
358
+ ]
359
+ },
360
+ {
361
+ "cell_type": "code",
362
+ "execution_count": null,
363
+ "id": "16c6f900-a8f2-473a-9629-bdbd565cf6ce",
364
+ "metadata": {},
365
+ "outputs": [],
366
+ "source": [
367
+ "len(questions)"
368
+ ]
369
+ },
370
+ {
371
+ "cell_type": "code",
372
+ "execution_count": null,
373
+ "id": "e7e7dfd6-983a-4fb8-91b1-34b23b8faa14",
374
+ "metadata": {},
375
+ "outputs": [],
376
+ "source": [
377
+ "# # Filter triplet\n",
378
+ "# f_triplet = []\n",
379
+ "# for q in tqdm(questions, total=len(questions)):\n",
380
+ "# \trelations = map_triplet[q]\n",
381
+ "# \tcontext = check_grade_lst(q, format_claim(relations)).passage_idx\n",
382
+ "# \tcontext = [int(x) for x in context.split(\",\")]\n",
383
+ "# \tcheck_rels = [relations[x-1] for x in context]\n",
384
+ "# \tf_triplet.append(check_rels)\n",
385
+ "# \tif len(f_triplet) % 10 == 0:\n",
386
+ "# \t\twith open(\"top10_filtered_rb.pkl\", \"wb\") as f:\n",
387
+ "# \t\t\tpickle.dump(f_triplet, f)"
388
+ ]
389
+ },
390
+ {
391
+ "cell_type": "code",
392
+ "execution_count": null,
393
+ "id": "51dde469-38a2-4268-87ea-429b8b2b5c61",
394
+ "metadata": {},
395
+ "outputs": [],
396
+ "source": [
397
+ "import os\n",
398
+ "\n",
399
+ "os.getcwd()"
400
+ ]
401
+ },
402
+ {
403
+ "cell_type": "code",
404
+ "execution_count": null,
405
+ "id": "2955959b-54e1-4c49-bef8-42afa30c6e85",
406
+ "metadata": {},
407
+ "outputs": [],
408
+ "source": [
409
+ "import uuid\n",
410
+ "def filter_triplet(q):\n",
411
+ "\tglobal map_triplet\n",
412
+ "\trelations = map_triplet[q]\n",
413
+ "\ttry:\n",
414
+ "\t\tcontext = check_grade_lst(q, format_claim(relations)).passage_idx\n",
415
+ "\t\tcontext = [int(x) for x in context.split(\",\")]\n",
416
+ "\t\t# Validate context indices\n",
417
+ "\t\t# if any(x <= 0 or x > len(relations) for x in context):\n",
418
+ "\t\t# raise ValueError(\"Invalid index in context\")\n",
419
+ "\t\tf = [relations[x - 1] for x in context]\n",
420
+ "\texcept Exception as e:\n",
421
+ "\t\tprint(f\"Error processing {q}: {e}\")\n",
422
+ "\t\tf = relations # fallback: use all relations\n",
423
+ "\n",
424
+ "\tfile_name = uuid.uuid4()\n",
425
+ "\tto_save = (q, f)\n",
426
+ "\twith open(f\"/home/ubuntu/work/minhbc/doan/ftriplet_318b/{file_name}.pkl\", \"wb\") as file:\n",
427
+ "\t\tpickle.dump(to_save, file)\n",
428
+ "\treturn f"
429
+ ]
430
+ },
431
+ {
432
+ "cell_type": "code",
433
+ "execution_count": null,
434
+ "id": "416ae401-edf4-4c0c-b992-d4f3b0127e7e",
435
+ "metadata": {},
436
+ "outputs": [],
437
+ "source": [
438
+ "# with Pool(5) as pool:\n",
439
+ "# f_triplets = list(tqdm(pool.imap(filter_triplet, questions), total=len(questions)))"
440
+ ]
441
+ },
442
+ {
443
+ "cell_type": "code",
444
+ "execution_count": null,
445
+ "id": "90dd9f7d-873d-4188-948c-69e3a108e650",
446
+ "metadata": {},
447
+ "outputs": [],
448
+ "source": [
449
+ "# with open(\"top10_filtered_rb.pkl\", \"wb\") as f:\n",
450
+ "# \tpickle.dump(f_triplets, f)"
451
+ ]
452
+ },
453
+ {
454
+ "cell_type": "markdown",
455
+ "id": "c5fb4a21-17f2-4c33-9e73-7843c9f878df",
456
+ "metadata": {},
457
+ "source": [
458
+ "# KG completion"
459
+ ]
460
+ },
461
+ {
462
+ "cell_type": "code",
463
+ "execution_count": null,
464
+ "id": "272dbecd-b6aa-4d66-b153-630c1660d08a",
465
+ "metadata": {},
466
+ "outputs": [],
467
+ "source": [
468
+ "from collections import defaultdict, deque"
469
+ ]
470
+ },
471
+ {
472
+ "cell_type": "code",
473
+ "execution_count": null,
474
+ "id": "965d77e0-062c-4d53-8e14-4424f51c7950",
475
+ "metadata": {},
476
+ "outputs": [],
477
+ "source": [
478
+ "import numpy as np\n",
479
+ "import copy\n",
480
+ "\n",
481
+ "def build_undirected_graph(triplets):\n",
482
+ "\t\"\"\"\n",
483
+ "\tXây đồ thị vô hướng:\n",
484
+ "\t- Mỗi cạnh (m -r-> n) được thêm cả (m->n) và (n->m với r_rev).\n",
485
+ "\t\"\"\"\n",
486
+ "\tgraph = defaultdict(list)\n",
487
+ "\tfor t in triplets:\n",
488
+ "\t\tm_id = t['m']['id']\n",
489
+ "\t\tn_id = t['n']['id']\n",
490
+ "\t\trel = t['r']['id']\n",
491
+ "\t\tsummary = t['r']['summary']\n",
492
+ "\t\t\n",
493
+ "\t\t# chiều xuôi\n",
494
+ "\t\tgraph[m_id].append({\n",
495
+ "\t\t\t'm': {'id': m_id},\n",
496
+ "\t\t\t'r': {'id': rel, 'summary': summary},\n",
497
+ "\t\t\t'n': {'id': n_id}\n",
498
+ "\t\t})\n",
499
+ "\t\t# chiều ngược\n",
500
+ "\t\tgraph[n_id].append({\n",
501
+ "\t\t\t'm': {'id': n_id},\n",
502
+ "\t\t\t'r': {'id': f\"{rel}_rev\", 'summary': summary},\n",
503
+ "\t\t\t'n': {'id': m_id}\n",
504
+ "\t\t})\n",
505
+ "\treturn graph\n",
506
+ "def bfs_all_paths(KG, start, end, max_length):\n",
507
+ "\t\"\"\"\n",
508
+ "\tTrả về list các đường đi (mỗi đường là list các triplet-dicts)\n",
509
+ "\ttừ start -> end với số bước < max_length.\n",
510
+ "\t\"\"\"\n",
511
+ "\tif start not in KG or end not in KG:\n",
512
+ "\t\treturn []\n",
513
+ "\n",
514
+ "\tall_paths = []\n",
515
+ "\tqueue = deque([(start, [])]) # (node hiện tại, path_so_far)\n",
516
+ "\n",
517
+ "\twhile queue:\n",
518
+ "\t\tcurrent, path = queue.popleft()\n",
519
+ "\t\t# print(f\"Cur: {current}\")\n",
520
+ "\t\t# print(f\"Path: {path}\")\n",
521
+ "\t\t# print(f\"KG curr: {KG[current]}\")\n",
522
+ "\t\t\n",
523
+ "\t\tif len(path) >= max_length:\n",
524
+ "\t\t\tcontinue\n",
525
+ "\n",
526
+ "\t\tfor triplet in KG[current]:\n",
527
+ "\t\t\tneighbor = triplet['n']['id']\n",
528
+ "\t\t\t# print(f\"Neigbor: {neighbor}\")\n",
529
+ "\t\t\tnew_path = path + [triplet]\n",
530
+ "\t\t\tif neighbor == end:\n",
531
+ "\t\t\t\tall_paths.append(new_path)\n",
532
+ "\t\t\telse:\n",
533
+ "\t\t\t\tvisited = {t['m']['id'] for t in path} | {t['n']['id'] for t in path}\n",
534
+ "\t\t\t\t# print(f\"visited: {visited}\")\n",
535
+ "\t\t\t\tif neighbor not in visited:\n",
536
+ "\t\t\t\t\tqueue.append((neighbor, new_path))\n",
537
+ "\t\t# print(\"*\"*50)\n",
538
+ "\t# final = []\n",
539
+ "\t# if all_paths is not None:\n",
540
+ "\t# \tfor p in all_paths:\n",
541
+ "\t# \t\tif p not in final:\n",
542
+ "\t# \t\t\tfinal.append(p)\n",
543
+ "\t# \tall_paths = final\n",
544
+ "\treturn all_paths\n",
545
+ "\n",
546
+ "# ----------------------------------------\n",
547
+ "# Step 5: Hàm mở rộng subgraph theo độ liên quan\n",
548
+ "# ----------------------------------------\n",
549
+ "def normalize_triplet(t):\n",
550
+ "\t\"\"\"Convert triplet into normalized form (no _rev, consistent direction)\"\"\"\n",
551
+ "\tm_id, r_id, n_id = t[\"m\"][\"id\"], t[\"r\"][\"id\"], t[\"n\"][\"id\"]\n",
552
+ "\tsummary = t[\"r\"][\"summary\"]\n",
553
+ "\tif \"_rev\" in r_id:\n",
554
+ "\t\tr_id = r_id.split(\"_rev\")[0]\n",
555
+ "\t\tm_id, n_id = n_id, m_id # swap direction\n",
556
+ "\treturn {\n",
557
+ "\t\t\"m\": {\"id\": m_id},\n",
558
+ "\t\t\"r\": {\"id\": r_id, \"summary\": summary},\n",
559
+ "\t\t\"n\": {\"id\": n_id}\n",
560
+ "\t}\n",
561
+ "\t\n",
562
+ "def relevance_guided_path_addition(KG, T, question, model, K=100, max_path_length=5):\n",
563
+ "\t\"\"\"\n",
564
+ "\tKG: dict[node_id] -> list of triplet-dicts {'m':{'id'}, 'r':{'id','summary'}, 'n':{'id'}}\n",
565
+ "\tT: list of triplet-dicts (subgraph gốc)\n",
566
+ "\tquestion: str hoặc None\n",
567
+ "\tmodel: object có .embed_query(str) và .encode(list_of_str)\n",
568
+ "\tK: số triplet mới tối đa thêm vào\n",
569
+ "\tmax_path_length: độ dài tối đa mỗi path\n",
570
+ "\tTrả về H = T + selected_triplets\n",
571
+ "\t\"\"\"\n",
572
+ "\t# Step 1: Tập entity từ T\n",
573
+ "\tE_T = {t['m']['id'] for t in T} | {t['n']['id'] for t in T}\n",
574
+ "\tprint(E_T)\n",
575
+ "\n",
576
+ "\t# Step 2: Embedding câu hỏi (nếu có)\n",
577
+ "\tquestion_emb = np.array(model.embed_query(question)) if question else None\n",
578
+ "\n",
579
+ "\t# Step 3: Tạo set key của T để kiểm tra nhanh\n",
580
+ "\tT_keys = {(t['m']['id'], t['r'][\"id\"], t['n']['id']) for t in T}\n",
581
+ "\n",
582
+ "\t# Step 4: Tìm tất cả đường đi giữa mọi cặp trong E_T\n",
583
+ "\tall_paths = []\n",
584
+ "\tfor start in E_T:\n",
585
+ "\t\tfor end in E_T:\n",
586
+ "\t\t\tif start == end:\n",
587
+ "\t\t\t\tcontinue\n",
588
+ "\t\t\tall_paths.extend(bfs_all_paths(KG, start, end, max_path_length))\n",
589
+ "\n",
590
+ "\tif not all_paths:\n",
591
+ "\t\tprint(\"H\")\n",
592
+ "\t\treturn T\n",
593
+ "\tprint(len(all_paths))\n",
594
+ "\n",
595
+ "\t# all_paths = all_paths[(len(T)*2 - 1):]\n",
596
+ "\t# print(all_paths)\n",
597
+ "\tfilterp = []\n",
598
+ "\tfor path in all_paths:\n",
599
+ "\t\tseen_keys = set()\n",
600
+ "\t\tnew_path = []\n",
601
+ "\t\tfor t in path:\n",
602
+ "\t\t\tnorm_t = normalize_triplet(t)\n",
603
+ "\t\t\tkey = (norm_t[\"m\"][\"id\"], norm_t[\"r\"][\"id\"], norm_t[\"n\"][\"id\"])\n",
604
+ "\t\t\tif key not in seen_keys:\n",
605
+ "\t\t\t\tnew_path.append(norm_t)\n",
606
+ "\t\t\t\tseen_keys.add(key)\n",
607
+ "\t\tfilterp.append(new_path)\n",
608
+ "\tall_paths = filterp\n",
609
+ "\n",
610
+ "\n",
611
+ "\t# Step 6: Score từng đường đi\n",
612
+ "\tpath_scores = []\n",
613
+ "\tfor path in all_paths:\n",
614
+ "\t\t# print(path)\n",
615
+ "\t\tsummaries = [triplet['r']['summary'] for triplet in path]\n",
616
+ "\t\tembs = model.embed_query(summaries)\n",
617
+ "\t\tembs = np.array(embs)\n",
618
+ "\t\tif question_emb is not None:\n",
619
+ "\t\t\tsims = cosine_similarity(embs, question_emb.reshape(1, -1)).flatten()\n",
620
+ "\t\t\tscore = float(np.mean(sims)) if sims.size else 0.0\n",
621
+ "\t\telse:\n",
622
+ "\t\t\tscore = 0.0\n",
623
+ "\t\tpath_scores.append((path, score))\n",
624
+ "\n",
625
+ "\t# Step 7: Chọn K triplet mới theo thứ tự score giảm dần\n",
626
+ "\tpath_scores.sort(key=lambda x: x[1], reverse=True)\n",
627
+ "\t# for ps in path_scores:\n",
628
+ "\t\t\n",
629
+ "\tprint(f\"len ps:{len(path_scores)}\")\n",
630
+ "\tpath_scores = path_scores[(len(T)*2-1):]\n",
631
+ "\tselected = []\n",
632
+ "\tselected_keys = set()\n",
633
+ "\tfor path, _ in path_scores:\n",
634
+ "\t\tfor triplet in path:\n",
635
+ "\t\t\tkey = (triplet['m']['id'], triplet['r']['id'], triplet['n']['id'])\n",
636
+ "\t\t\tif key not in T_keys and key not in selected_keys:\n",
637
+ "\t\t\t\tselected.append(triplet)\n",
638
+ "\t\t\t\tselected_keys.add(key)\n",
639
+ "\t\t\t\tif len(selected) >= K:\n",
640
+ "\t\t\t\t\tbreak\n",
641
+ "\t\tif len(selected) >= K:\n",
642
+ "\t\t\tbreak\n",
643
+ "\n",
644
+ "\t# Step 8: Trả về subgraph hoàn chỉnh\n",
645
+ "\tH = T + selected\n",
646
+ "\t# t = []\n",
647
+ "\t# for t in H\n",
648
+ "\treturn H\n",
649
+ "\n",
650
+ "\n",
651
+ "def subgraph_completion(task):\n",
652
+ "\tglobal KG\n",
653
+ "\tglobal model\n",
654
+ "\tT, ques, K, max_path_length = task[0], task[1], task[2], task[3]\n",
655
+ "\tresult = relevance_guided_path_addition(KG, T, ques, model, K, max_path_length)\n",
656
+ "\tto_save = (ques, result)\n",
657
+ "\tfile_name = uuid.uuid4()\n",
658
+ "\twith open(f\"/home/ubuntu/work/minhbc/doan/sgcp_318b/{file_name}.pkl\", \"wb\") as file:\n",
659
+ "\t\tpickle.dump(to_save, file)\n",
660
+ "\treturn result\n",
661
+ "\t\n",
662
+ "\t"
663
+ ]
664
+ },
665
+ {
666
+ "cell_type": "code",
667
+ "execution_count": null,
668
+ "id": "fdbdacc7-2cfb-4168-ac2c-9a816cb9cbae",
669
+ "metadata": {},
670
+ "outputs": [],
671
+ "source": [
672
+ "with open(\"runned.pkl\", \"rb\") as f:\n",
673
+ "\trunned = pickle.load(f)"
674
+ ]
675
+ },
676
+ {
677
+ "cell_type": "code",
678
+ "execution_count": null,
679
+ "id": "a2f4d12a-1b1d-4852-80c2-e15ae603f043",
680
+ "metadata": {},
681
+ "outputs": [],
682
+ "source": [
683
+ "with open(\"rb_filtered_318b.pkl\", \"rb\") as file:\n",
684
+ "\tftriplets = pickle.load(file)"
685
+ ]
686
+ },
687
+ {
688
+ "cell_type": "code",
689
+ "execution_count": null,
690
+ "id": "4bd3a65d-0cb6-4379-9b67-d6bd0c1df340",
691
+ "metadata": {},
692
+ "outputs": [],
693
+ "source": [
694
+ "n_questions, n_ftriplets = [], []\n",
695
+ "for i in tqdm(range(len(questions))):\n",
696
+ "\tif questions[i] not in runned:\n",
697
+ "\t\tn_questions.append(questions[i])\n",
698
+ "\t\tn_ftriplets.append(ftriplets[i])\n",
699
+ "\t\t"
700
+ ]
701
+ },
702
+ {
703
+ "cell_type": "code",
704
+ "execution_count": null,
705
+ "id": "0d6a9178-4516-4a79-ad72-9116035eaaea",
706
+ "metadata": {},
707
+ "outputs": [],
708
+ "source": [
709
+ "len(n_questions), len(n_ftriplets)"
710
+ ]
711
+ },
712
+ {
713
+ "cell_type": "code",
714
+ "execution_count": null,
715
+ "id": "ae5afc14-e79e-4ca3-b3da-238a5a751c3d",
716
+ "metadata": {},
717
+ "outputs": [],
718
+ "source": [
719
+ "# Format filter triplet\n",
720
+ "\n",
721
+ "# len(ftriplets)\n",
722
+ "\n",
723
+ "formatted_ftriplets = []\n",
724
+ "for recs in tqdm(n_ftriplets, total=len(n_ftriplets)):\n",
725
+ "\tformatted_rec = []\n",
726
+ "\tfor rec in recs:\n",
727
+ "\t\tsubj = rec['r'][0]['id']\n",
728
+ "\t\trel = rec['r'][1]\n",
729
+ "\t\tobj = rec['r'][2]['id']\n",
730
+ "\t\tsummary = rec['r.summary']\n",
731
+ "\t\tformatted_rec.append({\n",
732
+ "\t\t\t'm': {'id': subj},\n",
733
+ "\t\t\t'r': {'id': rel, 'summary': summary},\n",
734
+ "\t\t\t'n': {'id': obj}\n",
735
+ "\t\t})\n",
736
+ "\tformatted_ftriplets.append(formatted_rec)\n",
737
+ "len(formatted_ftriplets)"
738
+ ]
739
+ },
740
+ {
741
+ "cell_type": "code",
742
+ "execution_count": null,
743
+ "id": "e05057b6-b363-4a4c-bfcb-fec3e94b5330",
744
+ "metadata": {},
745
+ "outputs": [],
746
+ "source": [
747
+ "KG_data = []\n",
748
+ "for rec in tqdm(dct_mapping_triplet, total=len(dct_mapping_triplet)):\n",
749
+ " subj = rec['r'][0]['id']\n",
750
+ " rel = rec['r'][1]\n",
751
+ " obj = rec['r'][2]['id']\n",
752
+ " summary = rec['r.summary']\n",
753
+ " KG_data.append({\n",
754
+ " 'm': {'id': subj},\n",
755
+ " 'r': {'id': rel, 'summary': summary},\n",
756
+ " 'n': {'id': obj}\n",
757
+ " })\n",
758
+ "KG = build_undirected_graph(KG_data)"
759
+ ]
760
+ },
761
+ {
762
+ "cell_type": "code",
763
+ "execution_count": null,
764
+ "id": "a5bc5d2a-c67b-4877-823c-1609e046056b",
765
+ "metadata": {
766
+ "scrolled": true
767
+ },
768
+ "outputs": [],
769
+ "source": [
770
+ "model = embeddings\n",
771
+ "\n",
772
+ "tasks = [(formatted_ftriplets[i], n_questions[i], 10, 2) for i in range(len(n_questions))]\n",
773
+ "with Pool(20) as pool:\n",
774
+ "\tsgcp = list(tqdm(pool.imap(subgraph_completion, tasks), total =len(tasks)))"
775
+ ]
776
+ },
777
+ {
778
+ "cell_type": "code",
779
+ "execution_count": null,
780
+ "id": "03a2650f-638d-4812-abe5-794634232462",
781
+ "metadata": {},
782
+ "outputs": [],
783
+ "source": [
784
+ "with open(\"sgcp_318b.pkl\", \"wb\") as file:\n",
785
+ "\tpickle.dump(sgcp, file)"
786
+ ]
787
+ },
788
+ {
789
+ "cell_type": "code",
790
+ "execution_count": null,
791
+ "id": "ed560d4d-8caf-49d3-a913-15874b953209",
792
+ "metadata": {},
793
+ "outputs": [],
794
+ "source": [
795
+ "T = formatted_ftriplets[0]\n",
796
+ "ques = questions[0]\n",
797
+ "T"
798
+ ]
799
+ },
800
+ {
801
+ "cell_type": "code",
802
+ "execution_count": null,
803
+ "id": "8461606b-69ec-474a-9682-27b5022a12df",
804
+ "metadata": {},
805
+ "outputs": [],
806
+ "source": []
807
+ },
808
+ {
809
+ "cell_type": "code",
810
+ "execution_count": null,
811
+ "id": "80728aa1-7870-45f4-84ef-359bd08963aa",
812
+ "metadata": {},
813
+ "outputs": [],
814
+ "source": [
815
+ "\n",
816
+ "relevance_guided_path_addition(KG, T, ques, embeddings, 10, 2)"
817
+ ]
818
+ },
819
+ {
820
+ "cell_type": "code",
821
+ "execution_count": null,
822
+ "id": "2945c9ce-4c42-402d-a1c4-452f5f52a78c",
823
+ "metadata": {},
824
+ "outputs": [],
825
+ "source": []
826
+ }
827
+ ],
828
+ "metadata": {
829
+ "kernelspec": {
830
+ "display_name": "Python 3 (ipykernel)",
831
+ "language": "python",
832
+ "name": "python3"
833
+ },
834
+ "language_info": {
835
+ "codemirror_mode": {
836
+ "name": "ipython",
837
+ "version": 3
838
+ },
839
+ "file_extension": ".py",
840
+ "mimetype": "text/x-python",
841
+ "name": "python",
842
+ "nbconvert_exporter": "python",
843
+ "pygments_lexer": "ipython3",
844
+ "version": "3.11.5"
845
+ }
846
+ },
847
+ "nbformat": 4,
848
+ "nbformat_minor": 5
849
+ }