# chatbot_server/function/file/extract_file.py
from langchain_community.vectorstores import FAISS
import os
from langchain.text_splitter import CharacterTextSplitter
import json
import random
import re
from concurrent.futures import ThreadPoolExecutor, as_completed
import google.generativeai as genai
import nltk
import pandas as pd
from groq import Groq
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain_community.retrievers import BM25Retriever
from langchain.retrievers import EnsembleRetriever
from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_cohere import CohereRerank
from langchain_community.document_loaders import Docx2txtLoader
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders import UnstructuredCSVLoader
from langchain_community.document_loaders import UnstructuredExcelLoader
from langchain_community.document_loaders import UnstructuredHTMLLoader
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_community.document_loaders import UnstructuredPDFLoader
from langchain_community.document_loaders import UnstructuredPowerPointLoader
from langchain_community.document_loaders import UnstructuredXMLLoader
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.llms import Cohere
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
from typing import List
nltk.download('punkt')
from dotenv import load_dotenv
load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
GOOGLE_API_KEY1= os.getenv("GOOGLE_API_KEY_1")
GOOGLE_API_KEY= os.getenv("GOOGLE_API_KEY")
client = Groq(
api_key= GROQ_API_KEY,
)
genai.configure(api_key=GOOGLE_API_KEY)
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain_google_vertexai import VertexAIEmbeddings
from langchain_huggingface import HuggingFaceEmbeddings
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", task_type="retrieval_document")
llm = ChatGoogleGenerativeAI(model='gemini-2.0-flash-thinking-exp-01-21',temperature=0.6)
def check_persist_directory(id, file_name):
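    """Return True if a Chroma persist directory already exists for this file name.

    Note: the `id` argument is currently unused; the directory is keyed by file name only.
    """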
directory_path = f"./vector_database/{file_name}"
check = os.path.exists(directory_path)
return check
def check_path_exists(path):
return os.path.exists(path)
def load_file(loader):
return loader.load()
from langchain_community.document_loaders import PyPDFLoader
def extract_data2():
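    """Load every supported file in ../data in parallel, split the documents into
    chunks, and persist them in a Chroma vector store under ./vector_database.

    Returns the list of split documents, or False if the data directory is
    missing or empty.
    """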
documents = []
base_dir = os.path.dirname(os.path.abspath(__file__))
directory_path = os.path.join(base_dir, "..", "data")
    # Normalize the path to an absolute path
directory_path = os.path.abspath(directory_path)
if not os.path.exists(directory_path) or not any(
os.path.isfile(os.path.join(directory_path, f)) for f in os.listdir(directory_path)):
return False
tasks = []
with ThreadPoolExecutor() as executor:
for file in os.listdir(directory_path):
if file.endswith(".pdf"):
pdf_path = os.path.join(directory_path, file)
loader = PyPDFLoader(pdf_path)
tasks.append(executor.submit(load_file, loader))
elif file.endswith('.docx') or file.endswith('.doc'):
doc_path = os.path.join(directory_path, file)
loader = Docx2txtLoader(doc_path)
tasks.append(executor.submit(load_file, loader))
elif file.endswith('.txt'):
txt_path = os.path.join(directory_path, file)
loader = TextLoader(txt_path, encoding="utf8")
tasks.append(executor.submit(load_file, loader))
elif file.endswith('.pptx'):
ppt_path = os.path.join(directory_path, file)
loader = UnstructuredPowerPointLoader(ppt_path)
tasks.append(executor.submit(load_file, loader))
elif file.endswith('.csv'):
csv_path = os.path.join(directory_path, file)
loader = UnstructuredCSVLoader(csv_path)
tasks.append(executor.submit(load_file, loader))
elif file.endswith('.xlsx'):
excel_path = os.path.join(directory_path, file)
loader = UnstructuredExcelLoader(excel_path)
tasks.append(executor.submit(load_file, loader))
elif file.endswith('.json'):
json_path = os.path.join(directory_path, file)
loader = TextLoader(json_path)
tasks.append(executor.submit(load_file, loader))
elif file.endswith('.md'):
md_path = os.path.join(directory_path, file)
loader = UnstructuredMarkdownLoader(md_path)
tasks.append(executor.submit(load_file, loader))
for future in as_completed(tasks):
result = future.result()
documents.extend(result)
text_splitter = CharacterTextSplitter(chunk_size=1500, chunk_overlap=500)
texts = text_splitter.split_documents(documents)
Chroma.from_documents(documents=texts,
embedding=embeddings,
persist_directory=f"./vector_database")
return texts
class Search(BaseModel):
queries: List[str] = Field(
...,
description="Truy vấn riêng biệt để tìm kiếm, giữ nguyên ý chính câu hỏi riêng biệt",
)
def query_analyzer(query):
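    """Split a user question into one or more standalone search queries using a
    structured-output LLM call; returns a `Search` object with the query list."""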
output_parser = PydanticToolsParser(tools=[Search])
system = """Bạn có khả năng đưa ra các truy vấn tìm kiếm chính xác để lấy thông tin giúp trả lời các yêu cầu của người dùng. Các truy vấn của bạn phải chính xác, không được bỏ ngắn rút gọn.
Nếu bạn cần tra cứu hai hoặc nhiều thông tin riêng biệt, bạn có thể làm điều đó!. Trả lời câu hỏi bằng tiếng Việt(Vietnamese), không được dùng ngôn ngữ khác. Bạn chỉ cần tách câu hỏi khi cần thiết hoặc giữ nguyên câu hỏi vui lòng là câu hỏi không phải tên của người dùng"""
prompt = ChatPromptTemplate.from_messages(
[
("system", system),
("human", "{question}"),
]
)
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.0)
structured_llm = llm.with_structured_output(Search)
query_analyzer = {"question": RunnablePassthrough()} | prompt | structured_llm
text = query_analyzer.invoke(query)
return text
def chat_llama3(prompt_query):
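    """Send the prompt to Groq's llama3-70b-8192 model and return the reply text,
    or False if the API call fails."""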
try:
chat_completion = client.chat.completions.create(
messages=[
                {
                    "role": "system",
                    "content": "Bạn là một trợ lý trung thực, trả lời dựa trên nội dung tài liệu được cung cấp. Chỉ trả lời liên quan đến câu hỏi một cách đầy đủ chính xác, không bỏ sót thông tin."
                },
{
"role": "user",
"content": f"{prompt_query}",
}
],
model="llama3-70b-8192",
temperature=0.0,
max_tokens=9000,
stop=None,
stream=False,
)
return chat_completion.choices[0].message.content
except Exception as error:
return False
def extract_multi_metadata_content(texts, tests):
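    """Concatenate the page content of every document whose source file name
    matches one of the requested file names (case-insensitive)."""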
extracted_content = []
for idx, test in enumerate(tests):
test_filename = os.path.basename(test).lower()
temp_content = []
for x in texts:
source_path = x.metadata.get('source', '')
source_filename = os.path.basename(source_path).lower()
if source_filename == test_filename:
temp_content.append(x.page_content)
if not temp_content:
            print(f"[!] No content found for file {test_filename}")
if idx == 0:
extracted_content.append(f"Dữ liệu của {test}:\n{''.join(temp_content)}")
else:
extracted_content.append(''.join(temp_content))
return '\n'.join(extracted_content)
def find_matching_files_in_docs_12_id(text, id):
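    """Scan the ../data folder for files whose names appear in the question text.

    Returns a tuple (matching_files, matching_terms): the set of file names found
    on disk and the original question terms that matched them, in order of
    occurrence.
    """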
base_dir = os.path.dirname(os.path.abspath(__file__))
directory_path = os.path.join(base_dir, "..", "data")
    # Normalize the path to an absolute path
directory_path = os.path.abspath(directory_path)
folder_path = directory_path
search_terms = []
search_terms_old = []
matching_index = []
search_origin = re.findall(r'\b\w+\.\w+\b|\b\w+\b', text)
search_terms_origin = []
for word in search_origin:
if '.' in word:
search_terms_origin.append(word)
else:
search_terms_origin.extend(re.findall(r'\b\w+\b', word))
file_names_with_extension = re.findall(r'\b\w+\.\w+\b|\b\w+\b', text.lower())
file_names_with_extension_old = re.findall(r'\b(\w+\.\w+)\b', text)
for file_name in search_terms_origin:
if "." in file_name:
term_position = search_terms_origin.index(file_name)
search_terms_old.append(file_name)
for file_name in file_names_with_extension_old:
if "." in file_name:
search_terms_old.append(file_name)
for file_name in file_names_with_extension:
search_terms.append(file_name)
clean_text_old = text
clean_text = text.lower()
search_terms_old1 = list(set(search_terms_old))
for term in search_terms_old:
clean_text_old = clean_text_old.replace(term, '')
for term in search_terms:
clean_text = clean_text.replace(term, '')
words_old = re.findall(r'\b\w+\b', clean_text_old)
search_terms_old.extend(words_old)
matching_files = set()
matching_files_old = set()
for root, dirs, files in os.walk(folder_path):
for file in files:
for term in search_terms:
if term.lower() in file.lower():
term_position = search_terms.index(term)
term_value = search_terms_origin[term_position]
matching_files.add(file)
matching_index.append(term_position)
break
matching_files_old1 = []
matching_index.sort()
for x in matching_index:
matching_files_old1.append(search_terms_origin[x])
return matching_files, matching_files_old1
def convert_xlsx_to_csv(xlsx_file_path, csv_file_path):
df = pd.read_excel(xlsx_file_path)
df.to_csv(csv_file_path, index=False)
def save_list_CSV_id(file_list, id):
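    """Load each CSV file (converting .xlsx files to .csv first) from
    ./user_file/{id}/ and return their contents concatenated into one text block."""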
text = ""
for x in file_list:
if x.endswith('.xlsx'):
old = f"./user_file/{id}/{x}"
new = old.replace(".xlsx", ".csv")
convert_xlsx_to_csv(old, new)
x = x.replace(".xlsx", ".csv")
loader1 = CSVLoader(f"./user_file/{id}/{x}")
docs1 = loader1.load()
text += f"Dữ liệu file {x}:\n"
for z in docs1:
text += z.page_content + "\n"
return text
def merge_files(file_set, file_list):
    """Map each base name in file_list to its matching full file name in file_set."""
merged_files = {}
for file_name in file_list:
name = file_name.split('.')[0]
for f in file_set:
if name in f:
merged_files[name] = f
break
return merged_files
def replace_keys_with_values(original_dict, replacement_dict):
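    """Return a copy of original_dict in which every key that appears in
    replacement_dict is replaced by its mapped value."""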
new_dict = {}
for key, value in original_dict.items():
if key in replacement_dict:
new_key = replacement_dict[key]
new_dict[new_key] = value
else:
new_dict[key] = value
return new_dict
def aws1_csv_id(new_dict_csv, id):
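    """Answer path for spreadsheet files: load the referenced CSV/XLSX files for
    this user id and return (file contents, combined sub-questions, sources)."""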
text = ""
query_all = ""
keyword = []
for key, value in new_dict_csv.items():
query_all += value
keyword.append(key)
test = save_list_CSV_id(keyword, id)
text += test
sources = ",".join(keyword)
return text, query_all, sources
def chat_gemini(prompt):
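    """Send the prompt to the gemini-2.0-flash model with deterministic settings
    and return the response text."""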
generation_config = {
"temperature": 0.0,
"top_p": 0.0,
"top_k": 0,
"max_output_tokens": 8192,
}
safety_settings = [
{
"category": "HARM_CATEGORY_HARASSMENT",
"threshold": "BLOCK_MEDIUM_AND_ABOVE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"threshold": "BLOCK_MEDIUM_AND_ABOVE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"threshold": "BLOCK_MEDIUM_AND_ABOVE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"threshold": "BLOCK_MEDIUM_AND_ABOVE"
},
]
model = genai.GenerativeModel(model_name="gemini-2.0-flash",
generation_config=generation_config,
safety_settings=safety_settings)
convo = model.start_chat(history=[])
convo.send_message(prompt)
return convo.last.text
def question_answer(question):  # Generate the answer text
    try:
        answer = chat_gemini(question)
        return answer
    except Exception:
        # Fall back to the Groq llama3 model if Gemini fails
        completion = chat_llama3(question)
        if completion:
            return completion
# Steps 5 and 6
def aws1_all_id(new_dict, text_alls, id, thread_id):
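    """Per-file retrieval pipeline for questions that name specific files.

    For each (file, sub-question) pair: split the file content, index it in
    Chroma, fuse BM25 and dense retrieval, rerank with Cohere, and ask the LLM to
    answer from the top-ranked passages. Returns (answer, most relevant passage,
    source file name).
    """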
answer = ""
COHERE_API_KEY1 = os.getenv("COHERE_API_KEY_2")
os.environ["COHERE_API_KEY"] = COHERE_API_KEY1
answer_relevant = ""
directory = ""
for key, value in new_dict.items():
query = value
        keyword, keyword2 = find_matching_files_in_docs_12_id(query, id)  # Check whether the sub-question references any file names
        data = extract_multi_metadata_content(text_alls, keyword)  # Gather all content belonging to the matched files
if keyword:
            # Extraction -> Splitting -> Embedding
file_name = next(iter(keyword))
text_splitter = CharacterTextSplitter(chunk_size=3200, chunk_overlap=1500)
texts_data = text_splitter.split_text(data)
            # Step 5: retrieve from the per-file vector store
if check_persist_directory(id, file_name):
vectordb_query = Chroma(persist_directory=f"./vector_database/{file_name}", embedding_function=embeddings)
else:
vectordb_query = Chroma.from_texts(texts_data,
embedding=embeddings,
persist_directory=f"./vector_database/{file_name}")
            # Step 6
            # Fusion retrieval: BM25 + dense retriever
k_1 = len(texts_data)
            retriever = vectordb_query.as_retriever(search_kwargs={"k": k_1})  # Dense retrieval from the vector database
            # Set up BM25
            bm25_retriever = BM25Retriever.from_texts(texts_data)  # Keyword retrieval over the extracted text chunks
            bm25_retriever.k = k_1  # Number of chunks to return
            # Combine both retrievers
            ensemble_retriever = EnsembleRetriever(retrievers=[bm25_retriever, retriever],
                                                   weights=[0.7, 0.4])
            # Run the fused retrieval
docs = ensemble_retriever.get_relevant_documents(f"{query}")
            # Step 6 (continued): cache the retrieved chunks in FAISS and reload them for querying
path = f"./vector_database/FAISS/{file_name}"
if check_path_exists(path):
docsearch = FAISS.load_local(path, embeddings, allow_dangerous_deserialization=True)
else:
docsearch = FAISS.from_documents(docs, embeddings)
docsearch.save_local(f"./vector_database/FAISS/{file_name}")
docsearch = FAISS.load_local(path, embeddings, allow_dangerous_deserialization=True)
k_2 = len(docs)
            # Step 7: use Cohere to rerank the relevant passages
            compressor = CohereRerank(top_n=10, model="rerank-multilingual-v3.0")
            retrieve3 = docsearch.as_retriever(search_kwargs={"k": k_2})
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=retrieve3
)
            compressed_docs = compression_retriever.get_relevant_documents(f"{query}")  # Fetch the 10 most relevant passages
            # Step 8
if compressed_docs:
data = compressed_docs[0].page_content
                text = ''.join(map(lambda x: x.page_content, compressed_docs))  # Combine the 10 most relevant passages into the answering context
                prompt_document = f"Dựa vào nội dung sau:{text}. Hãy trả lời câu hỏi sau đây: {query}. Mà không thay đổi nội dung mà mình đã cung cấp"
                answer_for = question_answer(prompt_document)  # Generate the answer with the LLM
answer += answer_for + "\n"
answer_relevant = data
directory = file_name
return answer, answer_relevant, directory
def extract_content_between_keywords(query, keywords):
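    """For every keyword found in the query, capture the text surrounding it
    (between the previous and the next keyword) and return a dict of
    {keyword: text segment}."""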
contents = {}
num_keywords = len(keywords)
keyword_positions = []
for i in range(num_keywords):
keyword = keywords[i]
keyword_position = query.find(keyword)
keyword_positions.append(keyword_position)
if keyword_position == -1:
continue
next_keyword_position = len(query)
for j in range(i + 1, num_keywords):
next_keyword = keywords[j]
next_keyword_position = query.find(next_keyword)
if next_keyword_position != -1:
break
if i == 0:
content_before = query[:keyword_position].strip()
else:
content_before = query[keyword_positions[i - 1] + len(keywords[i - 1]):keyword_position].strip()
if i == num_keywords - 1:
content_after = query[keyword_position + len(keyword):].strip()
else:
content_after = query[keyword_position + len(keyword):next_keyword_position].strip()
content = f"{content_before} {keyword} {content_after}"
contents[keyword] = content
return contents
def handle_query(question, text_all, compression_retriever, id, thread_id):
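    """Answer a single sub-question.

    If the question names specific files, spreadsheet files are answered through
    the CSV pipeline and everything else through aws1_all_id. Otherwise the
    global reranking retriever is used and, when nothing relevant is found, the
    spreadsheet files in ./data are searched directly. Returns
    (answer, relevant passage, source).
    """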
COHERE_API_KEY_3 = os.environ["COHERE_API_KEY_3"]
os.environ["COHERE_API_KEY"] = COHERE_API_KEY_3
query = question
x = query
    # Look for file-name keywords in the question
keyword, key_words_old = find_matching_files_in_docs_12_id(query, id)
file_list = keyword
    # Step 4: branch where the question names specific files
if file_list:
list_keywords2 = list(key_words_old)
contents1 = extract_content_between_keywords(query, list_keywords2)
merged_result = merge_files(keyword, list_keywords2)
original_dict = contents1
replacement_dict = merged_result
new_dict = replace_keys_with_values(original_dict, replacement_dict)
files_to_remove = [filename for filename in new_dict.keys() if
filename.endswith('.xlsx') or filename.endswith('.csv')]
removed_files = {}
for filename in files_to_remove:
removed_files[filename] = new_dict[filename]
for filename in files_to_remove:
new_dict.pop(filename)
        text_csv, query_csv, source = aws1_csv_id(removed_files, id)
        prompt_csv = ""
        answer_csv = ""
        if text_csv:
            prompt_csv = f"Dựa vào nội dung sau: {text_csv}. Hãy trả lời câu hỏi sau đây: {query_csv}. Bằng tiếng Việt"
            answer_csv = question_answer(prompt_csv)
answer_document, data_relevant, source = aws1_all_id(new_dict, text_all, id, thread_id)
answer_all1 = answer_document + answer_csv
return answer_all1, data_relevant, source
    # Step 4: branch where no file name appears in the question
else:
compressed_docs = compression_retriever.get_relevant_documents(f"{query}")
relevance_score_float = float(compressed_docs[0].metadata['relevance_score'])
        # When the best match has no relevance, fall back to searching the spreadsheet files in ./data
        if relevance_score_float <= 0:
            documents1 = []
            for file in os.listdir("./data"):
                if file.endswith('.csv'):
                    csv_path = os.path.join("./data", file)
                    loader = UnstructuredCSVLoader(csv_path)
                    documents1.extend(loader.load())
                elif file.endswith('.xlsx'):
                    excel_path = os.path.join("./data", file)
                    loader = UnstructuredExcelLoader(excel_path)
                    documents1.extend(loader.load())
text_splitter_csv = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=2200, chunk_overlap=1500)
texts_csv = text_splitter_csv.split_documents(documents1)
vectordb_csv = Chroma.from_documents(documents=texts_csv,
embedding=embeddings, persist_directory=f'./vector_database/csv/{thread_id}')
k = len(texts_csv)
retriever_csv = vectordb_csv.as_retriever(search_kwargs={"k": k})
llm = Cohere(temperature=0)
compressor_csv = CohereRerank(top_n=3, model="rerank-multilingual-v3.0")
compression_retriever_csv = ContextualCompressionRetriever(
base_compressor=compressor_csv, base_retriever=retriever_csv
)
compressed_docs_csv = compression_retriever_csv.get_relevant_documents(f"{query}")
file_path = compressed_docs_csv[0].metadata['source']
if file_path.endswith('.xlsx'):
new = file_path.replace(".xlsx", ".csv")
convert_xlsx_to_csv(file_path, new)
loader1 = CSVLoader(new)
else:
loader1 = CSVLoader(file_path)
docs1 = loader1.load()
text = " "
for z in docs1:
text += z.page_content + "\n"
prompt_csv = f"Dựa vào nội dung sau: {text}. Hãy trả lời câu hỏi sau đây: {query}. Bằng tiếng Việt"
answer_csv = question_answer(prompt_csv)
            # Return the same (answer, relevant passage, source) shape as the other branches
            return answer_csv, compressed_docs_csv[0].page_content, os.path.basename(file_path)
else:
            # Step 4: extract content from the most relevant file
file_path = compressed_docs[0].metadata['source']
file_path = file_path.replace('\\', '/')
if file_path.endswith(".pdf"):
loader = PyPDFLoader(file_path)
            elif file_path.endswith('.docx') or file_path.endswith('.doc'):
loader = Docx2txtLoader(file_path)
elif file_path.endswith('.txt'):
loader = TextLoader(file_path, encoding="utf8")
elif file_path.endswith('.pptx'):
loader = UnstructuredPowerPointLoader(file_path)
elif file_path.endswith('.xml'):
loader = UnstructuredXMLLoader(file_path)
elif file_path.endswith('.html'):
loader = UnstructuredHTMLLoader(file_path)
elif file_path.endswith('.json'):
loader = TextLoader(file_path)
elif file_path.endswith('.md'):
loader = UnstructuredMarkdownLoader(file_path)
elif file_path.endswith('.xlsx'):
file_path_new = file_path.replace(".xlsx", ".csv")
convert_xlsx_to_csv(file_path, file_path_new)
loader = CSVLoader(file_path_new)
elif file_path.endswith('.csv'):
loader = CSVLoader(file_path)
            # Extraction -> Splitting -> Embedding (split the file and embed it into Chroma)
text_splitter = CharacterTextSplitter(chunk_size=3200, chunk_overlap=1500)
texts = text_splitter.split_documents(loader.load())
k_1 = len(texts)
            # Step 5: store in Chroma
file_name = os.path.basename(file_path)
if check_persist_directory(id, file_name):
vectordb_file = Chroma(persist_directory=f"./vector_database/{file_name}",
embedding_function=embeddings)
else:
vectordb_file = Chroma.from_documents(texts,
embedding=embeddings,
persist_directory=f"./vector_database/{file_name}")
            # Step 6: set up BM25
            retriever_file = vectordb_file.as_retriever(search_kwargs={"k": k_1})  # Dense retrieval from the Chroma vector store
            bm25_retriever = BM25Retriever.from_documents(texts)  # Keyword retrieval over the chunks extracted in the splitting step
            bm25_retriever.k = k_1
            # Combine both retrievers
            ensemble_retriever = EnsembleRetriever(retrievers=[bm25_retriever, retriever_file],
                                                   weights=[0.6, 0.4])
docs = ensemble_retriever.get_relevant_documents(f"{query}")
            # Step 6 (continued): cache the retrieved chunks in FAISS
            path = f"./vector_database/FAISS/{file_name}"
            if check_path_exists(path):  # Reuse the FAISS index if it already exists
docsearch = FAISS.load_local(path, embeddings, allow_dangerous_deserialization=True)
else:
docsearch = FAISS.from_documents(docs, embeddings)
docsearch.save_local(f"./vector_database/FAISS/{file_name}")
docsearch = FAISS.load_local(path, embeddings, allow_dangerous_deserialization=True)
k_2 = len(docs)
            # Use Cohere to rerank the retrieved passages one more time
            retrieve3 = docsearch.as_retriever(search_kwargs={"k": k_2})
            compressor_file = CohereRerank(top_n=10, model="rerank-multilingual-v3.0")
compression_retriever_file = ContextualCompressionRetriever(
base_compressor=compressor_file, base_retriever=retrieve3
)
compressed_docs_file = compression_retriever_file.get_relevant_documents(f"{x}")
            query = question
            text = ''.join(map(lambda x: x.page_content, compressed_docs_file))  # Combine the relevant passages into the answering context
            prompt = f"Dựa vào nội dung sau:{text}. Hãy trả lời câu hỏi sau đây: {query}. Mà không thay đổi, chỉnh sửa nội dung mà mình đã cung cấp"
            answer = question_answer(prompt)  # Generate the answer
list_relevant = compressed_docs_file[0].page_content
source = file_name
return answer, list_relevant, source
def handle_query_upgrade_keyword_old(query_all, text_all, id):
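    """Top-level entry point: split the user question into sub-queries, answer
    each one in parallel with handle_query, then synthesize a final answer.

    Returns (final answer, list of relevant passages, list of sources).
    """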
COHERE_API_KEY_2 = os.environ["COHERE_API_KEY_2"]
    # Step 3
    os.environ["COHERE_API_KEY"] = COHERE_API_KEY_2
    # Split the user question into sub-queries
    test = query_analyzer(query_all)  # The analyzer breaks the question into smaller, self-contained queries
    test_string = str(test)
    # Extract the list of sub-queries
    matches = re.findall(r"'([^']*)'", test_string)
vectordb = Chroma(persist_directory=f"./vector_database", embedding_function=embeddings)
k = len(text_all)
retriever = vectordb.as_retriever(search_kwargs={"k": k})
compressor = CohereRerank(top_n=5, model="rerank-multilingual-v3.0")
compression_retriever = ContextualCompressionRetriever(base_compressor=compressor, base_retriever= retriever)
with ThreadPoolExecutor() as executor:
futures = {executor.submit(handle_query, query, text_all, compression_retriever, id, i): query for i, query in
enumerate(matches)}
results = []
data_relevant = []
sources = []
for future in as_completed(futures):
try:
result, list_data, list_source = future.result()
results.append(result)
data_relevant.append(list_data)
sources.append(list_source)
except Exception as e:
print(f'An error occurred: {e}')
answer_all = ''.join(results)
prompt1 = f"Dựa vào nội dung sau:{answer_all}. Hãy trả lời câu hỏi sau đây: {query_all}. Mà không thay đổi, chỉnh sửa nội dung mà mình đã cung cấp"
answer1 = question_answer(prompt1)
return answer1, data_relevant, sources
# text_all1 = extract_data2()
# data = handle_query_upgrade_keyword_old("Tên người làm cùng khóa luận tốt nghiệp với Võ Như Ý trong file KLTN_20133118_20133080_Tuy_chinh_chatbot ",text_all1,"hello")
# print(data[0])