|
|
from pymilvus import MilvusClient, connections |
|
|
from pymilvus import Collection, DataType, FieldSchema, CollectionSchema, utility |
|
|
|
|
|
from sentence_transformers import SentenceTransformer |
|
|
import re |
|
|
|
|
|
import gradio as gr |
|
|
|
|
|
import requests |
|
|
import json |
|
|
|
|
|
def connect_milvus(endpoint, token):
    """Open the default Milvus connection and build a client for it.

    Args:
        endpoint: Milvus/Zilliz cluster URI.
        token: API token used to authenticate against the cluster.

    Returns:
        MilvusClient: a client bound to the given endpoint.
    """
    # Bug fix: the original ignored its own parameters and read the
    # module-level CLUSTER_ENDPOINT / TOKEN globals instead.
    connections.connect(
        "default",
        uri=endpoint,
        token=token
    )

    client = MilvusClient(
        uri=endpoint,
        token=token
    )
    # Bug fix: the client was created and then discarded; return it so
    # callers can actually use it (original callers ignore the return).
    return client
|
|
|
|
|
|
|
|
def create_vectordb(COLLECTION_NAME, EMBEDDING_DIM=1024, entities=None):
    """Drop (if present) and recreate the chunk collection, insert the
    data, build a COSINE IVF_FLAT index, and load it into memory.

    Args:
        COLLECTION_NAME: name of the collection to (re)create.
        EMBEDDING_DIM: dimensionality of the FLOAT_VECTOR field.
        entities: column-ordered insert data [ids, refs, texts, embeddings].
            When None, falls back to the module-level globals
            `ids`, `rules`, `chunks`, `embeddings` (original behavior).

    Returns:
        Collection: the loaded pymilvus collection.
    """
    # Bug fix: the original re-assigned COLLECTION_NAME to a hard-coded
    # string, silently ignoring the caller's argument.
    if utility.has_collection(COLLECTION_NAME):
        utility.drop_collection(COLLECTION_NAME)
        print('drop completed!')

    chunk_id = FieldSchema(
        name="chunk_id",
        dtype=DataType.INT64,
        is_primary=True,
        description="Chunk ID"
    )

    chunk_ref = FieldSchema(
        name="chunk_ref",
        dtype=DataType.VARCHAR,
        max_length=512,
        description="Chunk ref")

    chunk_text = FieldSchema(
        name="chunk_text",
        dtype=DataType.VARCHAR,
        max_length=4096,
        description="Chunk text")

    chunk_embedding = FieldSchema(
        name="chunk_embedding",
        dtype=DataType.FLOAT_VECTOR,
        dim=EMBEDDING_DIM,
        description="Chunk Embedding")

    schema = CollectionSchema(
        fields=[chunk_id, chunk_ref, chunk_text, chunk_embedding],
        auto_id=False,
        description="Vector Store Chunk using multilingual-e5-large")

    collection = Collection(
        name=COLLECTION_NAME,
        schema=schema
    )

    if entities is None:
        # Original behavior: read column data from module-level globals.
        # NOTE(review): `ids`, `rules`, `chunks`, `embeddings` are not
        # defined anywhere in this file — confirm they exist at call time,
        # or pass `entities` explicitly.
        entities = [ids, rules, chunks, embeddings]
    collection.insert(entities)

    # flush() forces the inserted segments to be sealed/persisted before
    # indexing.
    collection.flush()

    index_params = {
        "index_type": "IVF_FLAT",
        "metric_type": "COSINE",
        "params": {}
    }
    collection.create_index(
        field_name=chunk_embedding.name,
        index_params=index_params
    )

    collection.load()
    # Bug fix: return the collection (the original returned None).
    return collection
|
|
|
|
|
|
|
|
def load_vectordb(COLLECTION_NAME):
    """Return a handle to an existing Milvus collection by name.

    Requires that `connections.connect(...)` has already been called.
    """
    return Collection(name=COLLECTION_NAME)
|
|
|
|
|
def load_model(model_name):
    """Load and return a SentenceTransformer embedding model by name."""
    return SentenceTransformer(model_name)
|
|
|
|
|
def search_chunks(query, topk=5):
    """Embed `query` and retrieve the top-k most similar chunks.

    Args:
        query: natural-language question to embed with the global `model`.
        topk: maximum number of hits to return.

    Returns:
        tuple[list, list]: (refs, relevant_chunks) — the `chunk_ref` and
        `chunk_text` values of the matching hits, in ranking order.
    """
    search_params = {
        "metric_type": "COSINE",
        "params": {"level": 2}
    }

    collection = load_vectordb(COLLECTION_NAME)

    embed_query = model.encode(query)
    results = collection.search(
        [embed_query],
        anns_field="chunk_embedding",
        param=search_params,
        limit=topk,
        guarantee_timestamp=1,
        output_fields=['chunk_ref', 'chunk_text']
    )

    refs, relevant_chunks = [], []

    # Bug fix: the original regex-parsed str(hit) with the pattern
    # r"'chunk_ref': '([^']*)', 'chunk_text': '([^']*)'", which silently
    # drops or truncates any hit whose text contains a quote character.
    # Read the requested output fields from the hit entity directly.
    for hit in results[0]:
        refs.append(hit.entity.get('chunk_ref'))
        relevant_chunks.append(hit.entity.get('chunk_text'))

    return refs, relevant_chunks
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def response_saola2m(PROMPT, temperature=0.7):
    """Call the FPT AI SaoLa2M completion endpoint and return the text.

    Args:
        PROMPT: full prompt string sent to the model.
        temperature: sampling temperature forwarded to the API.

    Returns:
        str: the text of the first completion choice.

    Raises:
        requests.HTTPError: if the API responds with a non-2xx status.
        requests.Timeout: if the API does not answer within 60 seconds.
    """
    url = "https://api.fpt.ai/nlp/llm/api/v1/completions"
    # SECURITY(review): API key is hard-coded in source. Move it to an
    # environment variable / secret store and rotate this key — it is
    # effectively published with the file.
    headers = {
        'Authorization': 'Bearer sk-8oIY6XLrokZEJMl6aopCuQ',
        'Content-Type': 'application/json',
    }

    data = {
        "model": "SaoLa2M-instruct",
        "prompt": PROMPT,
        "temperature": temperature,
        "max_tokens": 512
    }

    # Robustness: add a timeout (the original could hang forever) and fail
    # loudly on an error status instead of crashing later while indexing
    # into a malformed JSON body.
    response = requests.post(url, headers=headers, json=data, timeout=60)
    response.raise_for_status()
    response_json = response.json()
    result = response_json['choices'][0]['text']
    return result
|
|
|
|
|
# --- Application configuration --------------------------------------------
# SECURITY(review): the cluster endpoint and token are hard-coded secrets
# committed to source; load them from environment variables and rotate the
# token before sharing this file.
CLUSTER_ENDPOINT = "https://in03-d63abe0e8a8f47b.api.gcp-us-west1.zillizcloud.com"

TOKEN = "6529d1f59d5e3d38d6135ec9ddf5820a9c38e3db6ca22c53b3aa2ad9c9148e29ef0f11c312bee71f1544da350f85320b598a30f3"

# Milvus collection holding the road-traffic-law chunks.
COLLECTION_NAME = "giao_thong_duong_bo"

# Hugging Face model id for the dense embedding model used at query time.
MODEL_NAME = 'qminh369/datn-dense_embedding-giao_thong_duong_bo_2008'

# Module-level side effects: open the Milvus connection and load the
# embedding model once at import time so request handlers can reuse them.
connect_milvus(CLUSTER_ENDPOINT, TOKEN)

model = load_model(MODEL_NAME)
|
|
|
|
|
def answer(question):
    """Answer `question` via RAG: retrieve chunks, prompt the LLM, cite.

    Args:
        question: the user's natural-language question.

    Returns:
        str: the LLM response, with a citation line appended when the
        retrieval step produced at least one reference.
    """
    refs, relevant_chunks = search_chunks(question)

    INSTRUCTION = "Hãy trả lời câu hỏi sau dựa trên thông tin được cung cấp. Nếu thông tin được cung cấp không liên quan dến câu hỏi thì trả về câu trả lời 'Không có thông tin'"

    INPUT_TEXT = "\n".join(relevant_chunks)

    PROMPT = f"<s>[INST] Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{INSTRUCTION}\n\n{question}\n\n### Input:\n{INPUT_TEXT}\n\n[/INST]### Response:\n"
    print(PROMPT)

    response = response_saola2m(PROMPT, temperature=0.7)

    # Bug fix: the original indexed refs[0] unconditionally and raised
    # IndexError whenever the vector search returned no hits.
    if refs:
        response = response + "\n" + "Trích dẫn từ: " + refs[0]

    return response.strip()
|
|
|
|
|
def chatbot(question, history=None):
    """Gradio handler: answer `question` and extend the chat history.

    Args:
        question: user message from the text input.
        history: running list of (question, answer) pairs; a fresh list is
            created when None.

    Returns:
        tuple: (history, history) — one copy feeds the chatbot display,
        the other the Gradio state.
    """
    # Bug fix: the original used a mutable default argument (history=[]),
    # which is shared across calls and leaks conversation state between
    # independent sessions.
    if history is None:
        history = []
    output = answer(question)
    history.append((question, output))
    return history, history
|
|
|
|
|
# Wire the chatbot handler into a simple Gradio interface: a text box for
# the question plus a hidden "state" input that carries the conversation
# history across turns; outputs render as a chatbot widget + updated state.
demo = gr.Interface(
    fn=chatbot,
    inputs=["text", "state"],
    outputs=["chatbot", "state"])

# queue() serializes concurrent requests; share=True exposes a public
# gradio.live URL in addition to the local server.
demo.queue().launch(share=True)
|
|
|
|
|
|
|
|
|
|
|
|