Pudding48 committed on
Commit
701dfce
·
verified ·
1 Parent(s): d29e1bc

Delete qabot.py

Browse files
Files changed (1) hide show
  1. qabot.py +0 -60
qabot.py DELETED
@@ -1,60 +0,0 @@
import os

from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.llms import CTransformers
from langchain_community.vectorstores import FAISS
from langchain_core.runnables import RunnableSequence
8
- # Cấu hình
9
- model_file = "tinyllama-1.1b-chat-v1.0.Q8_0.gguf"
10
- vector_dp_path = "vectorstores/db_faiss"
11
-
12
- os.makedirs(vector_dp_path, exist_ok=True)
13
-
14
- # Load LLM
15
- def load_llm(model_file):
16
- llm = CTransformers(
17
- model=model_file,
18
- model_type="llama",
19
- temperature=0.01,
20
- config={'gpu_layers': 0},
21
- max_new_tokens=128,
22
- context_length=512
23
- )
24
- return llm
25
-
26
- # Tạo prompt template
27
- def creat_prompt(template):
28
- prompt = PromptTemplate(template=template, input_variables=["context","question"])
29
- return prompt
30
-
31
- # Tạo pipeline chain (thay cho LLMChain)
32
- def create_qa_chain(prompt, llm, db):
33
- llm_chain = RetrievalQA.from_chain_type(
34
- llm = llm,
35
- chain_type = "stuff",
36
- retriever =db.as_retriever(search_kwargs = {"k":1}),
37
- return_source_documents = False,
38
- chain_type_kwargs={'prompt':prompt}
39
- )
40
- return llm_chain
41
-
42
- def read_vector_db():
43
- embedding_model = GPT4AllEmbeddings(model_file = "tinyllama-1.1b-chat-v1.0.Q8_0.gguf")
44
- db = FAISS.load_local(vector_dp_path, embedding_model,allow_dangerous_deserialization=True)
45
- return db
46
-
47
- db = read_vector_db()
48
- llm = load_llm(model_file)
49
- # Mẫu prompt
50
- template = """<|im_start|>system\nSử dụng thông tin sau đây để trả lời câu hỏi. Nếu bạn không biết câu trả lời, hãy nói không biết, đừng cố tạo ra câu trả lời\n
51
- {context}<|im_end|>\n<|im_start|>user\n{question}<|im_end|>\n<|im_start|>assistant"""
52
-
53
- # Khởi tạo các thành phần
54
- prompt = creat_prompt(template)
55
- llm_chain =create_qa_chain(prompt, llm, db)
56
-
57
- # Chạy thử chain
58
- question = "Khoa công nghệ thông tin thành lập năm nào ?"
59
- response = llm_chain.invoke({"query": question})
60
- print(response)