# compliance/backend.py
# Author: ANAMARIAMAGALHAES — commit 839e662 ("Update backend.py")
from functools import lru_cache

from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import Ollama
from langchain_community.vectorstores import FAISS
# Path of the persisted FAISS vector index (produced by a separate ingestion step).
INDEX_PATH = "data/index"

# Prompt that restricts the LLM to answering strictly from the retrieved context,
# with a fixed Portuguese fallback sentence when the answer is not in the documents.
# NOTE: the template text is user-facing runtime output — do not translate or reflow it.
QA_PROMPT = PromptTemplate(
    input_variables=["context", "question"],
    template="""
Você é um assistente de compliance. Responda com base apenas no contexto fornecido.
Se não souber, diga: "Desculpe, não encontrei essa informação nos documentos."
Contexto:
{context}
Pergunta:
{question}
"""
)
@lru_cache(maxsize=1)
def _build_qa_chain():
    """Build and cache the RetrievalQA chain (embeddings, FAISS index, LLM).

    Construction is expensive — it loads a sentence-transformers model and
    deserializes the FAISS index from disk — so it runs once per process
    instead of once per question.
    """
    embedding = HuggingFaceEmbeddings(
        model_name="sentence-transformers/paraphrase-albert-small-v2",
        encode_kwargs={"normalize_embeddings": True},
    )
    # allow_dangerous_deserialization is required by FAISS.load_local for
    # pickled indexes; acceptable only because INDEX_PATH is produced by
    # this project, not by untrusted input.
    db = FAISS.load_local(INDEX_PATH, embedding, allow_dangerous_deserialization=True)
    llm = Ollama(model="tinyllama", temperature=0.3)
    return RetrievalQA.from_chain_type(
        llm=llm,
        retriever=db.as_retriever(),
        return_source_documents=False,
        chain_type_kwargs={"prompt": QA_PROMPT},
    )


def ask_question(question):
    """Answer *question* using the compliance document index.

    Args:
        question: The user's question (the prompt expects Portuguese).

    Returns:
        The LLM's answer string (the ``"result"`` field of the chain output).
    """
    qa = _build_qa_chain()  # cached: the heavy setup happens only on the first call
    result = qa.invoke({"query": question})
    return result["result"]