Spaces:
Sleeping
Sleeping
upload frontend
Browse files- frontend/app_frontend.py +49 -0
- frontend/requirements.txt +2 -0
frontend/app_frontend.py
ADDED
|
"""Streamlit chat frontend for the RAG helpdesk assistant.

Renders a chat UI, forwards each user question to the back-end `/query`
endpoint, and displays the answer together with the distinct source
files that grounded it. Streamlit re-executes this script top-to-bottom
on every interaction, so all state lives in ``st.session_state``.
"""
import streamlit as st
import requests
import os

st.set_page_config(page_title="Assistente RAG", page_icon="🤖", layout="centered")
st.title("🤖 Assistente de Helpdesk (RAG)")

# Back-end endpoint. Overridable via the API_URL environment variable so the
# same frontend can target a non-local API; the default preserves the
# original hard-coded behaviour.
API_URL = os.getenv("API_URL", "http://127.0.0.1:8000/query")

# Seed the conversation once per browser session with a greeting.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "Olá! Como posso te ajudar com suas dúvidas de TI?"}]

# Replay the full chat history on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Digite sua pergunta aqui..."):
    # Record and echo the user's question immediately.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        with st.spinner("Analisando a base de conhecimento..."):
            try:
                payload = {"query": prompt, "top_k": 5}
                # Generous timeout: retrieval + generation can be slow.
                response = requests.post(API_URL, json=payload, timeout=120)

                if response.status_code == 200:
                    data = response.json()
                    answer = data.get("answer", "Não foi possível obter uma resposta.")
                    sources = data.get("source_chunks", [])

                    full_response = answer
                    if sources:
                        # De-duplicate and sort so the source list is stable
                        # across reruns; skip chunks missing a 'source' key
                        # instead of crashing with KeyError.
                        unique_sources = sorted(
                            {chunk.get("source") for chunk in sources} - {None}
                        )
                        full_response += "\n\n---\n*Fontes consultadas:*"
                        for source_file in unique_sources:
                            full_response += f"\n- `{os.path.basename(source_file)}`"

                    message_placeholder.markdown(full_response)
                else:
                    full_response = f"Erro da API: {response.status_code} - {response.text}"
                    message_placeholder.error(full_response)

            except requests.exceptions.RequestException as e:
                # Covers connection failures, timeouts and (in modern
                # requests) malformed JSON response bodies.
                full_response = f"Erro de conexão com o back-end: {e}"
                message_placeholder.error(full_response)

        # Persist whatever was shown (answer or error) in the history.
        st.session_state.messages.append({"role": "assistant", "content": full_response})
frontend/requirements.txt
streamlit
requests