# chat_docker/app.py
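"""Gradio chatbot for the BitDogLab board.

Indexes a reference PDF with LlamaIndex + ChromaDB and answers user
questions through a Groq-hosted LLM, applying a local text-correction
step (corretor.corrigir_texto) to each response.
"""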
import os
import gradio as gr
from typing import List
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.llms.groq import Groq
from llama_index.core.memory import ChatSummaryMemoryBuffer
import chromadb
from tempfile import TemporaryDirectory
from PyPDF2 import PdfReader
from corretor import corrigir_texto  # local text-correction helper (corretor.py)
import platform
# Embedding wrapper compatible with ChromaDB's EmbeddingFunction protocol
class ChromaEmbeddingWrapper:
    def __init__(self, model_name: str):
        self.model = HuggingFaceEmbedding(model_name=model_name)

    def __call__(self, input: List[str]) -> List[List[float]]:
        # HuggingFaceEmbedding exposes get_text_embedding_batch,
        # not embed_documents
        return self.model.get_text_embedding_batch(input)
# Initialize embedding models (one for LlamaIndex, one wrapped for ChromaDB)
embed_model = HuggingFaceEmbedding(model_name='intfloat/multilingual-e5-large')
embed_model_chroma = ChromaEmbeddingWrapper(model_name='intfloat/multilingual-e5-large')
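# Note (assumption): e5-family models are trained with "query: "/"passage: "
# prefixes; depending on the installed llama-index version, HuggingFaceEmbedding
# can set these via its query_instruction/text_instruction parameters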
# Initialize ChromaDB with an OS-appropriate persistence path
if platform.system() == "Windows":
    chroma_path = "./chroma_db"
else:
    chroma_path = "/tmp/chroma_db"  # writable inside the container
chroma_client = chromadb.PersistentClient(path=chroma_path)
collection_name = 'documentos_bitdoglab'
chroma_collection = chroma_client.get_or_create_collection(
name=collection_name,
embedding_function=embed_model_chroma
)
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
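# The storage context routes index reads/writes through the Chroma collection
# above, so embeddings computed by embed_model are persisted under chroma_path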
# Initialize the Groq LLM; the API key must come from the environment,
# never hard-coded in source
Groq_api = os.environ.get("GROQ_API_KEY")
if not Groq_api:
    raise RuntimeError("GROQ_API_KEY environment variable is not set.")
llms = Groq(model='llama3-70b-8192', api_key=Groq_api)
# Global state (populated by carregar_pdf_inicial at startup)
document_index = None
chat_engine = None
# One-time load of the reference PDF into the vector index
def carregar_pdf_inicial():
    global document_index, chat_engine
    try:
        with TemporaryDirectory() as tmpdir:
            pdf_path = "BitDogLab_info_v2.pdf"
            # Extract plain text from every page of the PDF
            text = ""
            reader = PdfReader(pdf_path)
            for page in reader.pages:
                text += page.extract_text() or ""
            # Write the text to a temp file so SimpleDirectoryReader can ingest it
            with open(os.path.join(tmpdir, "temp.txt"), "w", encoding="utf-8") as f:
                f.write(text)
            documentos = SimpleDirectoryReader(input_dir=tmpdir)
            docs = documentos.load_data()
            # Split the document into overlapping chunks before indexing
            node_parser = SentenceSplitter(chunk_size=1200, chunk_overlap=150)
            nodes = node_parser.get_nodes_from_documents(docs, show_progress=True)
            document_index = VectorStoreIndex(nodes, storage_context=storage_context, embed_model=embed_model)
            # Summarizing memory keeps the conversation within the token budget
            memory = ChatSummaryMemoryBuffer.from_defaults(llm=llms, token_limit=256)
            chat_engine = document_index.as_chat_engine(
                chat_mode='context',
                llm=llms,
                memory=memory,
                system_prompt='''Você é especialista na placa BitDog Lab e sua função é ajudar os usuários nas dúvidas e informações sobre a placa e como criar códigos.'''
            )
        print("PDF loaded successfully.")
    except Exception as e:
        print(f"Error loading PDF: {e}")
# Chat handler with post-hoc text correction
def converse_com_bot(message, chat_history):
    global chat_engine
    if chat_history is None:
        chat_history = []
    if chat_engine is None:
        # Surface the error in the chat window instead of the input box
        chat_history.append({"role": "assistant", "content": "Erro: o bot ainda não está pronto."})
        return "", chat_history
    response = chat_engine.chat(message)
    resposta_corrigida = corrigir_texto(response.response)  # apply text correction
    chat_history.append({"role": "user", "content": message})
    chat_history.append({"role": "assistant", "content": resposta_corrigida})
    return "", chat_history
# Reset the conversation (clears both the engine memory and the UI history)
def resetar_chat():
    global chat_engine
    if chat_engine:
        chat_engine.reset()
    return []
# Load the PDF at startup
carregar_pdf_inicial()
# Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# 🤖 Chatbot BitDog Lab - Seu assistente para esclarecer dúvidas")
    chatbot = gr.Chatbot(label="Conversa", type="messages")
    msg = gr.Textbox(label='Digite a sua mensagem')
    limpar = gr.Button('Limpar')

    msg.submit(converse_com_bot, [msg, chatbot], [msg, chatbot])
    limpar.click(resetar_chat, None, chatbot, queue=False)

# Bind to all interfaces on port 7860 so the app is reachable from outside the container
app.launch(server_name="0.0.0.0", server_port=7860, share=True)
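# Usage sketch (assumes llama-index, chromadb, gradio, PyPDF2 and the local
# corretor module are installed, and BitDogLab_info_v2.pdf sits next to app.py):
#   GROQ_API_KEY=gsk_... python app.py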