# app-gg.py — ToM (HuggingFace Space)
# History: renamed from app.py (commit e92077c, verified) by caarleexx
# ╔════════════════════════════════════════════════════════════════════════════╗
# ║ PIPELINE v27: UI LIMPA & ESTRUTURA DE ABAS                                 ║
# ║ Layout: Chat (Aba 1) | Debug (Aba 2) | Config (Aba 3)                      ║
# ╚════════════════════════════════════════════════════════════════════════════╝
import os
import json
import re
import time
from datetime import datetime
import gradio as gr
import google.generativeai as genai
# ==================== 1. CONFIGURATION ====================
api_key = os.getenv("GOOGLE_API_KEY", "SUA_API_KEY_AQUI")
# BUG FIX: the placeholder default made `if api_key:` always truthy, so
# genai.configure() was called even with a dummy key. Only configure when a
# real key is present; otherwise the failure surfaces at request time.
if api_key and api_key != "SUA_API_KEY_AQUI":
    genai.configure(api_key=api_key)
# Two model tiers: fast default ("flash") and higher-quality ("pro"),
# selected per node via config["modelo"] in executar_no.
model_flash = genai.GenerativeModel("gemini-flash-latest")
model_pro = genai.GenerativeModel("gemini-pro-latest")
# Path of the JSON file holding the agent pipeline configuration.
ARQUIVO_CONFIG = "protocolo.json"

def carregar_protocolo():
    """Return the raw protocol JSON text from ARQUIVO_CONFIG.

    Returns:
        The file contents as a string, or "[]" (an empty agent list)
        when the file is missing or unreadable.
    """
    try:
        with open(ARQUIVO_CONFIG, "r", encoding="utf-8") as f:
            return f.read()
    # Was a bare `except:`; OSError covers missing/unreadable files without
    # also swallowing KeyboardInterrupt/SystemExit.
    except OSError:
        return "[]"
def salvar_protocolo(conteudo):
    """Validate *conteudo* as JSON and persist it to ARQUIVO_CONFIG.

    Args:
        conteudo: candidate protocol JSON text from the Config tab.

    Returns:
        A short status string for the UI label.
    """
    try:
        # Validate before touching the file so a bad edit never clobbers
        # a previously-good protocolo.json.
        json.loads(conteudo)
        with open(ARQUIVO_CONFIG, "w", encoding="utf-8") as f:
            f.write(conteudo)
        return "βœ… Salvo"
    # Was a bare `except:`; JSONDecodeError is a ValueError subclass and
    # OSError covers write failures.
    except (ValueError, OSError):
        return "❌ Erro JSON"
def ler_anexo(arquivo):
    """Read an uploaded text file and wrap it in system-attachment markers.

    Args:
        arquivo: Gradio upload object exposing a `.name` path, or None.

    Returns:
        The file content wrapped in [ANEXO SISTEMA]/[FIM ANEXO] markers,
        or "" when there is no file or it cannot be read as UTF-8 text.
    """
    if arquivo is None:
        return ""
    try:
        with open(arquivo.name, "r", encoding="utf-8") as f:
            conteudo = f.read()
    # Was a bare `except:`; keep the deliberate best-effort behaviour but
    # name the failure modes (missing file, binary/non-UTF-8 content).
    except (OSError, UnicodeDecodeError):
        return ""
    return f"\n\n[ANEXO SISTEMA: {os.path.basename(arquivo.name)}]\n{conteudo}\n[FIM ANEXO]\n"
# ==================== 3. ENGINE DE EXECUÇÃO ====================
def executar_no(timeline, config):
    """Run a single agent node against the timeline.

    Args:
        timeline: list of message dicts accumulated so far.
        config: node config with 'nome', 'missao', 'tipo_saida' and
            optionally 'modelo' ("pro" selects the pro-tier model).

    Returns:
        (timeline_entry, log_fragment, raw_model_text) — on failure the
        entry is {"role": "system", "error": ...}.
    """
    # Choose the model tier requested by this node (default: flash).
    engine = model_pro if config.get("modelo") == "pro" else model_flash
    historico = json.dumps(timeline, ensure_ascii=False, indent=2)
    prompt = f"--- TIMELINE ---\n{historico}\n----------------\nAGENTE: {config['nome']}\nMISSÃO: {config['missao']}"
    log = f"\nπŸ”Έ {config['nome']}..."
    try:
        t0 = time.time()
        resposta = engine.generate_content(prompt)
        bruto = resposta.text
        duracao = time.time() - t0
        if config['tipo_saida'] == 'json':
            # Strip markdown code fences before parsing the JSON payload.
            limpo = bruto.strip().replace('```json', '').replace('```', '')
            content = json.loads(limpo)
        else:
            content = bruto
        log += f" (OK - {duracao:.2f}s)"
        return {"role": "assistant", "agent": config['nome'], "content": content}, log, bruto
    except Exception as e:
        # Any failure (API error, bad JSON) becomes a system-error entry.
        return {"role": "system", "error": str(e)}, f" (ERRO: {e})", str(e)
# ==================== 4. ORQUESTRADOR ====================
def orquestrador(texto, arquivo, history, json_config):
    """Drive the multi-agent pipeline, streaming UI updates as it goes.

    Args:
        texto: user message from the textbox.
        arquivo: optional uploaded file object (or None).
        history: chatbot history as [user, assistant] pairs.
        json_config: protocol JSON string describing the agent chain.

    Yields:
        (history, timeline, logs) tuples for Gradio to render progressively.
    """
    # 1. Input check: merge typed text with any attachment content.
    anexo = ler_anexo(arquivo)
    full_input = f"{texto}\n{anexo}".strip()
    if not full_input:
        yield history, {}, "Sem input."
        return
    # 2. Setup: append the user turn and parse the agent protocol.
    history = history + [[texto + (" πŸ“Ž" if arquivo else ""), None]]
    try:
        protocolo = json.loads(json_config)
    # Was a bare `except:`; json.loads raises ValueError (JSONDecodeError)
    # for bad text and TypeError for non-string input.
    except (ValueError, TypeError):
        history[-1][1] = "❌ Erro no JSON de Configuração."
        yield history, {}, "Erro JSON"
        return
    timeline = [{"role": "user", "content": full_input}]
    logs = f"πŸš€ START: {datetime.now().strftime('%H:%M:%S')}\n"
    history[-1][1] = "⏳ Iniciando anÑlise..."
    yield history, timeline, logs
    # 3. Loop: run each configured agent in sequence over the growing timeline.
    final_response = ""
    for cfg in protocolo:
        history[-1][1] = f"βš™οΈ {cfg['nome']} trabalhando..."
        yield history, timeline, logs
        res, log_add, raw = executar_no(timeline, cfg)
        timeline.append(res)
        logs += log_add + "\n"
        if cfg['tipo_saida'] == 'texto':
            # BUG FIX: on node failure executar_no returns an error dict with
            # no 'content' key; res['content'] raised KeyError. Fall back to
            # showing the error text instead of crashing the generator.
            final_response = res.get('content', res.get('error', ''))
            history[-1][1] = final_response
            yield history, timeline, logs
    logs += "βœ… FIM."
    yield history, timeline, logs
# ==================== 5. UI LIMPA (v27) ====================
def ui_clean():
    """Build the three-tab Gradio interface: Chat | Debug | Config.

    Returns the gr.Blocks app; launching is left to the caller.
    """
    # Hide the Gradio footer and the container border for a cleaner look.
    css = """
    footer {display: none !important;}
    .contain {border: none !important;}
    """
    # Initial protocol JSON shown in the Config tab's code editor.
    config_init = carregar_protocolo()
    with gr.Blocks(title="AI Forensics", css=css, theme=gr.themes.Soft()) as app:
        with gr.Tabs():
            # === TAB 1: CHAT (clean investigator view) ===
            with gr.Tab("πŸ’¬ Investigador"):
                chatbot = gr.Chatbot(
                    label="",
                    show_label=False,
                    height=600,
                    show_copy_button=True,
                    render_markdown=True
                )
                with gr.Row():
                    with gr.Column(scale=10):
                        txt_in = gr.Textbox(
                            show_label=False,
                            placeholder="Descreva o caso ou instruΓ§Γ£o...",
                            lines=1,
                            max_lines=5,
                            container=False
                        )
                    with gr.Column(scale=1, min_width=50):
                        file_in = gr.UploadButton(
                            "πŸ“Ž",
                            file_types=[".txt", ".md", ".csv", ".json"],
                            size="sm"
                        )
                    with gr.Column(scale=1, min_width=80):
                        btn_send = gr.Button("Enviar", variant="primary", size="sm")
                # Subtle visual feedback showing the attached file name.
                file_status = gr.Markdown("", visible=True)
                file_in.upload(lambda x: f"πŸ“Ž Anexo: {os.path.basename(x.name)}", file_in, file_status)
            # === TAB 2: DEBUG (pipeline internals, hidden from the chat view) ===
            with gr.Tab("πŸ•΅οΈ DepuraΓ§Γ£o"):
                with gr.Row():
                    out_dna = gr.JSON(label="DNA (Timeline)")
                    out_logs = gr.Textbox(label="Logs do Sistema", lines=20)
            # === TAB 3: CONFIG (technical protocol editor) ===
            with gr.Tab("βš™οΈ Config"):
                with gr.Row():
                    btn_save = gr.Button("Salvar Config")
                    lbl_save = gr.Label(show_label=False)
                code_json = gr.Code(value=config_init, language="json", label="protocolo.json")
                btn_save.click(salvar_protocolo, code_json, lbl_save)
        # === TRIGGERS ===
        # Both Enter in the textbox and the Send button run the orchestrator,
        # streaming updates into the chat, the timeline JSON and the logs.
        triggers = [btn_send.click, txt_in.submit]
        for trig in triggers:
            trig(
                orquestrador,
                inputs=[txt_in, file_in, chatbot, code_json],
                outputs=[chatbot, out_dna, out_logs]
            ).then(
                lambda: (None, ""),  # clear the input box and file label after sending
                outputs=[txt_in, file_status]
            )
    return app
# Entry point: build and launch the Gradio app when run as a script.
if __name__ == "__main__":
    ui_clean().launch()