# ══════════════════════════════════════════════════════════════════════════════
# ║ PIPELINE v32: 100% HF SPACES COMPATIBLE | ZERO WARNINGS                    ║
# ║ Fixes: Chatbot type='messages' | gr.JSON without 'lines'                   ║
# ══════════════════════════════════════════════════════════════════════════════
import os
import json
import re
from datetime import datetime

import gradio as gr
import google.generativeai as genai

import warnings
warnings.filterwarnings("ignore")
# ==================== 1. CONFIGURATION ====================
api_key = os.getenv("GOOGLE_API_KEY")  # no placeholder default: unset key means demo mode
if api_key:
    genai.configure(api_key=api_key)
    model_flash = genai.GenerativeModel("gemini-flash-latest")
    model_pro = genai.GenerativeModel("gemini-pro-latest")
else:
    model_flash = model_pro = None

ARQUIVO_CONFIG = "protocolo.json"
ARQUIVO_HISTORY = "history_v32.json"
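# With both models None, every entry point degrades gracefully: the planner
# returns its fallback plan and the executor reports the missing key.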
# ==================== 2. UTILITIES ====================
def carregar_protocolo():
    try:
        with open(ARQUIVO_CONFIG, "r", encoding="utf-8") as f:
            return f.read()
    except Exception:
        return "[]"

def salvar_protocolo(conteudo):
    try:
        json.loads(conteudo)  # validate before writing
        with open(ARQUIVO_CONFIG, "w", encoding="utf-8") as f:
            f.write(conteudo)
        return "✅ Saved"
    except Exception:
        return "❌ JSON error"
def carregar_history():
    try:
        with open(ARQUIVO_HISTORY, "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception:
        return []

def salvar_history(history):
    try:
        with open(ARQUIVO_HISTORY, "w", encoding="utf-8") as f:
            json.dump(history, f, ensure_ascii=False, indent=2)
        return True
    except Exception:
        return False
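# History is persisted as a JSON list of {"role", "content"} dicts, the same
# shape gr.Chatbot(type="messages") consumes, so a saved file can be reloaded
# straight into the chatbot.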
def ler_anexo(arquivo):
    if arquivo is None:
        return ""
    try:
        with open(arquivo.name, "r", encoding="utf-8") as f:
            return f"\n\n[ATTACHMENT: {os.path.basename(arquivo.name)}]\n{f.read()}\n[END ATTACHMENT]\n"
    except Exception:
        return ""
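# Attachments are read in text mode only; a binary upload fails the UTF-8
# decode inside the try block and is silently dropped as an empty string.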
# ==================== 3. ROBUST PLANNER ====================
def fallback_plano():
    # Defined before criar_plano_auto so the demo-mode early return can use it.
    return [
        {"nome": "Analisador", "missao": "Analyze the main input", "modelo": "flash", "tipo_saida": "json"},
        {"nome": "RespostaFinal", "missao": "Write a clear, complete answer", "modelo": "pro", "tipo_saida": "texto"}
    ]

def criar_plano_auto(full_input, history_contexto):
    if not model_pro:
        return fallback_plano(), "⚠️ Demo mode"
    user_turns = [h.get("content", "") for h in history_contexto if h.get("role") == "user"]
    history_resumo = "\n".join(f"👤: {t[:80]}..." for t in user_turns[-2:])[:150] if user_turns else ""
    prompt = f"""INPUT: {full_input[:350]}
HISTORY: {history_resumo}
CREATE A JSON PLAN (2-4 agents):
[
  {{"nome": "Analisador", "missao": "Analyze input", "modelo": "flash", "tipo_saida": "json"}},
  {{"nome": "Final", "missao": "Final answer", "modelo": "pro", "tipo_saida": "texto"}}
]"""
    try:
        resp = model_pro.generate_content(prompt, generation_config={"temperature": 0.1})
        raw = resp.text.strip()
        clean = re.sub(r'```(?:json)?|\n\s*\n', '', raw)  # strip markdown fences and blank lines
        clean = re.sub(r'^.*?\[', '[', clean, flags=re.DOTALL)  # keep only the JSON array
        clean = re.sub(r'\].*?$', ']', clean, flags=re.DOTALL)
        plano = json.loads(clean)
        if isinstance(plano, list) and len(plano) >= 2:
            return plano, f"✅ {len(plano)} agents"
        return fallback_plano(), "⚠️ Default plan"
    except Exception:
        return fallback_plano(), "⚠️ Fallback active"
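# Plan schema produced above: a JSON array of agent configs of the form
#   {"nome": <agent name>, "missao": <instruction>,
#    "modelo": "flash" | "pro", "tipo_saida": "json" | "texto"}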
# ==================== 4. EXECUTOR ====================
def executar_no(timeline, config):
    if not (model_flash or model_pro):
        return {"role": "system", "error": "No API"}, "(ERROR)", "No key"
    modelo = model_pro if config.get("modelo") == "pro" else model_flash
    contexto = json.dumps(timeline[-6:], ensure_ascii=False)
    prompt = f"TIMELINE: {contexto}\nAGENT: {config['nome']}\nMISSION: {config['missao']}"
    log = f"🔸 {config['nome']}..."
    try:
        resp = modelo.generate_content(prompt)
        out = resp.text.strip()
        if config.get('tipo_saida') == 'json':
            out = re.sub(r'```(?:json)?', '', out).strip()  # strip markdown fences
            content = json.loads(out) if out.startswith('{') else {"resumo": out}
        else:
            content = out
        log += " ✅ OK"
        return {"role": "assistant", "agent": config['nome'], "content": content}, log, out
    except Exception as e:
        return {"role": "system", "error": str(e)}, f"{log} ❌", str(e)
# ==================== 5. ORCHESTRATOR ====================
def orquestrador(texto, arquivo, history, json_config):  # json_config is received but unused (see Config tab)
    anexo = ler_anexo(arquivo)
    full_input = f"{texto}\n{anexo}".strip()
    if not full_input:
        yield history, {}, "No input."
        return
    # 'messages' format expected by Chatbot(type="messages")
    history.append({"role": "user", "content": full_input})
    history.append({"role": "assistant", "content": "🎯 Creating plan..."})
    timeline = [{"role": "user", "content": full_input}]
    logs = f"🚀 v32: {datetime.now().strftime('%H:%M:%S')}\n"
    yield history, timeline, logs
    plano, log_plano = criar_plano_auto(full_input, history)
    logs += f"PLAN: {log_plano}\n"
    timeline.append({"role": "system", "plano": plano})
    history[-1]["content"] = f"✅ {log_plano}"
    yield history, timeline, logs
    for i, cfg in enumerate(plano):
        history[-1]["content"] = f"[{i+1}/{len(plano)}] {cfg['nome']}..."
        yield history, timeline, logs
        res, log_add, raw = executar_no(timeline, cfg)
        timeline.append(res)
        logs += f"  {log_add}\n"
        if cfg.get('tipo_saida') == 'texto' and isinstance(res.get('content'), str):
            history[-1]["content"] = res['content'][:850]
        yield history, timeline, logs
    salvar_history(history)
    logs += "✅ Done"
    yield history, timeline, logs
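# orquestrador is a generator: every yield streams an intermediate update of
# (chatbot history, timeline JSON, logs) to the UI while the plan runs.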
# ==================== 6. UI 100% HF SPACES COMPATIBLE ====================
def ui_clean():
    css = """
    footer {display: none !important;}
    .contain {border: none !important;}
    """
    config_init = carregar_protocolo()
    with gr.Blocks(title="🚀 PIPELINE v32 - ZERO WARNINGS", css=css, theme=gr.themes.Soft()) as app:
        gr.Markdown("# PIPELINE v32 - Intelligent Auto-Plan")
        with gr.Tabs():
            # TAB 1: CHAT (type='messages')
            with gr.Tab("💬 Pipeline"):
                chatbot = gr.Chatbot(
                    height=600,
                    show_copy_button=True,
                    type="messages",  # the deprecated 'tuples' format triggers warnings
                    label=""
                )
                with gr.Row():
                    with gr.Column(scale=10):
                        txt_in = gr.Textbox(
                            placeholder="Type any input...",
                            lines=2,
                            container=False,
                            show_label=False
                        )
                    with gr.Column(scale=1):
                        file_in = gr.UploadButton(
                            "📎",
                            file_types=[".txt", ".md", ".json", ".py"]
                        )
                    with gr.Column(scale=1):
                        btn_send = gr.Button("▶️ Run", variant="primary")
                file_status = gr.Markdown("")
                file_in.upload(
                    lambda x: f"📎 {os.path.basename(x.name) if x else ''}",
                    file_in, file_status
                )
            # TAB 2: DEBUG (gr.JSON takes no 'lines' kwarg)
            with gr.Tab("🔍 Debug"):
                with gr.Row():
                    out_dna = gr.JSON(label="Timeline")
                    out_logs = gr.Textbox(label="Logs", lines=15)
            # TAB 3: CONFIG
            with gr.Tab("⚙️ Config"):
                code_json = gr.Code(
                    value=config_init,
                    language="json",
                    label="Config (not used by the pipeline)"
                )
                save_status = gr.Markdown("")
                btn_save = gr.Button("Save", variant="secondary")
                btn_save.click(salvar_protocolo, inputs=[code_json], outputs=[save_status])
        # TRIGGERS: the button click and Enter in the textbox run the same pipeline
        triggers = [btn_send.click, txt_in.submit]
        for trig in triggers:
            trig(
                orquestrador,
                inputs=[txt_in, file_in, chatbot, code_json],
                outputs=[chatbot, out_dna, out_logs]
            ).then(
                lambda: gr.update(value=""),  # clear the input box afterwards
                outputs=[txt_in]
            )
    return app
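# HF Spaces serves the app on 0.0.0.0:7860; share=False because Spaces
# provides the public URL itself.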
if __name__ == "__main__":
    print("🚀 PIPELINE v32 - 100% HF SPACES COMPATIBLE")
    print("✅ No Gradio warnings")
    print("✅ No plan JSON errors")
    print("✅ Python 3.10 OK")
    app = ui_clean()
    app.launch(server_name="0.0.0.0", server_port=7860, share=False)