Files changed (1) hide show
  1. app.py +72 -120
app.py CHANGED
@@ -1,6 +1,6 @@
1
  # ╔════════════════════════════════════════════════════════════════════════════╗
2
- # β•‘ PIPELINE v31: AUTO-PLAN ROBUSTO COMPLETO | HF SPACES 100% OK β•‘
3
- # β•‘ 1ΒΊ: CRIA PLANO β†’ 2ΒΊ: EXECUTA β†’ SEM ERROS "❌ Erro plano" β•‘
4
  # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
5
 
6
  import os
@@ -10,8 +10,10 @@ import time
10
  from datetime import datetime
11
  import gradio as gr
12
  import google.generativeai as genai
 
 
13
 
14
- # ==================== 1. CONFIGURAÇÃO GLOBAL ====================
15
  api_key = os.getenv("GOOGLE_API_KEY", "SUA_API_KEY_AQUI")
16
  if api_key:
17
  genai.configure(api_key=api_key)
@@ -19,12 +21,11 @@ if api_key:
19
  model_pro = genai.GenerativeModel("gemini-pro-latest")
20
  else:
21
  model_flash = model_pro = None
22
- print("⚠️ Sem GOOGLE_API_KEY - usando modo demo")
23
 
24
  ARQUIVO_CONFIG = "protocolo.json"
25
- ARQUIVO_HISTORY = "history_v31.json"
26
 
27
- # ==================== 2. UTILIDADES COMPLETAS ====================
28
  def carregar_protocolo():
29
  try:
30
  with open(ARQUIVO_CONFIG, "r", encoding="utf-8") as f:
@@ -63,99 +64,70 @@ def ler_anexo(arquivo):
63
  return f"\n\n[ANEXO: {os.path.basename(arquivo.name)}]\n{f.read()}\n[FIM ANEXO]\n"
64
  except: return ""
65
 
66
- # ==================== 3. PLANEJADOR AUTO-ROBUSTO ====================
67
def criar_plano_auto(full_input, history_contexto):
    """CRIA PLANO com 100% fallback - NUNCA falha.

    Asks ``model_pro`` to emit a 2-4 agent plan as a JSON list and
    returns ``(plano, status_message)``.  On any failure (missing API
    key, malformed JSON, API error) a fixed two-agent fallback plan is
    returned instead, so callers always receive a usable plan.
    """
    # PLANO FALLBACK sempre disponível.
    # BUG FIX: defined BEFORE the `not model_pro` early return below —
    # the original referenced fallback_plano() first, which raised
    # NameError whenever the API key was missing (demo mode).
    def fallback_plano():
        return [
            {"nome": "Analisador", "missao": "Analise input e identifique elementos principais", "modelo": "flash", "tipo_saida": "json"},
            {"nome": "Sintetizador", "missao": "Produza resposta final clara e completa pro usuário", "modelo": "pro", "tipo_saida": "texto"}
        ]

    if not model_pro:
        return fallback_plano(), "⚠️ Demo: plano padrão"

    # Compact summary of the last two user turns for planning context.
    history_resumo = ""
    if history_contexto and len(history_contexto) > 1:
        history_resumo = "\n".join([f"👤: {h[0][:100]}..." for h in history_contexto[-2:]])[:200]

    prompt_planejador = f"""INPUT: {full_input[:400]}
HISTÓRICO: {history_resumo}

CRIE PLANO JSON 2-4 agentes:
[
 {{"nome": "Analisador", "missao": "Analise input", "modelo": "flash", "tipo_saida": "json"}},
 {{"nome": "Final", "missao": "Resposta final", "modelo": "pro", "tipo_saida": "texto"}}
]

APENAS JSON VÁLIDO SEM TEXTO EXTRA."""

    try:
        print(f"🔍 DEBUG: Gerando plano para: {full_input[:100]}...")
        # BUG FIX: GenerativeModel.generate_content() has no
        # `temperature=` keyword; temperature must be passed inside
        # generation_config (a dict is accepted).
        resp = model_pro.generate_content(
            prompt_planejador,
            generation_config={"temperature": 0.1},
        )
        plano_raw = resp.text.strip()

        print(f"📄 RAW: {plano_raw[:200]}...")

        # Limpeza agressiva de markdown/texto extra: drop code fences,
        # keep only the outermost [...], and strip characters that
        # cannot occur in the expected JSON.
        plano_raw = re.sub(r'```(?:json)?', '', plano_raw)
        plano_raw = re.sub(r'^.*?\[', '[', plano_raw, flags=re.DOTALL)
        plano_raw = re.sub(r'\].*?$', ']', plano_raw, flags=re.DOTALL)
        plano_raw = re.sub(r'[^\[\]\{\},:\"\s\w\-\.]', '', plano_raw)

        print(f"🧹 CLEAN: {plano_raw[:200]}...")

        plano = json.loads(plano_raw)

        # A usable plan is a list of >= 2 agent dicts, each named.
        if isinstance(plano, list) and len(plano) >= 2 and all('nome' in a for a in plano):
            print(f"✅ PLANO OK: {len(plano)} agentes")
            return plano, f"✅ Plano auto: {len(plano)} agentes"

        print("⚠️ PLANO INVÁLIDO, usando fallback")
        return fallback_plano(), "⚠️ Plano corrigido"

    except Exception as e:
        print(f"❌ PLANO ERROR: {str(e)[:100]}")
        return fallback_plano(), f"⚠️ Fallback ativo"
121
-
122
- # ==================== 4. EXECUTOR DE AGENTES ====================
 
 
 
123
def executar_no(timeline, config):
    """Run one agent step against the model chosen by ``config``.

    Returns ``(timeline_entry, log_line, raw_output)``; on error the
    entry is a ``{"role": "system", "error": ...}`` record so the
    orchestrator can keep going.
    """
    if not (model_flash or model_pro):
        return {"role": "system", "error": "Sem API"}, "(ERRO: API)", "Sem key"

    modelo = model_pro if config.get("modelo") == "pro" else model_flash
    # Only the last 8 timeline entries are sent as context.
    contexto = json.dumps(timeline[-8:], ensure_ascii=False, indent=2)

    prompt = f"""--- TIMELINE ---
{contexto}
----------------
AGENTE: {config['nome']}
MISSÃO: {config['missao']}"""

    log = f"🔸 {config['nome']}..."
    try:
        inicio = time.time()
        resp = modelo.generate_content(prompt)
        out = resp.text.strip()

        # Parse robusto
        if config.get('tipo_saida') == 'json':
            # BUG FIX: the fence strip had been commented out, so a
            # ```json fenced reply never starts with '{' and valid
            # JSON was silently wrapped as {"texto": ...}.
            out = re.sub(r'```(?:json)?', '', out).strip()
            content = json.loads(out) if out.startswith('{') else {"texto": out}
        else:
            content = out

        tempo = time.time() - inicio
        log += f" ✓ {tempo:.1f}s"
        return {"role": "assistant", "agent": config['nome'], "content": content}, log, out
    except Exception as e:
        log += f" ✗ {str(e)[:30]}"
        return {"role": "system", "error": str(e)}, log, str(e)
155
 
156
- # ==================== 5. ORQUESTRADOR PRINCIPAL ====================
157
  def orquestrador(texto, arquivo, history, json_config):
158
- """Fluxo completo: PLANO β†’ EXECUÇÃO"""
159
  anexo = ler_anexo(arquivo)
160
  full_input = f"{texto}\n{anexo}".strip()
161
 
@@ -163,25 +135,22 @@ def orquestrador(texto, arquivo, history, json_config):
163
  yield history, {}, "Sem input."
164
  return
165
 
166
- history = history + [[texto + (" πŸ“Ž" if arquivo else ""), None]]
 
167
  timeline = [{"role": "user", "content": full_input}]
168
- logs = f"πŸš€ v31: {datetime.now().strftime('%H:%M:%S')}\n"
169
 
170
- # PASSO 1: CRIA PLANO
171
- history[-1] = "🎯 Criando plano inteligente..."[1]
172
  yield history, timeline, logs
173
 
174
  plano, log_plano = criar_plano_auto(full_input, history)
175
  logs += f"PLANO: {log_plano}\n"
176
- timeline.append({"role": "system", "plano_auto": plano})
177
 
178
- history[-1] = f"βœ… {log_plano}"[1]
179
  yield history, timeline, logs
180
 
181
- # PASSO 2: EXECUTA PLANO
182
- resposta_final = ""
183
  for i, cfg in enumerate(plano):
184
- history[-1] = f"βš™οΈ [{i+1}/{len(plano)}] {cfg['nome']}..."[1]
185
  yield history, timeline, logs
186
 
187
  res, log_add, raw = executar_no(timeline, cfg)
@@ -189,16 +158,14 @@ def orquestrador(texto, arquivo, history, json_config):
189
  logs += f" {log_add}\n"
190
 
191
  if cfg.get('tipo_saida') == 'texto' and isinstance(res.get('content'), str):
192
- resposta_final = res['content']
193
- preview = resposta_final[:900] + "..." if len(resposta_final) > 900 else resposta_final
194
- history[-1] = preview[1]
195
  yield history, timeline, logs
196
 
197
- logs += f"\nβœ… Pipeline concluΓ­do | {len(plano)} agentes"
198
  salvar_history(history)
 
199
  yield history, timeline, logs
200
 
201
- # ==================== 6. UI COMPLETA (HF SPACES COMPATÍVEL) ====================
202
  def ui_clean():
203
  css = """
204
  footer {display: none !important;}
@@ -207,66 +174,57 @@ def ui_clean():
207
 
208
  config_init = carregar_protocolo()
209
 
210
- with gr.Blocks(title="πŸš€ PIPELINE v31 - AUTO-PLAN ROBUSTO", css=css, theme=gr.themes.Soft()) as app:
211
- gr.Markdown("# PIPELINE v31 - InteligΓͺncia AutomΓ‘tica")
212
- gr.Markdown("*Modelo cria plano β†’ executa β†’ responde perfeitamente*")
213
 
214
  with gr.Tabs():
215
- # ABA 1: CHAT PRINCIPAL
216
  with gr.Tab("πŸ’¬ Pipeline"):
217
  chatbot = gr.Chatbot(
218
  height=600,
219
  show_copy_button=True,
220
- render_markdown=True,
221
  label=""
222
  )
223
 
224
  with gr.Row():
225
  with gr.Column(scale=10):
226
  txt_in = gr.Textbox(
227
- placeholder="Digite qualquer pergunta... O modelo criarΓ‘ seu plano!",
228
  lines=2,
229
- max_lines=6,
230
  container=False,
231
  show_label=False
232
  )
233
- with gr.Column(scale=1, min_width=60):
234
  file_in = gr.UploadButton(
235
  "πŸ“Ž",
236
- file_types=[".txt", ".md", ".json", ".py", ".csv", ".yaml"],
237
- size="sm"
238
  )
239
- with gr.Column(scale=1, min_width=80):
240
- btn_send = gr.Button("▢️ Executar", variant="primary", size="sm")
241
 
242
- file_status = gr.Markdown("", visible=True)
243
  file_in.upload(
244
- lambda x: f"πŸ“Ž Anexo: {os.path.basename(x.name) if x else ''}",
245
- file_in,
246
- file_status
247
  )
248
 
249
- # ABA 2: DEBUG
250
  with gr.Tab("πŸ” Debug"):
251
  with gr.Row():
252
- out_dna = gr.JSON(label="πŸ“Š Timeline Completa", lines=20)
253
- out_logs = gr.Textbox(label="πŸ“‹ Logs Detalhados", lines=20)
254
 
255
- # ABA 3: CONFIG (PLACEHOLDER)
256
  with gr.Tab("βš™οΈ Config"):
257
- gr.Markdown("*Auto-plan ativo - config JSON nΓ£o usada*")
258
- with gr.Row():
259
- btn_save = gr.Button("Salvar Config", variant="secondary")
260
- lbl_save = gr.Label("Status", show_label=False)
261
-
262
  code_json = gr.Code(
263
  value=config_init,
264
  language="json",
265
- label="protocolo.json (opcional)"
266
  )
267
- btn_save.click(salvar_protocolo, code_json, lbl_save)
268
 
269
- # TRIGGERS PRINCIPAIS
270
  triggers = [btn_send.click, txt_in.submit]
271
  for trig in triggers:
272
  trig(
@@ -274,23 +232,17 @@ def ui_clean():
274
  inputs=[txt_in, file_in, chatbot, code_json],
275
  outputs=[chatbot, out_dna, out_logs]
276
  ).then(
277
- lambda: (gr.update(value=""), gr.update(value="")),
278
- outputs=[txt_in, file_status]
279
  )
280
 
281
  return app
282
 
283
- # ==================== LAUNCH ====================
284
  if __name__ == "__main__":
285
- print("πŸš€ PIPELINE v31 COMPLETO - 100% FUNCIONAL")
286
- print("βœ… Sem erros 'Erro plano'")
287
- print("βœ… HF Spaces compatΓ­vel")
288
- print("πŸ’‘ export GOOGLE_API_KEY=... para full power")
289
 
290
  app = ui_clean()
291
- app.launch(
292
- server_name="0.0.0.0",
293
- server_port=7860,
294
- share=False,
295
- show_error=True
296
- )
 
1
  # ╔════════════════════════════════════════════════════════════════════════════╗
2
+ # β•‘ PIPELINE v32: 100% COMPATÍVEL HF SPACES | ZERO WARNINGS β•‘
3
+ # ║ Corrige: Chatbot type='tuples' (compat) | JSON sem 'lines'                 ║
4
  # β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
5
 
6
  import os
 
10
  from datetime import datetime
11
  import gradio as gr
12
  import google.generativeai as genai
13
+ import warnings
14
+ warnings.filterwarnings("ignore")
15
 
16
+ # ==================== 1. CONFIGURAÇÃO ====================
17
  api_key = os.getenv("GOOGLE_API_KEY", "SUA_API_KEY_AQUI")
18
  if api_key:
19
  genai.configure(api_key=api_key)
 
21
  model_pro = genai.GenerativeModel("gemini-pro-latest")
22
  else:
23
  model_flash = model_pro = None
 
24
 
25
  ARQUIVO_CONFIG = "protocolo.json"
26
+ ARQUIVO_HISTORY = "history_v32.json"
27
 
28
+ # ==================== 2. UTILIDADES ====================
29
  def carregar_protocolo():
30
  try:
31
  with open(ARQUIVO_CONFIG, "r", encoding="utf-8") as f:
 
64
  return f"\n\n[ANEXO: {os.path.basename(arquivo.name)}]\n{f.read()}\n[FIM ANEXO]\n"
65
  except: return ""
66
 
67
+ # ==================== 3. PLANEJADOR ROBUSTO ====================
68
def criar_plano_auto(full_input, history_contexto):
    """Build a 2-4 agent plan via ``model_pro``; never raises.

    Returns ``(plano, status_message)``.  Any failure — missing API
    key, unparseable model output, API error — yields the fixed
    fallback plan instead.
    """
    # BUG FIX: fallback_plano must be defined BEFORE the demo-mode
    # early return; the original called it first and raised NameError
    # whenever model_pro was None.
    def fallback_plano():
        return [
            {"nome": "Analisador", "missao": "Analise o input principal", "modelo": "flash", "tipo_saida": "json"},
            {"nome": "RespostaFinal", "missao": "Crie resposta clara e completa", "modelo": "pro", "tipo_saida": "texto"}
        ]

    if not model_pro:
        return fallback_plano(), "⚠️ Demo mode"

    # Last two user turns, truncated, as planning context.
    history_resumo = "\n".join([f"👤: {h[0][:80]}..." for h in history_contexto[-2:]])[:150] if history_contexto else ""

    prompt = f"""INPUT: {full_input[:350]}
HISTÓRICO: {history_resumo}

CRIE PLANO JSON (2-4 agentes):
[
 {{"nome": "Analisador", "missao": "Analise input", "modelo": "flash", "tipo_saida": "json"}},
 {{"nome": "Final", "missao": "Resposta final", "modelo": "pro", "tipo_saida": "texto"}}
]"""

    try:
        # BUG FIX: temperature is not a generate_content() kwarg; it
        # goes inside generation_config.
        resp = model_pro.generate_content(
            prompt,
            generation_config={"temperature": 0.1},
        )
        raw = resp.text.strip()
        # BUG FIX: the original pattern r'``````' only matched SIX
        # consecutive backticks, so markdown fences were never removed;
        # and without re.DOTALL the prefix/suffix trims could not cross
        # newlines.  Strip fences, then isolate the outermost [...].
        clean = re.sub(r'```(?:json)?|\n\s*\n', '', raw)
        clean = re.sub(r'^.*?\[', '[', clean, flags=re.DOTALL)
        clean = re.sub(r'\].*?$', ']', clean, flags=re.DOTALL)

        plano = json.loads(clean)
        if isinstance(plano, list) and len(plano) >= 2:
            return plano, f"✅ {len(plano)} agentes"
        return fallback_plano(), "⚠️ Plano padrão"
    except Exception:  # narrow enough: any failure means "use fallback"
        return fallback_plano(), "⚠️ Fallback ativo"
102
+
103
+ # ==================== 4. EXECUTOR ====================
104
def executar_no(timeline, config):
    """Execute one agent of the plan and return its timeline record.

    Returns ``(timeline_entry, log_line, raw_output)``; errors are
    reported as a ``{"role": "system", "error": ...}`` entry rather
    than raised, so the orchestrator loop continues.
    """
    if not (model_flash or model_pro):
        return {"role": "system", "error": "Sem API"}, "(ERRO)", "Sem key"

    modelo = model_pro if config.get("modelo") == "pro" else model_flash
    # Only the 6 most recent timeline entries are forwarded as context.
    contexto = json.dumps(timeline[-6:], ensure_ascii=False)

    prompt = f"TIMELINE: {contexto}\nAGENTE: {config['nome']}\nMISSÃO: {config['missao']}"

    log = f"🔸 {config['nome']}..."
    try:
        resp = modelo.generate_content(prompt)
        out = resp.text.strip()

        if config.get('tipo_saida') == 'json':
            # BUG FIX: r'``````' matches six literal backticks and
            # never a real ```json fence, so fenced JSON failed the
            # startswith('{') check and was wrapped as {"resumo": ...}.
            out = re.sub(r'```(?:json)?', '', out).strip()
            content = json.loads(out) if out.startswith('{') else {"resumo": out}
        else:
            content = out

        log += " ✓ OK"
        return {"role": "assistant", "agent": config['nome'], "content": content}, log, out
    except Exception as e:
        return {"role": "system", "error": str(e)}, f"{log} ✗", str(e)
 
128
 
129
+ # ==================== 5. ORQUESTRADOR ====================
130
  def orquestrador(texto, arquivo, history, json_config):
 
131
  anexo = ler_anexo(arquivo)
132
  full_input = f"{texto}\n{anexo}".strip()
133
 
 
135
  yield history, {}, "Sem input."
136
  return
137
 
138
+ # Formato MESSAGES para chatbot
139
+ history.append([full_input, "🎯 Criando plano..."])
140
  timeline = [{"role": "user", "content": full_input}]
141
+ logs = f"πŸš€ v32: {datetime.now().strftime('%H:%M:%S')}\n"
142
 
 
 
143
  yield history, timeline, logs
144
 
145
  plano, log_plano = criar_plano_auto(full_input, history)
146
  logs += f"PLANO: {log_plano}\n"
147
+ timeline.append({"role": "system", "plano": plano})
148
 
149
+ history[-1][1] = f"βœ… {log_plano}"
150
  yield history, timeline, logs
151
 
 
 
152
  for i, cfg in enumerate(plano):
153
+ history[-1][1] = f"[{i+1}/{len(plano)}] {cfg['nome']}..."
154
  yield history, timeline, logs
155
 
156
  res, log_add, raw = executar_no(timeline, cfg)
 
158
  logs += f" {log_add}\n"
159
 
160
  if cfg.get('tipo_saida') == 'texto' and isinstance(res.get('content'), str):
161
+ history[-1][1] = res['content'][:850]
 
 
162
  yield history, timeline, logs
163
 
 
164
  salvar_history(history)
165
+ logs += "βœ… ConcluΓ­do"
166
  yield history, timeline, logs
167
 
168
+ # ==================== 6. UI 100% COMPATÍVEL HF SPACES ====================
169
  def ui_clean():
170
  css = """
171
  footer {display: none !important;}
 
174
 
175
  config_init = carregar_protocolo()
176
 
177
+ with gr.Blocks(title="πŸš€ PIPELINE v32 - ZERO WARNINGS", css=css, theme=gr.themes.Soft()) as app:
178
+ gr.Markdown("# PIPELINE v32 - Auto-Plan Inteligente")
 
179
 
180
  with gr.Tabs():
181
+ # ABA 1: CHAT (type='messages')
182
  with gr.Tab("πŸ’¬ Pipeline"):
183
  chatbot = gr.Chatbot(
184
  height=600,
185
  show_copy_button=True,
186
+ type="tuples", # CompatΓ­vel antigo
187
  label=""
188
  )
189
 
190
  with gr.Row():
191
  with gr.Column(scale=10):
192
  txt_in = gr.Textbox(
193
+ placeholder="Digite qualquer input...",
194
  lines=2,
 
195
  container=False,
196
  show_label=False
197
  )
198
+ with gr.Column(scale=1):
199
  file_in = gr.UploadButton(
200
  "πŸ“Ž",
201
+ file_types=[".txt", ".md", ".json", ".py"]
 
202
  )
203
+ with gr.Column(scale=1):
204
+ btn_send = gr.Button("▢️ Executar", variant="primary")
205
 
206
+ file_status = gr.Markdown("")
207
  file_in.upload(
208
+ lambda x: f"πŸ“Ž {os.path.basename(x.name) if x else ''}",
209
+ file_in, file_status
 
210
  )
211
 
212
+ # ABA 2: DEBUG (sem 'lines')
213
  with gr.Tab("πŸ” Debug"):
214
  with gr.Row():
215
+ out_dna = gr.JSON(label="Timeline")
216
+ out_logs = gr.Textbox(label="Logs", lines=15)
217
 
218
+ # ABA 3: CONFIG
219
  with gr.Tab("βš™οΈ Config"):
 
 
 
 
 
220
  code_json = gr.Code(
221
  value=config_init,
222
  language="json",
223
+ label="Config (nΓ£o usada)"
224
  )
225
+ gr.Button("Salvar", variant="secondary")
226
 
227
+ # TRIGGERS
228
  triggers = [btn_send.click, txt_in.submit]
229
  for trig in triggers:
230
  trig(
 
232
  inputs=[txt_in, file_in, chatbot, code_json],
233
  outputs=[chatbot, out_dna, out_logs]
234
  ).then(
235
+ lambda: gr.update(value=""),
236
+ outputs=[txt_in]
237
  )
238
 
239
  return app
240
 
 
241
  if __name__ == "__main__":
242
+ print("πŸš€ PIPELINE v32 - 100% HF SPACES COMPATÍVEL")
243
+ print("βœ… Sem warnings Gradio")
244
+ print("βœ… Sem erros JSON plano")
245
+ print("βœ… Python 3.10 OK")
246
 
247
  app = ui_clean()
248
+ app.launch(server_name="0.0.0.0", server_port=7860, share=False)