caarleexx committed
Commit f66af6f · verified · 1 Parent(s): 624872e

Update app.py

Files changed (1)
  1. app.py +199 -167

app.py CHANGED
@@ -1,6 +1,6 @@
  # ╔════════════════════════════════════════════════════════════════════════════╗
- # ║ PIPELINE v32: 100% HF SPACES COMPATIBLE | ZERO WARNINGS                     ║
- # ║ Fixes: Chatbot type='messages' | JSON without 'lines'                       ║
  # ╚════════════════════════════════════════════════════════════════════════════╝

  import os
@@ -14,7 +14,7 @@ import warnings
  warnings.filterwarnings("ignore")

  # ==================== 1. CONFIGURATION ====================
- api_key = os.getenv("GOOGLE_API_KEY", "YOUR_API_KEY_HERE")
  if api_key:
      genai.configure(api_key=api_key)
      model_flash = genai.GenerativeModel("gemini-flash-latest")
@@ -22,112 +22,170 @@ if api_key:
  else:
      model_flash = model_pro = None

- ARQUIVO_CONFIG = "protocolo.json"
- ARQUIVO_HISTORY = "history_v32.json"

- # ==================== 2. UTILITIES ====================
- def carregar_protocolo():
-     try:
-         with open(ARQUIVO_CONFIG, "r", encoding="utf-8") as f:
-             return f.read()
-     except:
-         return "[]"
-
- def salvar_protocolo(conteudo):
-     try:
-         json.loads(conteudo)
-         with open(ARQUIVO_CONFIG, "w", encoding="utf-8") as f:
-             f.write(conteudo)
-         return "✅ Saved"
-     except:
-         return "❌ JSON error"
-
- def carregar_history():
-     try:
-         with open(ARQUIVO_HISTORY, "r", encoding="utf-8") as f:
-             return json.load(f)
-     except:
-         return []
-
- def salvar_history(history):
-     try:
-         with open(ARQUIVO_HISTORY, "w", encoding="utf-8") as f:
-             json.dump(history, f, ensure_ascii=False, indent=2)
-         return True
-     except:
-         return False
-
- def ler_anexo(arquivo):
-     if arquivo is None: return ""
-     try:
-         with open(arquivo.name, "r", encoding="utf-8") as f:
-             return f"\n\n[ATTACHMENT: {os.path.basename(arquivo.name)}]\n{f.read()}\n[END ATTACHMENT]\n"
-     except: return ""

- # ==================== 3. ROBUST PLANNER ====================
- def criar_plano_auto(full_input, history_contexto):
-     if not model_pro:
-         return fallback_plano(), "⚠️ Demo mode"

- def fallback_plano():
-     return [
-         {"nome": "Analisador", "missao": "Analyze the main input", "modelo": "flash", "tipo_saida": "json"},
-         {"nome": "RespostaFinal", "missao": "Create a clear, complete answer", "modelo": "pro", "tipo_saida": "texto"}
-     ]

-     history_resumo = "\n".join([f"👤: {h[0][:80]}..." for h in history_contexto[-2:]])[:150] if history_contexto else ""

-     prompt = f"""INPUT: {full_input[:350]}
- HISTORY: {history_resumo}

- CREATE A JSON PLAN (2-4 agents):
  [
- {{"nome": "Analisador", "missao": "Analyze input", "modelo": "flash", "tipo_saida": "json"}},
- {{"nome": "Final", "missao": "Final answer", "modelo": "pro", "tipo_saida": "texto"}}
- ]"""

      try:
-         resp = model_pro.generate_content(prompt, generation_config={"temperature": 0.1})
-         raw = resp.text.strip()
-         clean = re.sub(r'```json|```|\n\s*\n', '', raw)
-         clean = re.sub(r'^.*?\[', '[', clean)
-         clean = re.sub(r'\].*?$', ']', clean)
-
-         plano = json.loads(clean)
-         if isinstance(plano, list) and len(plano) >= 2:
-             return plano, f"✅ {len(plano)} agents"
-         return fallback_plano(), "⚠️ Default plan"
      except:
-         return fallback_plano(), "⚠️ Fallback active"

- # ==================== 4. EXECUTOR ====================
- def executar_no(timeline, config):
      if not (model_flash or model_pro):
-         return {"role": "system", "error": "No API"}, "(ERROR)", "No key"

      modelo = model_pro if config.get("modelo") == "pro" else model_flash
-     contexto = json.dumps(timeline[-6:], ensure_ascii=False)

-     prompt = f"TIMELINE: {contexto}\nAGENT: {config['nome']}\nMISSION: {config['missao']}"

-     log = f"🔸 {config['nome']}..."
      try:
          resp = modelo.generate_content(prompt)
          out = resp.text.strip()
-
-         if config.get('tipo_saida') == 'json':
-             out = re.sub(r'```json|```', '', out)
-             content = json.loads(out) if out.startswith('{') else {"resumo": out}
-         else:
-             content = out
-
-         log += " ✓ OK"
-         return {"role": "assistant", "agent": config['nome'], "content": content}, log, out
-     except Exception as e:
-         return {"role": "system", "error": str(e)}, f"{log} ✗", str(e)

- # ==================== 5. ORCHESTRATOR ====================
- def orquestrador(texto, arquivo, history, json_config):
      anexo = ler_anexo(arquivo)
      full_input = f"{texto}\n{anexo}".strip()
133
 
@@ -135,114 +193,88 @@ def orquestrador(texto, arquivo, history, json_config):
          yield history, {}, "No input."
          return

-     # tuples format for the chatbot
-     history.append([full_input, "🎯 Creating plan..."])
      timeline = [{"role": "user", "content": full_input}]
-     logs = f"🚀 v32: {datetime.now().strftime('%H:%M:%S')}\n"

      yield history, timeline, logs

-     plano, log_plano = criar_plano_auto(full_input, history)
-     logs += f"PLAN: {log_plano}\n"
      timeline.append({"role": "system", "plano": plano})

-     history[-1][1] = f"✅ {log_plano}"
      yield history, timeline, logs

-     for i, cfg in enumerate(plano):
-         history[-1][1] = f"[{i+1}/{len(plano)}] {cfg['nome']}..."
          yield history, timeline, logs

-         res, log_add, raw = executar_no(timeline, cfg)
          timeline.append(res)
-         logs += f" {log_add}\n"

-         if cfg.get('tipo_saida') == 'texto' and isinstance(res.get('content'), str):
-             history[-1][1] = res['content'][:850]
          yield history, timeline, logs

-     salvar_history(history)
-     logs += "✅ Done"
      yield history, timeline, logs

- # ==================== 6. UI, 100% HF SPACES COMPATIBLE ====================
- def ui_clean():
-     css = """
-     footer {display: none !important;}
-     .contain {border: none !important;}
-     """
-
-     config_init = carregar_protocolo()

-     with gr.Blocks(title="🚀 PIPELINE v32 - ZERO WARNINGS", css=css, theme=gr.themes.Soft()) as app:
-         gr.Markdown("# PIPELINE v32 - Smart Auto-Plan")

          with gr.Tabs():
-             # TAB 1: CHAT (type='messages')
              with gr.Tab("💬 Pipeline"):
-                 chatbot = gr.Chatbot(
-                     height=600,
-                     show_copy_button=True,
-                     type="tuples",  # legacy-compatible
-                     label=""
-                 )

                  with gr.Row():
-                     with gr.Column(scale=10):
-                         txt_in = gr.Textbox(
-                             placeholder="Type any input...",
-                             lines=2,
-                             container=False,
-                             show_label=False
-                         )
-                     with gr.Column(scale=1):
-                         file_in = gr.UploadButton(
-                             "📎",
-                             file_types=[".txt", ".md", ".json", ".py"]
-                         )
-                     with gr.Column(scale=1):
-                         btn_send = gr.Button("▶️ Execute", variant="primary")
-
-                 file_status = gr.Markdown("")
-                 file_in.upload(
-                     lambda x: f"📎 {os.path.basename(x.name) if x else ''}",
-                     file_in, file_status
-                 )

-             # TAB 2: DEBUG (no 'lines')
              with gr.Tab("🔍 Debug"):
-                 with gr.Row():
-                     out_dna = gr.JSON(label="Timeline")
-                     out_logs = gr.Textbox(label="Logs", lines=15)

-             # TAB 3: CONFIG
              with gr.Tab("⚙️ Config"):
-                 code_json = gr.Code(
-                     value=config_init,
-                     language="json",
-                     label="Config (unused)"
-                 )
-                 gr.Button("Save", variant="secondary")

-         # TRIGGERS
-         triggers = [btn_send.click, txt_in.submit]
-         for trig in triggers:
-             trig(
-                 orquestrador,
-                 inputs=[txt_in, file_in, chatbot, code_json],
-                 outputs=[chatbot, out_dna, out_logs]
-             ).then(
-                 lambda: gr.update(value=""),
-                 outputs=[txt_in]
-             )

      return app

  if __name__ == "__main__":
-     print("🚀 PIPELINE v32 - 100% HF SPACES COMPATIBLE")
-     print("✅ No Gradio warnings")
-     print("✅ No plan-JSON errors")
-     print("✅ Python 3.10 OK")
-
-     app = ui_clean()
-     app.launch(server_name="0.0.0.0", server_port=7860, share=False)

  # ╔════════════════════════════════════════════════════════════════════════════╗
+ # ║ PIPELINE v33: CONTEXTUAL ANALYZER + SOCRATES PLANNER                        ║
+ # ║ 1st: CATALOGS context → 2nd: PLANS → 3rd: EXECUTES                          ║
  # ╚════════════════════════════════════════════════════════════════════════════╝

  import os

  warnings.filterwarnings("ignore")

  # ==================== 1. CONFIGURATION ====================
+ api_key = os.getenv("GOOGLE_API_KEY")
  if api_key:
      genai.configure(api_key=api_key)
      model_flash = genai.GenerativeModel("gemini-flash-latest")

  else:
      model_flash = model_pro = None

+ ARQUIVO_CONTEXT = "contexto_v33.json"
+ ARQUIVO_HISTORY = "history_v33.json"

+ # ==================== 2. CONTEXT SYSTEM (THE CORE) ====================
+ class AnalisadorContextual:
+     def __init__(self):
+         self.contexto = self.carregar_contexto()
+
+     def carregar_contexto(self):
+         try:
+             with open(ARQUIVO_CONTEXT, "r", encoding="utf-8") as f:
+                 return json.load(f)
+         except:
+             return {
+                 "classificacao": [],
+                 "fatos": [],  # [tag, weight, input_ref, last_update]
+                 "objetivo_usuario": "",
+                 "duvida_central": "",
+                 "timestamp": ""
+             }
+
+     def salvar_contexto(self):
+         with open(ARQUIVO_CONTEXT, "w", encoding="utf-8") as f:
+             json.dump(self.contexto, f, ensure_ascii=False, indent=2)
+
+     def analisar_input(self, input_atual, history):
+         """THE CORE: catalogs the input and updates the running context"""
+         if not model_pro:
+             return self.contexto_fallback()
+
+         history_resumo = "\n".join([f"👤: {h[0][:100]}... 🤖: {h[1][:100]}..."
+                                     for h in history[-4:]])[:400]
+
+         prompt_analise = f"""CONTEXTUAL ANALYSIS v33 - CATALOG, DO NOT ANSWER
+
+ CURRENT INPUT: {input_atual}
+ RECENT HISTORY: {history_resumo}
+
+ FILL IN THIS JSON FORM:
+
+ {{
+     "classificacao": ["attachment", "question", "critique", "research"],
+     "fatos": [
+         ["gpu-l40s", 0.9, "input_001", "2025-12-05"],
+         ["ltxv-video", 0.7, "input_002", "2025-12-04"]
+     ],
+     "objetivo_usuario": "Main goal of the conversation",
+     "duvida_central": "Current specific point",
+     "timestamp": "{datetime.now().isoformat()}"
+ }}
+
+ ✅ CATALOG FACTS with a WEIGHT (0.0-1.0)
+ ✅ DECAY: old facts lose weight
+ ✅ GOAL: the aim of the whole conversation
+ ✅ QUESTION: the immediate focus
+
+ VALID JSON ONLY!"""
+
+         try:
+             resp = model_pro.generate_content(prompt_analise, generation_config={"temperature": 0.1})
+             raw = resp.text.strip()
+             clean = re.sub(r'```json|```|\n\s*\n', '', raw)
+             clean = clean[clean.find('{'):clean.rfind('}') + 1]  # trim to the outermost JSON object
+
+             analise = json.loads(clean)
+
+             # MERGE + DECAY
+             self.contexto.update(analise)
+             self.aplicar_decaimento_fatos()
+             self.contexto["timestamp"] = datetime.now().isoformat()
+
+             self.salvar_contexto()
+             return self.contexto
+
+         except:
+             return self.contexto_fallback()
+
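analisar_input has to recover a single JSON object from chatty model output: strip markdown fences, trim to the outermost braces, then parse. The same logic as a standalone helper that can be tested in isolation (the helper name is illustrative, not part of the app):

    import json
    import re

    def extract_json_object(raw: str) -> dict:
        """Strip ``` fences, then keep only the outermost {...} span."""
        text = re.sub(r'```json|```', '', raw).strip()
        start, end = text.find('{'), text.rfind('}')
        if start == -1 or end == -1:
            raise ValueError("no JSON object found")
        return json.loads(text[start:end + 1])

    # Tolerates prose around the payload:
    print(extract_json_object('Sure, here it is:\n```json\n{"fatos": []}\n```'))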
+     def contexto_fallback(self):
+         return {
+             "classificacao": ["question"],
+             "fatos": [],
+             "objetivo_usuario": "Technical/ML/legal analysis",
+             "duvida_central": "Process the current input",
+             "timestamp": datetime.now().isoformat()
+         }
+
+     def aplicar_decaimento_fatos(self):
+         """DECAY: weight *= 0.95 per day without a mention"""
+         agora = datetime.now().timestamp()
+         novos_fatos = []
+
+         for fato in self.contexto.get("fatos", []):
+             tag, peso, input_ref, ultima = fato
+             ultima_ts = datetime.fromisoformat(ultima).timestamp()
+             dias = (agora - ultima_ts) / 86400  # days since the last mention
+             peso_decay = peso * (0.95 ** dias)
+             if peso_decay > 0.1:  # keep only still-relevant facts
+                 # stamp with "now" so the next decay pass compounds from this one
+                 novos_fatos.append([tag, max(0.1, peso_decay), input_ref, datetime.now().isoformat()])
+
+         self.contexto["fatos"] = novos_fatos[:20]  # top 20 facts
+
128
+ def planejar_socrates(contexto, input_atual, history):
129
+ """PLANEJA baseado em CONTEXTUAL + input"""
130
+ if not model_pro:
131
+ return fallback_plano()
132
 
133
+ fatos_top = "\n".join([f"- {f[0]} ({f[1]:.2f})" for f in contexto.get("fatos", [])[:5]])
134
 
135
+ prompt_socrates = f"""PLANEJADOR SΓ“CRATES v33
136
+
137
+ CONTEXTO CATALOGADO:
138
+ β€’ ClassificaΓ§Γ£o: {contexto.get('classificacao', [])}
139
+ β€’ Fatos principais: {fatos_top}
140
+ β€’ Objetivo: {contexto.get('objetivo_usuario', '')}
141
+ β€’ DΓΊvida central: {contexto.get('duvida_central', '')}
142
 
143
+ INPUT ATUAL: {input_atual}
144
+
145
+ CRIE PLANO 3-5 agentes para RESOLVER PARADOXO MENON:
146
  [
147
+ {{"nome": "Explorador", "missao": "Levante possibilidades", "modelo": "flash", "tipo_saida": "json"}},
148
+ {{"nome": "Validador", "missao": "Teste cenΓ‘rios", "modelo": "pro", "tipo_saida": "json"}},
149
+ {{"nome": "Sintetizador", "missao": "Resposta final clara", "modelo": "pro", "tipo_saida": "texto"}}
150
+ ]
151
+
152
+ APENAS JSON!"""
153
 
154
  try:
155
+ resp = model_pro.generate_content(prompt_socrates)
156
+ plano_raw = re.sub(r'``````', '', resp.text.strip())
157
+ return json.loads(plano_raw)
 
 
 
 
 
 
 
158
  except:
159
+ return fallback_plano()
160
 
161
+ def fallback_plano():
162
+ return [
163
+ {"nome": "Analisador", "missao": "Analise contexto + input", "modelo": "flash", "tipo_saida": "json"},
164
+ {"nome": "RespostaFinal", "missao": "Resposta clara e completa", "modelo": "pro", "tipo_saida": "texto"}
165
+ ]
166
+
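Because planejar_socrates returns whatever JSON the model emits, while fallback_plano returns a known-good list, a shape check before execution is cheap insurance; v32 only tested isinstance and length. A sketch of such a validator (ours, not the app's):

    REQUIRED_KEYS = {"nome", "missao", "modelo", "tipo_saida"}

    def plano_valido(plano) -> bool:
        """A usable plan: a list of >= 2 agent dicts carrying the expected keys."""
        return (isinstance(plano, list) and len(plano) >= 2
                and all(isinstance(a, dict) and REQUIRED_KEYS <= a.keys() for a in plano))

    assert plano_valido([
        {"nome": "Explorador", "missao": "x", "modelo": "flash", "tipo_saida": "json"},
        {"nome": "Sintetizador", "missao": "y", "modelo": "pro", "tipo_saida": "texto"},
    ])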
+ # ==================== 4. EXECUTOR (REUSED) ====================
+ def executar_agente(timeline, config):
      if not (model_flash or model_pro):
+         return {"role": "system", "error": "No API"}, "ERROR", "No key"

      modelo = model_pro if config.get("modelo") == "pro" else model_flash
+     contexto = json.dumps(timeline[-8:], ensure_ascii=False)

+     prompt = f"CONTEXT: {contexto}\nAGENT: {config['nome']}\nMISSION: {config['missao']}"

      try:
          resp = modelo.generate_content(prompt)
          out = resp.text.strip()
+         content = json.loads(re.sub(r'```json|```', '', out)) if config.get('tipo_saida') == 'json' else out
+         return {"role": "assistant", "agent": config['nome'], "content": content}, "OK", out
+     except:
+         return {"role": "system", "error": "Failure"}, "ERROR", "Agent error"

+ # ==================== 5. ORCHESTRATOR v33 ====================
+ analisador = AnalisadorContextual()  # GLOBAL SINGLETON

+ def orquestrador_v33(texto, arquivo, history, json_config):
      anexo = ler_anexo(arquivo)
      full_input = f"{texto}\n{anexo}".strip()

      if not full_input:
          yield history, {}, "No input."
          return

+     history.append([full_input, "🧠 Cataloging context..."])
      timeline = [{"role": "user", "content": full_input}]
+     logs = f"🚀 v33 SOCRATES: {datetime.now().strftime('%H:%M:%S')}\n"

      yield history, timeline, logs

+     # STEP 1: CONTEXTUAL ANALYZER (THE CORE)
+     contexto = analisador.analisar_input(full_input, history)
+     logs += f"📊 Context: {len(contexto.get('fatos', []))} facts | {contexto.get('classificacao', [])} | {contexto.get('objetivo_usuario', '')[:50]}...\n"
+     timeline.append({"role": "system", "contexto": contexto})
+
+     history[-1][1] = f"✅ Catalog: {len(contexto.get('fatos', []))} facts cataloged"
+     yield history, timeline, logs
+
+     # STEP 2: SOCRATES PLANNER
+     plano = planejar_socrates(contexto, full_input, history)
+     logs += f"🎯 Plan: {len(plano)} agents\n"
      timeline.append({"role": "system", "plano": plano})

+     history[-1][1] = f"🎯 Socrates plan: {len(plano)} steps"
      yield history, timeline, logs

+     # STEP 3: EXECUTE THE PLAN
+     for i, agente in enumerate(plano):
+         history[-1][1] = f"[{i+1}/{len(plano)}] {agente['nome']}..."
          yield history, timeline, logs

+         res, status, raw = executar_agente(timeline, agente)
          timeline.append(res)
+         logs += f" {status}\n"

+         if agente.get('tipo_saida') == 'texto':
+             history[-1][1] = str(res.get('content', ''))[:900]
          yield history, timeline, logs

+     logs += "✅ Socrates pipeline complete"
      yield history, timeline, logs

+ def ler_anexo(arquivo):
+     if not arquivo: return ""
+     try:
+         with open(arquivo.name, "r", encoding="utf-8") as f:
+             return f"\n📎 {os.path.basename(arquivo.name)}:\n{f.read()}\n"
+     except: return ""
+
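orquestrador_v33 is a generator: every yield hands Gradio a fresh (chatbot, timeline, logs) triple, which is what makes the status updates stream into the UI. The same pattern reduced to its skeleton (component and function names here are ours):

    import time
    import gradio as gr

    def passos(msg, history):
        """Yield partial states; Gradio re-renders the outputs on each yield."""
        history = history + [[msg, "..."]]
        for etapa in ("catalog", "plan", "execute"):
            history[-1][1] = f"step: {etapa}"
            time.sleep(0.2)  # stand-in for a model call
            yield history

    with gr.Blocks() as demo:
        chat = gr.Chatbot(type="tuples")
        caixa = gr.Textbox()
        caixa.submit(passos, [caixa, chat], [chat])
    # demo.launch()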
+ # ==================== 6. UI v33 ====================
+ def ui_v33():
+     css = "footer {display: none !important;} .contain {border: none !important;}"

+     with gr.Blocks(title="🚀 PIPELINE v33 - SOCRATES", css=css, theme=gr.themes.Soft()) as app:
+         gr.Markdown("# 🧠 PIPELINE v33 - CONTEXTUAL ANALYZER + SOCRATES")
+         gr.Markdown("*Catalogs → Plans → Resolves the Meno paradox*")

          with gr.Tabs():
              with gr.Tab("💬 Pipeline"):
+                 chatbot = gr.Chatbot(height=600, show_copy_button=True, type="tuples")

                  with gr.Row():
+                     txt_in = gr.Textbox(placeholder="Your input...", lines=3, container=False)
+                     file_in = gr.UploadButton("📎", file_types=[".txt", ".py", ".json"])
+                     btn_send = gr.Button("▶️ Execute", variant="primary")

              with gr.Tab("🔍 Debug"):
+                 out_dna = gr.JSON(label="Timeline")
+                 out_logs = gr.Textbox(label="Logs", lines=15)

+             with gr.Tab("📊 Contexto"):
+                 contexto_display = gr.JSON(label="contexto_v33.json", value=analisador.contexto)
+                 gr.Button("Refresh", variant="secondary")

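The Refresh button in the Contexto tab is created without a handler, so clicking it does nothing (v32's Save button had the same gap). If it is meant to re-read the persisted file, a plausible wiring inside the same Blocks context would be:

    # Hypothetical wiring (not in the commit): reload the persisted context
    # and push it into the JSON viewer.
    btn_refresh = gr.Button("Refresh", variant="secondary")
    btn_refresh.click(lambda: analisador.carregar_contexto(), None, contexto_display)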
 
+         btn_send.click(orquestrador_v33,
+                        [txt_in, file_in, chatbot, gr.State({})],
+                        [chatbot, out_dna, out_logs]).then(
+             lambda: gr.update(value=""), None, txt_in
+         )
+         txt_in.submit(orquestrador_v33, [txt_in, file_in, chatbot, gr.State({})],
+                       [chatbot, out_dna, out_logs])

      return app

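Click and submit both drive the same generator; the v32 loop-over-triggers idiom would also work here and keeps the clear-textbox step in one place. A sketch, assuming it sits inside ui_v33's Blocks context with the components defined above:

    for trig in (btn_send.click, txt_in.submit):
        trig(
            orquestrador_v33,
            inputs=[txt_in, file_in, chatbot, gr.State({})],
            outputs=[chatbot, out_dna, out_logs],
        ).then(lambda: gr.update(value=""), outputs=[txt_in])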
  if __name__ == "__main__":
+     print("🚀 v33 SOCRATES - CONTEXTUAL ANALYZER ACTIVE")
+     print("📊 contexto_v33.json persists facts + weights")
+     app = ui_v33()
+     app.launch(server_name="0.0.0.0", server_port=7860)