Update app.py
Browse files
app.py
CHANGED
|
@@ -253,6 +253,40 @@ def query_sql_agent(user_query, selected_model_name):
|
|
| 253 |
except Exception as e:
|
| 254 |
return f"Erro ao consultar o agente SQL: {e}"
|
| 255 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 256 |
def chatbot_response(user_input, selected_model_name):
|
| 257 |
start_time = time.time()
|
| 258 |
response = query_sql_agent(user_input, selected_model_name)
|
|
@@ -260,6 +294,9 @@ def chatbot_response(user_input, selected_model_name):
|
|
| 260 |
|
| 261 |
model_id = LLAMA_MODELS[selected_model_name]
|
| 262 |
|
|
|
|
|
|
|
|
|
|
| 263 |
history_log.append({
|
| 264 |
"Modelo LLM": model_id,
|
| 265 |
"Pergunta": user_input,
|
|
@@ -281,6 +318,7 @@ def toggle_history():
|
|
| 281 |
show_history_flag = not show_history_flag
|
| 282 |
return history_log if show_history_flag else {}
|
| 283 |
|
|
|
|
| 284 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
| 285 |
with gr.Row():
|
| 286 |
with gr.Column(scale=1):
|
|
@@ -288,6 +326,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
| 288 |
model_selector = gr.Dropdown(list(LLAMA_MODELS.keys()), value="LLaMA 70B", label="")
|
| 289 |
csv_file = gr.File(file_types=[".csv"], label="")
|
| 290 |
upload_feedback = gr.Markdown()
|
|
|
|
| 291 |
reset_btn = gr.Button("Resetar")
|
| 292 |
|
| 293 |
with gr.Column(scale=4):
|
|
@@ -317,6 +356,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
| 317 |
history_btn.click(toggle_history, outputs=history_output)
|
| 318 |
csv_file.change(handle_csv_and_clear_chat, inputs=csv_file, outputs=[upload_feedback, chatbot])
|
| 319 |
reset_btn.click(reset_all, outputs=[upload_feedback, chatbot, csv_file])
|
|
|
|
| 320 |
|
| 321 |
if __name__ == "__main__":
|
| 322 |
demo.launch(share=False)
|
|
|
|
| 253 |
except Exception as e:
|
| 254 |
return f"Erro ao consultar o agente SQL: {e}"
|
| 255 |
|
| 256 |
+
advanced_mode_enabled = False  # Global flag: when True, answers are post-processed by the refinement LLM.


def toggle_advanced_mode(state):
    """Persist the advanced-mode flag and report the new state.

    Args:
        state: Truthy to enable answer refinement, falsy to disable it.

    Returns:
        A Portuguese status message describing the new state.
    """
    global advanced_mode_enabled
    advanced_mode_enabled = state
    if state:
        logging.info("[MODO AVANÇADO] Ativado")
        return "Modo avançado ativado."
    logging.info("[MODO AVANÇADO] Desativado")
    return "Modo avançado desativado."
|
| 263 |
+
|
| 264 |
+
def refine_response_with_llm(user_question, sql_response, chart_md=""):
    """Ask a fixed LLM to polish the SQL agent's answer.

    Args:
        user_question: Original question typed by the user.
        sql_response: Raw answer produced by the SQL agent.
        chart_md: Optional markdown (e.g. a chart) appended to the output.

    Returns:
        The refined answer, or the untouched ``sql_response`` if the
        refinement call fails for any reason. When ``chart_md`` is given it
        is appended after a blank line in either case.
    """
    # The chart markdown is appended to whatever text we return, so build
    # the suffix once up front instead of repeating it on both paths.
    chart_suffix = "\n\n" + chart_md if chart_md else ""

    prompt = (
        f"Pergunta do usuário:\n{user_question}\n\n"
        f"Resposta gerada pelo agente SQL:\n{sql_response}\n\n"
        "Mantenha a resposta original e faça os ajustes e melhorias posteriormente.\n"
        "Sua tarefa é refinar, complementar e melhorar a resposta.\n"
        "Adicione interpretações estatísticas ou insights relevantes."
    )
    logging.info(f"[DEBUG] Prompt enviado ao modelo de refinamento:\n{prompt}\n")

    try:
        # NOTE(review): refinement always uses the "LLaMA 70B" entry,
        # regardless of which model the user selected for the SQL agent.
        completion = hf_client.chat.completions.create(
            model=LLAMA_MODELS["LLaMA 70B"],
            messages=[{"role": "system", "content": prompt}],
            max_tokens=1200,
            stream=False
        )
        refined_text = completion["choices"][0]["message"]["content"]
        logging.info(f"[DEBUG] Resposta do modelo de refinamento:\n{refined_text}\n")
        return refined_text + chart_suffix
    except Exception as e:
        # Best-effort behavior: any failure falls back to the unrefined answer.
        logging.error(f"[ERRO] Falha ao refinar resposta com LLM: {e}")
        return sql_response + chart_suffix
|
| 289 |
+
|
| 290 |
def chatbot_response(user_input, selected_model_name):
|
| 291 |
start_time = time.time()
|
| 292 |
response = query_sql_agent(user_input, selected_model_name)
|
|
|
|
| 294 |
|
| 295 |
model_id = LLAMA_MODELS[selected_model_name]
|
| 296 |
|
| 297 |
+
if advanced_mode_enabled:
|
| 298 |
+
response = refine_response_with_llm(user_input, response)
|
| 299 |
+
|
| 300 |
history_log.append({
|
| 301 |
"Modelo LLM": model_id,
|
| 302 |
"Pergunta": user_input,
|
|
|
|
| 318 |
show_history_flag = not show_history_flag
|
| 319 |
return history_log if show_history_flag else {}
|
| 320 |
|
| 321 |
+
|
| 322 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
| 323 |
with gr.Row():
|
| 324 |
with gr.Column(scale=1):
|
|
|
|
| 326 |
model_selector = gr.Dropdown(list(LLAMA_MODELS.keys()), value="LLaMA 70B", label="")
|
| 327 |
csv_file = gr.File(file_types=[".csv"], label="")
|
| 328 |
upload_feedback = gr.Markdown()
|
| 329 |
+
advanced_checkbox = gr.Checkbox(label="Refinar Resposta")
|
| 330 |
reset_btn = gr.Button("Resetar")
|
| 331 |
|
| 332 |
with gr.Column(scale=4):
|
|
|
|
| 356 |
history_btn.click(toggle_history, outputs=history_output)
|
| 357 |
csv_file.change(handle_csv_and_clear_chat, inputs=csv_file, outputs=[upload_feedback, chatbot])
|
| 358 |
reset_btn.click(reset_all, outputs=[upload_feedback, chatbot, csv_file])
|
| 359 |
+
advanced_checkbox.change(toggle_advanced_mode, inputs=advanced_checkbox, outputs=[])
|
| 360 |
|
| 361 |
if __name__ == "__main__":
|
| 362 |
demo.launch(share=False)
|