Upload dashboardDemo.py
paginas/dashboardDemo.py  +33 -10
CHANGED
@@ -599,8 +599,36 @@ def detectedReferenceToCSV(prompt: str) -> bool:
 # ===========================
 # Función para interactuar con el bot
 # ===========================
+def seleccionar_modelo_llm():
+    modelos_disponibles = {
+        "Alibaba Cloud - Qwen QWQ 32B": "qwen-qwq-32b",
+        "Alibaba Cloud - Qwen3 32B": "qwen/qwen3-32b",
+        "DeepSeek - LLaMA 70B Distill": "deepseek-r1-distill-llama-70b",
+        "Google - Gemma2 9B IT": "gemma2-9b-it",
+        "Meta - LLaMA 3.1 8B Instant": "llama-3.1-8b-instant",
+        "Meta - LLaMA 3.3 70B Versatile": "llama-3.3-70b-versatile",
+        "Meta - LLaMA 3 70B": "llama3-70b-8192",
+        "Meta - LLaMA 3 8B": "llama3-8b-8192",
+        "Meta - LLaMA 4 Maverick 17B": "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "Meta - LLaMA 4 Scout 17B": "meta-llama/llama-4-scout-17b-16e-instruct",
+        "Meta - LLaMA Guard 4 12B": "meta-llama/llama-guard-4-12b",
+        "Meta - Prompt Guard 2 22M": "meta-llama/llama-prompt-guard-2-22m",
+        "Meta - Prompt Guard 2 86M": "meta-llama/llama-prompt-guard-2-86m",
+        "Mistral - Saba 24B": "mistral-saba-24b"
+    }
+
+    seleccion = st.selectbox(
+        "🧠 Elige un modelo LLM de Groq:",
+        list(modelos_disponibles.keys())
+    )
+
+    return modelos_disponibles[seleccion]
+
+
 def chatBotProtech(client):
     with st.sidebar.expander("📁 Chatbot"):
+
+        modelo_llm = seleccionar_modelo_llm()

         # Inicializar estados
         if "chat_history" not in st.session_state:
@@ -618,7 +646,6 @@ def chatBotProtech(client):
         # Contenedor para mensajes
         messages = st.container(height=400)

-
         # CSS: estilo tipo Messenger
         st.markdown("""
             <style>
@@ -699,7 +726,7 @@ def chatBotProtech(client):

             with messages:
                 with st.spinner("Pensando..."):
-                    completion = callDeepseek(client, prompt)
+                    completion = callModelLLM(client, prompt, modelo_llm)
                     response = ""
                     response_placeholder = st.empty()

@@ -732,7 +759,7 @@ def chatBotProtech(client):
             # Mostrar respuesta del asistente
             with messages:
                 with st.spinner("Pensando..."):
-                    completion = callDeepseek(client, prompt)
+                    completion = callModelLLM(client, prompt, modelo_llm)
                     response = ""
                     response_placeholder = st.empty()

@@ -785,18 +812,16 @@ def chatBotProtech(client):
             st.rerun()


-def callDeepseek(client, prompt):
+def callModelLLM(client, prompt, idModel):
     completion = client.chat.completions.create(
-        model=
+        model=idModel,
         messages=[
             {
                 "role": "system",
                 "content": (
                     "Tu nombre es Protech, el asistente virtual de PRO TECHNOLOGY SOLUTIONS S.A.C. "
                     "Saluda al usuario con cordialidad y responde en español de forma clara, profesional y amable. "
-                    "
-                    "No digas que eres un modelo de lenguaje. "
-                    "Simplemente responde como un asistente humano capacitado en atención al cliente. "
+                    "Debes responder como un asistente humano capacitado en atención al cliente. "
                     "Comienza con un saludo y pregunta: '¿En qué puedo ayudarte hoy?'."
                 )
             },
@@ -809,8 +834,6 @@ def callDeepseek(client, prompt):
     )
     return completion

-
-
 def callWhisper(client, filename_audio,file):
     transcription = client.audio.transcriptions.create(
         file=(filename_audio, file.read()),
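
For orientation, here is a minimal standalone sketch of the pattern this commit introduces: a Streamlit selectbox returns a Groq model id, which is then passed as `model` to the chat-completions call. This is an illustrative assumption, not code from the repository; the diff never shows how `client` is constructed, so the `groq` SDK, the `GROQ_API_KEY` secret name, and the one-entry model dict below are placeholders standing in for `seleccionar_modelo_llm()` and `callModelLLM()`.

# Sketch only (assumptions noted above), not part of the commit.
import streamlit as st
from groq import Groq  # assumed client library; the diff does not show how `client` is built

client = Groq(api_key=st.secrets["GROQ_API_KEY"])  # hypothetical secret name

# One-entry stand-in for the modelos_disponibles dict added in the diff.
modelos = {"Meta - LLaMA 3.1 8B Instant": "llama-3.1-8b-instant"}
modelo_llm = modelos[st.selectbox("🧠 Elige un modelo LLM de Groq:", list(modelos))]

prompt = st.chat_input("Escribe tu mensaje")
if prompt:
    completion = client.chat.completions.create(
        model=modelo_llm,  # selected id is passed straight through, as callModelLLM does in the diff
        messages=[{"role": "user", "content": prompt}],
    )
    st.write(completion.choices[0].message.content)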