Update app.py
app.py CHANGED
@@ -88,6 +88,7 @@ class Llama3Demo:
         return response.split("<|assistant|>")[-1].strip()
 
 ##################################################################
+
 def main():
     st.set_page_config(page_title="Llama 3.2 Chat", page_icon="🦙")
 
@@ -107,44 +108,6 @@ def main():
     except Exception as e:
         st.error(f"Error en configuración: {str(e)}")
 
-    # Initialize the model
-    if 'llama' not in st.session_state:
-        with st.spinner("Inicializando Llama 3.2... esto puede tomar unos minutos..."):
-            try:
-                st.session_state.llama = Llama3Demo()
-            except Exception as e:
-                st.error("Error inicializando el modelo")
-                st.stop()
-
-    # Chat history management
-    if 'messages' not in st.session_state:
-        st.session_state.messages = []
-
-    # Show history
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"]):
-            st.markdown(message["content"])
-
-    # Chat interface
-    if prompt := st.chat_input("Escribe tu mensaje aquí"):
-        with st.chat_message("assistant"):
-            try:
-                response = st.session_state.llama.generate_response(
-                    prompt,
-                    **generation_params
-                )
-                st.markdown(response)
-            except Exception as e:
-                st.error(f"Error: {str(e)}")
-
-        with st.chat_message("assistant"):
-            try:
-                response = st.session_state.llama.generate_response(prompt)
-                st.markdown(response)
-                st.session_state.messages.append({"role": "assistant", "content": response})
-            except Exception as e:
-                st.error(f"Error generando respuesta: {str(e)}")
-
     # Sidebar with generation controls
     with st.sidebar:
         st.markdown("### Parámetros de Generación")
@@ -199,6 +162,43 @@ def main():
         - **Top-p**: Control sobre la variabilidad de respuestas
         - **Longitud**: Ajustar según necesidad de detalle
         """)
+
+        if st.button("Limpiar Chat"):
+            st.session_state.messages = []
+            st.experimental_rerun()
+
+    # Initialize the model
+    if 'llama' not in st.session_state:
+        with st.spinner("Inicializando Llama 3.2... esto puede tomar unos minutos..."):
+            try:
+                st.session_state.llama = Llama3Demo()
+            except Exception as e:
+                st.error("Error inicializando el modelo")
+                st.stop()
+
+    # Chat history management
+    if 'messages' not in st.session_state:
+        st.session_state.messages = []
+
+    # Show history
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # Chat interface
+    if prompt := st.chat_input("Escribe tu mensaje aquí"):
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        with st.chat_message("user"):
+            st.markdown(prompt)
+
+        with st.chat_message("assistant"):
+            try:
+                response = st.session_state.llama.generate_response(prompt, **generation_params)
+                st.markdown(response)
+                st.session_state.messages.append({"role": "assistant", "content": response})
+            except Exception as e:
+                st.error(f"Error generando respuesta: {str(e)}")
 
 if __name__ == "__main__":
-    main()
+    main()
+
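Two things stand out in the relocated chat handler. The removed version opened two separate assistant blocks for every prompt, generating each response twice (the first call passing **generation_params, the second without it), and it never rendered or stored the user turn; the added version stores and displays the user message, generates once, and appends the reply. The handler also now runs after the sidebar, presumably so that generation_params is bound before generate_response(prompt, **generation_params) consumes it. The sidebar code that actually builds generation_params sits between the hunks (new-file lines 114-161) and is not shown here; below is a minimal sketch of what such a block could look like, where every widget label, range, and parameter name is an assumption rather than the Space's real code.

    # Hypothetical sketch of the unshown sidebar block that builds generation_params.
    # Parameter names and ranges are assumptions; only the pattern is the point.
    import streamlit as st

    with st.sidebar:
        st.markdown("### Parámetros de Generación")
        generation_params = {
            "temperature": st.slider("Temperature", min_value=0.1, max_value=2.0, value=0.7),
            "top_p": st.slider("Top-p", min_value=0.1, max_value=1.0, value=0.9),
            "max_new_tokens": st.slider("Longitud máxima", min_value=64, max_value=2048, value=512),
        }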
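One caveat on the added "Limpiar Chat" button: st.experimental_rerun() is deprecated in favor of st.rerun() (available since Streamlit 1.27), so on a current Streamlit release the button body would need a one-line swap, sketched here in a self-contained form.

    import streamlit as st

    if "messages" not in st.session_state:
        st.session_state.messages = []

    with st.sidebar:
        if st.button("Limpiar Chat"):
            st.session_state.messages = []
            st.rerun()  # replacement for the deprecated st.experimental_rerun()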