Spaces:
Sleeping
Sleeping
File size: 5,388 Bytes
fa1d694 fc894a8 fa1d694 e6a9c0b fa1d694 ac5af15 fa1d694 56b4d4e 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 56b4d4e fa1d694 20ffc0b fa1d694 20ffc0b 6cb52c6 20ffc0b 6cb52c6 fa1d694 6cb52c6 fa1d694 20ffc0b 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 20ffc0b 6cb52c6 20ffc0b fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 20ffc0b 6cb52c6 fa1d694 6cb52c6 fa1d694 20ffc0b 56b4d4e bfeefad fa1d694 5828241 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 5828241 6cb52c6 5828241 fa1d694 5828241 f739898 fa1d694 6cb52c6 fa1d694 6cb52c6 fa1d694 5828241 9a415c7 fa1d694 56b4d4e 6cb52c6 fa1d694 6cb52c6 fa1d694 6cb52c6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
import os
import gradio as gr
from huggingface_hub import InferenceClient, login
# --- Initial configuration ---
# Token is read from the environment; the literal "HF_TOKEN" fallback is just a
# placeholder that will fail authentication — set the real env var in the Space.
HF_TOKEN = os.getenv("HF_TOKEN", "HF_TOKEN")
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"

# --- Authentication ---
# Pre-bind `client` so a failed login leaves it as None instead of leaving the
# name undefined — previously the first call to respond() would crash with a
# NameError rather than the intended friendly error message.
client = None
try:
    login(token=HF_TOKEN)
    client = InferenceClient(model=MODEL_NAME, token=HF_TOKEN, timeout=60)
except Exception as auth_error:
    # Boundary handler: log and continue so the UI can still start.
    print(f"Error de autenticaci贸n: {auth_error}")
def format_prompt(message, history, system_message):
    """Build a Zephyr-format chat prompt.

    Emits the system message, then each past (user, assistant) turn, and
    finally the new user message followed by an open assistant tag so the
    model continues from there.
    """
    parts = [f"<|system|>\n{system_message}</s>\n"]
    for past_user, past_assistant in history:
        parts.append(f"<|user|>\n{past_user}</s>\n")
        parts.append(f"<|assistant|>\n{past_assistant}</s>\n")
    parts.append(f"<|user|>\n{message}</s>\n<|assistant|>\n")
    return "".join(parts)
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a model reply, yielding the growing partial response.

    Generator used by gr.ChatInterface: validates the input, builds the
    Zephyr prompt, and streams tokens from the inference client. Any
    failure is logged and surfaced to the user as a friendly message.
    """
    try:
        # Guard clause: reject empty or whitespace-only input up front.
        if not message or not message.strip():
            yield "Por favor, ingresa un mensaje v谩lido."
            return

        prompt = format_prompt(message, history, system_message)

        # Clamp the user-supplied sampling knobs into safe ranges.
        params = {
            "max_new_tokens": min(max_tokens, 1024),  # hard cap at 1024 tokens
            "temperature": max(0.1, min(temperature, 1.0)),
            "top_p": max(0.1, min(top_p, 1.0)),
            "do_sample": True,
            "truncate": 2048,
        }

        # Stream tokens, re-yielding the accumulated text each time so the
        # UI shows the reply growing in place.
        partial = ""
        for token in client.text_generation(prompt, stream=True, **params):
            partial += token
            yield partial
    except Exception as exc:
        # UI boundary: log the real error, show the user a generic message.
        error_msg = f"Error en la generaci贸n: {str(exc)}"
        print(error_msg)
        yield "馃敶 Lo siento, tuve un problema al procesar tu mensaje. Int茅ntalo de nuevo m谩s tarde."
# Custom CSS injected into the Gradio app: gradient branding on the header and
# buttons, card-like container, plus dark-mode overrides. The string is passed
# verbatim via gr.ChatInterface(css=...), so its content must stay valid CSS.
custom_css = """
:root {
--primary: #6e48aa;
--secondary: #9d50bb;
--accent: #4776E6;
}
.gradio-container {
max-width: 900px;
margin: 20px auto;
border-radius: 12px;
box-shadow: 0 6px 18px rgba(0,0,0,0.1);
background: white;
padding: 25px;
}
.gradio-header {
text-align: center;
margin-bottom: 25px;
}
h1 {
background: linear-gradient(45deg, var(--primary), var(--secondary));
-webkit-background-clip: text;
background-clip: text;
color: transparent;
font-size: 2.2rem;
margin-bottom: 10px;
}
.gradio-description {
color: #555;
font-size: 1rem;
}
.gradio-chatbot {
min-height: 450px;
border: 1px solid #e0e0e0;
border-radius: 10px;
padding: 15px;
background: #fafafa;
margin-bottom: 20px;
}
.gradio-textbox textarea {
border-radius: 8px !important;
border: 1px solid #ddd !important;
padding: 12px 15px !important;
font-size: 15px !important;
min-height: 100px !important;
}
.gradio-button {
background: linear-gradient(45deg, var(--primary), var(--secondary)) !important;
color: white !important;
border: none !important;
border-radius: 8px !important;
padding: 12px 28px !important;
font-weight: 500 !important;
transition: all 0.3s !important;
}
.gradio-button:hover {
transform: translateY(-2px) !important;
box-shadow: 0 4px 12px rgba(110, 72, 170, 0.3) !important;
}
.gradio-slider .wrap {
margin: 15px 0 !important;
}
.dark .gradio-container {
background: #1a1a1a;
}
.dark .gradio-chatbot {
background: #252525;
border-color: #444;
}
"""
# UI wiring: gr.ChatInterface calls respond(message, history, *additional_inputs);
# the four widgets below are passed positionally in this exact order, matching
# respond's (system_message, max_tokens, temperature, top_p) parameters.
# NOTE(review): retry_btn/undo_btn/clear_btn were removed from ChatInterface in
# Gradio 5 — confirm the pinned gradio version still accepts these kwargs.
demo = gr.ChatInterface(
fn=respond,
additional_inputs=[
# System prompt shown to (and editable by) the user.
gr.Textbox(
value="Eres ELISA, un asistente de IA 煤til, preciso y amable. Desarrollado por Gerardo.",
label="Configuraci贸n del Sistema",
lines=3,
max_lines=6
),
# max_tokens: respond() additionally caps this at 1024.
gr.Slider(
minimum=64,
maximum=1024,
value=256,
step=32,
label="Longitud de Respuesta (tokens)",
info="Controla cu谩n extensa ser谩 la respuesta"
),
# temperature: respond() clamps to [0.1, 1.0].
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.7,
step=0.05,
label="Creatividad (Temperatura)",
info="Valores m谩s altos = respuestas m谩s creativas"
),
# top_p: respond() clamps to [0.1, 1.0].
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.9,
step=0.05,
label="Enfoque (Top-p)",
info="Controla la diversidad de palabras"
),
],
css=custom_css,
title="馃 ELISA - Asistente de IA",
description="Chatbot avanzado desarrollado por Gerardo usando Hugging Face",
examples=[
["Hola, 驴qu茅 puedes hacer?"],
["Expl铆came el machine learning en t茅rminos simples"],
["Recomi茅ndame libros sobre IA"]
],
submit_btn="Enviar",
retry_btn="Reintentar",
undo_btn="Deshacer",
clear_btn="Limpiar",
theme="soft"
)
# Script entry point: serve the app on all interfaces at port 7860.
if __name__ == "__main__":
    launch_options = {
        "server_name": "0.0.0.0",  # listen on all interfaces (needed in Spaces/containers)
        "server_port": 7860,
        "share": False,
        "debug": True,
        "favicon_path": None,
        "auth": None,  # no access restriction
    }
    demo.launch(**launch_options)