Update app.py
app.py
CHANGED
@@ -2,31 +2,33 @@ import os
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-#
+# Read the token from your Space's secrets (Settings → Repository secrets → HF_TOKEN)
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-…
-…
-…
-)
+# Base model (official Mistral)
+MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
+
+client = InferenceClient(model=MODEL, token=HF_TOKEN)
 
-def …
+def chat_with_mistral(message):
     try:
         response = client.text_generation(
             prompt=message,
             max_new_tokens=200,
-            temperature=0.7
+            temperature=0.7,
+            repetition_penalty=1.1,
         )
-…
-        return "", history
+        return response
     except Exception as e:
-…
-        return "", history
+        return f"⚠️ Error: {str(e)}"
 
-…
-    fn=…
+iface = gr.Interface(
+    fn=chat_with_mistral,
+    inputs="text",
+    outputs="text",
     title="💬 Mistral Replica Chat",
-    description="Chat …
+    description="Test chat using the Mistral model via the Hugging Face API",
 )
 
-…
+if __name__ == "__main__":
+    iface.launch()
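A quick way to smoke-test the updated Space from Python is the gradio_client package, which calls the endpoint that gr.Interface exposes. The following is a minimal sketch, not part of this commit; the Space id your-username/mistral-replica-chat is a placeholder, replace it with the real one.

# Minimal sketch (not part of this commit): query the running Space from Python.
# "your-username/mistral-replica-chat" is a placeholder Space id.
from gradio_client import Client

space = Client("your-username/mistral-replica-chat")

# gr.Interface exposes its single endpoint as "/predict" by default.
reply = space.predict("Hello, who are you?", api_name="/predict")
print(reply)

Note that text_generation sends the raw string to the model; for instruct-tuned Mistral checkpoints, applying the model's chat template (or using InferenceClient.chat_completion, which lets the serving backend apply it) may produce better replies.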