import os
import gradio as gr
from huggingface_hub import InferenceClient
# Read the API token from your Space's secrets (Settings → Repository secrets → HF_TOKEN)
HF_TOKEN = os.getenv("HF_TOKEN")
# Base model served through the HF Inference API (official Mistral instruct checkpoint)
MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
client = InferenceClient(model=MODEL, token=HF_TOKEN)
def chat_with_mistral(message):
    """Send *message* to the Mistral Inference API and return the generated text.

    Any failure (network error, bad token, rate limit, cold model) is folded
    into an error string instead of being raised, so the Gradio UI keeps
    running and shows the problem to the user.
    """
    generation_params = {
        "prompt": message,
        "max_new_tokens": 200,
        "temperature": 0.7,
        "repetition_penalty": 1.1,
    }
    try:
        return client.text_generation(**generation_params)
    except Exception as e:
        return f"⚠️ Error: {str(e)}"
# Minimal UI: one text box in, one text box out, wired to the chat function.
iface = gr.Interface(
    chat_with_mistral,
    inputs="text",
    outputs="text",
    title="💬 Mistral Replica Chat",
    description="Chat de prueba usando el modelo Mistral desde Hugging Face API",
)

# Launch only when executed directly (Spaces runs this file as a script).
if __name__ == "__main__":
    iface.launch()
|