# Hugging Face Space app — Mistral chat demo (Space status chrome removed from scrape).
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Read the token from your Space's secrets (Settings → Repository secrets → HF_TOKEN).
HF_TOKEN = os.getenv("HF_TOKEN")

# Base model (official Mistral instruct checkpoint).
MODEL = "mistralai/Mistral-7B-Instruct-v0.3"

# Module-level client shared by all requests handled by this Space.
client = InferenceClient(model=MODEL, token=HF_TOKEN)
def chat_with_mistral(message: str) -> str:
    """Send *message* to the Mistral model and return the generated text.

    Any API/network failure is reported back as a chat message instead of
    raising, so the Gradio UI shows the error rather than crashing.
    """
    try:
        return client.text_generation(
            prompt=message,
            max_new_tokens=200,
            temperature=0.7,
            repetition_penalty=1.1,
        )
    except Exception as e:  # UI boundary: surface the error to the user
        return f"⚠️ Error: {str(e)}"
# Minimal single-turn UI: one text box in, one text box out.
iface = gr.Interface(
    fn=chat_with_mistral,
    inputs="text",
    outputs="text",
    title="💬 Mistral Replica Chat",
    description="Chat de prueba usando el modelo Mistral desde Hugging Face API",
)

if __name__ == "__main__":
    iface.launch()