Spaces:
Sleeping
Sleeping
# Standard library.
import os

# Third-party: UI framework and Hugging Face inference client.
import gradio as gr
from huggingface_hub import InferenceClient

# The Hugging Face token is read from the environment so the secret
# never lives in source control.
API_KEY = os.getenv("HF_API_KEY")

# Shared inference client for the whole app, routed through the
# "together" provider.
client = InferenceClient(
    provider="together",
    api_key=API_KEY,
)
def chat_with_llm(prompt):
    """Send *prompt* to the Hugging Face chat model and return its reply.

    Always returns a string: the model's answer on success, otherwise a
    human-readable error message so the UI has something to display.
    """
    # Fail fast with a clear message when the token was never configured.
    if not API_KEY:
        return "Error: API key is missing. Please set 'HF_API_KEY' in your environment variables."

    # Keep the try body minimal: only the remote call can raise here.
    try:
        completion = client.chat.completions.create(
            model="mistralai/Mistral-7B-Instruct-v0.3",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=500,
        )
    except Exception as e:
        # Surface any API/network failure to the user instead of crashing the UI.
        return f"API Error: {str(e)}"

    # Guard against an empty choices list before indexing into it.
    if not completion.choices:
        return "No response from model."
    return completion.choices[0].message.content
# Build the Gradio UI: one input textbox feeding chat_with_llm, one
# output textbox showing the reply.
prompt_box = gr.Textbox(label="Ask me anything")
reply_box = gr.Textbox(label="AI Response")

iface = gr.Interface(
    fn=chat_with_llm,
    inputs=prompt_box,
    outputs=reply_box,
    title="AI Chatbot with Hugging Face API",
    description="A free AI chatbot using Hugging Face's API. Supports multiple LLMs!",
)

# Start the local web server.
iface.launch()