import gradio as gr
from huggingface_hub import InferenceClient
import os

# Fetch Hugging Face API key from environment variables.
# May be None when 'HF_API_KEY' is unset — chat_with_llm checks this per call.
API_KEY = os.getenv("HF_API_KEY")

# Initialize the shared InferenceClient, routed through the "together"
# inference provider.
# NOTE(review): the client is constructed even when API_KEY is None; requests
# would fail at call time, where chat_with_llm reports a friendly error.
client = InferenceClient(
    provider="together",
    api_key=API_KEY,
)
def chat_with_llm(prompt: str) -> str:
    """Send *prompt* to the Hugging Face model and return its reply.

    Parameters
    ----------
    prompt : str
        The user's message, sent as a single-turn chat request.

    Returns
    -------
    str
        The model's reply text, or a human-readable error string.
        Errors are reported in-band (as the return value) rather than
        raised, because the Gradio UI displays whatever string comes back.
    """
    # Fail fast with a clear message when the key was never configured.
    if not API_KEY:
        return "Error: API key is missing. Please set 'HF_API_KEY' in your environment variables."
    # Don't waste an API call on empty or whitespace-only input.
    if not prompt or not prompt.strip():
        return "Error: Please enter a prompt."
    messages = [{"role": "user", "content": prompt}]
    try:
        completion = client.chat.completions.create(
            model="mistralai/Mistral-7B-Instruct-v0.3",
            messages=messages,
            max_tokens=500,  # cap reply length to keep responses quick
        )
        # `choices` can legitimately be empty; avoid an IndexError.
        return completion.choices[0].message.content if completion.choices else "No response from model."
    except Exception as e:
        # Broad catch is deliberate: any network/auth/model failure is
        # surfaced to the user in the UI instead of crashing the app.
        return f"API Error: {str(e)}"
# --- Gradio chat UI ----------------------------------------------------
# One text box in, one text box out, wired to chat_with_llm.
prompt_box = gr.Textbox(label="Ask me anything")
reply_box = gr.Textbox(label="AI Response")

iface = gr.Interface(
    fn=chat_with_llm,
    inputs=prompt_box,
    outputs=reply_box,
    title="AI Chatbot with Hugging Face API",
    description="A free AI chatbot using Hugging Face's API. Supports multiple LLMs!",
)

# Start the web server (blocks until the app is stopped).
iface.launch()