# app.py — Gradio chat assistant (Hugging Face Space by Manar11, commit 7c9ecd1, verified)
import gradio as gr
from groq import Groq
import os

# Groq API client; the key is read from the GROQ_API_KEY environment variable
# (os.environ.get returns None if unset — presumably the client then fails at
# request time rather than here; verify against the Groq SDK docs).
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# System prompt prepended to every conversation sent to the model (see respond()).
SYSTEM_PROMPT = """You are a helpful, smart, and versatile AI Assistant.
You can assist with coding, writing, brainstorming, and answering general knowledge questions
clearly and accurately"""
def respond(message, history, model, temperature, max_tokens):
    """Generate a chat completion for *message* given the running *history*.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[tuple[str, str]]
        Prior (user, assistant) message pairs as supplied by gr.ChatInterface.
    model : str
        Groq model identifier chosen in the UI dropdown.
    temperature : float
        Sampling temperature; lower is more focused, higher more creative.
    max_tokens : int
        Upper bound on the length of the generated reply.

    Returns
    -------
    str
        The assistant's reply text, or an "Error: ..." string if the API
        call (or anything inside the try block) fails.
    """
    # Rebuild the full conversation, starting from the fixed system prompt.
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    for user_msg, assistant_msg in history:
        # Skip empty turns (e.g. a pending user message with no reply yet).
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    try:
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_completion_tokens=max_tokens,
        )
        return response.choices[0].message.content
    except Exception as e:
        # Surface failures to the chat UI instead of crashing the app.
        return f"Error: {str(e)}"
# Build the chat UI. Each widget in additional_inputs is passed to
# respond() after (message, history), in the order listed here.
demo = gr.ChatInterface(
    fn=respond,
    title="💬AI Assistant",
    description="Ask me anything! I can help you write code, summarize text, or just chat.",
    additional_inputs=[
        # Which Groq-hosted model answers the chat.
        gr.Dropdown(
            choices=[
                "llama-3.3-70b-versatile",
                "llama-3.1-8b-instant",
                "mixtral-8x7b-32768",
            ],
            value="llama-3.3-70b-versatile",
            label="Model Selection",
            info="Select the AI model to use",
        ),
        # Sampling temperature forwarded verbatim to the API call.
        gr.Slider(
            minimum=0,
            maximum=2,
            value=0.8,
            step=0.1,
            label="Temperature",
            info="Controls randomness. Lower = more focused, Higher = more creative",
        ),
        # Cap on the completion length forwarded to the API call.
        gr.Slider(
            minimum=256,
            maximum=8192,
            value=4096,
            step=256,
            label="Max Tokens",
            info="Maximum length of the response",
        ),
    ],
    # Clickable starter prompts shown in the empty chat.
    examples=[
        ["Explain quantum physics to a five-year-old."],
        ["Write a Python script to scrape a website."],
        ["Help me draft a professional email to my boss about a promotion."],
        ["What are some healthy meal prep ideas for a busy week?"],
    ],
    theme="soft",
)

# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()