# Pro_player / app.py — Fantasy Premier League advisor chatbot (Gradio UI + Groq API).
import gradio as gr
from groq import Groq
import os
# SECURITY/BUG FIX: the original code passed a hard-coded Groq API key as the
# *name* of an environment variable, so os.environ.get(...) always returned
# None (broken auth) AND the secret was committed to source control.
# Read the key from the conventional GROQ_API_KEY variable instead.
# NOTE(review): the leaked key visible in the repo history must be revoked.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# System prompt steering every conversation toward Fantasy Premier League
# advice about players and coaches.
SYSTEM_PROMPT = """You are an expert in premier league fantasy . Provide structured and insightful responses to queries
about players and coaches"""
def respond(message, history, model, temperature, max_tokens):
    """Send the full chat transcript to Groq and return the assistant reply.

    Args:
        message: The user's newest message.
        history: Prior turns as (user_text, assistant_text) pairs from Gradio.
        model: Groq model identifier chosen in the UI dropdown.
        temperature: Sampling temperature (0-2).
        max_tokens: Upper bound on the completion length.

    Returns:
        The model's reply text, or an "Error: ..." string if the API
        call fails (so the failure is shown inline in the chat window).
    """
    # Rebuild the conversation: system prompt, every prior turn, new message.
    convo = [{"role": "system", "content": SYSTEM_PROMPT}]
    for user_turn, assistant_turn in history:
        convo.append({"role": "user", "content": user_turn})
        if assistant_turn:
            convo.append({"role": "assistant", "content": assistant_turn})
    convo.append({"role": "user", "content": message})

    try:
        completion = client.chat.completions.create(
            model=model,
            messages=convo,
            temperature=temperature,
            max_completion_tokens=max_tokens,
        )
        return completion.choices[0].message.content
    except Exception as exc:
        # Best-effort: surface the error text in the chat instead of crashing.
        return f"Error: {exc}"
# ChatInterface wiring `respond` to the UI, with extra controls for model
# choice, temperature, and response length.
# FIX: corrected the user-facing example typo "captin" -> "captain" and
# removed a stray trailing space in the formation example.
demo = gr.ChatInterface(
    fn=respond,
    title="🎬 premier league fantasy Generator AI",
    description="Be the hero of the Week!",
    additional_inputs=[
        gr.Dropdown(
            choices=[
                "llama-3.3-70b-versatile",
                "llama-3.1-8b-instant",
            ],
            value="llama-3.3-70b-versatile",
            label="Model",
            info="Select the AI model to use",
        ),
        gr.Slider(
            minimum=0,
            maximum=2,
            value=0.9,
            step=0.1,
            label="Temperature",
            info="Controls randomness. Lower = more focused, Higher = more creative",
        ),
        gr.Slider(
            minimum=256,
            maximum=8192,
            value=2048,
            step=256,
            label="Max Tokens",
            info="Maximum length of the response",
        ),
    ],
    examples=[
        ["what is the best player in this week"],
        ["which player should I captain this week"],
        ["Give me the perfect Team formation this week"],
    ],
    theme="soft",
)
# Launch the Gradio server only when this file is executed directly
# (e.g. `python app.py` or as a Hugging Face Space entry point).
if __name__ == "__main__":
    demo.launch()