import os

import gradio as gr
from groq import Groq

# read the Groq API key from the environment (set GROQ_API_KEY before launching)
groq_api_key = os.environ.get("GROQ_API_KEY")
client = Groq(api_key=groq_api_key)
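# Optional hardening (a sketch, not in the original app): fail fast when the key
# is missing instead of letting the first API call raise. Kept commented out so
# behaviour is unchanged:
#
#   if groq_api_key is None:
#       raise RuntimeError("GROQ_API_KEY is not set")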
# model IDs exposed in the UI dropdown
model_list = [
    'openai/gpt-oss-120b',
    'moonshotai/kimi-k2-instruct',
    'meta-llama/llama-4-scout-17b-16e-instruct',
    'openai/gpt-oss-20b',
    'qwen/qwen3-32b',
    'llama-3.3-70b-versatile',
    'moonshotai/kimi-k2-instruct-0905',
    'allam-2-7b',
    'meta-llama/llama-4-maverick-17b-128e-instruct',
    'llama-3.1-8b-instant',
]
# models that emit reasoning tokens; handled separately in groq_chat
reasoning_models = [
    'openai/gpt-oss-120b',
    'openai/gpt-oss-20b',
    'qwen/qwen3-32b',
]
default_sys_prompt = """You are a helpful chatbot. You help the end user to the best of your ability."""
def groq_chat(new_message: str, chat_history: list[dict], model: str, system_prompt: str):
    '''
    Wrapper around the Groq chat completions API.

    Inputs:
    - new_message (str): the new user message.
    - chat_history (list[dict]): prior turns; each dict needs "role" and "content" strings.
    - model (str): model ID; must be one of model_list.
    - system_prompt (str): system prompt prepended to the conversation.

    Outputs:
    - "" (str): empty string, used to clear the input textbox in the UI.
    - nonsys_msg_hist (list[dict]): updated chat history, without the system message.
    '''
    if model not in model_list:
        raise ValueError(f"model must be in model_list: {model_list}")
    # keep only the "role"/"content" keys; gr.Chatbot may attach extra metadata fields
    nonsys_msg_hist = [{key: x[key] for key in ["role", "content"] if key in x} for x in chat_history]
    # append the new user turn
    nonsys_msg_hist.append(
        {
            "role": "user",
            "content": new_message,
        }
    )
    # prepend the system prompt
    input_msg_hist = [
        {
            "role": "system",
            "content": system_prompt,
        }
    ]
    input_msg_hist.extend(nonsys_msg_hist)
    if model in reasoning_models:
        # reasoning models: suppress reasoning tokens so only the answer is returned
        chat_completion = client.chat.completions.create(
            messages=input_msg_hist,
            model=model,
            include_reasoning=False,
        )
    else:
        chat_completion = client.chat.completions.create(
            messages=input_msg_hist,
            model=model,
        )
    output_msg = chat_completion.choices[0].message.content
    # record the assistant turn in the (system-free) history
    nonsys_msg_hist.append(
        {
            "role": "assistant",
            "content": output_msg,
        }
    )
    return "", nonsys_msg_hist
def create_demo():
    with gr.Blocks() as demo:
        with gr.Row():
            model = gr.Dropdown(model_list, label="Model")
        with gr.Row():
            system_prompt = gr.Textbox(
                value=default_sys_prompt,
                label="System prompt",
                interactive=True,
            )
        with gr.Row():
            chatbot = gr.Chatbot(label="Conversation", type="messages")
        with gr.Row():
            msg = gr.Textbox(label="Message")
        with gr.Row():
            clear = gr.ClearButton([msg, chatbot], variant='stop')
        # on submit, pass (message, history, model, system prompt) to groq_chat;
        # the two outputs clear the textbox and refresh the chatbot history
        msg.submit(
            groq_chat,
            [msg, chatbot, model, system_prompt],
            [msg, chatbot],
        )
    return demo
if __name__ == "__main__":
    demo = create_demo()
    demo.launch(
        # NOTE: hardcoded credentials; prefer loading these from environment secrets
        auth=("DigitalChild", "IhateBroccoli123"),
        ssr_mode=False,
        share=True,
    )
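# A hedged alternative (sketch): read the login pair from environment variables
# instead of hardcoding it. GRADIO_USER and GRADIO_PASS are hypothetical names,
# not anything this app currently defines:
#
#   demo.launch(
#       auth=(os.environ["GRADIO_USER"], os.environ["GRADIO_PASS"]),
#       ssr_mode=False,
#       share=True,
#   )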