# Friendly chatbot Gradio Space — Groq LLaMA3 with per-session memory.
# (Removed scraped page residue: "Spaces: / Sleeping / Sleeping".)
# --- Dependencies & configuration --------------------------------------------
import gradio as gr
from dotenv import load_dotenv
from groq import Groq
from pydantic import BaseModel

# Pull GROQ_API_KEY (and any other settings) from a local .env file
# before the Groq client is constructed.
load_dotenv()

# One shared Groq API client for the whole process.
client = Groq()

# Per-session conversation histories, keyed by session id.
session_memory_dict = {}

# System message seeded into each session's history exactly once.
system_prompt = {
    "role": "system",
    "content": (
        "You are a friendly bot who responds kindly, even if the user is frustrated or angry. "
        "You are very knowledgeable and cute. "
        "Maintain conversation in a respectful and helpful manner."
    ),
}
class ChatInput(BaseModel):
    """Request schema for one chat turn.

    NOTE(review): declared but not referenced anywhere in this file —
    presumably kept for an external API caller; confirm before removing.
    """

    # Key used to look up this conversation's memory.
    session_id: str
    # The user's message text for this turn.
    input: str
# Chat logic with memory handling
def chat_logic(session_id: str, user_input: str) -> str:
    """Run one chat turn for *session_id*, maintaining conversation memory.

    Appends the user message to the session history, queries the Groq
    LLaMA3 model with the full history, then stores and returns the
    assistant reply.

    Args:
        session_id: Key identifying the conversation in session_memory_dict.
        user_input: The user's message for this turn.

    Returns:
        The assistant's reply text.

    Raises:
        Whatever the Groq client raises on API failure. In that case the
        just-added user message is rolled back so the stored history stays
        consistent (no unanswered user turn) for the next call.
    """
    # New sessions start with the shared system prompt exactly once, so
    # there is no need to re-scan the history for it on every call.
    if session_id not in session_memory_dict:
        session_memory_dict[session_id] = [system_prompt]

    history = session_memory_dict[session_id]
    history.append({"role": "user", "content": user_input})
    try:
        completion = client.chat.completions.create(
            model="llama3-8b-8192",
            messages=history,
        )
    except Exception:
        # Roll back the user message so a failed API call does not leave
        # a dangling turn in memory, then surface the original error.
        history.pop()
        raise

    ai_response = completion.choices[0].message.content
    history.append({"role": "assistant", "content": ai_response})
    print(f"[Session: {session_id}] AI Response: {ai_response}")
    return ai_response
# Gradio ChatInterface function wrapper
def chat_with_memory(message, history, session_id="gradio_default"):
    """Adapt gr.ChatInterface's calling convention to chat_logic.

    Gradio passes (message, history, *additional_inputs); the built-in
    history argument is ignored because chat_logic keeps its own
    per-session memory keyed by session_id.
    """
    return chat_logic(session_id=session_id, user_input=message)
# UI Components
# Extra textbox lets each browser tab pick its own memory bucket.
_session_id_box = gr.Textbox(label="Session ID", value="gradio_default")

chat_interface = gr.ChatInterface(
    fn=chat_with_memory,
    additional_inputs=[_session_id_box],
    title="Friendly Chatbot with Memory, Is it na? ",
    description="Chat with Groq's LLaMA3 model. Each session is tracked using a session ID.",
    theme="soft",
)

chat_interface.launch()