Spaces:
Sleeping
Sleeping
Fazeel Asghar commited on
Commit ·
1584204
1
Parent(s): 756f55f
Changes made in interface
Browse files
app.py
CHANGED
|
@@ -21,6 +21,7 @@ class ChatInput(BaseModel):
|
|
| 21 |
session_id: str
|
| 22 |
input: str
|
| 23 |
|
|
|
|
| 24 |
def chat_logic(session_id: str, user_input: str) -> str:
|
| 25 |
if session_id not in session_memory_dict:
|
| 26 |
session_memory_dict[session_id] = []
|
|
@@ -28,10 +29,7 @@ def chat_logic(session_id: str, user_input: str) -> str:
|
|
| 28 |
if not any(m["role"] == "system" for m in session_memory_dict[session_id]):
|
| 29 |
session_memory_dict[session_id].insert(0, system_prompt)
|
| 30 |
|
| 31 |
-
session_memory_dict[session_id].append({
|
| 32 |
-
"role": "user",
|
| 33 |
-
"content": user_input
|
| 34 |
-
})
|
| 35 |
|
| 36 |
completion = client.chat.completions.create(
|
| 37 |
model="llama3-8b-8192",
|
|
@@ -40,22 +38,22 @@ def chat_logic(session_id: str, user_input: str) -> str:
|
|
| 40 |
|
| 41 |
ai_response = completion.choices[0].message.content
|
| 42 |
|
| 43 |
-
session_memory_dict[session_id].append({
|
| 44 |
-
"role": "assistant",
|
| 45 |
-
"content": ai_response
|
| 46 |
-
})
|
| 47 |
-
|
| 48 |
print(f"[Session: {session_id}] AI Response: {ai_response}")
|
| 49 |
return ai_response
|
| 50 |
|
| 51 |
-
# Gradio
|
| 52 |
-
def
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
session_id: str
|
| 22 |
input: str
|
| 23 |
|
| 24 |
+
# Chat logic with memory handling
|
| 25 |
def chat_logic(session_id: str, user_input: str) -> str:
|
| 26 |
if session_id not in session_memory_dict:
|
| 27 |
session_memory_dict[session_id] = []
|
|
|
|
| 29 |
if not any(m["role"] == "system" for m in session_memory_dict[session_id]):
|
| 30 |
session_memory_dict[session_id].insert(0, system_prompt)
|
| 31 |
|
| 32 |
+
session_memory_dict[session_id].append({"role": "user", "content": user_input})
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
completion = client.chat.completions.create(
|
| 35 |
model="llama3-8b-8192",
|
|
|
|
| 38 |
|
| 39 |
ai_response = completion.choices[0].message.content
|
| 40 |
|
| 41 |
+
session_memory_dict[session_id].append({"role": "assistant", "content": ai_response})
|
|
|
|
|
|
|
|
|
|
|
|
|
| 42 |
print(f"[Session: {session_id}] AI Response: {ai_response}")
|
| 43 |
return ai_response
|
| 44 |
|
| 45 |
+
# Gradio ChatInterface function wrapper: adapts the (message, history) callback
# signature expected by gr.ChatInterface to the session-based chat_logic API.
def chat_with_memory(message, history, session_id="gradio_default"):
    # history is managed by Gradio itself; server-side memory is keyed on session_id.
    return chat_logic(session_id=session_id, user_input=message)
|
| 49 |
+
|
| 50 |
+
# UI Components
# Extra input shown under the chat box so each user can pin their own session.
session_id_box = gr.Textbox(label="Session ID", value="gradio_default")

chat_interface = gr.ChatInterface(
    fn=chat_with_memory,
    additional_inputs=[session_id_box],
    title="Friendly Chatbot with Memory",
    description="Chat with Groq's LLaMA3 model. Each session is tracked using a session ID.",
    theme="soft",
)

chat_interface.launch()
|