Spaces:
Sleeping
Sleeping
Fazeel Asghar
committed on
Commit
·
756f55f
1
Parent(s):
90a1af0
Changes made to the interface
Browse files
app.py
CHANGED
|
@@ -4,6 +4,7 @@ from groq import Groq
|
|
| 4 |
import gradio as gr
|
| 5 |
|
| 6 |
load_dotenv()
|
|
|
|
| 7 |
client = Groq()
|
| 8 |
session_memory_dict = {}
|
| 9 |
|
|
@@ -20,110 +21,41 @@ class ChatInput(BaseModel):
|
|
| 20 |
session_id: str
|
| 21 |
input: str
|
| 22 |
|
| 23 |
-
def chat_logic(session_id: str, user_input: str):
|
| 24 |
if session_id not in session_memory_dict:
|
| 25 |
session_memory_dict[session_id] = []
|
| 26 |
|
| 27 |
if not any(m["role"] == "system" for m in session_memory_dict[session_id]):
|
| 28 |
session_memory_dict[session_id].insert(0, system_prompt)
|
| 29 |
|
| 30 |
-
session_memory_dict[session_id].append({
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
model="llama3-8b-8192",
|
| 33 |
messages=session_memory_dict[session_id]
|
| 34 |
)
|
| 35 |
-
ai_response = res.choices[0].message.content
|
| 36 |
-
session_memory_dict[session_id].append({"role": "assistant", "content": ai_response})
|
| 37 |
-
print(f"[{session_id}] AI:", ai_response)
|
| 38 |
-
return ai_response
|
| 39 |
-
|
| 40 |
-
def chat_interface(user_input, chat_history, session_id):
|
| 41 |
-
ai_reply = chat_logic(session_id, user_input)
|
| 42 |
-
chat_history.append((user_input, ai_reply))
|
| 43 |
-
return chat_history, ""
|
| 44 |
-
|
| 45 |
-
def clear_session(session_id):
|
| 46 |
-
session_memory_dict.pop(session_id, None)
|
| 47 |
-
return [], "gradio_default"
|
| 48 |
-
|
| 49 |
-
with gr.Blocks(css="""
|
| 50 |
-
body {
|
| 51 |
-
background-color: #1e1e1e;
|
| 52 |
-
color: #ddd;
|
| 53 |
-
font-family: 'Segoe UI', sans-serif;
|
| 54 |
-
}
|
| 55 |
-
.header {
|
| 56 |
-
text-align: center;
|
| 57 |
-
font-size: 1.8rem;
|
| 58 |
-
padding: 1rem;
|
| 59 |
-
background-color: #2c2c2c;
|
| 60 |
-
color: #ffffff;
|
| 61 |
-
border-bottom: 1px solid #444;
|
| 62 |
-
}
|
| 63 |
-
.chatbot {
|
| 64 |
-
flex: 1;
|
| 65 |
-
display: flex;
|
| 66 |
-
flex-direction: column;
|
| 67 |
-
height: 65vh;
|
| 68 |
-
overflow-y: auto;
|
| 69 |
-
padding: 1rem;
|
| 70 |
-
}
|
| 71 |
-
.chatbot .message {
|
| 72 |
-
margin: 6px 0;
|
| 73 |
-
white-space: pre-wrap;
|
| 74 |
-
word-wrap: break-word;
|
| 75 |
-
line-height: 1.6;
|
| 76 |
-
font-size: 1rem;
|
| 77 |
-
}
|
| 78 |
-
.chatbot .message.user {
|
| 79 |
-
align-self: flex-end;
|
| 80 |
-
color: #90caf9;
|
| 81 |
-
}
|
| 82 |
-
.chatbot .message.bot {
|
| 83 |
-
align-self: flex-start;
|
| 84 |
-
color: #e0e0e0;
|
| 85 |
-
}
|
| 86 |
-
.input-section {
|
| 87 |
-
display: flex;
|
| 88 |
-
gap: 10px;
|
| 89 |
-
align-items: center;
|
| 90 |
-
margin-top: auto;
|
| 91 |
-
}
|
| 92 |
-
.send-button {
|
| 93 |
-
background-color: #2979FF;
|
| 94 |
-
color: white;
|
| 95 |
-
padding: 10px 16px;
|
| 96 |
-
border-radius: 8px;
|
| 97 |
-
border: none;
|
| 98 |
-
cursor: pointer;
|
| 99 |
-
}
|
| 100 |
-
.send-button:hover {
|
| 101 |
-
background-color: #5393FF;
|
| 102 |
-
}
|
| 103 |
-
.session-controls {
|
| 104 |
-
display: flex;
|
| 105 |
-
justify-content: space-between;
|
| 106 |
-
gap: 10px;
|
| 107 |
-
margin-top: 1rem;
|
| 108 |
-
}
|
| 109 |
-
""") as demo:
|
| 110 |
-
|
| 111 |
-
gr.Markdown("## 🤖 Smart Assistant", elem_classes="header")
|
| 112 |
-
|
| 113 |
-
state = gr.State([])
|
| 114 |
|
| 115 |
-
|
| 116 |
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
|
|
|
| 120 |
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
clear_btn = gr.Button("Clear Chat")
|
| 124 |
-
|
| 125 |
-
send_btn.click(fn=chat_interface, inputs=[user_input, state, session_id], outputs=[chatbot, user_input])
|
| 126 |
-
user_input.submit(fn=chat_interface, inputs=[user_input, state, session_id], outputs=[chatbot, user_input])
|
| 127 |
-
clear_btn.click(fn=clear_session, inputs=[session_id], outputs=[chatbot, session_id])
|
| 128 |
|
| 129 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
import gradio as gr
|
| 5 |
|
| 6 |
load_dotenv()
|
| 7 |
+
|
| 8 |
client = Groq()
|
| 9 |
session_memory_dict = {}
|
| 10 |
|
|
|
|
| 21 |
session_id: str
|
| 22 |
input: str
|
| 23 |
|
| 24 |
+
def chat_logic(session_id: str, user_input: str) -> str:
    """Run one chat turn for *session_id* and return the model's reply.

    Per-session history lives in the module-level ``session_memory_dict``;
    the shared ``system_prompt`` is kept as the first message of every
    session's history.
    """
    # Lazily create the history list for a brand-new session; `history`
    # aliases the list stored in the dict, so appends below persist.
    history = session_memory_dict.setdefault(session_id, [])

    # Ensure a system message sits at the front of the history exactly once.
    if not any(m["role"] == "system" for m in history):
        history.insert(0, system_prompt)

    # Record the user's turn before calling the model.
    history.append({"role": "user", "content": user_input})

    completion = client.chat.completions.create(
        model="llama3-8b-8192",
        messages=history,
    )

    ai_response = completion.choices[0].message.content

    # Persist the assistant's turn so follow-up calls see full context.
    history.append({"role": "assistant", "content": ai_response})

    print(f"[Session: {session_id}] AI Response: {ai_response}")
    return ai_response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
|
| 51 |
+
# Gradio Interface
def gradio_chat(user_input, session_id="gradio_default"):
    """Adapter between the Gradio widgets and chat_logic.

    Gradio passes the two textbox values positionally; forward them to
    chat_logic and hand the reply string back to the output textbox.
    """
    reply = chat_logic(session_id=session_id, user_input=user_input)
    return reply


# Build the widgets up front so the Interface call stays readable.
message_box = gr.Textbox(label="Your message")
session_box = gr.Textbox(label="Session ID", value="gradio_default")
reply_box = gr.Textbox(label="Response")

gr.Interface(
    fn=gradio_chat,
    inputs=[message_box, session_box],
    outputs=reply_box,
    title="Chatbot with memory",
    description="Chat with Groq's LLaMA3 model. Handles sessions separately using IDs.",
).launch()
|