CodeNine committed on
Commit
aeb51ae
·
verified ·
1 Parent(s): 5568321

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -29
app.py CHANGED
@@ -1,32 +1,71 @@
1
  # app.py
2
  import os
 
3
  from groq import Groq
4
- import gradio as gr
5
- from dotenv import load_dotenv
6
-
7
# Read environment variables (including the Groq API key) from a local .env file.
load_dotenv()
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
10
-
11
def groq_chat(message, history):
    """Return the assistant's reply to *message*.

    history is a gradio-style list of (user, assistant) text pairs; the
    whole conversation is replayed to the Groq API on every call, so the
    model always sees full context.
    """
    convo = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_turn, bot_turn in history:
        convo.extend(
            (
                {"role": "user", "content": user_turn},
                {"role": "assistant", "content": bot_turn},
            )
        )
    convo.append({"role": "user", "content": message})

    completion = client.chat.completions.create(
        model="llama-3.3-70b-versatile",  # Change model if needed
        messages=convo,
        temperature=0.7,
    )
    return completion.choices[0].message.content
24
-
25
- demo = gr.ChatInterface(
26
- fn=groq_chat,
27
- title="Groq Chatbot",
28
- description="Chat with a lightning-fast LLM via Groq API",
29
- theme=gr.themes.Soft()
30
- )
31
-
32
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py
import os
import streamlit as st
from groq import Groq

# Initialize the Groq API client; GROQ_API_KEY must be set in the environment.
client = Groq(api_key=os.getenv("GROQ_API_KEY"))

# ---------------- UI Setup ----------------
st.set_page_config(page_title="Advanced Groq Chatbot", layout="centered")
st.title("🤖 Groq Chatbot")
st.markdown("Chat using lightning-fast open-source LLMs on Groq!")

# Model selector: friendly display names mapped to Groq model ids.
model_options = {
    "💬 Fast Chat (LLaMA 3 8B)": "llama3-8b-8192",
    "🧠 Deep Reasoning (LLaMA 3 70B)": "llama3-70b-8192",
    "📊 Math Solver (Mixtral)": "mixtral-8x7b-32768",
}
model_choice = st.selectbox("Choose assistant type:", list(model_options.keys()))
model_id = model_options[model_choice]

# Personality selector: each choice is a system-prompt preset.
style_options = {
    "Friendly": "You are a friendly assistant who replies casually.",
    "Professional": "You are a formal and helpful assistant.",
    "Technical": "You are a technical assistant providing accurate information.",
}
personality = st.selectbox("Choose assistant personality:", list(style_options.keys()))
system_prompt = style_options[personality]

# Initialize session history; message 0 is always the system prompt.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "system", "content": system_prompt}]
else:
    # FIX: keep the system prompt in sync with the personality selector.
    # Previously message 0 was only set on first initialization, so changing
    # the personality after the first rerun silently had no effect.
    st.session_state.messages[0]["content"] = system_prompt

# Clear-chat button: reset history to just the current system prompt.
if st.button("🔄 Clear Chat"):
    st.session_state.messages = [{"role": "system", "content": system_prompt}]
    st.rerun()

# Render the chat history (skip the system message at index 0).
for msg in st.session_state.messages[1:]:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# Chat input: append the user turn, then stream the model's reply.
if prompt := st.chat_input("Type your message..."):
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    try:
        response = client.chat.completions.create(
            model=model_id,
            messages=st.session_state.messages,
            temperature=0.7,
            stream=True,
        )

        # Accumulate streamed chunks and re-render the partial reply live.
        reply_text = ""
        with st.chat_message("assistant"):
            reply_box = st.empty()
            for chunk in response:
                content = chunk.choices[0].delta.content or ""
                reply_text += content
                reply_box.markdown(reply_text)

        st.session_state.messages.append({"role": "assistant", "content": reply_text})

    except Exception as e:
        # Surface API/network errors in the UI instead of crashing the app.
        st.error(f"❌ Error: {e}")