Ashkchamp commited on
Commit
e094945
·
verified ·
1 Parent(s): 37b25b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -48
app.py CHANGED
@@ -1,73 +1,77 @@
1
- # app.py ─ Streamlit Space
2
  import os, asyncio, streamlit as st
3
  from dotenv import load_dotenv
4
- from langchain_core.prompts import ChatPromptTemplate
5
- from langchain_core.output_parsers import StrOutputParser
6
- from langchain_google_genai import ChatGoogleGenerativeAI
7
 
8
# ───────────────────────── ensure event‑loop ────────────────────────
def _ensure_event_loop() -> None:
    """Guarantee the current thread has an asyncio event loop installed."""
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop running in this thread — install a fresh one.
        asyncio.set_event_loop(asyncio.new_event_loop())


_ensure_event_loop()

if os.name == "nt":  # Windows only
    # NOTE(review): indentation was lost in extraction; placed at top
    # level to match the newer revision of this bootstrap — confirm.
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
15
 
16
# ─────────────────────────── Streamlit UI ───────────────────────────
st.set_page_config(page_title="LangChain Chatbot", page_icon="πŸ€–")
st.title("πŸ€– LangChain Chatbot Demo")
st.markdown("Type a question and get answers from **Gemini‑Pro**.")

# Sidebar – API key input (read by get_llm(); falls back to GOOGLE_API_KEY env var)
with st.sidebar:
    google_key = st.text_input("GoogleΒ APIΒ Key", type="password")
    st.markdown("*Your key is kept only in this browser session.*")

# Question input — empty string until the user types something
user_q = st.text_input("Enter your question:")
29
# ──────────────────── lazy LLM constructor ──────────────────────────
def get_llm():
    """Build and cache the Gemini chat client in session state.

    Raises:
        ValueError: when no API key was supplied via the sidebar or the
            ``GOOGLE_API_KEY`` environment variable.
    """
    if "llm" in st.session_state:
        return st.session_state.llm

    api_key = google_key or os.getenv("GOOGLE_API_KEY")
    if not api_key:
        raise ValueError("Please enter your GoogleΒ APIΒ key in the sidebar.")
    os.environ["GOOGLE_API_KEY"] = api_key  # make client happy
    st.session_state.llm = ChatGoogleGenerativeAI(
        model="gemini-pro",
        convert_system_message_to_human=True,
    )
    return st.session_state.llm
 
 
 
 
41
 
42
# Prompt template (built once – static)
PROMPT = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant. Please answer the user."),
        ("user", "Question: {question}"),
    ]
)
# Converts the model's chat message into a plain string for display.
PARSER = StrOutputParser()
50
 
51
# ─────────────────────────── main action ────────────────────────────
if user_q:
    try:
        with st.spinner("Thinking…"):
            llm = get_llm()                 # lazy client; may raise ValueError
            chain = PROMPT | llm | PARSER   # LCEL pipeline: prompt -> LLM -> str
            answer = chain.invoke({"question": user_q})

        st.success(answer)

        # Append both turns to the session-scoped history list.
        st.session_state.setdefault("history", []).extend(
            [{"role": "user", "text": user_q},
             {"role": "bot", "text": answer}]
        )

    except Exception as err:
        # Broad catch: surface any failure (missing key, API error) in the UI.
        st.error(f"❌ {err}")
68
 
69
# Conversation history (re-rendered in the sidebar on every rerun)
st.sidebar.header("Conversation History")
for msg in st.session_state.get("history", []):
    role = "You" if msg["role"] == "user" else "Bot"
    st.sidebar.write(f"**{role}:** {msg['text']}")
 
1
+ # app.py ─ Streamlit + LangChain + Groq
2
  import os, asyncio, streamlit as st
3
  from dotenv import load_dotenv
4
+ from langchain.schema import SystemMessage, HumanMessage, AIMessage
5
+ from langchain_groq import ChatGroq
 
6
 
7
# ───────────────────────── bootstrap event‑loop ─────────────────────
# Make sure this thread has an asyncio event loop before any async
# client code runs (Streamlit executes scripts in worker threads).
try:
    asyncio.get_running_loop()
except RuntimeError:
    asyncio.set_event_loop(asyncio.new_event_loop())
if os.name == "nt":
    # Fix: dropped the redundant `import asyncio as _asyncio` — the
    # module is already imported at the top of the file.
    # NOTE(review): presumably chosen to avoid Proactor-loop issues on
    # Windows — confirm against the async client in use.
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
 
16
# ─────────────────────────── UI / SETTINGS ────────────────────────
st.set_page_config("Groq Chatbot", "πŸ€–")  # page title + icon
st.title("πŸ€– Groq‑powered Advanced Chatbot")
st.caption("DeepSeek‑R1‑Distill‑Llama‑70B β€’ LangChain β€’ Streamlit")

with st.sidebar:
    st.header("πŸ”‘ Groq API Key")
    groq_key = st.text_input("Paste your key here", type="password")
    st.divider()
    # Sampling controls; get_llm() re-reads these on every rerun.
    temperature = st.slider("Temperature", 0.0, 1.2, 0.7, 0.1)
    top_p = st.slider("Top‑p", 0.0, 1.0, 1.0, 0.05)
    st.markdown("*All values remain local to your browser.*")

# Chat-style input; None until the user submits a message.
user_q = st.chat_input("Type your message…")

# ────────────────────────── LLM (lazy init) ─────────────────────────
MODEL_NAME = "deepseek-r1-distill-llama-70b"  # Groq-hosted model id
 
 
34
def get_llm():
    """Lazily construct and cache the Groq chat client in session state.

    The key comes from the sidebar input (``groq_key``) or the
    ``GROQ_API_KEY`` environment variable; sampling parameters are
    refreshed from the sidebar sliders on every call so slider changes
    take effect without rebuilding the client.

    Returns:
        ChatGroq: the cached chat client.

    Raises:
        ValueError: if no API key is available.
    """
    if "llm" not in st.session_state:
        key = groq_key or os.getenv("GROQ_API_KEY")
        if not key:
            raise ValueError("Add your Groq key in the sidebar.")
        os.environ["GROQ_API_KEY"] = key  # for the client
        # PEP 8: no spaces around '=' in keyword arguments (was
        # `model = MODEL_NAME`, etc. in the original).
        st.session_state.llm = ChatGroq(
            model=MODEL_NAME,
            groq_api_key=key,
            temperature=temperature,
            top_p=top_p,
        )
    # refresh sampling params if the sliders changed
    llm = st.session_state.llm
    llm.temperature = temperature
    llm.top_p = top_p
    return llm
51
 
52
# ───────────────────────── conversation memory ──────────────────────
# Seed the chat history once per session with a system instruction;
# later code skips this first entry when rendering.
if "history" not in st.session_state:
    st.session_state.history = [
        SystemMessage(content="You are an advanced, helpful assistant.")
    ]
 
 
57
 
58
# ──────────────────────────── main loop ─────────────────────────────
if user_q:
    # Record the user's turn before calling the model.
    st.session_state.history.append(HumanMessage(content=user_q))

    try:
        with st.chat_message("assistant", avatar="πŸ€–"):
            with st.spinner("Thinking…"):
                # Send the full message history so the model keeps context.
                answer = get_llm().invoke(st.session_state.history).content
                st.markdown(answer)

        st.session_state.history.append(AIMessage(content=answer))

    except Exception as err:
        # Broad catch: show missing-key / API failures inline in the UI.
        st.error(f"**Error:** {err}")
72
 
73
# ──────────────────────── display chat history ──────────────────────
# NOTE(review): this also re-renders the reply just drawn in the main
# loop above, so the latest answer may appear twice — confirm intended.
for msg in st.session_state.history[1:]:  # skip system message
    role = "user" if isinstance(msg, HumanMessage) else "assistant"
    with st.chat_message(role):
        st.markdown(msg.content)