Ashkchamp committed on
Commit
37b25b4
·
verified Β·
1 Parent(s): bb32f7f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -41
app.py CHANGED
@@ -1,56 +1,73 @@
 
1
  import os, asyncio, streamlit as st
2
  from dotenv import load_dotenv
3
  from langchain_core.prompts import ChatPromptTemplate
4
  from langchain_core.output_parsers import StrOutputParser
5
  from langchain_google_genai import ChatGoogleGenerativeAI
6
 
7
- # ------------------------------------------------------------------ #
8
- # 1️⃣ Ensure the current thread owns an asyncio event‑loop
9
  try:
10
  asyncio.get_running_loop()
11
  except RuntimeError:
12
  asyncio.set_event_loop(asyncio.new_event_loop())
13
- if os.name == "nt": # optional Windows tweak
14
  asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
15
- # ------------------------------------------------------------------ #
16
-
17
- load_dotenv() # reads .env into os.environ
18
- os.environ["GOOGLE_API_KEY"] = os.getenv("api") # keep your .env key name
19
-
20
- system_template = "You are a helpful assistant. Please respond to the user queries."
21
- prompt_template = ChatPromptTemplate.from_messages([
22
- ('system', system_template),
23
- ('user', 'Question: {question}')
24
- ])
25
-
26
- model = ChatGoogleGenerativeAI(model="gemini-pro",
27
- convert_system_message_to_human=True)
28
- parser = StrOutputParser()
29
- chain = prompt_template | model | parser
30
-
31
- st.title("LangChain Chatbot Demo")
32
- st.markdown("""
33
- Welcome to the LangChain Chatbot Demo!
34
- Type your query below and get responses powered by Google's GenerativeΒ AI.
35
- """)
36
-
37
- input_text = st.text_input("Enter your question:")
38
-
39
- if input_text:
40
- with st.spinner("Generating response…"):
41
- try:
42
- response = chain.invoke({"question": input_text})
43
- st.write("**Chatbot Response:**")
44
- st.write(response)
45
-
46
- st.session_state.setdefault("history", []).extend([
47
- {"role": "user", "text": input_text},
48
- {"role": "chatbot", "text": response},
49
- ])
50
- except Exception as e:
51
- st.error(f"An error occurred: {e}")
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  st.sidebar.header("Conversation History")
54
  for msg in st.session_state.get("history", []):
55
- role = "You" if msg["role"] == "user" else "Chatbot"
56
  st.sidebar.write(f"**{role}:** {msg['text']}")
 
1
+ # app.py ─ Streamlit Space
2
  import os, asyncio, streamlit as st
3
  from dotenv import load_dotenv
4
  from langchain_core.prompts import ChatPromptTemplate
5
  from langchain_core.output_parsers import StrOutputParser
6
  from langchain_google_genai import ChatGoogleGenerativeAI
7
 
8
# ───────────────────── ensure an asyncio event loop ─────────────────────
# Streamlit executes the script in a worker thread that may not own an
# event loop, while the Google GenAI client expects one to exist.
try:
    asyncio.get_running_loop()
except RuntimeError:
    # No loop is running in this thread — install a fresh one.
    fresh_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(fresh_loop)
    if os.name == "nt":  # Windows only
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
# ─────────────────────────── Streamlit UI ───────────────────────────
# NOTE: user-facing strings repaired — the previous revision contained
# mojibake (UTF-8 text decoded as cp1252: "πŸ€–", "GoogleΒ APIΒ Key").
st.set_page_config(page_title="LangChain Chatbot", page_icon="🤖")
st.title("🤖 LangChain Chatbot Demo")
st.markdown("Type a question and get answers from **Gemini-Pro**.")

# Sidebar – API key input; the key lives only in this browser session.
with st.sidebar:
    google_key = st.text_input("Google API Key", type="password")
    st.markdown("*Your key is kept only in this browser session.*")

# Main question input
user_q = st.text_input("Enter your question:")
29
# ──────────────────── lazy LLM constructor ──────────────────────────
def get_llm():
    """Return a cached ChatGoogleGenerativeAI client, creating it on demand.

    The client is kept in ``st.session_state`` so Streamlit reruns reuse
    it.  Fix over the previous revision: if the user enters a *different*
    API key later in the session, the cached client is rebuilt with the
    new key instead of silently keeping the stale one.

    Returns:
        ChatGoogleGenerativeAI: the (possibly cached) chat model client.

    Raises:
        ValueError: if no key is available from the sidebar input or the
            ``GOOGLE_API_KEY`` environment variable.
    """
    key = google_key or os.getenv("GOOGLE_API_KEY")
    if not key:
        raise ValueError("Please enter your Google API key in the sidebar.")
    # Rebuild when missing or when the key changed mid-session.
    if "llm" not in st.session_state or st.session_state.get("llm_key") != key:
        os.environ["GOOGLE_API_KEY"] = key  # the client reads the key from env
        st.session_state.llm = ChatGoogleGenerativeAI(
            model="gemini-pro",
            convert_system_message_to_human=True,
        )
        st.session_state.llm_key = key
    return st.session_state.llm
41
+
42
# Prompt template and output parser — static, so build them once at import.
_MESSAGES = [
    ("system", "You are a helpful assistant. Please answer the user."),
    ("user", "Question: {question}"),
]
PROMPT = ChatPromptTemplate.from_messages(_MESSAGES)
PARSER = StrOutputParser()
50
+
51
# ─────────────────────────── main action ────────────────────────────
# NOTE: error-prefix string repaired — the previous revision contained
# mojibake ("❌" for the ❌ emoji).
if user_q:
    try:
        with st.spinner("Thinking…"):
            llm = get_llm()                # may raise ValueError (no key)
            chain = PROMPT | llm | PARSER  # LCEL pipeline
            answer = chain.invoke({"question": user_q})

        st.success(answer)

        # Append the exchange to the per-session history (sidebar display).
        st.session_state.setdefault("history", []).extend(
            [
                {"role": "user", "text": user_q},
                {"role": "bot", "text": answer},
            ]
        )

    except Exception as err:  # UI boundary: surface any failure to the user
        st.error(f"❌ {err}")
68
+
69
# Sidebar: replay the conversation recorded so far this session.
st.sidebar.header("Conversation History")
for entry in st.session_state.get("history", []):
    speaker = "You" if entry["role"] == "user" else "Bot"
    st.sidebar.write(f"**{speaker}:** {entry['text']}")