mervp committed on
Commit
5482c3b
·
verified ·
1 Parent(s): a42f734

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -83
app.py CHANGED
@@ -1,107 +1,40 @@
1
  import streamlit as st
2
- import uuid
3
  import os
4
- import re
5
- from pydantic import BaseModel, Field
6
-
7
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
8
- from langchain_core.messages import BaseMessage
9
- from langchain_core.runnables.history import RunnableWithMessageHistory
10
- from langchain_core.runnables import ConfigurableFieldSpec
11
- from langchain_core.chat_history import BaseChatMessageHistory
12
  from langchain_groq import ChatGroq
 
13
 
14
- # --- Load environment variables from .env ---
15
- groq_api_key = os.environ["GROQ_API_KEY"]
16
-
17
-
18
# --- In-memory session-based history ---
class InMemoryHistory(BaseChatMessageHistory, BaseModel):
    """Chat message history held in process memory for a single session."""

    # Backing store; a fresh list per instance via the pydantic factory.
    messages: list[BaseMessage] = Field(default_factory=list)

    def add_messages(self, messages: list[BaseMessage]) -> None:
        """Append a batch of messages to this session's transcript."""
        self.messages.extend(messages)

    def clear(self) -> None:
        """Discard every stored message."""
        self.messages = []


# One history object per session id, kept for the lifetime of the process.
store = {}


def get_by_session_id(session_id: str) -> BaseChatMessageHistory:
    """Return the history for *session_id*, creating it on first use."""
    return store.setdefault(session_id, InMemoryHistory())
33
 
34
# --- Prompt Template ---
# Persona instructions first, then the per-session history injected by
# RunnableWithMessageHistory, then the caller-supplied question.
_persona_messages = [
    ("system", "You are an intelligent AI bot which Loves chatting with people. You will answer them in a friendly tone. Try your best to solve the user's problem. Never talk about sensitive content"),
    ("system", "If user asks about whats your purpose, you can say your a friendly bot happy to talk to the person"),
    ("system", "If user asks about who made you: you were made by Merwin"),
    MessagesPlaceholder(variable_name="history"),
    ("human", "{question}"),
]
prompt = ChatPromptTemplate.from_messages(_persona_messages)
42
 
43
# Groq-hosted chat model used for every conversational turn.
model = ChatGroq(model_name="llama-3.1-8b-instant", groq_api_key=groq_api_key)
47
 
48
chain = prompt | model

# --- Chain with History Management ---
# Each invocation looks up the session's history, fills the "history"
# placeholder with it, and records the new exchange afterwards.
_session_field = ConfigurableFieldSpec(
    id="session_id",
    annotation=str,
    name="Session ID",
    description="Conversation session ID.",
    default="default-session",
    is_shared=True,
)

chain_with_history = RunnableWithMessageHistory(
    chain,
    get_session_history=get_by_session_id,
    input_messages_key="question",
    history_messages_key="history",
    history_factory_config=[_session_field],
)
67
-
68
# --- Streamlit UI ---
st.set_page_config(page_title="Mercy AI", layout="centered")
st.title("🤖 Merwin's AI — Your AI Companion")
st.subheader("Your journey matters. I'm here to listen, support, and guide you.")

# BUG FIX: Streamlit re-runs this script top-to-bottom on every interaction,
# so generating a fresh uuid each run handed every message a brand-new
# session id and the chain's per-session history never accumulated.
# Create the id once per browser session and reuse it thereafter.
if "session_id" not in st.session_state:
    st.session_state["session_id"] = str(uuid.uuid4())
session_id = st.session_state["session_id"]
76
-
77
# --- Input Validation ---
# NOTE(review): these validators are defined but never called anywhere in
# this script — wire them into the UI or delete them.

# Compiled once at import time instead of rebuilding the pattern per call.
_NAME_RE = re.compile(r"[A-Za-z\s]{2,50}")


def is_valid_name(name: str) -> bool:
    """Return True if *name* (trimmed) is 2-50 letters/whitespace characters."""
    return bool(_NAME_RE.fullmatch(name.strip()))


def is_valid_age(age_str: str) -> bool:
    """Return True if *age_str* is all digits and the age lies in 5..100."""
    return age_str.isdigit() and 5 <= int(age_str) <= 100
83
-
84
-
85
# --- Chat History Setup ---
# The transcript lives in session state so it survives Streamlit re-runs.
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

st.markdown("Type your question below:")

user_input = st.chat_input("Ask something...")

if user_input:
    with st.spinner("Processing... Please wait."):
        run_config = {"configurable": {"session_id": session_id}}
        response = chain_with_history.invoke({"question": user_input}, config=run_config)
        st.session_state.chat_history.extend(
            [("user", user_input), ("bot", response.content)]
        )

# --- Display chat messages ---
for role, msg in st.session_state.chat_history:
    speaker = "user" if role == "user" else "assistant"
    with st.chat_message(speaker):
        st.markdown(msg)
 
1
  import streamlit as st
 
2
  import os
3
+ from langchain_core.prompts import ChatPromptTemplate
 
 
 
 
 
 
 
4
  from langchain_groq import ChatGroq
5
+ from dotenv import load_dotenv
6
 
7
# Load variables from a local .env file into the process environment
# (a no-op when the file is absent).
load_dotenv()

# Groq credentials/config.  The API key is mandatory — fail fast with the
# usual KeyError if it is missing.
groq_api_key = os.environ["GROQ_API_KEY"]
# The model name is optional: fall back to the model this app previously
# hard-coded so a missing MODEL_NAME no longer crashes at import time.
groq_model = os.environ.get("MODEL_NAME", "llama-3.1-8b-instant")
 
 
 
 
 
 
11
 
 
12
# Persona instructions plus the user's question.  Unlike the previous
# revision, this chain carries no conversational history placeholder.
_system_messages = [
    ("system", "You are an intelligent AI bot that loves chatting with people. Answer in a friendly tone and do your best to help. Avoid sensitive content."),
    ("system", "If user asks your purpose, say you're a friendly bot happy to talk."),
    ("system", "If user asks who made you: you were made by Merwin Pinto, A talented student of Vishwakarma University."),
]
prompt = ChatPromptTemplate.from_messages([*_system_messages, ("human", "{question}")])
18
 
19
# Groq chat model; the model name comes from the MODEL_NAME env variable.
model = ChatGroq(model_name=groq_model, groq_api_key=groq_api_key)
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
# --- Streamlit UI ---
st.set_page_config(page_title="Mercy AI", layout="centered")
st.title("🤖 Merwin's AI — Your AI Companion")
st.subheader("Your journey matters. I'm here to listen, support, and guide you.")

# Build the chain once per run instead of inside the input handler: the
# composition is pure and does not depend on the user's message.
chain = prompt | model

# FIX: keep the transcript across Streamlit re-runs.  The script re-executes
# from the top on every interaction, so without session state each new
# question wiped the previous exchange from the screen.
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

st.markdown("Type your question below:")

user_input = st.chat_input("Ask something...")

if user_input:
    with st.spinner("Processing... Please wait."):
        response = chain.invoke({"question": user_input})
    st.session_state["chat_history"].append(("user", user_input))
    # NOTE: only the on-screen history persists; the model still sees a
    # single question per call (the prompt has no history placeholder).
    st.session_state["chat_history"].append(("assistant", response.content))

# Render the full conversation so far.
for role, msg in st.session_state["chat_history"]:
    with st.chat_message(role):
        st.markdown(msg)