Mpavan45 commited on
Commit
c07da2a
·
verified ·
1 Parent(s): d7e3001

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +148 -30
src/streamlit_app.py CHANGED
@@ -1,40 +1,158 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
  import streamlit as st
 
5
 
6
- """
7
- # Welcome to Streamlit!
8
 
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
 
 
12
 
13
- In the meantime, below is an example of what you can do with just a few lines of code:
14
- """
15
 
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
 
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
 
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  })
32
 
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import zipfile

import streamlit as st

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain_core.messages import HumanMessage, AIMessage
 
13
# --- Streamlit Setup ---
# Wide layout suits the side-by-side chat + sources presentation.
st.set_page_config(page_title="📊 ITC Financial Analyst AI", layout="wide")

# Custom CSS for better UI: card-style chat bubbles, a branded (tomato)
# button, and the bordered ".source-box" class rendered under each answer
# by the sources section at the bottom of the script.
st.markdown("""
<style>
.main { background-color: #f9f9f9; }
.block-container {
    padding-top: 2rem;
    padding-bottom: 2rem;
}
.stChatMessage {
    background-color: #ffffff;
    border: 1px solid #e0e0e0;
    padding: 1rem;
    border-radius: 12px;
    margin-bottom: 1rem;
}
.stButton button {
    background-color: #FF6347 !important;
    color: white !important;
    border-radius: 8px !important;
    font-weight: 600;
}
.source-box {
    background-color: #f0f0f0;
    border-left: 5px solid #555;
    padding: 0.5rem;
    margin-top: 0.5rem;
    border-radius: 8px;
    font-size: 0.9rem;
}
</style>
""", unsafe_allow_html=True)

st.title("📊 ITC Financial Analysis with AI-Powered Insights")
 
 
49
 
50
# --- Chat memory & retrieval setup ---

# Persist the chat buffer across Streamlit reruns.  The original
# module-level dict was rebuilt on every rerun (Streamlit re-executes the
# whole script per interaction), so history never survived a single
# exchange; st.session_state is the supported per-session store.
if "memory_buffer" not in st.session_state:
    st.session_state["memory_buffer"] = {"chat_history": []}
memory_buffer = st.session_state["memory_buffer"]

# Sidebar: clear the conversation.  Rebinding the "chat_history" key on the
# session-held dict resets history for this session only.
st.sidebar.markdown("## 🛠️ Options")
if st.sidebar.button("🔁 End Chat"):
    memory_buffer["chat_history"] = []


@st.cache_resource
def _load_retriever():
    """Extract the persisted Chroma DB (once) and build an MMR retriever.

    Cached as a resource so the zip extraction, the embedding model load,
    and the Chroma open happen a single time per server process instead of
    on every rerun, which the original code did.
    """
    # Skip re-extraction if a previous run already unpacked the archive.
    if not os.path.isdir("chroma_db"):
        with zipfile.ZipFile("chroma_db1.zip", "r") as zip_ref:
            zip_ref.extractall("chroma_db")
    # NOTE: HuggingFaceEmbeddings was called here with its import commented
    # out at the top of the file — a guaranteed NameError at startup; the
    # import is restored in the import block.
    embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
    vectorstore = Chroma(persist_directory="chroma_db", embedding_function=embedding)
    return vectorstore.as_retriever(
        search_type="mmr", search_kwargs={"k": 3, "lambda_mult": 1}
    )


mmr_retriever = _load_retriever()
66
+
67
# Helper functions
def format_docs(docs):
    """Join retrieved document texts into one context string.

    Each document's ``page_content`` is kept intact; documents are
    separated by a blank line so the LLM sees clear chunk boundaries.
    """
    texts = [doc.page_content for doc in docs]
    return "\n\n".join(texts)
70
+
71
def get_docs_and_context(question):
    """Run MMR retrieval for *question* and bundle the results.

    Returns a dict with the original question, the raw retrieved
    documents (kept for the sources UI), and their concatenated text
    for the prompt's ``{context}`` slot.
    """
    retrieved = mmr_retriever.get_relevant_documents(question)
    return {
        "question": question,
        "docs": retrieved,
        "context": format_docs(retrieved),
    }
74
+
75
# Prompt setup
# Adapts a {"input": ...} payload into the question/docs/context dict the
# rest of the chain consumes.
# NOTE(review): get_docs_and_context already returns a "question" key, so
# the explicit "question": x["input"] entry is immediately overwritten by
# the **-expansion — harmless, but redundant.
parallel_chain = RunnableLambda(lambda x: {
    "question": x["input"],
    **get_docs_and_context(x["input"])
})

# System prompt pinning the model to the retrieved transcript context.
# {context} is filled with the concatenated retrieved chunks; the optional
# MessagesPlaceholder carries prior turns when a "chat_history" key is
# supplied at invoke time.
chat_prompt = ChatPromptTemplate.from_messages([
    ("system",
     """
You are a domain-specific AI financial analyst focused on company-level performance evaluation.

Your task is to analyze and respond to user financial queries *strictly based on the provided transcript data*: {context}.

Rules:
1. ONLY extract facts, figures, and insights that are explicitly available in the transcript.
2. If data is *missing or partially available*, clearly state: "The required data is not available in the current transcript." Then provide a generic but relevant explanation based on standard financial principles.
3. Maintain numerical accuracy and avoid interpretation beyond data boundaries.
4. Prioritize answers relevant to *ITC Ltd.*, but keep response format adaptable to other firms and fiscal years.
5. Clearly present year-wise or metric-wise insights using bullet points or structured formats if applicable.

Your goals:
- Ensure 100% fidelity to source transcript.
- Do not assume or hallucinate missing numbers.
- Use clear, reproducible reasoning steps (e.g., show which line items support your conclusion).
- Output should be modular enough to scale across other companies and time periods.

Respond only to this question from the user.
"""),
    MessagesPlaceholder(variable_name="chat_history", optional=True),
    ("human", "{input}")
])
106
+
107
# --- LLM & chain wiring ---

# API key comes from Streamlit secrets — never hard-code it.
GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]
# NOTE(review): temperature=1 is high for a "100% fidelity" analyst prompt;
# consider lowering it — left unchanged to preserve behavior callers expect.
llm = ChatGoogleGenerativeAI(api_key=GOOGLE_API_KEY, model="gemini-2.0-flash-exp", temperature=1)
parser = StrOutputParser()


def get_history_from_buffer(_):
    """Return the running chat history; the runnable input is ignored."""
    return memory_buffer['chat_history']


runnable_get_history_from_buffer = RunnableLambda(get_history_from_buffer)

# FIX: the original pipeline fetched the chat history via
# RunnablePassthrough.assign(...) but parallel_chain's output dict did not
# carry the "chat_history" key forward, so MessagesPlaceholder("chat_history")
# was always empty and the bot had no conversational memory.  The history is
# now injected directly into the prompt input built below.
main_chain = (
    parallel_chain |
    RunnableLambda(lambda x: {
        "llm_input": {
            "input": x["question"],
            "context": x["context"],
            "chat_history": memory_buffer["chat_history"],
        },
        "docs": x["docs"],
    }) |
    RunnableLambda(lambda x: {
        "result": (chat_prompt | llm | parser).invoke(x["llm_input"]),
        "source_documents": x["docs"],
    })
)

# Kept for interface compatibility: the assign step is now redundant (history
# is read directly above) but harmless.
chain = RunnablePassthrough.assign(chat_history=runnable_get_history_from_buffer) | main_chain
129
+
130
# Chat history UI
st.markdown("### 💬 Conversation")
# Streamlit redraws from scratch each rerun, so replay the whole transcript.
for msg in memory_buffer["chat_history"]:
    role = "user" if isinstance(msg, HumanMessage) else "assistant"
    with st.chat_message(role):
        st.markdown(msg.content)

# Chat input
user_input = st.chat_input("Ask about ITC's performance or any financial metric...")

if user_input:
    # Echo the user's message immediately, then record it.
    with st.chat_message("user"):
        st.markdown(user_input)

    memory_buffer["chat_history"].append(HumanMessage(content=user_input))
    # chain expects {"input": ...} and yields {"result", "source_documents"}.
    output = chain.invoke({"input": user_input})
    ai_response = output["result"]

    memory_buffer["chat_history"].append(AIMessage(content=ai_response))

    with st.chat_message("assistant"):
        st.markdown(ai_response)

    # Show source documents retrieved for this answer, styled via the
    # ".source-box" CSS class defined at the top of the app.
    if output.get("source_documents"):
        st.markdown("**Sources:**")
        for doc in output["source_documents"]:
            # "source" metadata is set at ingestion time; fall back gracefully.
            source = doc.metadata.get("source", "Unknown document")
            st.markdown(f"<div class='source-box'>📄 {source}</div>", unsafe_allow_html=True)