Mpavan45 commited on
Commit
adf1127
·
verified ·
1 Parent(s): c07da2a

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -157
app.py DELETED
@@ -1,157 +0,0 @@
1
import os
import zipfile

import streamlit as st

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.messages import HumanMessage, AIMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
11
-
12
# --- Streamlit Setup ---
st.set_page_config(page_title="πŸ“Š ITC Financial Analyst AI", layout="wide")

# Custom CSS for better UI: chat bubbles, accent buttons, and the boxes used
# later to display source documents (class "source-box").
_CUSTOM_CSS = """
<style>
.main { background-color: #f9f9f9; }
.block-container {
    padding-top: 2rem;
    padding-bottom: 2rem;
}
.stChatMessage {
    background-color: #ffffff;
    border: 1px solid #e0e0e0;
    padding: 1rem;
    border-radius: 12px;
    margin-bottom: 1rem;
}
.stButton button {
    background-color: #FF6347 !important;
    color: white !important;
    border-radius: 8px !important;
    font-weight: 600;
}
.source-box {
    background-color: #f0f0f0;
    border-left: 5px solid #555;
    padding: 0.5rem;
    margin-top: 0.5rem;
    border-radius: 8px;
    font-size: 0.9rem;
}
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
46
-
47
st.title("πŸ“Š ITC Financial Analysis with AI-Powered Insights")

# Chat history buffer.
# BUG FIX: Streamlit re-executes this script from the top on every user
# interaction, so a plain module-level dict was recreated on each rerun and
# the conversation never persisted. Backing the buffer with st.session_state
# (which survives reruns) fixes that, while keeping the
# memory_buffer["chat_history"] access pattern used elsewhere in this file.
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []
memory_buffer = st.session_state

# Clear history
st.sidebar.markdown("## πŸ› οΈ Options")
if st.sidebar.button("πŸ” End Chat"):
    memory_buffer["chat_history"] = []
56
-
57
# Load Chroma vector DB from ZIP.
# IMPROVEMENT: Streamlit reruns this script on every interaction; extracting
# the archive each time is wasted I/O, so skip it once the directory exists.
if not os.path.isdir('chroma_db'):
    with zipfile.ZipFile('chroma_db1.zip', 'r') as zip_ref:
        zip_ref.extractall('chroma_db')

# Vector embeddings (HuggingFaceEmbeddings comes from the top-of-file imports;
# it was previously referenced without being imported, a NameError at startup).
embedding = HuggingFaceEmbeddings(model_name='all-MiniLM-L6-v2')
vectorstore = Chroma(persist_directory='chroma_db', embedding_function=embedding)
# MMR retrieval of the top 3 chunks. NOTE(review): lambda_mult=1 weights pure
# relevance with no diversity, which defeats MMR's purpose — confirm intended.
mmr_retriever = vectorstore.as_retriever(search_type="mmr", search_kwargs={"k": 3, "lambda_mult": 1})
65
-
66
# Helper functions
def format_docs(docs):
    """Join retrieved document bodies into a single context string.

    Each element of *docs* must expose a ``page_content`` attribute; bodies
    are separated by a blank line. Returns "" for an empty list.
    """
    return "\n\n".join(doc.page_content for doc in docs)

def get_docs_and_context(question):
    """Retrieve documents relevant to *question* and bundle them with context.

    Returns a dict with keys "question", "docs" (raw documents) and
    "context" (the documents joined by format_docs).
    """
    # FIX: retriever.get_relevant_documents() is deprecated; retrievers are
    # Runnables, so .invoke() is the supported equivalent.
    docs = mmr_retriever.invoke(question)
    return {"question": question, "docs": docs, "context": format_docs(docs)}
73
-
74
# Prompt setup
def _retrieve_for_input(payload):
    """Map the chain's {"input": ...} payload to question/docs/context.

    get_docs_and_context already returns the "question" key, so delegating
    to it directly yields exactly the same dict as before.
    """
    return get_docs_and_context(payload["input"])

parallel_chain = RunnableLambda(_retrieve_for_input)
79
-
80
# Prompt template: a system message that pins the model to the retrieved
# transcript context ({context}), an optional slot for prior chat turns, and
# finally the user's question ({input}).
chat_prompt = ChatPromptTemplate.from_messages([
    ("system",
     """
You are a domain-specific AI financial analyst focused on company-level performance evaluation.

Your task is to analyze and respond to user financial queries *strictly based on the provided transcript data*: {context}.

Rules:
1. ONLY extract facts, figures, and insights that are explicitly available in the transcript.
2. If data is *missing or partially available*, clearly state: "The required data is not available in the current transcript." Then provide a generic but relevant explanation based on standard financial principles.
3. Maintain numerical accuracy and avoid interpretation beyond data boundaries.
4. Prioritize answers relevant to *ITC Ltd.*, but keep response format adaptable to other firms and fiscal years.
5. Clearly present year-wise or metric-wise insights using bullet points or structured formats if applicable.

Your goals:
- Ensure 100% fidelity to source transcript.
- Do not assume or hallucinate missing numbers.
- Use clear, reproducible reasoning steps (e.g., show which line items support your conclusion).
- Output should be modular enough to scale across other companies and time periods.

Respond only to this question from the user.
"""),
    # Prior conversation turns are injected here when a "chat_history" key is
    # present in the prompt input; optional=True keeps the prompt valid when
    # no history is supplied.
    MessagesPlaceholder(variable_name="chat_history", optional=True),
    ("human", "{input}")
])
105
-
106
# Gemini API key comes from Streamlit secrets (.streamlit/secrets.toml or the
# deployment's secret store); missing key raises KeyError at startup.
GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]
# NOTE(review): temperature=1 is high for an analyst persona whose prompt
# demands "100% fidelity" and no hallucination — confirm this is intentional;
# a value near 0 would give more deterministic, fact-bound answers.
llm = ChatGoogleGenerativeAI(api_key=GOOGLE_API_KEY, model="gemini-2.0-flash-exp", temperature=1)
# Extracts the plain string content from the model's chat message output.
parser = StrOutputParser()
109
-
110
def get_history_from_buffer(_ignored):
    """Return the running chat history; the chain input argument is ignored.

    Returns the live list from memory_buffer (not a copy), matching how the
    rest of the app appends to it.
    """
    history = memory_buffer['chat_history']
    return history

# Runnable wrapper so the history can be injected via RunnablePassthrough.assign.
runnable_get_history_from_buffer = RunnableLambda(get_history_from_buffer)
114
-
115
# Retrieval + prompting pipeline.
# BUG FIX: `chain` injects chat_history via RunnablePassthrough.assign, but the
# previous pipeline dropped it — llm_input carried only "input" and "context",
# so chat_prompt's MessagesPlaceholder("chat_history") never received any
# history and the model had no conversational memory. The history is now
# threaded through every stage into llm_input.
main_chain = (
    RunnableLambda(lambda x: {
        # get_docs_and_context supplies question/docs/context; carry the
        # injected chat history along (default [] keeps direct invocations of
        # main_chain without history working).
        **get_docs_and_context(x["input"]),
        "chat_history": x.get("chat_history", []),
    }) |
    RunnableLambda(lambda x: {
        "llm_input": {
            "input": x["question"],
            "context": x["context"],
            "chat_history": x["chat_history"],
        },
        "docs": x["docs"]
    }) |
    RunnableLambda(lambda x: {
        "result": (chat_prompt | llm | parser).invoke(x["llm_input"]),
        "source_documents": x["docs"]
    })
)

# Full chain: attach the stored chat history to the input, then run the pipeline.
chain = RunnablePassthrough.assign(chat_history=runnable_get_history_from_buffer) | main_chain
128
-
129
# Chat history UI: replay every stored message in its own chat bubble.
st.markdown("### πŸ’¬ Conversation")
for message in memory_buffer["chat_history"]:
    speaker = "user" if isinstance(message, HumanMessage) else "assistant"
    with st.chat_message(speaker):
        st.markdown(message.content)
135
-
136
# Chat input
user_input = st.chat_input("Ask about ITC’s performance or any financial metric...")

if user_input:
    # Echo the user's message and record it before invoking the chain, so the
    # turn is part of the stored conversation.
    with st.chat_message("user"):
        st.markdown(user_input)
    memory_buffer["chat_history"].append(HumanMessage(content=user_input))

    response = chain.invoke({"input": user_input})
    reply = response["result"]
    memory_buffer["chat_history"].append(AIMessage(content=reply))

    with st.chat_message("assistant"):
        st.markdown(reply)

    # Show source documents returned by the retriever, if any.
    if response.get("source_documents"):
        st.markdown("**Sources:**")
        for doc in response["source_documents"]:
            origin = doc.metadata.get("source", "Unknown document")
            st.markdown(f"<div class='source-box'>πŸ“„ {origin}</div>", unsafe_allow_html=True)