leomini commited on
Commit
4159183
Β·
verified Β·
1 Parent(s): 58af20d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +217 -134
app.py CHANGED
@@ -1,134 +1,217 @@
1
- import streamlit as st
2
- import os
3
- import requests
4
- import time
5
-
6
- # Config
7
- CHUNKS_FILE = "chunks.txt" # Updated to match Dockerfile structure
8
- GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "AIzaSyDteeiTCZIt9J-NntBUrdWLG3WuXGhules")
9
- GEMINI_API_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
10
- MAX_CONTEXT_LENGTH = 1000
11
- MAX_RESPONSE_LENGTH = 300
12
-
13
- # Load chunks
14
- def load_chunks(chunks_file):
15
- chunks = []
16
- try:
17
- with open(chunks_file, 'r', encoding='utf-8') as file:
18
- current_chunk = ""
19
- for line in file:
20
- if line.startswith("Chunk"):
21
- if current_chunk:
22
- chunks.append(current_chunk.strip())
23
- current_chunk = ""
24
- else:
25
- current_chunk += line
26
- if current_chunk:
27
- chunks.append(current_chunk.strip())
28
- return chunks
29
- except Exception as e:
30
- st.error(f"⚠️ Error loading chunks: {e}")
31
- return []
32
-
33
- # Basic keyword search
34
- def search_messages(query, chunks, top_k=3):
35
- query_words = set(query.lower().split())
36
- scores = []
37
- for chunk in chunks:
38
- chunk_words = set(chunk.lower().split())
39
- match_count = len(query_words.intersection(chunk_words))
40
- score = match_count / max(len(chunk_words), 1)
41
- scores.append((score, chunk))
42
- scores.sort(reverse=True)
43
- return [chunk for _, chunk in scores[:top_k]]
44
-
45
- # Call Gemini
46
- def generate_response(query, chunks):
47
- try:
48
- context = "\n".join(chunks)[:MAX_CONTEXT_LENGTH]
49
- prompt = f"""
50
- You are a professional customer support assistant. You resolve user issues by analyzing previous customer interactions and providing clear, helpful, and empathetic responses.
51
-
52
- Instructions:
53
- - Use the provided chat history as your internal knowledge base.
54
- - Do not mention or reference the history directly.
55
- - Understand recurring issues and recognize patterns from similar past cases.
56
- - For the given user query:
57
- - Greet and acknowledge the concern professionally.
58
- - Suggest a solution or steps, based on insights from similar historical interactions.
59
- - If the solution is uncertain, offer best practices or next steps.
60
- - End with a polite closing and an offer for further help.
61
- - don't mention about past history or previous tickets.
62
-
63
- Chat History:
64
- {context}
65
-
66
- User Query:
67
- "{query}"
68
-
69
- Your Response:
70
- """.strip()
71
-
72
- headers = {
73
- "Content-Type": "application/json",
74
- "X-goog-api-key": GEMINI_API_KEY
75
- }
76
-
77
- data = {
78
- "contents": [{"parts": [{"text": prompt}]}],
79
- "generationConfig": {"maxOutputTokens": MAX_RESPONSE_LENGTH}
80
- }
81
-
82
- response = requests.post(GEMINI_API_URL, headers=headers, json=data)
83
- response.raise_for_status()
84
- response_data = response.json()
85
- return response_data["candidates"][0]["content"]["parts"][0]["text"].strip()
86
-
87
- except Exception as e:
88
- return f"⚠️ Error generating response: {e}"
89
-
90
- # App UI
91
- def main():
92
- st.set_page_config(page_title=" Support Assistant", layout="centered")
93
- st.title("βœ… Assistant βœ…")
94
- st.caption("Submit support questions that are related to previously resolved tickets to ensure efficient and accurate assistance")
95
-
96
- # Load chunks and history
97
- if "chunks" not in st.session_state:
98
- st.session_state.chunks = load_chunks(CHUNKS_FILE)
99
- if "messages" not in st.session_state:
100
- st.session_state.messages = []
101
-
102
- # Show chat history
103
- for message in st.session_state.messages:
104
- role, content = message["role"], message["content"]
105
- with st.chat_message("user" if role == "user" else "assistant"):
106
- st.markdown(content)
107
- if role == "assistant":
108
- with st.expander("πŸ“‹ Copy Response"):
109
- st.code(content, language="markdown")
110
-
111
- # User input
112
- user_input = st.chat_input("Type your support question here...")
113
-
114
- if user_input:
115
- # Display user message
116
- with st.chat_message("user"):
117
- st.markdown(user_input)
118
- st.session_state.messages.append({"role": "user", "content": user_input})
119
-
120
- # Show bot is thinking...
121
- with st.chat_message("assistant"):
122
- with st.spinner("🧠 Thinking..."):
123
- relevant_chunks = search_messages(user_input, st.session_state.chunks)
124
- bot_reply = generate_response(user_input, relevant_chunks)
125
- time.sleep(0.5) # simulate delay
126
- st.markdown(bot_reply)
127
- with st.expander("πŸ“‹ Copy Response"):
128
- st.code(bot_reply, language="markdown")
129
-
130
- # Save bot reply
131
- st.session_state.messages.append({"role": "assistant", "content": bot_reply})
132
-
133
- if __name__ == "__main__":
134
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import os
3
+ import requests
4
+ import time
5
+
6
# Config
CHUNKS_FILE = "chunks.txt"
# SECURITY: never ship a real API key as a hard-coded fallback — the previous
# default embedded a live credential in the repo. The key must come from the
# environment; an empty default makes a missing key fail loudly at the first
# API call instead of silently using a leaked secret.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
GEMINI_API_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
MAX_CONTEXT_LENGTH = 1000   # max characters of retrieved chat history fed to the model
MAX_RESPONSE_LENGTH = 300   # maxOutputTokens passed to Gemini for answer mode
12
+
13
# Load chunks for answer mode
def load_chunks(chunks_file):
    """Parse *chunks_file* into a list of text chunks.

    Lines beginning with "Chunk" act as separators and are themselves
    discarded; every other line is accumulated into the current chunk.
    Each finished chunk is stripped of surrounding whitespace.

    Returns an empty list (after surfacing the error via st.error) when the
    file cannot be read.
    """
    parsed = []
    try:
        with open(chunks_file, 'r', encoding='utf-8') as handle:
            buffer = []
            for raw_line in handle:
                if raw_line.startswith("Chunk"):
                    # Separator line: flush whatever has accumulated so far.
                    if buffer:
                        parsed.append("".join(buffer).strip())
                        buffer = []
                else:
                    buffer.append(raw_line)
            # Flush the trailing chunk (no separator follows the last one).
            if buffer:
                parsed.append("".join(buffer).strip())
        return parsed
    except Exception as e:
        st.error(f"⚠️ Error loading chunks: {e}")
        return []
32
+
33
# Search relevant chat chunks
def search_messages(query, chunks, top_k=3):
    """Return up to *top_k* chunks ranked by keyword overlap with *query*.

    Score = |query words ∩ chunk words| / |chunk words|, case-insensitive,
    whitespace-tokenized. Ties fall back to reverse lexicographic order of
    the chunk text (a side effect of sorting (score, chunk) tuples).
    """
    terms = set(query.lower().split())

    def overlap_ratio(text):
        # Fraction of the chunk's distinct words that also appear in the
        # query; max(..., 1) guards against an empty chunk.
        words = set(text.lower().split())
        return len(terms & words) / max(len(words), 1)

    ranked = sorted(((overlap_ratio(c), c) for c in chunks), reverse=True)
    return [text for _, text in ranked[:top_k]]
44
+
45
# Generate answer from Gemini
def generate_response(query, chunks):
    """Ask Gemini to answer *query*, grounded on retrieved chat history.

    Args:
        query: The user's support question.
        chunks: Chat-history chunks selected by search_messages().

    Returns:
        The model's reply text, or a "⚠️ ..." error string on any failure
        (network error, HTTP error, unexpected response shape).
    """
    try:
        # Cap the joined context so the prompt stays within budget.
        context = "\n".join(chunks)[:MAX_CONTEXT_LENGTH]
        prompt = f"""
You are a professional customer support assistant. You resolve user issues by analyzing previous customer interactions and providing clear, helpful, and empathetic responses.

Instructions:
- Use the provided chat history as your internal knowledge base.
- Do not mention or reference the history directly.
- Understand recurring issues and recognize patterns from similar past cases.
- For the given user query:
- Greet and acknowledge the concern professionally.
- Suggest a solution or steps, based on insights from similar historical interactions.
- If the solution is uncertain, offer best practices or next steps.
- End with a polite closing and an offer for further help.
- Don't mention about past history or previous tickets.

Chat History:
{context}

User Query:
"{query}"

Your Response:
""".strip()

        headers = {
            "Content-Type": "application/json",
            "X-goog-api-key": GEMINI_API_KEY
        }

        data = {
            "contents": [{"parts": [{"text": prompt}]}],
            "generationConfig": {"maxOutputTokens": MAX_RESPONSE_LENGTH}
        }

        # FIX: requests has no default timeout — without one a stalled API
        # call hangs the Streamlit worker forever.
        response = requests.post(GEMINI_API_URL, headers=headers, json=data, timeout=30)
        response.raise_for_status()
        response_data = response.json()
        return response_data["candidates"][0]["content"]["parts"][0]["text"].strip()

    except Exception as e:
        # Broad catch is deliberate: any failure becomes a visible chat reply
        # instead of crashing the UI.
        return f"⚠️ Error generating response: {e}"
89
+
90
# Format instruction based on example
def format_instruction(raw_input):
    """Rewrite a raw technical note as a polite support instruction via Gemini.

    Uses one few-shot example to pin the tone/format (greeting "Dear @Fa,",
    4-5 polite lines, closing).

    Args:
        raw_input: Raw error text or terse instruction from the user.

    Returns:
        The formatted instruction text, or a "⚠️ ..." error string on any
        failure (network error, HTTP error, unexpected response shape).
    """
    try:
        example_input = """MOSTK014_P11
HOME RETURN error"""

        example_output = """Dear @Fa,

Kindly check the issue on MOSTK014_P11 - we are encountering a HOME RETURN FAIL ERROR.

Please assist at your earliest convenience.

Thank you for your support."""

        # FIX: the second rule previously read "always team Dear @Fa," which
        # is not parseable English and gave the model a garbled instruction.
        prompt = f"""
You are a professional support coordinator.

When given a raw technical input, you must convert it into a polite, professional 4–5 line instruction message, similar in tone and format to the example below.

--- EXAMPLE ---
Raw Input:
{example_input}

Formatted Output:
{example_output}
--- END EXAMPLE ---
Rules:
- always use a greeting and closing
- always address the team as Dear @Fa,

Now format this new input the same way:

Raw Input:
{raw_input}

Formatted Output:
""".strip()

        headers = {
            "Content-Type": "application/json",
            "X-goog-api-key": GEMINI_API_KEY
        }

        data = {
            "contents": [{"parts": [{"text": prompt}]}],
            "generationConfig": {
                "maxOutputTokens": 200,
                # Some creative leeway for phrasing, unlike answer mode.
                "temperature": 0.7
            }
        }

        # FIX: requests has no default timeout — without one a stalled API
        # call hangs the Streamlit worker forever.
        response = requests.post(GEMINI_API_URL, headers=headers, json=data, timeout=30)
        response.raise_for_status()
        response_data = response.json()
        return response_data["candidates"][0]["content"]["parts"][0]["text"].strip()

    except Exception as e:
        # Broad catch is deliberate: failures surface as a chat reply.
        return f"⚠️ Error formatting instruction: {e}"
148
+
149
# Main app
def _render_history(messages, copy_label):
    """Replay a chat transcript; assistant turns get a copy-to-clipboard expander."""
    for msg in messages:
        with st.chat_message(msg["role"]):
            st.markdown(msg["content"])
            if msg["role"] == "assistant":
                with st.expander(copy_label):
                    st.code(msg["content"], language="markdown")


def _run_chat(history, input_placeholder, spinner_text, copy_label, reply_fn):
    """Drive one chat pane: replay history, accept input, render the reply.

    *history* is mutated in place (it lives in st.session_state).
    *reply_fn* maps the user's text to the assistant's reply string.
    Extracted because both modes previously duplicated this entire flow.
    """
    _render_history(history, copy_label)

    user_text = st.chat_input(input_placeholder)
    if user_text:
        st.chat_message("user").markdown(user_text)
        history.append({"role": "user", "content": user_text})

        with st.chat_message("assistant"):
            with st.spinner(spinner_text):
                reply = reply_fn(user_text)
                time.sleep(0.5)  # brief pause so the spinner is noticeable
                st.markdown(reply)
                with st.expander(copy_label):
                    st.code(reply, language="markdown")

        history.append({"role": "assistant", "content": reply})


def main():
    """Streamlit entry point: two-mode support assistant UI."""
    st.set_page_config(page_title="Support Assistant", layout="centered")
    st.title("πŸ€– AssistEdge πŸ‘‡")
    st.caption("AssistEdge is your intelligent frontline support companion β€” blending historical insights with real-time AI to deliver clear, empathetic, and professional responses. Whether you're resolving recurring issues or crafting polished technical instructions, AssistEdge keeps the conversation flowing with memory-aware chat and smart formatting.")

    # Mode switcher
    mode = st.radio("Select Mode:", ["Instruction Formatter","Answer Mode (Chat)"], horizontal=True)

    # Initialize session state once per browser session.
    if "chunks" not in st.session_state:
        st.session_state.chunks = load_chunks(CHUNKS_FILE)
    if "answer_messages" not in st.session_state:
        st.session_state.answer_messages = []
    if "format_messages" not in st.session_state:
        st.session_state.format_messages = []

    if mode == "Answer Mode (Chat)":
        _run_chat(
            st.session_state.answer_messages,
            "Type your support question here...",
            "🧠 Thinking...",
            "πŸ“‹ Copy Response",
            # Retrieve relevant history, then answer grounded on it.
            lambda q: generate_response(q, search_messages(q, st.session_state.chunks)),
        )
    else:
        _run_chat(
            st.session_state.format_messages,
            "Enter raw error or instruction to format...",
            "πŸ“„ Formatting...",
            "πŸ“‹ Copy Instruction",
            format_instruction,
        )


if __name__ == "__main__":
    main()