Krish-05 committed on
Commit
8e43129
·
verified ·
1 Parent(s): 0bc4633

Update streamlit_app.py

Browse files
Files changed (1) hide show
  1. streamlit_app.py +34 -257
streamlit_app.py CHANGED
@@ -1,292 +1,69 @@
1
  import streamlit as st
2
  import requests
3
  import json
4
- import time # For simulating loading
5
 
6
- # Configuration for the FastAPI backend
7
- # When running in Docker, 'localhost' from Streamlit's perspective
8
- # refers to the container itself.
9
- FASTAPI_HOST = "localhost"
10
- FASTAPI_PORT = 7860
11
- FASTAPI_URL = f"http://{FASTAPI_HOST}:{FASTAPI_PORT}/ask"
12
 
13
  st.set_page_config(page_title="Ollama AI Assistant", page_icon="🤖", layout="wide")
14
 
15
- # --- Custom CSS for styling (mimicking Tailwind where possible) ---
16
- # Streamlit doesn't directly support Tailwind classes on its native widgets,
17
- # so we'll inject custom CSS to try and match the look.
18
- # For elements rendered via st.markdown with unsafe_allow_html=True,
19
- # we can use the provided Tailwind CDN.
20
- st.markdown(
21
- """
22
- <head>
23
- <link rel="preconnect" href="https://fonts.gstatic.com/" crossorigin />
24
- <link
25
- rel="stylesheet"
26
- as="style"
27
- onload="this.rel='stylesheet'"
28
- href="https://fonts.googleapis.com/css2?display=swap&family=Inter:wght@400;500;700;900&family=Noto+Sans:wght@400;500;700;900"
29
- />
30
- <script src="https://cdn.tailwindcss.com?plugins=forms,container-queries"></script>
31
- <style>
32
- /* Apply Inter font to the whole body for consistency */
33
- body {
34
- font-family: 'Inter', 'Noto Sans', sans-serif;
35
- }
36
- /* Custom styling for Streamlit's native text area and button to blend in */
37
- .stTextArea > div > div > textarea {
38
- border-radius: 0.5rem; /* rounded-lg */
39
- background-color: #f0f2f4; /* bg-[#f0f2f4] */
40
- color: #111418; /* text-[#111418] */
41
- border: none;
42
- padding: 1rem; /* px-4 py-3 */
43
- font-size: 1rem; /* text-base */
44
- line-height: 1.5rem; /* leading-normal */
45
- resize: vertical; /* Allow vertical resize */
46
- }
47
- .stButton > button {
48
- border-radius: 0.5rem; /* rounded-lg */
49
- background-color: #197fe5; /* bg-[#197fe5] */
50
- color: white; /* text-white */
51
- font-weight: 500; /* font-medium */
52
- padding: 0.5rem 1rem; /* px-4 py-2 */
53
- font-size: 0.875rem; /* text-sm */
54
- line-height: 1.25rem; /* leading-normal */
55
- border: none;
56
- cursor: pointer;
57
- transition: background-color 0.2s;
58
- }
59
- .stButton > button:hover {
60
- background-color: #156ac0; /* Darker shade on hover */
61
- }
62
-
63
- /* Custom styling for Streamlit's success/error/info messages */
64
- .stAlert {
65
- border-radius: 0.5rem;
66
- padding: 1rem;
67
- margin-bottom: 1rem;
68
- }
69
- .stAlert.success {
70
- background-color: #d4edda;
71
- color: #155724;
72
- border-color: #c3e6cb;
73
- }
74
- .stAlert.info {
75
- background-color: #d1ecf1;
76
- color: #0c5460;
77
- border-color: #bee5eb;
78
- }
79
- .stAlert.error {
80
- background-color: #f8d7da;
81
- color: #721c24;
82
- border-color: #f5c6cb;
83
- }
84
- .stAlert.warning {
85
- background-color: #fff3cd;
86
- color: #856404;
87
- border-color: #ffeeba;
88
- }
89
-
90
- /* Main layout container adjustments for Streamlit */
91
- .main .block-container {
92
- padding-left: 2.5rem; /* px-10 */
93
- padding-right: 2.5rem; /* px-10 */
94
- padding-top: 1.25rem; /* py-5 */
95
- padding-bottom: 1.25rem; /* py-5 */
96
- max-width: 960px; /* max-w-[960px] */
97
- margin-left: auto;
98
- margin-right: auto;
99
- }
100
- .reportview-container .main {
101
- background-color: white;
102
- }
103
- </style>
104
- </head>
105
- """,
106
- unsafe_allow_html=True
107
- )
108
-
109
- # --- Initialize chat history in session state ---
110
  if 'chat_history' not in st.session_state:
111
  st.session_state.chat_history = [
112
  {"role": "assistant", "message": "Hello! How can I assist you today?"}
113
  ]
114
 
115
- # --- Header Section ---
116
- st.markdown(
117
- """
118
- <div class="relative flex size-full min-h-screen flex-col bg-white group/design-root overflow-x-hidden" style='font-family: Inter, "Noto Sans", sans-serif;'>
119
- <div class="layout-container flex h-full grow flex-col">
120
- <header class="flex items-center justify-between whitespace-nowrap border-b border-solid border-b-[#f0f2f4] px-10 py-3">
121
- <div class="flex items-center gap-4 text-[#111418]">
122
- <div class="size-4">
123
- <svg viewBox="0 0 48 48" fill="currentColor" xmlns="http://www.w3.org/2000/svg">
124
- <path
125
- d="M42.4379 44C42.4379 44 36.0744 33.9038 41.1692 24C46.8624 12.9336 42.2078 4 42.2078 4L7.01134 4C7.01134 4 11.6577 12.932 5.96912 23.9969C0.876273 33.9029 7.27094 44 7.27094 44L42.4379 44Z"
126
- ></path>
127
- </svg>
128
- </div>
129
- <h2 class="text-[#111418] text-lg font-bold leading-tight tracking-[-0.015em]">Ollama AI Assistant</h2>
130
- </div>
131
- <div class="flex flex-1 justify-end gap-8">
132
- <div class="flex items-center gap-9">
133
- <a class="text-[#111418] text-sm font-medium leading-normal" href="#">Home</a>
134
- <a class="text-[#111418] text-sm font-medium leading-normal" href="#">About</a>
135
- <a class="text-[#111418] text-sm font-medium leading-normal" href="#">Contact</a>
136
- </div>
137
- <button
138
- class="flex max-w-[480px] cursor-pointer items-center justify-center overflow-hidden rounded-lg h-10 bg-[#f0f2f4] text-[#111418] gap-2 text-sm font-bold leading-normal tracking-[0.015em] min-w-0 px-2.5"
139
- >
140
- <div class="text-[#111418]" data-icon="Question" data-size="20px" data-weight="regular">
141
- <svg xmlns="http://www.w3.org/2000/svg" width="20px" height="20px" fill="currentColor" viewBox="0 0 256 256">
142
- <path
143
- d="M140,180a12,12,0,1,1-12-12A12,12,0,0,1,140,180ZM128,72c-22.06,0-40,16.15-40,36v4a8,8,0,0,0,16,0v-4c0-11,10.77-20,24-20s24,9,24,20-10.77,20-24,20a8,8,0,0,0-8,8v8a8,8,0,0,0,16,0v-.72c18.24-3.35,32-17.9,32-35.28C168,88.15,150.06,72,128,72Zm104,56A104,104,0,1,1,128,24,104.11,104.11,0,0,1,232,128Zm-16,0a88,88,0,1,0-88,88A88.1,88.1,0,0,0,216,128Z"
144
- ></path>
145
- </svg>
146
- </div>
147
- </button>
148
- <div
149
- class="bg-center bg-no-repeat aspect-square bg-cover rounded-full size-10"
150
- style='background-image: url("https://lh3.googleusercontent.com/aida-public/AB6AXuC5KnfW55B9wImWCPuTc-su9IqknYT6UByWvZiMMcykiONoep2xgQzZVy3-rO3Eg5es87eMIdQjykY7rhMDLxqclYZyBbAPBOt2O7w1CwDPv07CBM_xm6bb69JPvr47GHBN8TAJTaPvEsazBM4FdVPdpqOuL0NkEempaK4qxmKrQZdLomFGEolqi6jx2w03c656tRpmsE8dOFAK12G9JbduRuDXSw3EhOQLLNaJZTwpU6TXnmIc0z3Mq3oKR27GeCTKaQvy1Rgin11U");'
151
- ></div>
152
- </div>
153
- </header>
154
- <div class="px-40 flex flex-1 justify-center py-5">
155
- <div class="layout-content-container flex flex-col max-w-[960px] flex-1">
156
- <h2 class="text-[#111418] tracking-light text-[28px] font-bold leading-tight px-4 text-center pb-3 pt-5">Welcome to the Chatbot</h2>
157
- <p class="text-[#111418] text-base font-normal leading-normal pb-3 pt-1 px-4 text-center">
158
- Start chatting with our AI assistant. Type your message below and press send.
159
- </p>
160
- """,
161
- unsafe_allow_html=True
162
- )
163
 
164
- # --- Chat Display Area ---
165
- chat_display_container = st.container()
166
- with chat_display_container:
167
- for chat in st.session_state.chat_history:
168
- if chat["role"] == "assistant":
169
- st.markdown(
170
- f"""
171
- <div class="flex items-end gap-3 p-4">
172
- <div
173
- class="bg-center bg-no-repeat aspect-square bg-cover rounded-full w-10 shrink-0"
174
- style='background-image: url("https://lh3.googleusercontent.com/aida-public/AB6AXuBKHyf9huD3r49CwxpDcgRD4Ks1kSV0CxqU2WM_1p5TIcqUHVdNtLTsHIqBLBWVloYC-Zo9mzohtE_MrpMEcOkMkY0QBmPNSq4BRYd9fn0h3FRJtIesg86T8LbeM3Eq2C4IYsnrGSof88GoEho-X6vJkj9S-csVf7cwDfgUsv6qtacgVPsKIMNAAlLHYxGONGfUaa8U2XNfBPrZ0ieLOsKlK0BDsOG4JH6slntUBVimLXxb7cn96F");'
175
- ></div>
176
- <div class="flex flex-1 flex-col gap-1 items-start">
177
- <p class="text-[#637588] text-[13px] font-normal leading-normal max-w-[360px]">AI Assistant</p>
178
- <p class="text-base font-normal leading-normal flex max-w-[360px] rounded-lg px-4 py-3 bg-[#f0f2f4] text-[#111418]">{chat["message"]}</p>
179
- </div>
180
- </div>
181
- """,
182
- unsafe_allow_html=True
183
- )
184
- else: # role == "user"
185
- st.markdown(
186
- f"""
187
- <div class="flex items-end gap-3 p-4 justify-end">
188
- <div class="flex flex-1 flex-col gap-1 items-end">
189
- <p class="text-[#637588] text-[13px] font-normal leading-normal max-w-[360px] text-right">You</p>
190
- <p class="text-base font-normal leading-normal flex max-w-[360px] rounded-lg px-4 py-3 bg-[#197fe5] text-white">{chat["message"]}</p>
191
- </div>
192
- <div
193
- class="bg-center bg-no-repeat aspect-square bg-cover rounded-full w-10 shrink-0"
194
- style='background-image: url("https://lh3.googleusercontent.com/aida-public/AB6AXuBnmIGyzlGKCR0gv4IOuNULvdR6AgNnZYMSi6whrzgaOZabk8OYAVghLVv8XTaihjo7VHEEhlvA_zxPtn8Tq8gRHOswzwsC2Xy7HQuQ8wlZLcYyTUeJvj5sJwNPtvaqtn9QCAMwymfrao07KZ9EIw1hQT2FU4szlvMuttkBguNWJ1LnxsmQQwhltofj5A48jZcjo8w3ldVkuheoWJ-NMI3m1U-e6_YLtGuPDN_SEQrKSHUyGCdscSL2ZgB9DlrTR8WU8jaHOxcZRldy");'
195
- ></div>
196
- </div>
197
- """,
198
- unsafe_allow_html=True
199
- )
200
 
201
  # --- Input Area ---
202
- # Use a form to handle input and button click
203
  with st.form("chat_form", clear_on_submit=True):
204
- col1, col2 = st.columns([0.9, 0.1])
205
- with col1:
206
- user_prompt = st.text_area(
207
- "Type your message here...",
208
- height=60,
209
- placeholder="e.g., Explain quantum computing in simple terms.",
210
- label_visibility="collapsed", # Hide the default label
211
- key="user_input_text_area" # Add a key for the text area
212
- )
213
- with col2:
214
- # Custom button styling using markdown
215
- send_button_html = """
216
- <button type="submit" class="flex max-w-[480px] cursor-pointer items-center justify-center overflow-hidden rounded-lg h-10 bg-[#197fe5] text-white gap-2 text-sm font-bold leading-normal tracking-[0.015em] min-w-0 px-2.5">
217
- <span class="truncate">Send</span>
218
- </button>
219
- """
220
- submitted = st.form_submit_button(label="Send", use_container_width=True)
221
-
222
 
223
  if submitted and user_prompt:
224
- # Add user message to chat history
225
  st.session_state.chat_history.append({"role": "user", "message": user_prompt})
226
-
227
- # Display a loading message
228
- loading_message = st.empty()
229
- loading_message.markdown(
230
- """
231
- <div class="flex items-end gap-3 p-4">
232
- <div
233
- class="bg-center bg-no-repeat aspect-square bg-cover rounded-full w-10 shrink-0"
234
- style='background-image: url("https://lh3.googleusercontent.com/aida-public/AB6AXuBKHyf9huD3r49CwxpDcgRD4Ks1kSV0CxqU2WM_1p5TIcqUHVdNtLTsHIqBLBWVloYC-Zo9mzohtE_MrpMEcOkMkY0QBmPNSq4BRYd9fn0h3FRJtIesg86T8LbeM3Eq2C4IYsnrGSof88GoEho-X6vJkj9S-csVf7cwDfgUsv6qtacgVPsKIMNAAlLHYxGONGfUaa8U2XNfBPrZ0ieLOsKlK0BDsOG4JH6slntUBVimLXxb7cn96F");'
235
- ></div>
236
- <div class="flex flex-1 flex-col gap-1 items-start">
237
- <p class="text-[#637588] text-[13px] font-normal leading-normal max-w-[360px]">AI Assistant</p>
238
- <p class="text-base font-normal leading-normal flex max-w-[360px] rounded-lg px-4 py-3 bg-[#f0f2f4] text-[#111418]">Thinking... Please wait as the model generates a response.</p>
239
- </div>
240
- </div>
241
- """,
242
- unsafe_allow_html=True
243
- )
244
 
245
  try:
246
- # Prepare the request payload
247
  payload = {"text": user_prompt}
248
  headers = {"Content-Type": "application/json"}
249
-
250
- # Make the POST request to the FastAPI endpoint
251
  response = requests.post(FASTAPI_URL, data=json.dumps(payload), headers=headers)
252
 
253
- # Clear the loading message
254
- loading_message.empty()
255
-
256
- # Check if the request was successful (status code 200)
257
  if response.status_code == 200:
258
- data = response.json()
259
- llm_response = data.get("response", "No response received.")
260
- st.session_state.chat_history.append({"role": "assistant", "message": llm_response})
261
  else:
262
- error_message = f"Error: Could not get a response from the FastAPI server. Status code: {response.status_code}. Details: {response.text}"
263
- st.error(error_message)
264
- st.session_state.chat_history.append({"role": "assistant", "message": f"Error: {error_message}"})
265
  except requests.exceptions.ConnectionError:
266
- error_message = f"Error: Could not connect to the FastAPI server. Please ensure it is running at {FASTAPI_URL}."
267
- st.error(error_message)
268
- st.session_state.chat_history.append({"role": "assistant", "message": f"Error: {error_message}"})
269
  except Exception as e:
270
- error_message = f"An unexpected error occurred: {e}"
271
- st.error(error_message)
272
- st.session_state.chat_history.append({"role": "assistant", "message": f"Error: {error_message}"})
273
 
274
- # Rerun to update chat history display
275
  st.experimental_rerun()
 
276
  elif submitted and not user_prompt:
277
  st.warning("Please enter a prompt before clicking 'Send'.")
278
 
279
-
280
  # --- Footer ---
281
- st.markdown(
282
- """
283
- </div>
284
- </div>
285
- </div>
286
- </div>
287
- <div class="flex items-center justify-center whitespace-nowrap px-10 py-3 mt-auto">
288
- <p class="text-[#637588] text-sm font-normal leading-normal">Powered by Ollama, FastAPI, and Streamlit.</p>
289
- </div>
290
- """,
291
- unsafe_allow_html=True
292
- )
 
1
  import streamlit as st
2
  import requests
3
  import json
 
4
 
5
# FastAPI configuration.
# NOTE(review): assumes the FastAPI backend is reachable on localhost:7860
# (i.e. runs in the same container/host as this Streamlit app) — confirm
# against the deployment setup.
FASTAPI_URL = "http://localhost:7860/ask"
 
 
 
 
# Configure the browser tab title/icon and use the full-width page layout.
# Must be the first Streamlit call in the script.
st.set_page_config(page_title="Ollama AI Assistant", page_icon="🤖", layout="wide")
9
 
10
# --- Session state for chat history ---
# Seed the transcript with a single assistant greeting on the first run;
# later reruns keep whatever the user and assistant have already said.
_INITIAL_GREETING = {"role": "assistant", "message": "Hello! How can I assist you today?"}
if "chat_history" not in st.session_state:
    st.session_state.chat_history = [_INITIAL_GREETING]
15
 
16
# --- App Header ---
# Page title and a one-line usage hint shown above the chat transcript.
st.title("🤖 Ollama AI Assistant")
st.caption("Start chatting with our AI assistant. Type your message below and press send.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
# --- Chat Display ---
# Render the running transcript: one chat bubble per stored message,
# with the robot avatar for assistant turns and the default for user turns.
st.markdown("---")
for entry in st.session_state.chat_history:
    is_assistant = entry["role"] == "assistant"
    speaker = "assistant" if is_assistant else "user"
    avatar_kwargs = {"avatar": "🤖"} if is_assistant else {}
    with st.chat_message(speaker, **avatar_kwargs):
        st.write(entry["message"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
# --- Input Area ---
# A form groups the text area and the Send button so the script only reruns
# on submit; clear_on_submit wipes the box after each message is sent.
with st.form("chat_form", clear_on_submit=True):
    user_prompt = st.text_area(
        label="Type your message here...",
        key="user_input_text_area",
        label_visibility="collapsed",
        placeholder="e.g., Explain quantum computing in simple terms.",
        height=100,
    )
    submitted = st.form_submit_button("Send")
 
 
 
 
 
 
 
 
 
 
40
 
41
if submitted and user_prompt:
    # Record the user's turn first so it survives the rerun below.
    st.session_state.chat_history.append({"role": "user", "message": user_prompt})
    # Transient placeholder shown while the request is in flight; the rerun
    # replaces it with the real assistant message from chat_history.
    with st.chat_message("assistant", avatar="🤖"):
        st.write("Thinking...")

    try:
        # `json=` serializes the payload and sets the Content-Type header for
        # us (no manual json.dumps / headers dict needed). A timeout keeps the
        # app from hanging forever if the backend stalls; LLM generation can
        # be slow, so allow a generous window.
        response = requests.post(FASTAPI_URL, json={"text": user_prompt}, timeout=120)

        if response.status_code == 200:
            llm_response = response.json().get("response", "No response received.")
        else:
            llm_response = f"Error: FastAPI server returned {response.status_code}. Details: {response.text}"

    except requests.exceptions.Timeout:
        llm_response = f"Error: The request to the FastAPI server timed out."
    except requests.exceptions.ConnectionError:
        llm_response = f"Error: Cannot connect to the FastAPI server at {FASTAPI_URL}."
    except Exception as e:  # surface anything unexpected (e.g. non-JSON body) in the chat
        llm_response = f"Unexpected error: {e}"

    st.session_state.chat_history.append({"role": "assistant", "message": llm_response})
    # st.experimental_rerun() was deprecated and later removed; prefer
    # st.rerun() when available, falling back for older Streamlit versions.
    (getattr(st, "rerun", None) or st.experimental_rerun)()

elif submitted and not user_prompt:
    st.warning("Please enter a prompt before clicking 'Send'.")
66
 
 
67
  # --- Footer ---
68
+ st.markdown("---")
69
+ st.caption("Powered by Ollama, FastAPI, and Streamlit.")