aghoraguru commited on
Commit
45027e8
·
verified ·
1 Parent(s): 3073160

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +542 -33
src/streamlit_app.py CHANGED
@@ -1,40 +1,549 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
 
4
  import streamlit as st
 
 
 
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  """
7
- # Welcome to Streamlit!
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
12
 
13
- In the meantime, below is an example of what you can do with just a few lines of code:
 
 
 
 
 
 
 
14
  """
15
 
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import uuid
4
+ import textwrap
5
  import streamlit as st
6
+ from typing import List, Dict, Any, Optional
7
+ from pathlib import Path
8
+ import time
9
 
10
+ # Load environment variables
11
+ from dotenv import load_dotenv
12
+ load_dotenv()
13
+
14
+ # Get API keys from environment
15
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
16
+ TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
17
+
18
+ if not OPENAI_API_KEY or not TAVILY_API_KEY:
19
+ st.error("Please set OPENAI_API_KEY and TAVILY_API_KEY in your .env file")
20
+ st.stop()
21
+
22
+ # Set environment variables
23
+ os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
24
+ os.environ['TAVILY_API_KEY'] = TAVILY_API_KEY
25
+
26
+ # Imports after setting environment variables
27
+ from openai import OpenAI
28
+ from tavily import TavilyClient
29
+ from langchain_openai import ChatOpenAI
30
+ from langgraph.prebuilt import create_react_agent
31
+ from langchain_core.tools import tool
32
+ import plotly.express as px
33
+ import plotly.graph_objects as go
34
+ import plotly.io as pio
35
+
36
+ # Initialize clients
37
+ openai_client = OpenAI(api_key=OPENAI_API_KEY)
38
+ tavily_client = TavilyClient(TAVILY_API_KEY)
39
+ llm_model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
40
+
41
+ # Create plots directory
42
+ plots_dir = Path("./plots")
43
+ plots_dir.mkdir(exist_ok=True)
44
+
45
+ # Initialize session state for conversation memory
46
+ if 'conversation_history' not in st.session_state:
47
+ st.session_state.conversation_history = []
48
+ if 'current_data' not in st.session_state:
49
+ st.session_state.current_data = None
50
+ if 'current_plot_context' not in st.session_state:
51
+ st.session_state.current_plot_context = {}
52
+
53
+ # ── TOOLS ────────────────────────────────────────────────────────────────
54
+ @tool
55
+ def search_web(query: str, max_results: int = 5) -> List[Dict[str, Any]]:
56
+ """Return Tavily results (title, url, raw_content, score)."""
57
+ return tavily_client.search(
58
+ query=query,
59
+ max_results=max_results,
60
+ search_depth="advanced",
61
+ chunks_per_source=3,
62
+ include_raw_content=True,
63
+ )["results"]
64
+
65
+ @tool
66
+ def extract_data(
67
+ raw_results: List[Dict[str, Any]],
68
+ schema: Optional[str] = None
69
+ ) -> List[Dict[str, Any]]:
70
+ """Turn *raw_results* into structured JSON matching *schema*.
71
+ If schema is None, a minimal list-of-dicts schema is inferred."""
72
+ if schema is None:
73
+ schema = '[{"OS":"string","MarketShare":"number"}]'
74
+ sys = "Return ONLY valid JSON. No markdown."
75
+ usr = (
76
+ f"Raw:\n{json.dumps(raw_results, ensure_ascii=False)[:4000]}"
77
+ f"\n\nSchema:\n{schema}"
78
+ )
79
+ res = openai_client.chat.completions.create(
80
+ model="gpt-4o-mini",
81
+ messages=[{"role": "system", "content": sys},
82
+ {"role": "user", "content": usr}],
83
+ temperature=0, max_tokens=2000,
84
+ response_format={"type": "json_object"},
85
+ )
86
+ return json.loads(res.choices[0].message.content.strip())
87
+
88
+ @tool
89
+ def generate_plot_code(
90
+ data: List[Dict[str, Any]],
91
+ instructions: str
92
+ ) -> str:
93
+ """Return RAW python defining create_plot(data)->fig."""
94
+ sys = ("Return ONLY python code (no markdown) that defines "
95
+ "`create_plot(data)` and returns a Plotly figure.")
96
+ usr = f"Data:\n{json.dumps(data, indent=2)}\n\nInstructions:\n{instructions}"
97
+ res = openai_client.chat.completions.create(
98
+ model="gpt-4o-mini",
99
+ messages=[{"role": "system", "content": sys},
100
+ {"role": "user", "content": usr}],
101
+ temperature=0, max_tokens=1500,
102
+ response_format={"type": "text"},
103
+ )
104
+ return res.choices[0].message.content.strip()
105
+
106
+ @tool
107
+ def render_plot(
108
+ code: str,
109
+ data: List[Dict[str, Any]],
110
+ filename: str | None = None
111
+ ) -> str:
112
+ """Exec *code* and save fig to HTML; returns filepath."""
113
+ if filename is None:
114
+ filename = f"plot_{uuid.uuid4().hex[:8]}.html"
115
+
116
+ # Ensure filename is saved in plots directory
117
+ filepath = plots_dir / filename
118
+
119
+ ctx = {"px": px, "go": go, "pio": pio}
120
+ exec(code, ctx) # defines create_plot
121
+ pio.write_html(ctx["create_plot"](data), str(filepath))
122
+
123
+ # Store current data and context for conversation memory
124
+ st.session_state.current_data = data
125
+ st.session_state.current_plot_context = {
126
+ 'code': code,
127
+ 'data': data,
128
+ 'filepath': str(filepath),
129
+ 'filename': filename
130
+ }
131
+
132
+ return str(filepath)
133
+
134
+ # ── AGENT PROMPT ─────────────────────────────────────────────────────────
135
+ cheat_sheet = textwrap.dedent("""
136
+ ┏━━ TOOL ARG GUIDE ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
137
+ ┃ search_web {{query:str, max_results?:int}} ┃
138
+ ┃ extract_data {{raw_results:…, schema?:str}} ← schema optional ┃
139
+ ┃ generate_plot_code {{data:…, instructions:str}} ┃
140
+ ┃ render_plot {{code:…, data:… [,filename]}} β†’ then STOP ┃
141
+ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
142
+ """)
143
+
144
+ def create_agent_prompt(conversation_history, current_data):
145
+ """Create dynamic agent prompt with conversation context."""
146
+ context_info = ""
147
+ if current_data:
148
+ context_info = f"""
149
+ CURRENT DATA CONTEXT:
150
+ You have access to previously extracted data: {json.dumps(current_data[:2], indent=2)}...
151
+ If the user asks to modify the current plot, you can skip search_web and extract_data steps and directly use this data.
152
  """
153
+
154
+ conversation_context = ""
155
+ if conversation_history:
156
+ recent_messages = conversation_history[-4:] # Last 4 messages for context
157
+ conversation_context = f"""
158
+ CONVERSATION HISTORY:
159
+ {chr(10).join([f"User: {msg['user']}" + (f"\nBot: {msg['bot']}" if msg.get('bot') else "") for msg in recent_messages])}
160
+ """
161
+
162
+ return f"""
163
+ You are Plot-Agent, an AI visualization assistant with conversation memory.
164
+
165
+ {context_info}
166
 
167
+ {conversation_context}
 
 
168
 
169
+ PIPELINE: search_web β†’ extract_data β†’ generate_plot_code β†’ render_plot.
170
+
171
+ RULES
172
+ β€’ If user asks to modify current plot and you have current data, skip search_web and extract_data.
173
+ β€’ If extract_data gets no schema, that's OK; the tool will infer one.
174
+ β€’ After render_plot, reply with the file path & a one-liner, then **end**.
175
+ β€’ Use conversation context to understand user's intent better.
176
+ {cheat_sheet}
177
  """
178
 
179
+ agent_prompt = create_agent_prompt([], None) # Initial prompt
180
+
181
+ TOOLS = [search_web, extract_data, generate_plot_code, render_plot]
182
+ plot_agent = create_react_agent(llm_model, TOOLS, prompt=agent_prompt)
183
+
184
+ # ── STREAMLIT UI ─────────────────────────────────────────────────────────
185
+ def main():
186
+ st.set_page_config(
187
+ page_title="Plot-Agent πŸ€–πŸ“Š",
188
+ page_icon="πŸ“Š",
189
+ layout="wide",
190
+ initial_sidebar_state="expanded"
191
+ )
192
+
193
+ # Custom CSS for better styling
194
+ st.markdown("""
195
+ <style>
196
+ .main-header {
197
+ background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
198
+ padding: 1rem;
199
+ border-radius: 10px;
200
+ margin-bottom: 2rem;
201
+ }
202
+ .main-header h1 {
203
+ color: white;
204
+ margin: 0;
205
+ text-align: center;
206
+ }
207
+ .status-box {
208
+ border-left: 4px solid #4CAF50;
209
+ background-color: #f9f9f9;
210
+ padding: 10px;
211
+ margin: 10px 0;
212
+ border-radius: 5px;
213
+ color: black !important;
214
+ }
215
+ .status-box * {
216
+ color: black !important;
217
+ }
218
+ .tool-box {
219
+ border: 1px solid #ddd;
220
+ border-radius: 8px;
221
+ padding: 15px;
222
+ margin: 10px 0;
223
+ background-color: #f8f9fa;
224
+ color: black !important;
225
+ }
226
+ .tool-box * {
227
+ color: black !important;
228
+ }
229
+ .error-box {
230
+ border-left: 4px solid #f44336;
231
+ background-color: #ffebee;
232
+ padding: 10px;
233
+ margin: 10px 0;
234
+ border-radius: 5px;
235
+ color: black !important;
236
+ }
237
+ .error-box * {
238
+ color: black !important;
239
+ }
240
+ </style>
241
+ """, unsafe_allow_html=True)
242
+
243
+ # Header
244
+ st.markdown("""
245
+ <div class="main-header">
246
+ <h1>πŸ€– Plot-Agent: AI-Powered Data Visualization</h1>
247
+ <p style="text-align: center; color: white; margin: 0;">
248
+ Search the web, extract data, and create stunning visualizations automatically!
249
+ </p>
250
+ </div>
251
+ """, unsafe_allow_html=True)
252
+
253
+ # Sidebar
254
+ with st.sidebar:
255
+ st.header("βš™οΈ Configuration")
256
+
257
+ # API Status
258
+ st.subheader("πŸ”‘ API Status")
259
+ if OPENAI_API_KEY and TAVILY_API_KEY:
260
+ st.success("βœ… API Keys Loaded")
261
+ else:
262
+ st.error("❌ API Keys Missing")
263
+
264
+ # Recent Plots
265
+ st.subheader("πŸ“ Recent Plots")
266
+ plot_files = list(plots_dir.glob("*.html"))
267
+ if plot_files:
268
+ plot_files.sort(key=lambda x: x.stat().st_mtime, reverse=True)
269
+ for i, plot_file in enumerate(plot_files[:5]):
270
+ if st.button(f"πŸ“Š {plot_file.stem}", key=f"recent_{i}"):
271
+ st.session_state.selected_plot = str(plot_file)
272
+ # Clear latest_plot to avoid conflicts
273
+ if 'latest_plot' in st.session_state:
274
+ del st.session_state.latest_plot
275
+ st.rerun()
276
+ else:
277
+ st.info("No plots generated yet")
278
+
279
+ # Clear plots
280
+ if st.button("πŸ—‘οΈ Clear All Plots"):
281
+ for plot_file in plot_files:
282
+ plot_file.unlink()
283
+ # Clear session state plot references
284
+ if 'latest_plot' in st.session_state:
285
+ del st.session_state.latest_plot
286
+ if 'selected_plot' in st.session_state:
287
+ del st.session_state.selected_plot
288
+ st.success("All plots cleared!")
289
+ st.rerun()
290
+
291
+ # Clear conversation
292
+ if st.button("πŸ—‘οΈ Clear Conversation"):
293
+ st.session_state.conversation_history = []
294
+ st.session_state.current_data = None
295
+ st.session_state.current_plot_context = {}
296
+ st.success("Conversation cleared!")
297
+ st.rerun()
298
+
299
+ # Show current context
300
+ if st.session_state.current_data:
301
+ st.subheader("πŸ’Ύ Current Data Context")
302
+ st.success(f"πŸ“Š {len(st.session_state.current_data)} data points available")
303
+ with st.expander("View Data Sample"):
304
+ st.json(st.session_state.current_data[:3])
305
+
306
+ # Show conversation history
307
+ if st.session_state.conversation_history:
308
+ st.subheader("πŸ’¬ Conversation History")
309
+ with st.expander(f"View History ({len(st.session_state.conversation_history)} messages)"):
310
+ for i, msg in enumerate(st.session_state.conversation_history[-5:]):
311
+ st.write(f"**{i+1}. User:** {msg['user']}")
312
+ if msg.get('bot'):
313
+ st.write(f"**Bot:** {msg['bot']}")
314
+
315
+ # Main interface
316
+ col1, col2 = st.columns([1, 1])
317
+
318
+ with col1:
319
+ st.header("πŸ’¬ Chat with Plot-Agent")
320
+
321
+ # Input form
322
+ with st.form("plot_request"):
323
+ user_input = st.text_area(
324
+ "What visualization would you like to create?",
325
+ placeholder="e.g., Create a bar chart of top 10 countries by GDP in 2024",
326
+ height=100
327
+ )
328
+
329
+ submitted = st.form_submit_button("πŸš€ Generate Plot", use_container_width=True)
330
+
331
+ # Example prompts
332
+ st.subheader("πŸ’‘ Example Prompts")
333
+
334
+ # Dynamic examples based on context
335
+ base_examples = [
336
+ "Create a line chart of Bitcoin price over the last 6 months",
337
+ "Show a pie chart of global smartphone market share in 2024",
338
+ "Make a bar chart of top 10 most populous cities in the world",
339
+ "Create a scatter plot of countries by GDP vs population",
340
+ ]
341
+
342
+ context_examples = []
343
+ if st.session_state.current_data:
344
+ context_examples = [
345
+ "Change the current chart to a pie chart",
346
+ "Make the bars horizontal instead of vertical",
347
+ "Add different colors to each data point",
348
+ "Change the title and add axis labels",
349
+ ]
350
+
351
+ all_examples = context_examples + base_examples
352
+
353
+ for i, example in enumerate(all_examples[:6]): # Show max 6 examples
354
+ prefix = "πŸ”„" if i < len(context_examples) else "πŸ“"
355
+ if st.button(f"{prefix} {example}", key=f"example_{i}"):
356
+ st.session_state.user_input = example
357
+ submitted = True
358
+ user_input = example
359
+
360
+ with col2:
361
+ st.header("πŸ”„ Agent Activity")
362
+
363
+ # Create placeholders for real-time updates
364
+ status_placeholder = st.empty()
365
+ activity_placeholder = st.empty()
366
+
367
+ # Process request
368
+ if submitted and user_input:
369
+ # Add user message to conversation history
370
+ st.session_state.conversation_history.append({
371
+ 'user': user_input,
372
+ 'timestamp': time.time()
373
+ })
374
+
375
+ with status_placeholder.container():
376
+ st.markdown('<div class="status-box">πŸš€ <strong>Starting Plot-Agent...</strong></div>',
377
+ unsafe_allow_html=True)
378
+
379
+ # Create containers for activity logging
380
+ activity_container = activity_placeholder.container()
381
+
382
+ try:
383
+ # Create dynamic agent with conversation context
384
+ dynamic_prompt = create_agent_prompt(
385
+ st.session_state.conversation_history,
386
+ st.session_state.current_data
387
+ )
388
+ plot_agent = create_react_agent(llm_model, TOOLS, prompt=dynamic_prompt)
389
+
390
+ # Stream the agent execution
391
+ messages = []
392
+ current_tool = None
393
+ tool_results = {}
394
+ bot_response = ""
395
+
396
+ with activity_container:
397
+ progress_bar = st.progress(0)
398
+ step_counter = 0
399
+ max_steps = 4 # search, extract, generate, render
400
+
401
+ for chunk in plot_agent.stream(
402
+ {"messages": [{"role": "user", "content": user_input}]},
403
+ stream_mode="updates",
404
+ config={"recursion_limit": 10},
405
+ ):
406
+ node_name = next(iter(chunk))
407
+
408
+ if node_name == "agent":
409
+ if "messages" in chunk[node_name]:
410
+ message = chunk[node_name]["messages"][-1]
411
+ messages.append(message)
412
+
413
+ # Parse tool calls
414
+ if hasattr(message, 'tool_calls') and message.tool_calls:
415
+ for tool_call in message.tool_calls:
416
+ current_tool = tool_call['name']
417
+ step_counter += 1
418
+ progress_bar.progress(min(step_counter / max_steps, 1.0))
419
+
420
+ st.markdown(f"""
421
+ <div class="tool-box">
422
+ <h4>πŸ”§ Using Tool: {current_tool}</h4>
423
+ <p><strong>Arguments:</strong></p>
424
+ <pre>{json.dumps(tool_call['args'], indent=2)}</pre>
425
+ </div>
426
+ """, unsafe_allow_html=True)
427
+
428
+ time.sleep(0.5) # Visual delay for better UX
429
+
430
+ # Show assistant responses
431
+ elif hasattr(message, 'content') and message.content:
432
+ bot_response = message.content
433
+ st.markdown(f"""
434
+ <div class="status-box">
435
+ <strong>πŸ€– Plot-Agent:</strong> {message.content}
436
+ </div>
437
+ """, unsafe_allow_html=True)
438
+
439
+ elif node_name == "tools":
440
+ # Show tool results
441
+ for tool_name, result in chunk[node_name].items():
442
+ tool_results[tool_name] = result
443
+
444
+ if tool_name == "search_web":
445
+ st.markdown(f"""
446
+ <div class="tool-box">
447
+ <h4>πŸ” Search Results</h4>
448
+ <p>Found {len(result)} sources</p>
449
+ <details>
450
+ <summary>View Sources</summary>
451
+ <ul>
452
+ """)
453
+ for item in result[:3]: # Show first 3 sources
454
+ st.markdown(f"<li><strong>{item.get('title', 'N/A')}</strong><br><small>{item.get('url', 'N/A')}</small></li>")
455
+ st.markdown("</ul></details></div>", unsafe_allow_html=True)
456
+
457
+ elif tool_name == "extract_data":
458
+ st.markdown(f"""
459
+ <div class="tool-box">
460
+ <h4>πŸ“Š Extracted Data</h4>
461
+ <p>Processed {len(result)} data points</p>
462
+ <details>
463
+ <summary>View Data Sample</summary>
464
+ <pre>{json.dumps(result[:3] if len(result) > 3 else result, indent=2)}</pre>
465
+ </details>
466
+ </div>
467
+ """, unsafe_allow_html=True)
468
+
469
+ elif tool_name == "generate_plot_code":
470
+ st.markdown(f"""
471
+ <div class="tool-box">
472
+ <h4>🎨 Generated Plot Code</h4>
473
+ <details>
474
+ <summary>View Code</summary>
475
+ <pre>{result[:500]}...</pre>
476
+ </details>
477
+ </div>
478
+ """, unsafe_allow_html=True)
479
+
480
+ elif tool_name == "render_plot":
481
+ st.markdown(f"""
482
+ <div class="tool-box">
483
+ <h4>βœ… Plot Rendered</h4>
484
+ <p><strong>File:</strong> {result}</p>
485
+ </div>
486
+ """, unsafe_allow_html=True)
487
+
488
+ # Set the generated plot for display and auto-refresh
489
+ st.session_state.latest_plot = result
490
+ # Clear selected plot to show latest
491
+ if 'selected_plot' in st.session_state:
492
+ del st.session_state.selected_plot
493
+
494
+ progress_bar.progress(1.0)
495
+
496
+ # Update conversation history with bot response
497
+ if bot_response:
498
+ st.session_state.conversation_history[-1]['bot'] = bot_response
499
+
500
+ # Update status
501
+ with status_placeholder.container():
502
+ st.markdown('<div class="status-box">βœ… <strong>Plot generation completed!</strong></div>',
503
+ unsafe_allow_html=True)
504
+
505
+ # Force rerun to show the new plot immediately
506
+ time.sleep(0.5) # Small delay to ensure file is written
507
+ st.rerun()
508
+
509
+ except Exception as e:
510
+ with status_placeholder.container():
511
+ st.markdown(f'<div class="error-box">❌ <strong>Error:</strong> {str(e)}</div>',
512
+ unsafe_allow_html=True)
513
+
514
+ # Display generated plot
515
+ st.header("πŸ“Š Generated Visualization")
516
+
517
+ # Determine which plot to show (latest has priority over selected)
518
+ plot_file = None
519
+ if hasattr(st.session_state, 'latest_plot') and st.session_state.latest_plot:
520
+ plot_file = st.session_state.latest_plot
521
+ st.info("πŸ†• **Latest Generated Plot**")
522
+ elif hasattr(st.session_state, 'selected_plot') and st.session_state.selected_plot:
523
+ plot_file = st.session_state.selected_plot
524
+ st.info(f"πŸ“ **Selected Plot:** {Path(plot_file).stem}")
525
+
526
+ if plot_file and Path(plot_file).exists():
527
+ # Display the HTML plot
528
+ try:
529
+ with open(plot_file, 'r', encoding='utf-8') as f:
530
+ html_content = f.read()
531
+
532
+ st.components.v1.html(html_content, height=600, scrolling=True)
533
+
534
+ # Download button
535
+ st.download_button(
536
+ label="πŸ“₯ Download Plot",
537
+ data=html_content,
538
+ file_name=Path(plot_file).name,
539
+ mime="text/html"
540
+ )
541
+ except Exception as e:
542
+ st.error(f"Error loading plot: {str(e)}")
543
+ elif hasattr(st.session_state, 'latest_plot') or hasattr(st.session_state, 'selected_plot'):
544
+ st.error("Plot file not found! It may have been deleted.")
545
+ else:
546
+ st.info("πŸ‘‹ Generate a plot or select from recent plots to view here!")
547
+
548
+ if __name__ == "__main__":
549
+ main()