sameerwanjari826 committed on
Commit
b54c604
·
verified ·
1 Parent(s): 73ef695

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +513 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,515 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
  import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- """
7
- # Welcome to Streamlit!
8
-
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
12
-
13
- In the meantime, below is an example of what you can do with just a few lines of code:
14
- """
15
-
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
 
 
 
1
  import streamlit as st
2
+ import sqlite3
3
+ from google import genai
4
+ import os
5
+ from dotenv import load_dotenv
6
+ import pandas as pd
7
+ import requests
8
+ import json
9
+
10
+ # Load environment variables
11
+ load_dotenv()
12
+
13
# Database configuration: path to the SQLite file built by the data-processing step.
DB_PATH = "data/placement.db"
+
17
def is_valid_api_key(key):
    """Return True if *key* plausibly looks like a real Gemini API key."""
    if not key:
        return False
    # Reject obvious placeholder values copied from templates / docs.
    for placeholder in ("your_gemini_api_key_here", "INSERT_KEY_HERE", "ENTER_KEY"):
        if placeholder in key:
            return False
    # Real Gemini keys start with "AIza" and are roughly 39-40 chars long.
    return key.startswith("AIza") and len(key) >= 30
27
+
28
def get_ollama_models():
    """Fetch the names of locally available models from an Ollama server.

    Returns an empty list when Ollama is not running or replies with
    anything unexpected — local models are strictly optional, so callers
    never need to handle a failure here.
    """
    try:
        response = requests.get("http://localhost:11434/api/tags", timeout=2)
        if response.status_code == 200:
            data = response.json()
            return [m['name'] for m in data.get('models', [])]
    # Was a bare `except:` — that also swallowed KeyboardInterrupt/SystemExit.
    # `Exception` keeps the deliberate best-effort behaviour without that.
    except Exception:
        pass
    return []
38
+
39
def call_ollama(model_name, prompt, history=None):
    """Send *prompt* (with optional chat *history*) to a local Ollama model.

    Returns the model's reply text, or a human-readable error string when
    the request fails — callers display the return value either way.
    """
    url = "http://localhost:11434/api/chat"

    # Replay prior turns, then append the new user prompt.
    messages = [{"role": m["role"], "content": m["content"]} for m in (history or [])]
    messages.append({"role": "user", "content": prompt})

    payload = {"model": model_name, "messages": messages, "stream": False}

    try:
        response = requests.post(url, json=payload, timeout=30)
        if response.status_code != 200:
            return f"Error: Ollama returned {response.status_code}"
        return response.json().get('message', {}).get('content', "Error: Empty response")
    except Exception as e:
        return f"Error connecting to Ollama: {e}"
63
+
64
def get_db_connection():
    """Open and return a fresh SQLite connection to the placement database."""
    return sqlite3.connect(DB_PATH)
67
+
68
def run_query(query, params=None):
    """Execute *query* against the placement DB and return the result.

    params: optional sequence of bind parameters for `?` placeholders.
    Returns a pandas DataFrame on success, or a string of the form
    "Error: ..." on failure — callers distinguish the two with
    isinstance(result, pd.DataFrame), so that contract is preserved.
    """
    conn = get_db_connection()
    try:
        # read_sql_query treats params=None the same as omitting it, so the
        # original if/else duplication is unnecessary.
        return pd.read_sql_query(query, conn, params=params)
    except Exception as e:
        return f"Error: {e}"
    finally:
        # Guarantee the connection is released on every path (the original
        # closed it separately in both branches).
        conn.close()
82
+
83
def generate_sql(question, model_name, history=None):
    """Convert a natural-language *question* into a SQLite query via an LLM.

    history: optional list of chat messages ({"role": ..., "content": ...})
             used as conversational context (last 4 messages only).
    Model routing: names not starting with "gemini"/"gemma" are assumed to
    be local Ollama models; everything else goes through the module-level
    Gemini `client`. Returns the SQL text with markdown fences stripped.
    Raises RuntimeError when a cloud model is selected but `client` is None.
    """
    # Schema description handed verbatim to the LLM.
    schema = """
Table: events
Columns: id (INTEGER), company_name (TEXT), event_type (TEXT), raw_filename (TEXT), topic_url (TEXT)

Table: students
Columns: id (INTEGER), roll_no (TEXT), email (TEXT), name (TEXT), branch (TEXT), year (TEXT)

Table: event_students
Columns: id (INTEGER), student_id (INTEGER), event_id (INTEGER), raw_line (TEXT)
Foreign Keys: student_id -> students.id, event_id -> events.id
"""

    context_history = ""
    if history:
        # Only the last 4 messages — enough context without bloating the prompt.
        context_history = "\nRecent Conversation Context:\n"
        for msg in history[-4:]:
            context_history += f"{msg['role'].capitalize()}: {msg['content']}\n"

    prompt = f"""
You are a SQL Expert. Convert the following natural language question into a SQL query for a SQLite database.

Database Schema:
{schema}
{context_history}

CRITICAL RULES:
1. Return ONLY the SQL query. No markdown, no explanation.
2. **Joins are Usage**:
   To find a student's events: `JOIN event_students es ON s.id = es.student_id JOIN events e ON es.event_id = e.id`
3. **ROBUST NAME MATCHING (IMPORTANT)**:
   - Users might provide only part of a name (e.g., "Sameer Wanjari" for "Sameer Nandesh Wanjari").
   - NEVER use `name LIKE '%First Last%'`.
   - ALWAYS split the name into parts and match each part separately using AND.
   - Example: For "Sameer Wanjari", use: `s.name LIKE '%Sameer%' AND s.name LIKE '%Wanjari%'`.
4. Case Insensitive: `LIKE` in SQLite is case-insensitive for ASCII, but ensure logic holds.
5. "Placed" = e.event_type contains 'Offer' or 'PPO' or 'Pre-Placement'.
6. "Interview Shortlist" = e.event_type contains 'Interview'.
7. "Test Shortlist" = e.event_type contains 'Test'.
8. **Branches**: 'branch' column in `students` table contains values like 'CSE', 'Physics'.
9. **Counts vs Lists**:
   - If asked "How many" ONLY, use `COUNT(DISTINCT s.roll_no)`.
   - If asked "How many" AND "Names/Who/List", use `SELECT DISTINCT s.name, e.company_name...`.
10. Select columns: `students.name`, `students.roll_no`, `students.branch`, `events.company_name`, `events.event_type`.
11. **NO HALLUCINATIONS**: Do NOT guess names or details. If the user's question references a person or company, use the exact parts they provided in a `LIKE` query.

Question: {question}
SQL:
"""

    # Anything that is not a Gemini/Gemma name is treated as a local Ollama model.
    if not model_name.startswith("gemini") and not model_name.startswith("gemma"):
        ollama_response = call_ollama(model_name, prompt, history)
        sql = ollama_response.replace("```sql", "").replace("```", "").strip()
        # Some local models prepend reasoning text — keep from the first SELECT on.
        if "SELECT" in sql.upper():
            start = sql.upper().find("SELECT")
            sql = sql[start:]
        return sql

    # Fail with a clear message instead of the opaque AttributeError the
    # original raised on `None.models` when no API key was configured.
    if client is None:
        raise RuntimeError("Gemini client is not initialised — provide a valid GOOGLE_API_KEY.")

    response = client.models.generate_content(
        model=model_name,
        contents=prompt
    )
    sql = response.text.replace("```sql", "").replace("```", "").strip()
    return sql
151
+
152
def generate_natural_answer(question, sql, df, model_name, history=None):
    """Turn a SQL result DataFrame into a natural-language answer via an LLM.

    df:      pandas DataFrame holding the executed query's result.
    history: optional chat messages used as conversational context.
    Returns the model's answer text. Raises RuntimeError when a cloud model
    is selected but the module-level Gemini `client` was never initialised
    (mirrors the guard in generate_sql for consistent error reporting).
    """
    # Cap the data shipped to the model — huge tables blow the context window.
    # NOTE(review): to_markdown requires the `tabulate` package at runtime — confirm it is installed.
    if len(df) > 50:
        data_context = df.head(50).to_markdown(index=False) + f"\n...(and {len(df)-50} more rows)"
    else:
        data_context = df.to_markdown(index=False)

    context_history = ""
    if history:
        context_history = "\nRecent Conversation Context:\n"
        for msg in history[-4:]:
            context_history += f"{msg['role'].capitalize()}: {msg['content']}\n"

    prompt = f"""
You are a helpful assistant for the IIT BHU Placement Cell.

User Question: {question}
Executed SQL: {sql}
Result Data:
{data_context}
{context_history}

Task: Answer the user's question naturally based ONLY on the result data.

STRICT ANTI-HALLUCINATION RULES:
1. **ONLY Use Result Data**: Do NOT mention any names, companies, branches, or counts that are not explicitly present in the "Result Data" table above.
2. **No Assumptions**: If the result data is empty, say "I couldn't find any records." Do NOT guess.
3. **Schema Grounding**: Do NOT mention fields like "CGPA", "Year of Graduation", or "Phone Number" as they are not tracked in this database.

SPECIAL FORMAT FOR "ANALYSIS" REQUESTS:
If asked for an "analysis" or "overview" of a student/company, focus on:
- **Summarize Shortlists**: Count and list the companies/students from the data.
- **Highlight Offers**: Clearly state any 'Offers' found.

General Rules:
- Use bullet points and bold text for key information.
- Do NOT mention "SQL" or "dataframe".
"""

    # Non-Gemini/Gemma names route to the local Ollama server.
    if not model_name.startswith("gemini") and not model_name.startswith("gemma"):
        return call_ollama(model_name, prompt, history)

    # Clear failure instead of AttributeError on `None.models`.
    if client is None:
        raise RuntimeError("Gemini client is not initialised — provide a valid GOOGLE_API_KEY.")

    response = client.models.generate_content(
        model=model_name,
        contents=prompt
    )
    return response.text
200
+
201
# ---------------------------------------------------------------------------
# Streamlit UI — page setup and sidebar
# ---------------------------------------------------------------------------
st.set_page_config(page_title="Placement Query Bot", page_icon="🎓", layout="wide")

with st.sidebar:
    st.title("🎓 TPC Bot")
    st.markdown("**Created by: Sameer Wanjari**")
    st.markdown("---")

    # API key handling: prefer the environment, otherwise prompt the user.
    api_key = os.getenv("GOOGLE_API_KEY")
    if not is_valid_api_key(api_key):
        st.warning("⚠️ Gemini API Key Missing")
        st.info("""
        **How to get a Key:**
        1. Visit [Google AI Studio](https://aistudio.google.com/app/apikey)
        2. Sign in with Google
        3. Click **"Create API key"**
        4. Copy & paste below 👇
        """)
        user_api_key = st.text_input("Enter Gemini API Key", type="password")
        if user_api_key:
            if is_valid_api_key(user_api_key):
                os.environ["GOOGLE_API_KEY"] = user_api_key
                st.success("Key set!")
                st.rerun()
            else:
                st.error("Invalid key format. Should start with 'AIza'.")
    else:
        st.success("✅ API Key Active")
        if st.button("🗑️ Clear/Change Key"):
            os.environ["GOOGLE_API_KEY"] = ""
            if "messages" in st.session_state:
                st.session_state.messages = []
            st.rerun()

    st.markdown("---")
    st.header("🤖 AI Model")

    # Any locally served Ollama models extend the fixed cloud list.
    ollama_models = get_ollama_models()

    cloud_models = [
        "gemini-2.5-flash",
        "gemini-2.5-flash-lite",
        "gemma-3-1b-it",
        "gemma-3-4b-it",
        "gemma-3-12b-it",
        "gemma-3-27b-it",
    ]
    full_model_list = cloud_models + ollama_models

    selected_model = st.selectbox(
        "Choose AI Brain",
        full_model_list,
        index=0,
        help="Select Gemini/Gemma (Cloud) or Ollama (Local)",
    )

    if selected_model in ollama_models:
        st.info(f"🏠 Running locally via Ollama: `{selected_model}`")
    elif "gemini" in selected_model or "gemma" in selected_model:
        st.info(f"☁️ Running in Cloud via Gemini API")

    st.markdown("---")

    # Quick database stats (computed here; not currently displayed).
    conn = get_db_connection()
    c = conn.cursor()
    c.execute("SELECT COUNT(DISTINCT roll_no) FROM students")
    total_students = c.fetchone()[0]
    c.execute("SELECT COUNT(DISTINCT company_name) FROM events")
    total_companies = c.fetchone()[0]
    conn.close()

    # Manual data-refresh hook: re-runs the ingestion pipeline in place.
    st.header("⚙️ Data")
    if st.button("🔄 Refresh DB"):
        with st.spinner("Processing..."):
            try:
                import process_data
                process_data.process_files()
                st.success("Done! Reloading...")
                st.rerun()
            except Exception as e:
                st.error(f"Error: {e}")
289
+
290
# --- Gemini client initialisation ------------------------------------------
# `client` stays None when no valid key is available; the chat tab checks it.
api_key = os.getenv("GOOGLE_API_KEY")
client = None
if is_valid_api_key(api_key):
    try:
        client = genai.Client(api_key=api_key)
    except Exception as e:
        st.error(f"Failed to initialize Gemini Client: {e}")

# --- Main interface tabs ----------------------------------------------------
tab1, tab2, tab3 = st.tabs(["💬 Chat Assistant", "🔍 Student Explorer", "🏢 Company Explorer"])
301
+
302
# --- TAB 1: Chat assistant --------------------------------------------------
with tab1:
    st.header("Ask anything about placements")
    st.markdown("Examples: *'Analysis of Sameer Wanjari'*, *'How many Physics students got offers?'*")

    # Conversation history persists across reruns via session state.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Replay the conversation so far.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if not client:
        st.warning("⚠️ **Gemini API Key is missing!**")
        st.info("You can still use the **Student Explorer** tab to browse data manually.")
        st.markdown("To enable AI Chat:")
        st.markdown("1. Get a key from [Google AI Studio](https://aistudio.google.com/app/apikey).")
        st.markdown("2. Enter it in the sidebar.")
    else:
        if prompt := st.chat_input("Ask a question..."):
            # Echo the user's message immediately, then record it.
            with st.chat_message("user"):
                st.markdown(prompt)
            st.session_state.messages.append({"role": "user", "content": prompt})

            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                message_placeholder.markdown("Thinking...")

                try:
                    # Pipeline: question -> SQL -> result -> natural answer.
                    sql_query = generate_sql(prompt, selected_model, st.session_state.messages[:-1])
                    result = run_query(sql_query)

                    if isinstance(result, pd.DataFrame):
                        nl_response = generate_natural_answer(prompt, sql_query, result, selected_model, st.session_state.messages[:-1])
                        message_placeholder.markdown(nl_response)

                        st.session_state.messages.append({"role": "assistant", "content": nl_response})

                        with st.expander("View Technical Details (SQL & Data)"):
                            st.code(sql_query, language="sql")
                            st.dataframe(result)
                    else:
                        # run_query returned an "Error: ..." string.
                        message_placeholder.error(result)
                        st.session_state.messages.append({"role": "assistant", "content": f"Error: {result}"})

                except Exception as e:
                    message_placeholder.error(f"An error occurred: {e}")
                    st.session_state.messages.append({"role": "assistant", "content": f"An error occurred: {e}"})

            # Rerun so the history loop repaints and the input stays pinned to the bottom.
            st.rerun()
362
+
363
# --- TAB 2: Student explorer ------------------------------------------------
with tab2:
    st.header("Student Profile Explorer")

    conn = get_db_connection()

    # Branch / year filters side by side.
    col1, col2 = st.columns(2)
    with col1:
        branches = pd.read_sql("SELECT DISTINCT branch FROM students WHERE branch IS NOT NULL ORDER BY branch", conn)['branch'].tolist()
        selected_branch = st.selectbox("Filter by Branch", ["All"] + branches)
    with col2:
        years = pd.read_sql("SELECT DISTINCT year FROM students WHERE year IS NOT NULL ORDER BY year", conn)['year'].tolist()
        selected_year = st.selectbox("Filter by Year", ["All"] + years)

    # Build the (parameterised) student-selector query from the filters.
    query = "SELECT DISTINCT name, roll_no FROM students WHERE 1=1"
    params = []
    if selected_branch != "All":
        query += " AND branch = ?"
        params.append(selected_branch)
    if selected_year != "All":
        query += " AND year = ?"
        params.append(selected_year)
    query += " ORDER BY name"

    students_df = pd.read_sql(query, conn, params=params)

    if students_df.empty:
        st.warning("No students found with filters.")
    else:
        # Display labels look like "Name (Roll)".
        student_options = [f"{row['name']} ({row['roll_no']})" for _, row in students_df.iterrows()]
        selected_student_str = st.selectbox("Select Student", student_options, index=None, placeholder="Type to search...")

        if selected_student_str:
            # Pull the roll number back out of the "Name (Roll)" label.
            roll_no = selected_student_str.split("(")[-1].strip(")")

            st.markdown("---")
            st.subheader(f"Profile: {selected_student_str}")

            # Full event history for this student.
            history_query = """
            SELECT e.company_name, e.event_type, e.topic_url
            FROM event_students es
            JOIN students s ON es.student_id = s.id
            JOIN events e ON es.event_id = e.id
            WHERE s.roll_no = ?
            ORDER BY e.event_type, e.company_name
            """
            history = pd.read_sql(history_query, conn, params=[roll_no])

            if not history.empty:
                # Headline metrics.
                offers = history[history['event_type'].str.contains('Offer', case=False)]
                interviews = history[history['event_type'].str.contains('Interview', case=False)]
                tests = history[history['event_type'].str.contains('Test', case=False)]

                m1, m2, m3 = st.columns(3)
                m1.metric("Offers", len(offers))
                m2.metric("Interviews", len(interviews))
                m3.metric("Tests", len(tests))

                st.write("#### 📅 Event Timeline")

                # One expander per event type for a cleaner view.
                for etype in history['event_type'].unique():
                    with st.expander(f"{etype} ({len(history[history['event_type']==etype])})", expanded=True):
                        subset = history[history['event_type'] == etype]
                        for _, row in subset.iterrows():
                            # Link to the forum post when one exists.
                            if row['topic_url']:
                                st.markdown(f"- [{row['company_name']}]({row['topic_url']})")
                            else:
                                st.markdown(f"- {row['company_name']}")
            else:
                st.info("No recorded events for this student.")

    conn.close()
446
+
447
# --- TAB 3: Company explorer ------------------------------------------------
with tab3:
    st.header("🏢 Company Explorer")
    conn = get_db_connection()

    companies = pd.read_sql("SELECT DISTINCT company_name FROM events ORDER BY company_name", conn)['company_name'].tolist()
    if not companies:
        st.warning("No companies found.")
    else:
        selected_company = st.selectbox("Select Company", companies, index=None, placeholder="Choose a company...")

        if selected_company:
            st.markdown("---")
            st.subheader(f"Results for: {selected_company}")

            # All events for this company; Full-Time vs Internship split below.
            events_df = pd.read_sql("SELECT id, event_type, topic_url FROM events WHERE company_name = ?", conn, params=[selected_company])

            if events_df.empty:
                st.info("No events found for this company.")
            else:
                ft_events_df = events_df[~events_df['event_type'].str.contains("Internship|Intern", case=False, regex=True)]
                intern_events_df = events_df[events_df['event_type'].str.contains("Internship|Intern", case=False, regex=True)]

                def display_events_table(events_subset, section_title):
                    # Render one section (Full-Time or Internship): one expander per event type.
                    if events_subset.empty:
                        return

                    st.subheader(section_title)
                    unique_types = events_subset['event_type'].unique()

                    for etype in sorted(unique_types):
                        matched_ids = events_subset[events_subset['event_type'] == etype]['id'].tolist()

                        # Parameterised IN (...) over the matched event ids.
                        placeholders = ','.join(['?'] * len(matched_ids))
                        q = f"""
                        SELECT DISTINCT s.name, s.roll_no, s.branch, s.year, e.event_type, e.topic_url
                        FROM event_students es
                        JOIN students s ON es.student_id = s.id
                        JOIN events e ON es.event_id = e.id
                        WHERE es.event_id IN ({placeholders})
                        ORDER BY s.name
                        """
                        results = pd.read_sql(q, conn, params=matched_ids)

                        if not results.empty:
                            with st.expander(f"{etype} ({len(results)})", expanded=False):
                                # Link back to the source forum post when available.
                                links = events_subset[events_subset['event_type'] == etype]['topic_url'].unique()
                                if len(links) > 0 and links[0]:
                                    st.markdown(f"🔗 **[View Original Forum Post]({links[0]})**")

                                display_df = results[['name', 'roll_no', 'branch', 'year']].copy()
                                display_df.columns = ["Name", "Roll No", "Branch", "Year"]
                                st.dataframe(display_df, hide_index=True, use_container_width=True)

                display_events_table(ft_events_df, "🎓 Full-Time")
                display_events_table(intern_events_df, "💼 Internship")

    conn.close()