ankitkr9911 committed on
Commit
8fc9a71
·
verified ·
1 Parent(s): 1bfc480

Update pages/1_real_time_prediction.py

Browse files
Files changed (1) hide show
  1. pages/1_real_time_prediction.py +189 -263
pages/1_real_time_prediction.py CHANGED
@@ -1,28 +1,19 @@
1
  import streamlit as st
 
 
2
  import av
3
- import cv2
4
- import numpy as np
5
- import pandas as pd
6
  import time
7
- import queue
8
- import logging
9
  from datetime import datetime
10
- from pathlib import Path
11
- from typing import List, NamedTuple
12
-
13
- from streamlit_webrtc import webrtc_streamer, WebRtcMode
14
-
15
- # Configure logging
16
- logger = logging.getLogger(__name__)
17
 
18
  # Page configuration
19
  st.set_page_config(
20
- page_title="Live Attendance System",
21
  page_icon="🟒",
22
  layout="wide"
23
  )
24
 
25
- # Apply custom CSS
26
  st.markdown("""
27
  <style>
28
  .main-header {
@@ -66,301 +57,236 @@ st.markdown("""
66
  70% { box-shadow: 0 0 0 10px rgba(76, 175, 80, 0); }
67
  100% { box-shadow: 0 0 0 0 rgba(76, 175, 80, 0); }
68
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  </style>
70
  """, unsafe_allow_html=True)
71
 
72
  # Header
73
- st.markdown("<h1 class='main-header'>🟒 Live Attendance System</h1>", unsafe_allow_html=True)
74
 
75
- # Initialize session state variables if not already set
76
- if 'last_log_time' not in st.session_state:
77
- st.session_state.last_log_time = time.time()
78
- if 'attendance_logs' not in st.session_state:
79
- st.session_state.attendance_logs = []
80
- if 'demo_face_db' not in st.session_state:
81
- # Create a demo database for testing
82
- st.session_state.demo_face_db = pd.DataFrame({
83
- 'Name': ['John Doe', 'Jane Smith', 'Alex Johnson', 'Maria Garcia'],
84
- 'Role': ['Student', 'Teacher', 'Student', 'Student'],
85
- 'Facial Feature': [np.random.rand(128) for _ in range(4)] # Dummy facial embeddings
86
- })
 
 
 
 
 
 
 
 
 
 
 
87
 
88
- # For thread safety, use a queue
89
- result_queue = queue.Queue()
90
-
91
- # Define a named tuple for face detection results
92
- class FaceDetection(NamedTuple):
93
- name: str
94
- role: str
95
- confidence: float
96
- box: np.ndarray
97
-
98
- # Main layout
99
  left_col, right_col = st.columns([3, 2])
100
 
101
  with left_col:
102
  st.markdown("<div class='card'>", unsafe_allow_html=True)
103
- st.markdown("<h2>πŸ“Ή Live Recognition Feed</h2>", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
- # Recognition settings
106
  with st.expander("Recognition Settings"):
 
107
  confidence_threshold = st.slider("Recognition Confidence Threshold", 0.3, 0.9, 0.5, 0.05)
108
- log_interval = st.slider("Log Update Interval (seconds)", 5, 60, 30)
109
 
110
- # Load face detection model (using OpenCV's built-in face detector for simplicity)
111
- face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
 
112
 
113
- # Simple face recognition simulator
114
- def recognize_face(face_img, db):
115
- """Simulate face recognition using random matching for demo purposes"""
116
- # In a real app, you would use a proper face recognition model here
117
- # This is just a placeholder for demonstration
118
- if np.random.random() > 0.3: # 70% chance of "recognizing" a face
119
- person_idx = np.random.randint(0, len(db))
120
- person = db.iloc[person_idx]
121
- confidence = np.random.uniform(0.5, 0.95)
122
- return person['Name'], person['Role'], confidence
123
- return None, None, 0.0
124
-
125
- # Video frame callback
126
  def video_frame_callback(frame):
 
127
  img = frame.to_ndarray(format="bgr24")
128
 
129
- # Convert to grayscale for face detection
130
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
131
-
132
- # Detect faces
133
- faces = face_cascade.detectMultiScale(gray, 1.1, 5)
134
-
135
- detections = []
 
136
 
137
- # Process each detected face
138
- for (x, y, w, h) in faces:
139
- # Draw rectangle around the face
140
- cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
 
 
141
 
142
- # Extract the face ROI
143
- face_roi = img[y:y+h, x:x+w]
144
-
145
- # Recognize face (simulated)
146
- name, role, confidence = recognize_face(face_roi, st.session_state.demo_face_db)
147
-
148
- if name and confidence > confidence_threshold:
149
- # Create detection object
150
- detection = FaceDetection(
151
- name=name,
152
- role=role,
153
- confidence=confidence,
154
- box=np.array([x, y, x+w, y+h])
155
- )
156
- detections.append(detection)
157
-
158
- # Add text labels
159
- label = f"{name}: {confidence:.2f}"
160
- cv2.putText(img, label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
161
-
162
- # Check if it's time to log attendance
163
- current_time = time.time()
164
- if current_time - st.session_state.last_log_time >= log_interval:
165
- # Log attendance
166
- for detection in detections:
167
- st.session_state.attendance_logs.append({
168
- 'Name': detection.name,
169
- 'Role': detection.role,
170
- 'Timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
171
- 'Confidence': f"{detection.confidence:.2f}"
172
- })
173
- st.session_state.last_log_time = current_time
174
-
175
- # Put detections in queue for display
176
- result_queue.put(detections)
177
-
178
- return av.VideoFrame.from_ndarray(img, format="bgr24")
179
 
180
- # WebRTC streamer with STUN/TURN config
181
  st.markdown("<div class='webcam-container'>", unsafe_allow_html=True)
182
-
183
- # Define proper RTC configuration with multiple fallback STUN/TURN servers
184
- rtc_configuration = {
185
- "iceServers": [
186
- {"urls": ["stun:stun.l.google.com:19302", "stun:stun1.l.google.com:19302", "stun:stun2.l.google.com:19302"]},
187
- {
188
- "urls": "turn:openrelay.metered.ca:80",
189
- "username": "openrelayproject",
190
- "credential": "openrelayproject"
191
- },
192
- {
193
- "urls": "turn:openrelay.metered.ca:443",
194
- "username": "openrelayproject",
195
- "credential": "openrelayproject"
196
- },
197
- {
198
- "urls": "turn:openrelay.metered.ca:443?transport=tcp",
199
- "username": "openrelayproject",
200
- "credential": "openrelayproject"
201
- }
202
- ],
203
- "iceTransportPolicy": "all"
204
- }
205
-
206
- # Add WebRTC options for better compatibility
207
- webrtc_options = {
208
- "media_stream_constraints": {"video": {"width": {"ideal": 640}, "height": {"ideal": 480}}, "audio": False},
209
- }
210
-
211
- webrtc_ctx = webrtc_streamer(
212
- key="face-recognition",
213
- mode=WebRtcMode.SENDRECV,
214
  video_frame_callback=video_frame_callback,
215
- rtc_configuration=rtc_configuration,
216
- media_stream_constraints=webrtc_options["media_stream_constraints"],
217
- async_processing=True,
218
  )
219
  st.markdown("</div>", unsafe_allow_html=True)
220
 
221
- # Add troubleshooting information
222
- if not webrtc_ctx.state.playing:
223
- st.warning("""
224
- ### Troubleshooting WebRTC Connection
225
- If you're seeing "Connection is taking longer than expected" error:
226
-
227
- 1. **Check your browser**: Make sure you're using Chrome, Firefox, or Edge
228
- 2. **Allow camera access**: When prompted, allow the browser to access your camera
229
- 3. **Network issues**: If behind a firewall or VPN, try disabling it temporarily
230
- 4. **Try refresh**: Sometimes a simple page refresh can fix connection issues
231
-
232
- This application requires camera access to function properly.
233
- """)
234
-
235
- # Display instructions
236
  st.info("""
237
  **Instructions:**
238
  1. Stand in front of the camera
239
  2. Wait for the system to recognize your face
240
  3. Your attendance will be logged automatically
241
- 4. The system records entry times at the specified interval
242
  """)
243
 
244
- # Display detections
245
- if webrtc_ctx.state.playing:
246
- detection_placeholder = st.empty()
247
-
248
- # Similar to the working example, show detected faces in a table
249
- if st.checkbox("Show detected people", value=True):
250
- while True:
251
- try:
252
- result = result_queue.get(timeout=1.0)
253
- if result:
254
- # Convert detections to DataFrame for display
255
- detection_data = []
256
- for det in result:
257
- detection_data.append({
258
- 'Name': det.name,
259
- 'Role': det.role,
260
- 'Confidence': f"{det.confidence:.2f}"
261
- })
262
-
263
- detection_df = pd.DataFrame(detection_data)
264
- detection_placeholder.dataframe(detection_df)
265
- except queue.Empty:
266
- continue
267
-
268
  st.markdown("</div>", unsafe_allow_html=True)
269
 
270
  with right_col:
271
- # Registered users
272
- st.markdown("<div class='card'>", unsafe_allow_html=True)
273
- st.markdown("<h2>πŸ‘₯ Registered Users</h2>", unsafe_allow_html=True)
274
-
275
- # Display user database
276
- st.dataframe(
277
- st.session_state.demo_face_db[['Name', 'Role']],
278
- use_container_width=True,
279
- hide_index=True
280
- )
281
-
282
- # Add new user
283
- with st.expander("Add New User"):
284
- new_name = st.text_input("Name")
285
- new_role = st.selectbox("Role", ["Student", "Teacher"])
286
-
287
- if st.button("Add User"):
288
- if new_name:
289
- # Add new user with random facial features
290
- new_user = pd.DataFrame({
291
- 'Name': [new_name],
292
- 'Role': [new_role],
293
- 'Facial Feature': [np.random.rand(128)]
294
- })
295
- st.session_state.demo_face_db = pd.concat([st.session_state.demo_face_db, new_user], ignore_index=True)
296
- st.success(f"Added user: {new_name}")
297
- else:
298
- st.error("Please enter a name")
299
-
300
- st.markdown("</div>", unsafe_allow_html=True)
301
-
302
- # Recent activity
303
  st.markdown("<div class='card'>", unsafe_allow_html=True)
304
- st.markdown("<h2>πŸ•’ Recent Activity</h2>", unsafe_allow_html=True)
305
 
306
- if st.session_state.attendance_logs:
307
- logs_df = pd.DataFrame(st.session_state.attendance_logs)
308
  st.dataframe(
309
- logs_df[['Name', 'Role', 'Timestamp', 'Confidence']],
310
  use_container_width=True,
311
- hide_index=True
 
312
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
313
  else:
314
- st.info("No activity logged yet")
315
-
316
- # Clear logs button
317
- if st.button("Clear Logs"):
318
- st.session_state.attendance_logs = []
319
- st.success("Logs cleared")
320
 
321
  st.markdown("</div>", unsafe_allow_html=True)
322
-
323
- # Add alternative method if WebRTC fails
324
- st.sidebar.markdown("## Alternative Mode")
325
- use_fallback = st.sidebar.checkbox("Use Image Upload Mode (if webcam doesn't work)")
326
-
327
- if use_fallback:
328
- st.sidebar.info("Upload a photo to simulate face recognition")
329
- uploaded_file = st.sidebar.file_uploader("Upload image", type=["jpg", "jpeg", "png"])
330
 
331
- if uploaded_file is not None:
332
- # Read image
333
- file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
334
- img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
335
-
336
- # Convert BGR to RGB (for display)
337
- img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
338
-
339
- # Perform face detection
340
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
341
- faces = face_cascade.detectMultiScale(gray, 1.1, 5)
342
 
343
- for (x, y, w, h) in faces:
344
- # Draw rectangle
345
- cv2.rectangle(img_rgb, (x, y), (x+w, y+h), (0, 255, 0), 2)
 
346
 
347
- # Simulate recognition
348
- name, role, confidence = recognize_face(img[y:y+h, x:x+w], st.session_state.demo_face_db)
 
 
349
 
350
- if name and confidence > confidence_threshold:
351
- # Add text labels
352
- label = f"{name}: {confidence:.2f}"
353
- cv2.putText(img_rgb, label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
354
-
355
- # Add to logs
356
- st.session_state.attendance_logs.append({
357
- 'Name': name,
358
- 'Role': role,
359
- 'Timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
360
- 'Confidence': f"{confidence:.2f}",
361
- 'Method': 'Image Upload'
362
- })
363
-
364
- # Show image with detections
365
- st.sidebar.image(img_rgb, caption="Processed Image", use_column_width=True)
 
 
 
 
 
 
 
 
 
 
366
 
 
 
 
1
  import streamlit as st
2
+ from home import face_rec
3
+ from streamlit_webrtc import webrtc_streamer
4
  import av
 
 
 
5
  import time
6
+ import pandas as pd
 
7
  from datetime import datetime
 
 
 
 
 
 
 
8
 
9
  # Page configuration
10
  st.set_page_config(
11
+ page_title="Live Attendance | AI Attendance",
12
  page_icon="🟒",
13
  layout="wide"
14
  )
15
 
16
+ # Custom CSS
17
  st.markdown("""
18
  <style>
19
  .main-header {
 
57
  70% { box-shadow: 0 0 0 10px rgba(76, 175, 80, 0); }
58
  100% { box-shadow: 0 0 0 0 rgba(76, 175, 80, 0); }
59
  }
60
+ .inactive {
61
+ background-color: #9e9e9e;
62
+ }
63
+ .section-title {
64
+ font-size: 1.3rem;
65
+ font-weight: 600;
66
+ color: #424242;
67
+ margin-bottom: 15px;
68
+ padding-bottom: 8px;
69
+ border-bottom: 2px solid #e0e0e0;
70
+ }
71
+ .attendance-table {
72
+ font-size: 0.9rem;
73
+ }
74
+ .footer {
75
+ text-align: center;
76
+ margin-top: 30px;
77
+ padding: 20px;
78
+ color: #6c757d;
79
+ border-top: 1px solid #e9ecef;
80
+ }
81
+ .stats-box {
82
+ background-color: #f8f9fa;
83
+ border-radius: 8px;
84
+ padding: 15px;
85
+ text-align: center;
86
+ box-shadow: 0 2px 5px rgba(0,0,0,0.1);
87
+ }
88
+ .stats-number {
89
+ font-size: 1.8rem;
90
+ font-weight: bold;
91
+ color: #4CAF50;
92
+ }
93
+ .stats-label {
94
+ color: #757575;
95
+ font-size: 0.9rem;
96
+ }
97
  </style>
98
  """, unsafe_allow_html=True)
99
 
100
  # Header
101
+ st.markdown("<h1 class='main-header'>🟒 Live Attendance Tracking</h1>", unsafe_allow_html=True)
102
 
103
+ # System status indicators
104
+ col1, col2, col3 = st.columns(3)
105
+ with col1:
106
+ st.markdown("""
107
+ <div class="status-indicator">
108
+ <div class="status-dot active"></div>
109
+ <div>Recognition System: <b>Active</b></div>
110
+ </div>
111
+ """, unsafe_allow_html=True)
112
+ with col2:
113
+ st.markdown("""
114
+ <div class="status-indicator">
115
+ <div class="status-dot active"></div>
116
+ <div>Database Connection: <b>Active</b></div>
117
+ </div>
118
+ """, unsafe_allow_html=True)
119
+ with col3:
120
+ st.markdown("""
121
+ <div class="status-indicator">
122
+ <div class="status-dot active"></div>
123
+ <div>Auto-Logging: <b>Enabled</b></div>
124
+ </div>
125
+ """, unsafe_allow_html=True)
126
 
127
+ # Main content
 
 
 
 
 
 
 
 
 
 
128
  left_col, right_col = st.columns([3, 2])
129
 
130
  with left_col:
131
  st.markdown("<div class='card'>", unsafe_allow_html=True)
132
+ st.markdown("<h2 class='section-title'>πŸ“Ή Live Recognition Feed</h2>", unsafe_allow_html=True)
133
+
134
+ # Retrieve registered data
135
+ with st.spinner('Retrieving data from Redis database...'):
136
+ try:
137
+ redis_face_db = face_rec.retrive_data(name='academy:register')
138
+ if not redis_face_db.empty:
139
+ st.success('βœ… User data retrieved successfully!')
140
+ else:
141
+ st.warning('⚠️ No registered users found in the database.')
142
+ except Exception as e:
143
+ st.error(f"Error retrieving data: {e}")
144
+ redis_face_db = pd.DataFrame()
145
 
146
+ # Configuration options
147
  with st.expander("Recognition Settings"):
148
+ wait_time = st.slider("Log Update Interval (seconds)", 10, 120, 30)
149
  confidence_threshold = st.slider("Recognition Confidence Threshold", 0.3, 0.9, 0.5, 0.05)
 
150
 
151
+ # Initialize real-time prediction
152
+ set_time = time.time()
153
+ realtime_pred = face_rec.RealTimePred()
154
 
155
+ # Video frame callback function
 
 
 
 
 
 
 
 
 
 
 
 
156
  def video_frame_callback(frame):
157
+ global set_time
158
  img = frame.to_ndarray(format="bgr24")
159
 
160
+ # Perform face prediction
161
+ pred_img = realtime_pred.face_prediction(
162
+ img,
163
+ redis_face_db,
164
+ 'Facial Feature',
165
+ ['Name', 'Role'],
166
+ thresh=confidence_threshold
167
+ )
168
 
169
+ # Check if it's time to save logs
170
+ time_now = time.time()
171
+ diff_time = time_now - set_time
172
+ if diff_time >= wait_time:
173
+ realtime_pred.saveLogs_redis()
174
+ set_time = time.time() # Reset timer
175
 
176
+ return av.VideoFrame.from_ndarray(pred_img, format="bgr24")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
 
178
+ # Webcam feed with face recognition
179
  st.markdown("<div class='webcam-container'>", unsafe_allow_html=True)
180
+ webrtc_streamer(
181
+ key="realTimePrediction",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
182
  video_frame_callback=video_frame_callback,
183
+ rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
184
+ media_stream_constraints={"video": True, "audio": False},
 
185
  )
186
  st.markdown("</div>", unsafe_allow_html=True)
187
 
188
+ # Instructions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
189
  st.info("""
190
  **Instructions:**
191
  1. Stand in front of the camera
192
  2. Wait for the system to recognize your face
193
  3. Your attendance will be logged automatically
194
+ 4. The system records entry and exit times
195
  """)
196
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
197
  st.markdown("</div>", unsafe_allow_html=True)
198
 
199
  with right_col:
200
+ # User database card
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  st.markdown("<div class='card'>", unsafe_allow_html=True)
202
+ st.markdown("<h2 class='section-title'>πŸ‘₯ Registered Users</h2>", unsafe_allow_html=True)
203
 
204
+ # Display registered users
205
+ if not redis_face_db.empty:
206
  st.dataframe(
207
+ redis_face_db[['Name', 'Role']].sort_values('Name'),
208
  use_container_width=True,
209
+ hide_index=True,
210
+ height=200
211
  )
212
+
213
+ # Display statistics
214
+ total_users = len(redis_face_db)
215
+ students = len(redis_face_db[redis_face_db['Role'] == 'Student'])
216
+ teachers = len(redis_face_db[redis_face_db['Role'] == 'Teacher'])
217
+
218
+ st.markdown("<br>", unsafe_allow_html=True)
219
+ stats_cols = st.columns(3)
220
+ with stats_cols[0]:
221
+ st.markdown(f"""
222
+ <div class="stats-box">
223
+ <div class="stats-number">{total_users}</div>
224
+ <div class="stats-label">Total Users</div>
225
+ </div>
226
+ """, unsafe_allow_html=True)
227
+ with stats_cols[1]:
228
+ st.markdown(f"""
229
+ <div class="stats-box">
230
+ <div class="stats-number">{students}</div>
231
+ <div class="stats-label">Students</div>
232
+ </div>
233
+ """, unsafe_allow_html=True)
234
+ with stats_cols[2]:
235
+ st.markdown(f"""
236
+ <div class="stats-box">
237
+ <div class="stats-number">{teachers}</div>
238
+ <div class="stats-label">Teachers</div>
239
+ </div>
240
+ """, unsafe_allow_html=True)
241
  else:
242
+ st.warning("No registered users found in the database.")
 
 
 
 
 
243
 
244
  st.markdown("</div>", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
245
 
246
+ # Recent activity card
247
+ st.markdown("<div class='card'>", unsafe_allow_html=True)
248
+ st.markdown("<h2 class='section-title'>πŸ•’ Recent Activity</h2>", unsafe_allow_html=True)
249
+
250
+ # Load and display recent logs
251
+ try:
252
+ logs_list = face_rec.r.lrange('attendance:logs', 0, 9) # Get last 10 logs
 
 
 
 
253
 
254
+ if logs_list:
255
+ # Convert bytes to string and create dataframe
256
+ logs_string = [log.decode('utf-8').split('@') for log in logs_list]
257
+ logs_df = pd.DataFrame(logs_string, columns=['Name', 'Role', 'Timestamp'])
258
 
259
+ # Format timestamp
260
+ logs_df['Timestamp'] = pd.to_datetime(logs_df['Timestamp'],format='ISO8601')
261
+ logs_df['Time'] = logs_df['Timestamp'].dt.strftime('%H:%M:%S')
262
+ logs_df['Date'] = logs_df['Timestamp'].dt.strftime('%Y-%m-%d')
263
 
264
+ # Display recent logs
265
+ st.dataframe(
266
+ logs_df[['Name', 'Role', 'Time', 'Date']],
267
+ use_container_width=True,
268
+ hide_index=True
269
+ )
270
+ else:
271
+ st.info("No recent activity logged.")
272
+ except Exception as e:
273
+ st.error(f"Error loading logs: {e}")
274
+
275
+ st.markdown("</div>", unsafe_allow_html=True)
276
+
277
+ # Quick actions
278
+ st.markdown("<div class='card'>", unsafe_allow_html=True)
279
+ st.markdown("<h2 class='section-title'>⚑ Quick Actions</h2>", unsafe_allow_html=True)
280
+
281
+ col1, col2 = st.columns(2)
282
+ with col1:
283
+ if st.button("πŸ“Š View Reports", use_container_width=True):
284
+ st.switch_page("pages/report.py")
285
+ with col2:
286
+ if st.button("πŸ“ Add New User", use_container_width=True):
287
+ st.switch_page("pages/page1.py")
288
+
289
+ st.markdown("</div>", unsafe_allow_html=True)
290
 
291
+ # Footer
292
+ st.markdown("<div class='footer'>AI-Powered Attendance System β€’ Β© 2025</div>", unsafe_allow_html=True)