Arnel Gwen Nuqui committed on
Commit
454c384
·
2 Parent(s): fd40825 471a1ec

Restore routes folder from commit 4a63a35

Browse files
routes/__init__.py ADDED
File without changes
routes/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (164 Bytes). View file
 
routes/__pycache__/classification_routes.cpython-311.pyc ADDED
Binary file (10.1 kB). View file
 
routes/__pycache__/video_routes.cpython-311.pyc ADDED
Binary file (11.5 kB). View file
 
routes/__pycache__/webrtc_routes.cpython-311.pyc ADDED
Binary file (36.9 kB). View file
 
routes/classification_routes.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, io, base64, requests
2
+ from pathlib import Path
3
+ from flask import Blueprint, request, jsonify
4
+ import tensorflow as tf
5
+ import numpy as np
6
+ from PIL import Image
7
+
8
try:
    from tensorflow.keras.applications import mobilenet_v2 as _mv2
except Exception:
    # Older standalone-Keras installs expose the same module under `keras`.
    from keras.applications import mobilenet_v2 as _mv2

preprocess_input = _mv2.preprocess_input
classification_bp = Blueprint('classification_bp', __name__)

# ------------------------------------------------------------
# Model setup and auto-download
# ------------------------------------------------------------
MODEL_DIR = Path(os.getenv("MODEL_DIR", "/tmp/model"))
os.makedirs(MODEL_DIR, exist_ok=True)

MODEL_URLS = {
    "model": "https://huggingface.co/Gwen01/ProctorVision-Models/resolve/main/cheating_mobilenetv2_final.keras",
    "threshold": "https://huggingface.co/Gwen01/ProctorVision-Models/resolve/main/best_threshold.npy"
}

# Map of artifact key -> local file path; each file is fetched once and cached.
MODEL_PATHS = {}
for key, url in MODEL_URLS.items():
    local_path = MODEL_DIR / Path(url).name
    MODEL_PATHS[key] = str(local_path)
    if not local_path.exists():
        print(f"📥 Downloading {key} from Hugging Face…")
        # Explicit (connect, read) timeout: requests has no default timeout,
        # so a stalled CDN connection would otherwise hang startup forever.
        r = requests.get(url, timeout=(10, 300))
        r.raise_for_status()
        with open(local_path, "wb") as f:
            f.write(r.content)
        print(f"✅ Saved {key} → {local_path}")

# Candidate filenames for compatibility
CANDIDATES = [
    "cheating_mobilenetv2_final.keras",
    "mnv2_clean_best.keras",
    "mnv2_continue.keras",
    "mnv2_finetune_best.keras",
]

# Load the first candidate model present in MODEL_DIR (if any).
# `next` only yields paths that exist, so no second .exists() check is needed.
model_path = next((MODEL_DIR / f for f in CANDIDATES if (MODEL_DIR / f).exists()), None)
if model_path is not None:
    model = tf.keras.models.load_model(model_path, compile=False)
    print(f"✅ Model loaded: {model_path}")
else:
    model = None
    print(f"⚠️ No model found in {MODEL_DIR}. Put one of: {CANDIDATES}")

# --- Load threshold ---
thr_file = MODEL_DIR / "best_threshold.npy"
# .ravel()[0] tolerates both 0-d and 1-d saved arrays; fall back to the
# tuned default when no threshold file is present.
THRESHOLD = float(np.load(thr_file).ravel()[0]) if thr_file.exists() else 0.555
print(f"📊 Using decision threshold: {THRESHOLD:.3f}")

# --- Input shape ---
if model is not None:
    H, W = model.input_shape[1:3]
else:
    H, W = 224, 224  # fallback

# Index 1 is the "non-cheating" class; predictions yield P(non-cheating).
LABELS = ["Cheating", "Not Cheating"]
68
+
69
# ------------------------------------------------------------
# Helper Functions
# ------------------------------------------------------------
def preprocess_pil(pil_img: Image.Image) -> np.ndarray:
    """Convert a PIL image into a (1, H, W, 3) float32 batch for the model."""
    rgb = pil_img.convert("RGB")
    if rgb.size != (W, H):
        rgb = rgb.resize((W, H), Image.BILINEAR)
    arr = preprocess_input(np.asarray(rgb, dtype=np.float32))
    return arr[None, ...]
79
+
80
def predict_batch(batch_np: np.ndarray) -> np.ndarray:
    """Run the model once on a batch and return per-image P(non-cheating).

    Handles both output heads: a single sigmoid of shape (N,)/(N, 1) and a
    two-column softmax of shape (N, 2), where column 1 is "Not Cheating".

    The original implementation ravel()-ed first (making the ndim==0 branch
    unreachable) and re-ran inference to detect the two-column case; inspecting
    the raw output shape gives the same result with a single predict call.
    """
    raw = model.predict(batch_np, verbose=0)
    if raw.ndim == 2 and raw.shape[1] == 2:
        return raw[:, 1]  # probability of "Not Cheating"
    return raw.ravel()
91
+
92
def label_from_prob(prob_non_cheating: float) -> str:
    """Map P(non-cheating) to a label using the tuned decision threshold."""
    is_not_cheating = prob_non_cheating >= THRESHOLD
    return LABELS[1] if is_not_cheating else LABELS[0]
94
+
95
# ------------------------------------------------------------
# Environment Variables
# ------------------------------------------------------------
# Base URL of the main (Railway) backend; trailing slash stripped so
# endpoint paths can be appended with a leading "/".
RAILWAY_API = os.getenv("RAILWAY_API", "").rstrip("/")
if not RAILWAY_API:
    print("⚠️ WARNING: RAILWAY_API not set — backend sync will fail.")
101
+
102
# ------------------------------------------------------------
# Route 1 — Classify uploaded multiple files (manual)
# ------------------------------------------------------------
@classification_bp.route('/classify_multiple', methods=['POST'])
def classify_multiple():
    """Classify every image uploaded under the 'files' multipart field.

    Returns 500 when no model is loaded, 400 on missing/unreadable files,
    otherwise a JSON payload with the threshold and per-image results.
    """
    if model is None:
        return jsonify({"error": "Model not loaded."}), 500

    files = request.files.getlist('files') if 'files' in request.files else []
    if not files:
        return jsonify({"error": "No files uploaded"}), 400

    batch = []
    for upload in files:
        try:
            image = Image.open(io.BytesIO(upload.read()))
            batch.append(preprocess_pil(image)[0])
        except Exception as e:
            # Any single bad file aborts the whole request.
            return jsonify({"error": f"Error reading image: {str(e)}"}), 400

    probs = predict_batch(np.stack(batch, axis=0))
    results = [
        {"label": label_from_prob(p), "prob_non_cheating": float(p)}
        for p in probs
    ]
    return jsonify({"threshold": THRESHOLD, "results": results})
130
+
131
# ------------------------------------------------------------
# Route 2 — Auto-classify Behavior Logs (Backend-to-Backend)
# ------------------------------------------------------------
@classification_bp.route('/classify_behavior_logs', methods=['POST'])
def classify_behavior_logs():
    """Fetch pending behavior logs from Railway, classify them, push labels back.

    Expects JSON body {"user_id": ..., "exam_id": ...}.
    Returns 200 with a summary, 400 on missing IDs, 500 on backend failures.
    """
    if model is None:
        return jsonify({"error": "Model not loaded."}), 500

    data = request.get_json(silent=True) or {}
    user_id = data.get('user_id')
    exam_id = data.get('exam_id')
    if not user_id or not exam_id:
        return jsonify({"error": "Missing user_id or exam_id"}), 400

    # --- Fetch behavior logs from Railway ---
    try:
        fetch_url = f"{RAILWAY_API}/api/fetch_behavior_logs"
        # Explicit timeout: requests never times out by default, which would
        # otherwise leave this worker hanging if the backend stalls.
        response = requests.get(fetch_url,
                                params={"user_id": user_id, "exam_id": exam_id},
                                timeout=30)
        if response.status_code != 200:
            return jsonify({"error": f"Failed to fetch logs: {response.text}"}), 500

        logs = response.json().get("logs", [])
        if not logs:
            return jsonify({"message": "No logs to classify."}), 200
    except Exception as e:
        return jsonify({"error": f"Failed to reach Railway API: {str(e)}"}), 500

    # --- Process & Predict ---
    updates = []
    CHUNK = 64  # bound per-batch memory while keeping inference batched
    for i in range(0, len(logs), CHUNK):
        chunk = logs[i:i+CHUNK]
        batch = []
        ids = []

        for log in chunk:
            try:
                img_data = base64.b64decode(log["image_base64"])
                pil = Image.open(io.BytesIO(img_data))
                batch.append(preprocess_pil(pil)[0])
                ids.append(log["id"])
            except Exception as e:
                # Skip unreadable images but keep classifying the rest.
                print(f"⚠️ Failed to read image ID {log['id']}: {e}")

        if not batch:
            continue

        batch_np = np.stack(batch, axis=0)
        probs = predict_batch(batch_np)
        labels = [label_from_prob(p) for p in probs]

        for log_id, lbl in zip(ids, labels):
            updates.append({"id": log_id, "label": lbl})

    # --- Send predictions back to Railway ---
    try:
        update_url = f"{RAILWAY_API}/api/update_classifications"
        post_res = requests.post(update_url, json={"updates": updates}, timeout=30)
        if post_res.status_code != 200:
            return jsonify({"error": f"Failed to update classifications: {post_res.text}"}), 500
    except Exception as e:
        return jsonify({"error": f"Failed to push updates: {str(e)}"}), 500

    return jsonify({
        "message": f"Classification complete for {len(updates)} logs.",
        "threshold": THRESHOLD
    }), 200
routes/webrtc_routes.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio, time, traceback, os, threading, base64, cv2, numpy as np, mediapipe as mp, requests
2
+ from collections import defaultdict, deque
3
+ from aiortc import RTCPeerConnection, RTCSessionDescription
4
+ from aiortc.contrib.media import MediaBlackhole
5
+ from flask import Blueprint, request, jsonify
6
+
7
# ----------------------------------------------------------------------
# CONFIGURATION
# ----------------------------------------------------------------------
webrtc_bp = Blueprint("webrtc", __name__)

# Base URL of your main (Railway) backend; trailing slash stripped so
# endpoint paths can be appended with a leading "/".
RAILWAY_API = os.getenv("RAILWAY_API", "").rstrip("/")
if not RAILWAY_API:
    print("⚠️ WARNING: RAILWAY_API not set — backend communication may fail.")

# Tunables (seconds). RECV_TIMEOUT_S bounds how long track.recv() may block
# before the frame-reader loop gives up on a video track.
# NOTE(review): SUMMARY_EVERY_S and HEARTBEAT_S are not referenced anywhere
# in this module — confirm they are used elsewhere or remove them.
SUMMARY_EVERY_S = float(os.getenv("PROCTOR_SUMMARY_EVERY_S", "1.0"))
RECV_TIMEOUT_S = float(os.getenv("PROCTOR_RECV_TIMEOUT_S", "5.0"))
HEARTBEAT_S = float(os.getenv("PROCTOR_HEARTBEAT_S", "10.0"))
20
+
21
# ----------------------------------------------------------------------
# LOGGING UTIL
# ----------------------------------------------------------------------
def log(event, sid="-", eid="-", **kv):
    """Print one flushed log line: "[EVENT] sid=... eid=... k=v ..."."""
    extras = [f"{key}={val}" for key, val in kv.items()]
    line = f"[{event}] sid={sid} eid={eid} " + " ".join(extras)
    print(line.strip(), flush=True)
27
+
28
# ----------------------------------------------------------------------
# HELPER: send background POST to Railway backend
# ----------------------------------------------------------------------
def _send_to_railway(endpoint, payload, sid, eid):
    """Fire-and-forget JSON POST to the Railway backend on a daemon thread.

    Failures are logged, never raised, so a slow/unreachable backend cannot
    stall the frame-processing path.
    """
    def _post():
        try:
            resp = requests.post(f"{RAILWAY_API}{endpoint}", json=payload, timeout=10)
            if resp.status_code != 200:
                log("RAILWAY_POST_FAIL", sid, eid, code=resp.status_code, msg=resp.text)
        except Exception as exc:
            log("RAILWAY_POST_ERR", sid, eid, err=str(exc))

    threading.Thread(target=_post, daemon=True).start()
42
+
43
# ----------------------------------------------------------------------
# GLOBAL STATE
# ----------------------------------------------------------------------
# Dedicated asyncio loop on a daemon thread; Flask handlers submit
# coroutines to it via asyncio.run_coroutine_threadsafe.
_loop = asyncio.new_event_loop()
threading.Thread(target=_loop.run_forever, daemon=True).start()
# Live RTCPeerConnection objects, kept referenced so they aren't GC'd.
pcs = set()
# Per-(student_id, exam_id) rolling status maps, keyed by *string* IDs;
# timestamps ("at") are epoch milliseconds.
last_warning = defaultdict(lambda: {"warning": "Looking Forward", "at": 0})
last_capture = defaultdict(lambda: {"label": None, "at": 0})
last_metrics = defaultdict(lambda: {"yaw": None, "pitch": None, "dx": None, "dy": None,
                                    "fps": None, "label": "n/a", "at": 0})
53
+
54
# ----------------------------------------------------------------------
# MEDIAPIPE SETUP
# ----------------------------------------------------------------------
mp_face_mesh = mp.solutions.face_mesh
mp_hands = mp.solutions.hands

# Single shared FaceMesh tracker (one face, refined landmarks).
# NOTE(review): these instances are shared by every session's reader loop —
# confirm MediaPipe graphs tolerate interleaved calls from concurrent sessions.
face_mesh = mp_face_mesh.FaceMesh(
    static_image_mode=False, max_num_faces=1, refine_landmarks=True,
    min_detection_confidence=0.6, min_tracking_confidence=0.6
)
# Shared hand tracker; up to two hands per frame.
hands = mp_hands.Hands(
    static_image_mode=False, max_num_hands=2,
    min_detection_confidence=0.6, min_tracking_confidence=0.6
)
68
+
69
# ----------------------------------------------------------------------
# DETECTOR CLASS
# ----------------------------------------------------------------------
# FaceMesh landmark indices used for pose: nose tip, chin, left/right eye
# outer corners, left/right mouth corners (MediaPipe face-mesh numbering).
IDX_NOSE, IDX_CHIN, IDX_LE, IDX_RE, IDX_LM, IDX_RM = 1, 152, 263, 33, 291, 61
# Matching 3-D reference points of a generic head model, in the same order,
# fed to cv2.solvePnP to recover head orientation.
MODEL_3D = np.array([
    [0.0, 0.0, 0.0],
    [0.0, -63.6, -12.5],
    [-43.3, 32.7, -26.0],
    [43.3, 32.7, -26.0],
    [-28.9, -28.9, -24.1],
    [28.9, -28.9, -24.1],
], dtype=np.float32)
81
+
82
def _landmarks_to_pts(lms, w, h):
    """Project the six pose landmarks to pixel coordinates as float32 (6, 2)."""
    pts = []
    for idx in (IDX_NOSE, IDX_CHIN, IDX_LE, IDX_RE, IDX_LM, IDX_RM):
        lm = lms[idx]
        pts.append([lm.x * w, lm.y * h])
    return np.array(pts, dtype=np.float32)
85
+
86
+ def _bbox_from_landmarks(lms, w, h, pad=0.03):
87
+ xs = [p.x for p in lms]; ys = [p.y for p in lms]
88
+ x1n, y1n = max(0.0, min(xs) - pad), max(0.0, min(ys) - pad)
89
+ x2n, y2n = min(1.0, max(xs) + pad), min(1.0, max(ys) + pad)
90
+ return (int(x1n*w), int(y1n*h), int(x2n*w), int(y2n*h))
91
+
92
# Thresholds
# Head-pose triggers in degrees: |yaw| > 12 flags looking left/right,
# pitch > 16 flags looking down, pitch < -10 flags looking up.
YAW_DEG_TRIG, PITCH_UP, PITCH_DOWN = 12, 10, 16
# NOTE(review): DX_TRIG/DY_UP/DY_DOWN and the HOLD_FRAMES_* counts are not
# referenced by the visible detection code — confirm intended use or remove.
DX_TRIG, DY_UP, DY_DOWN = 0.06, 0.08, 0.10
# Smoothing-window length and minimum gap between captures (milliseconds).
SMOOTH_N, CAPTURE_MIN_MS = 5, 1200
HOLD_FRAMES_HEAD, HOLD_FRAMES_NOFACE, HOLD_FRAMES_HAND = 3, 3, 5
97
+
98
class ProctorDetector:
    """Per-(student, exam) head/hand detection state built on MediaPipe + solvePnP."""

    def __init__(self):
        # Rolling histories sized by SMOOTH_N.
        # NOTE(review): the histories and base_yaw/base_pitch are never read
        # in the visible code — confirm whether smoothing is still intended.
        self.yaw_hist, self.pitch_hist, self.dx_hist, self.dy_hist = deque(maxlen=SMOOTH_N), deque(maxlen=SMOOTH_N), deque(maxlen=SMOOTH_N), deque(maxlen=SMOOTH_N)
        self.base_yaw = self.base_pitch = None
        # Capture rate-limit timestamp (ms) and consecutive-event counters.
        self.last_capture_ms, self.noface_streak, self.hand_streak = 0, 0, 0
        self.last_print = 0.0

    def _pose_angles(self, lms, w, h):
        # Estimate (yaw, pitch) in degrees via PnP against the generic 3-D head
        # model; returns (None, None) when the solve fails for any reason.
        try:
            pts2d = _landmarks_to_pts(lms, w, h)
            # Pinhole intrinsics approximated with focal length = image width.
            cam = np.array([[w, 0, w/2], [0, w, h/2], [0, 0, 1]], dtype=np.float32)
            ok, rvec, _ = cv2.solvePnP(MODEL_3D, pts2d, cam, np.zeros((4,1)))
            if not ok: return None, None
            R, _ = cv2.Rodrigues(rvec)
            _, _, euler = cv2.RQDecomp3x3(R)
            pitch, yaw, _ = map(float, euler)
            return yaw, pitch
        except Exception:
            return None, None

    def detect(self, bgr, sid="-", eid="-"):
        # Classify one BGR frame. Returns (label, bbox-or-None, rgb frame);
        # label is "No Face" or one of the "Looking ..." strings.
        h, w = bgr.shape[:2]
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        res = face_mesh.process(rgb)
        if not res.multi_face_landmarks:
            log("FRAME", sid, eid, note="no_face")
            self.noface_streak += 1
            return "No Face", None, rgb
        self.noface_streak = 0
        lms = res.multi_face_landmarks[0].landmark
        yaw, pitch = self._pose_angles(lms, w, h)
        label = "Looking Forward"
        # NOTE(review): the truthiness tests skip exactly-0.0 angles as well as
        # None — harmless in practice, but `is not None` would be more precise.
        if yaw and abs(yaw) > YAW_DEG_TRIG: label = "Looking Left" if yaw < 0 else "Looking Right"
        if pitch and pitch > PITCH_DOWN: label = "Looking Down"
        if pitch and -pitch > PITCH_UP: label = "Looking Up"
        return label, _bbox_from_landmarks(lms, w, h), rgb

    def detect_hands_anywhere(self, rgb):
        # Any detected hand anywhere in frame counts; the streak resets to 0
        # whenever no hand is seen.
        res = hands.process(rgb)
        if not res.multi_hand_landmarks:
            self.hand_streak = 0
            return None
        self.hand_streak += 1
        return "Hand Detected"

    def _throttle_ok(self):
        # Rate-limit captures to at most one per CAPTURE_MIN_MS.
        return int(time.time()*1000) - self.last_capture_ms >= CAPTURE_MIN_MS
    def _mark_captured(self): self.last_capture_ms = int(time.time()*1000)
146
+
147
# One detector instance per (student_id, exam_id) key, created on demand.
detectors = defaultdict(ProctorDetector)

# ----------------------------------------------------------------------
# CAPTURE HANDLER — NOW CALLS RAILWAY API
# ----------------------------------------------------------------------
def _maybe_capture(student_id: str, exam_id: str, bgr, label: str):
    """JPEG-encode the suspicious frame and forward it to the Railway backend.

    Fires two background POSTs (save log + bump suspicious counter) and records
    the capture locally so /proctor/last_capture can report it. Throttling is
    the caller's responsibility (see ProctorDetector._throttle_ok).
    """
    ok, buf = cv2.imencode(".jpg", bgr)
    if not ok:
        log("CAPTURE_SKIP", student_id, exam_id, reason="encode_failed")
        return

    img_b64 = base64.b64encode(buf).decode("utf-8")
    log("CAPTURE_ENQUEUE", student_id, exam_id, label=label, bytes=len(buf))

    # 👉 send to Railway backend instead of local DB
    # NOTE(review): int(student_id)/int(exam_id) raises ValueError for
    # non-numeric IDs — confirm IDs are always numeric strings.
    _send_to_railway("/api/save_behavior_log", {
        "user_id": int(student_id),
        "exam_id": int(exam_id),
        "image_base64": img_b64,
        "warning_type": label
    }, student_id, exam_id)

    _send_to_railway("/api/increment_suspicious", {
        "student_id": int(student_id)
    }, student_id, exam_id)

    # Record the capture locally (epoch ms) for the polling endpoint.
    ts = int(time.time() * 1000)
    last_capture[(student_id, exam_id)] = {"label": label, "at": ts}
    log("LAST_CAPTURE_SET", student_id, exam_id, label=label, at=ts)
176
+
177
# ----------------------------------------------------------------------
# WEBRTC OFFER HANDLER
# ----------------------------------------------------------------------
async def _wait_ice_complete(pc):
    """Wait (max 5 s) until ICE candidate gathering on *pc* is complete.

    Needed because the SDP answer is returned over plain HTTP (no trickle
    ICE), so all candidates must be embedded before responding.
    Raises asyncio.TimeoutError if gathering does not finish in time.
    """
    if pc.iceGatheringState == "complete": return
    done = asyncio.Event()
    @pc.on("icegatheringstatechange")
    def _(_ev=None):
        if pc.iceGatheringState == "complete": done.set()
    await asyncio.wait_for(done.wait(), timeout=5.0)
187
+
188
async def handle_offer(data):
    """Negotiate one peer connection and start per-frame proctoring analysis.

    *data* must contain "sdp" and "type"; "student_id"/"exam_id" default to
    "0" and are used (as strings) to key all per-session state. Returns the
    local RTCSessionDescription (the SDP answer) after ICE gathering.
    """
    sid, eid = str(data.get("student_id", "0")), str(data.get("exam_id", "0"))
    log("OFFER_HANDLE", sid, eid)
    offer = RTCSessionDescription(sdp=data["sdp"], type=data["type"])
    pc = RTCPeerConnection()
    pcs.add(pc)

    @pc.on("connectionstatechange")
    async def _():
        # Tear down all per-session state once the connection dies.
        if pc.connectionState in ("failed", "closed", "disconnected"):
            await pc.close()
            pcs.discard(pc)
            for d in (detectors, last_warning, last_metrics, last_capture):
                d.pop((sid, eid), None)
            log("PC_CLOSED", sid, eid)

    @pc.on("track")
    def on_track(track):
        log("TRACK", sid, eid, kind=track.kind)
        if track.kind != "video":
            # Drain non-video tracks so they don't buffer.
            MediaBlackhole().addTrack(track)
            return
        async def reader():
            # Pull frames until the track stalls or ends, analysing each one.
            det = detectors[(sid, eid)]
            while True:
                try:
                    frame = await asyncio.wait_for(track.recv(), timeout=RECV_TIMEOUT_S)
                except Exception as e:
                    # Timeout or track-ended: stop this reader.
                    log("TRACK_RECV_ERR", sid, eid, err=str(e))
                    break
                try:
                    bgr = frame.to_ndarray(format="bgr24")
                    head_label, _, rgb = det.detect(bgr, sid, eid)
                    hand_label = det.detect_hands_anywhere(rgb)
                    # Hand detection takes priority over head pose.
                    warn = hand_label or head_label
                    ts = int(time.time() * 1000)
                    last_warning[(sid, eid)] = {"warning": warn, "at": ts}
                    # Capture only suspicious frames, rate-limited per detector.
                    if det._throttle_ok() and warn not in ("Looking Forward", None):
                        _maybe_capture(sid, eid, bgr, warn)
                        det._mark_captured()
                except Exception as e:
                    log("DETECT_ERR", sid, eid, err=str(e))
                    continue
        asyncio.ensure_future(reader(), loop=_loop)

    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)
    await _wait_ice_complete(pc)
    return pc.localDescription
238
+
239
# ----------------------------------------------------------------------
# ROUTES
# ----------------------------------------------------------------------
@webrtc_bp.route("/webrtc/offer", methods=["POST"])
def webrtc_offer():
    """Accept an SDP offer, run the async handler on the worker loop, return the answer."""
    try:
        payload = request.get_json(force=True)
        future = asyncio.run_coroutine_threadsafe(handle_offer(payload), _loop)
        local_desc = future.result()
        return jsonify({"sdp": local_desc.sdp, "type": local_desc.type})
    except Exception as e:
        traceback.print_exc()
        return jsonify({"error": str(e)}), 500
251
+
252
@webrtc_bp.route("/webrtc/cleanup", methods=["POST"])
def webrtc_cleanup():
    """Close every live peer connection on the worker loop (fire-and-forget)."""
    async def _teardown():
        for conn in list(pcs):
            await conn.close()
            pcs.discard(conn)
    asyncio.run_coroutine_threadsafe(_teardown(), _loop)
    return jsonify({"ok": True})
260
+
261
@webrtc_bp.route("/proctor/last_warning")
def proctor_last_warning():
    """Return the most recent head/hand warning for a (student, exam) pair."""
    sid = request.args.get("student_id")
    eid = request.args.get("exam_id")
    if not (sid and eid):
        return jsonify(error="missing student_id or exam_id"), 400
    default = {"warning": "Looking Forward", "at": 0}
    return jsonify(last_warning.get((sid, eid), default))
267
+
268
@webrtc_bp.route("/proctor/last_capture")
def proctor_last_capture():
    """Return the most recent captured-frame label for a (student, exam) pair."""
    sid = request.args.get("student_id")
    eid = request.args.get("exam_id")
    if not (sid and eid):
        return jsonify(error="missing student_id or exam_id"), 400
    default = {"label": None, "at": 0}
    return jsonify(last_capture.get((sid, eid), default))