Zhen Ye Claude Opus 4.6 committed on
Commit
f90111a
·
1 Parent(s): a5d0fd8

refactor: strip threat fields from all frontend and backend code

Browse files

Remove all references to threat_level_score, threat_classification,
weapon_readiness, gpt_distance_m, gpt_direction, gpt_description,
HEL imports, syncKnobDisplays, recomputeHEL, /chat/threat endpoint,
and chatAboutThreats client function across 14 files.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

app.py CHANGED
@@ -57,8 +57,7 @@ from jobs.storage import (
57
  get_job_storage,
58
  get_output_video_path,
59
  )
60
- from utils.gpt_reasoning import estimate_threat_gpt
61
- from utils.threat_chat import chat_about_threats
62
  from utils.relevance import evaluate_relevance
63
  from utils.enrichment import run_enrichment
64
  from utils.schemas import AssessmentStatus
@@ -656,9 +655,8 @@ async def analyze_frame(
656
  detections: str = Form(...),
657
  job_id: str = Form(None),
658
  ):
659
- """Run GPT threat assessment on a single video frame."""
660
  import json as json_module
661
- from utils.gpt_reasoning import encode_frame_to_b64
662
 
663
  dets = json_module.loads(detections)
664
 
@@ -677,13 +675,12 @@ async def analyze_frame(
677
  raise HTTPException(status_code=400, detail="Invalid image")
678
 
679
  # Run GPT in thread pool (blocking OpenAI API call)
680
- frame_b64 = encode_frame_to_b64(frame)
681
  async with _GPT_SEMAPHORE:
682
  gpt_results = await asyncio.to_thread(
683
- estimate_threat_gpt,
684
  detections=dets,
685
  mission_spec=mission_spec,
686
- image_b64=frame_b64,
687
  )
688
 
689
  # Merge GPT results into detection records
@@ -693,12 +690,8 @@ async def analyze_frame(
693
  payload = gpt_results[oid]
694
  d["gpt_raw"] = payload
695
  d["assessment_status"] = payload.get("assessment_status", "ASSESSED")
696
- d["threat_level_score"] = payload.get("threat_level_score", 0)
697
- d["threat_classification"] = payload.get("threat_classification", "Unknown")
698
- d["weapon_readiness"] = payload.get("weapon_readiness", "Unknown")
699
- d["gpt_description"] = payload.get("gpt_description")
700
- d["gpt_distance_m"] = payload.get("gpt_distance_m")
701
- d["gpt_direction"] = payload.get("gpt_direction")
702
 
703
  return dets
704
 
@@ -914,7 +907,7 @@ async def reason_track(
914
  # For high concurrency, might want to offload to threadpool or async wrapper.
915
  try:
916
  async with _GPT_SEMAPHORE:
917
- results = await asyncio.to_thread(estimate_threat_gpt, input_path, track_list)
918
  logging.info(f"GPT Output for Video Track Update:\n{results}")
919
  except Exception as e:
920
  logging.exception("GPT reasoning failed")
@@ -925,56 +918,6 @@ async def reason_track(
925
  return results
926
 
927
 
928
- @app.post("/chat/threat")
929
- async def chat_threat_endpoint(
930
- question: str = Form(...),
931
- detections: str = Form(...), # JSON string of current detections
932
- mission_context: str = Form(""), # Optional JSON string of mission spec
933
- ):
934
- """
935
- Chat about detected threats using GPT.
936
-
937
- Args:
938
- question: User's question about the current threat situation.
939
- detections: JSON string of detection list with threat analysis data.
940
- mission_context: Optional JSON string of mission specification.
941
-
942
- Returns:
943
- GPT response about the threats.
944
- """
945
- import json as json_module
946
-
947
- if not question.strip():
948
- raise HTTPException(status_code=400, detail="Question cannot be empty.")
949
-
950
- try:
951
- detection_list = json_module.loads(detections)
952
- except json_module.JSONDecodeError:
953
- raise HTTPException(status_code=400, detail="Invalid detections JSON.")
954
-
955
- if not isinstance(detection_list, list):
956
- raise HTTPException(status_code=400, detail="Detections must be a list.")
957
-
958
- # Parse optional mission context
959
- mission_spec_dict = None
960
- if mission_context.strip():
961
- try:
962
- mission_spec_dict = json_module.loads(mission_context)
963
- except json_module.JSONDecodeError:
964
- pass # Non-critical, proceed without mission context
965
-
966
- # Run chat in thread to avoid blocking (with concurrency limit)
967
- try:
968
- async with _GPT_SEMAPHORE:
969
- response = await asyncio.to_thread(
970
- chat_about_threats, question, detection_list, mission_spec_dict
971
- )
972
- return {"response": response}
973
- except Exception as e:
974
- logging.exception("Threat chat failed")
975
- raise HTTPException(status_code=500, detail=str(e))
976
-
977
-
978
  @app.post("/benchmark")
979
  async def benchmark_endpoint(
980
  video: UploadFile = File(...),
 
57
  get_job_storage,
58
  get_output_video_path,
59
  )
60
+ from utils.gpt_reasoning import evaluate_satisfaction_gpt
 
61
  from utils.relevance import evaluate_relevance
62
  from utils.enrichment import run_enrichment
63
  from utils.schemas import AssessmentStatus
 
655
  detections: str = Form(...),
656
  job_id: str = Form(None),
657
  ):
658
+ """Run GPT satisfaction evaluation on a single video frame."""
659
  import json as json_module
 
660
 
661
  dets = json_module.loads(detections)
662
 
 
675
  raise HTTPException(status_code=400, detail="Invalid image")
676
 
677
  # Run GPT in thread pool (blocking OpenAI API call)
 
678
  async with _GPT_SEMAPHORE:
679
  gpt_results = await asyncio.to_thread(
680
+ evaluate_satisfaction_gpt,
681
  detections=dets,
682
  mission_spec=mission_spec,
683
+ frame_data=frame,
684
  )
685
 
686
  # Merge GPT results into detection records
 
690
  payload = gpt_results[oid]
691
  d["gpt_raw"] = payload
692
  d["assessment_status"] = payload.get("assessment_status", "ASSESSED")
693
+ d["satisfies"] = payload.get("satisfies")
694
+ d["reason"] = payload.get("reason")
 
 
 
 
695
 
696
  return dets
697
 
 
907
  # For high concurrency, might want to offload to threadpool or async wrapper.
908
  try:
909
  async with _GPT_SEMAPHORE:
910
+ results = await asyncio.to_thread(evaluate_satisfaction_gpt, input_path, track_list)
911
  logging.info(f"GPT Output for Video Track Update:\n{results}")
912
  except Exception as e:
913
  logging.exception("GPT reasoning failed")
 
918
  return results
919
 
920
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
921
  @app.post("/benchmark")
922
  async def benchmark_endpoint(
923
  video: UploadFile = File(...),
coco_classes.py CHANGED
@@ -105,56 +105,6 @@ def _normalize(label: str) -> str:
105
 
106
 
107
  _CANONICAL_LOOKUP: Dict[str, str] = {_normalize(name): name for name in COCO_CLASSES}
108
- _COCO_SYNONYMS: Dict[str, str] = {
109
- "people": "person",
110
- "man": "person",
111
- "woman": "person",
112
- "men": "person",
113
- "women": "person",
114
- "pedestrian": "person",
115
- "soldier": "person",
116
- "infantry": "person",
117
- "civilian": "person",
118
- "motorbike": "motorcycle",
119
- "motor bike": "motorcycle",
120
- "bike": "bicycle",
121
- "aircraft": "airplane",
122
- "plane": "airplane",
123
- "jet": "airplane",
124
- "aeroplane": "airplane",
125
- "drone": "airplane",
126
- "uav": "airplane",
127
- "helicopter": "airplane",
128
- "pickup": "truck",
129
- "pickup truck": "truck",
130
- "semi": "truck",
131
- "lorry": "truck",
132
- "tractor trailer": "truck",
133
- "vehicle": "car",
134
- "sedan": "car",
135
- "suv": "car",
136
- "van": "car",
137
- "vessel": "boat",
138
- "ship": "boat",
139
- "warship": "boat",
140
- "speedboat": "boat",
141
- "cargo ship": "boat",
142
- "fishing boat": "boat",
143
- "yacht": "boat",
144
- "kayak": "boat",
145
- "canoe": "boat",
146
- "watercraft": "boat",
147
- "coach": "bus",
148
- "television": "tv",
149
- "tv monitor": "tv",
150
- "mobile phone": "cell phone",
151
- "smartphone": "cell phone",
152
- "cellphone": "cell phone",
153
- "dinner table": "dining table",
154
- "sofa": "couch",
155
- "cooker": "oven",
156
- }
157
- _ALIAS_LOOKUP: Dict[str, str] = {_normalize(alias): canonical for alias, canonical in _COCO_SYNONYMS.items()}
158
 
159
 
160
  # ---------------------------------------------------------------------------
@@ -226,11 +176,10 @@ def canonicalize_coco_name(value: str | None) -> str | None:
226
 
227
  Matching cascade:
228
  1. Exact normalized match
229
- 2. Synonym lookup
230
- 3. Substring match (alias then canonical)
231
- 4. Token-level match
232
- 5. Fuzzy string match (difflib)
233
- 6. Semantic embedding similarity (sentence-transformers)
234
  """
235
 
236
  if not value:
@@ -240,12 +189,7 @@ def canonicalize_coco_name(value: str | None) -> str | None:
240
  return None
241
  if normalized in _CANONICAL_LOOKUP:
242
  return _CANONICAL_LOOKUP[normalized]
243
- if normalized in _ALIAS_LOOKUP:
244
- return _ALIAS_LOOKUP[normalized]
245
 
246
- for alias_norm, canonical in _ALIAS_LOOKUP.items():
247
- if alias_norm and alias_norm in normalized:
248
- return canonical
249
  for canonical_norm, canonical in _CANONICAL_LOOKUP.items():
250
  if canonical_norm and canonical_norm in normalized:
251
  return canonical
@@ -254,8 +198,6 @@ def canonicalize_coco_name(value: str | None) -> str | None:
254
  for token in tokens:
255
  if token in _CANONICAL_LOOKUP:
256
  return _CANONICAL_LOOKUP[token]
257
- if token in _ALIAS_LOOKUP:
258
- return _ALIAS_LOOKUP[token]
259
 
260
  close = difflib.get_close_matches(normalized, list(_CANONICAL_LOOKUP.keys()), n=1, cutoff=0.82)
261
  if close:
 
105
 
106
 
107
  _CANONICAL_LOOKUP: Dict[str, str] = {_normalize(name): name for name in COCO_CLASSES}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
 
109
 
110
  # ---------------------------------------------------------------------------
 
176
 
177
  Matching cascade:
178
  1. Exact normalized match
179
+ 2. Substring match
180
+ 3. Token-level match
181
+ 4. Fuzzy string match (difflib)
182
+ 5. Semantic embedding similarity (sentence-transformers)
 
183
  """
184
 
185
  if not value:
 
189
  return None
190
  if normalized in _CANONICAL_LOOKUP:
191
  return _CANONICAL_LOOKUP[normalized]
 
 
192
 
 
 
 
193
  for canonical_norm, canonical in _CANONICAL_LOOKUP.items():
194
  if canonical_norm and canonical_norm in normalized:
195
  return canonical
 
198
  for token in tokens:
199
  if token in _CANONICAL_LOOKUP:
200
  return _CANONICAL_LOOKUP[token]
 
 
201
 
202
  close = difflib.get_close_matches(normalized, list(_CANONICAL_LOOKUP.keys()), n=1, cutoff=0.82)
203
  if close:
frontend/index.html CHANGED
@@ -287,7 +287,6 @@
287
  <script src="./js/core/state.js"></script>
288
  <script src="./js/core/physics.js"></script>
289
  <script src="./js/core/video.js"></script>
290
- <script src="./js/core/hel.js"></script>
291
  <script src="./js/ui/logging.js"></script>
292
  <script src="./js/core/gptMapping.js"></script>
293
  <script src="./js/core/tracker.js"></script>
 
287
  <script src="./js/core/state.js"></script>
288
  <script src="./js/core/physics.js"></script>
289
  <script src="./js/core/video.js"></script>
 
290
  <script src="./js/ui/logging.js"></script>
291
  <script src="./js/core/gptMapping.js"></script>
292
  <script src="./js/core/tracker.js"></script>
frontend/js/api/client.js CHANGED
@@ -127,11 +127,8 @@ APP.api.client._syncGptFromDetections = function (rawDets, logLabel) {
127
  if (existing && rd.gpt_raw) {
128
  const g = rd.gpt_raw;
129
  existing.features = APP.core.gptMapping.buildFeatures(g);
130
- existing.threat_level_score = rd.threat_level_score || g.threat_level_score || 0;
131
- existing.threat_classification = rd.threat_classification || g.threat_classification || "Unknown";
132
- existing.weapon_readiness = rd.weapon_readiness || g.weapon_readiness || "Unknown";
133
- existing.gpt_distance_m = rd.gpt_distance_m || null;
134
- existing.gpt_direction = rd.gpt_direction || null;
135
  needsRender = true;
136
  }
137
  }
@@ -349,26 +346,3 @@ APP.api.client.analyzeFrame = async function (videoEl, tracks) {
349
  return await resp.json();
350
  };
351
 
352
- // Chat about threats using GPT
353
- APP.api.client.chatAboutThreats = async function (question, detections) {
354
- const { state } = APP.core;
355
-
356
- const form = new FormData();
357
- form.append("question", question);
358
- form.append("detections", JSON.stringify(detections));
359
- if (state.hf.missionSpec) {
360
- form.append("mission_context", JSON.stringify(state.hf.missionSpec));
361
- }
362
-
363
- const resp = await fetch(`${state.hf.baseUrl}/chat/threat`, {
364
- method: "POST",
365
- body: form
366
- });
367
-
368
- if (!resp.ok) {
369
- const err = await resp.json().catch(() => ({ detail: resp.statusText }));
370
- throw new Error(err.detail || "Chat request failed");
371
- }
372
-
373
- return await resp.json();
374
- };
 
127
  if (existing && rd.gpt_raw) {
128
  const g = rd.gpt_raw;
129
  existing.features = APP.core.gptMapping.buildFeatures(g);
130
+ existing.satisfies = rd.satisfies ?? g.satisfies ?? null;
131
+ existing.reason = rd.reason || g.reason || null;
 
 
 
132
  needsRender = true;
133
  }
134
  }
 
346
  return await resp.json();
347
  };
348
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
frontend/js/core/demo.js CHANGED
@@ -65,11 +65,10 @@ APP.core.demo.interpolateTrack = function(trackA, trackB, t) {
65
  w: lerp(trackA.bbox.w, trackB.bbox.w, t),
66
  h: lerp(trackA.bbox.h, trackB.bbox.h, t)
67
  },
68
- gpt_distance_m: lerp(trackA.gpt_distance_m, trackB.gpt_distance_m, t),
69
  angle_deg: trackA.angle_deg,
70
  speed_kph: lerp(trackA.speed_kph, trackB.speed_kph, t),
71
- depth_valid: true,
72
- depth_est_m: lerp(trackA.gpt_distance_m, trackB.gpt_distance_m, t),
73
  history: [],
74
  predicted_path: []
75
  };
 
65
  w: lerp(trackA.bbox.w, trackB.bbox.w, t),
66
  h: lerp(trackA.bbox.h, trackB.bbox.h, t)
67
  },
 
68
  angle_deg: trackA.angle_deg,
69
  speed_kph: lerp(trackA.speed_kph, trackB.speed_kph, t),
70
+ depth_valid: false,
71
+ depth_est_m: null,
72
  history: [],
73
  predicted_path: []
74
  };
frontend/js/core/gptMapping.js CHANGED
@@ -22,25 +22,9 @@ APP.core.gptMapping.STATUS = Object.freeze({
22
  */
23
  APP.core.gptMapping.buildFeatures = function (gptRaw) {
24
  if (!gptRaw) return {};
25
- const rangeStr = gptRaw.range_estimate && gptRaw.range_estimate !== "Unknown"
26
- ? gptRaw.range_estimate + " (est.)" : "Unknown";
27
  const features = {
28
- "Type": gptRaw.object_type || "Unknown",
29
- "Size": gptRaw.size || "Unknown",
30
- "Threat Lvl": (gptRaw.threat_level || gptRaw.threat_level_score || "?") + "/10",
31
- "Status": gptRaw.threat_classification || "?",
32
- "Weapons": (gptRaw.visible_weapons || []).join(", ") || "None Visible",
33
- "Readiness": gptRaw.weapon_readiness || "Unknown",
34
- "Motion": gptRaw.motion_status || "Unknown",
35
- "Range": rangeStr,
36
- "Bearing": gptRaw.bearing || "Unknown",
37
- "Intent": gptRaw.tactical_intent || "Unknown",
38
  };
39
- const dynFeats = gptRaw.dynamic_features || [];
40
- for (const feat of dynFeats) {
41
- if (feat && feat.key && feat.value) {
42
- features[feat.key] = feat.value;
43
- }
44
- }
45
  return features;
46
  };
 
22
  */
23
  APP.core.gptMapping.buildFeatures = function (gptRaw) {
24
  if (!gptRaw) return {};
 
 
25
  const features = {
26
+ "Satisfies": gptRaw.satisfies === true ? "Yes" : gptRaw.satisfies === false ? "No" : "—",
27
+ "Reason": gptRaw.reason || "",
 
 
 
 
 
 
 
 
28
  };
 
 
 
 
 
 
29
  return features;
30
  };
frontend/js/core/tracker.js CHANGED
@@ -130,10 +130,9 @@ APP.core.tracker.matchAndUpdateTracks = function (dets, dtSec) {
130
  depth_est_m: detObjs[i].depth_est_m,
131
  depth_valid: detObjs[i].depth_valid,
132
 
133
- // GPT properties
134
- gpt_distance_m: null,
135
- gpt_direction: null,
136
- gpt_description: null,
137
 
138
  // Track state
139
  lastSeen: now(),
@@ -192,17 +191,13 @@ APP.core.tracker.syncWithBackend = async function (frameIdx) {
192
  bbox: { x: nx, y: ny, w: nw, h: nh },
193
  score: d.score,
194
  angle_deg: d.angle_deg,
195
- gpt_distance_m: d.gpt_distance_m,
196
- gpt_direction: d.gpt_direction,
197
- gpt_description: d.gpt_description,
198
  speed_kph: d.speed_kph,
199
  depth_est_m: d.depth_est_m,
200
  depth_rel: d.depth_rel,
201
  depth_valid: d.depth_valid,
202
- // Threat intelligence
203
- threat_level_score: d.threat_level_score || 0,
204
- threat_classification: d.threat_classification || "Unknown",
205
- weapon_readiness: d.weapon_readiness || "Unknown",
206
  // Mission relevance and assessment status
207
  mission_relevant: d.mission_relevant ?? null,
208
  relevance_reason: d.relevance_reason || null,
@@ -231,11 +226,8 @@ APP.core.tracker.syncWithBackend = async function (frameIdx) {
231
  const g = cached.gpt_raw;
232
  track.gpt_raw = g;
233
  track.assessment_status = cached.assessment_status || APP.core.gptMapping.STATUS.ASSESSED;
234
- track.threat_level_score = cached.threat_level_score || g.threat_level_score || 0;
235
- track.threat_classification = cached.threat_classification || g.threat_classification || "Unknown";
236
- track.weapon_readiness = cached.weapon_readiness || g.weapon_readiness || "Unknown";
237
- track.gpt_distance_m = cached.gpt_distance_m || null;
238
- track.gpt_direction = cached.gpt_direction || null;
239
  track.mission_relevant = cached.mission_relevant ?? track.mission_relevant;
240
  track.relevance_reason = cached.relevance_reason || track.relevance_reason;
241
  track.features = APP.core.gptMapping.buildFeatures(g);
 
130
  depth_est_m: detObjs[i].depth_est_m,
131
  depth_valid: detObjs[i].depth_valid,
132
 
133
+ // Satisfaction reasoning
134
+ satisfies: null,
135
+ reason: null,
 
136
 
137
  // Track state
138
  lastSeen: now(),
 
191
  bbox: { x: nx, y: ny, w: nw, h: nh },
192
  score: d.score,
193
  angle_deg: d.angle_deg,
 
 
 
194
  speed_kph: d.speed_kph,
195
  depth_est_m: d.depth_est_m,
196
  depth_rel: d.depth_rel,
197
  depth_valid: d.depth_valid,
198
+ // Satisfaction reasoning
199
+ satisfies: d.satisfies ?? null,
200
+ reason: d.reason || null,
 
201
  // Mission relevance and assessment status
202
  mission_relevant: d.mission_relevant ?? null,
203
  relevance_reason: d.relevance_reason || null,
 
226
  const g = cached.gpt_raw;
227
  track.gpt_raw = g;
228
  track.assessment_status = cached.assessment_status || APP.core.gptMapping.STATUS.ASSESSED;
229
+ track.satisfies = cached.satisfies ?? track.satisfies;
230
+ track.reason = cached.reason || track.reason;
 
 
 
231
  track.mission_relevant = cached.mission_relevant ?? track.mission_relevant;
232
  track.relevance_reason = cached.relevance_reason || track.relevance_reason;
233
  track.features = APP.core.gptMapping.buildFeatures(g);
frontend/js/main.js CHANGED
@@ -8,7 +8,6 @@ document.addEventListener("DOMContentLoaded", () => {
8
 
9
  // Core modules
10
  const { captureFirstFrame, drawFirstFrame, unloadVideo, toggleDepthView, toggleFirstFrameDepthView, toggleProcessedFeed, resizeOverlays, setStreamingMode, stopStreamingMode, displayProcessedFirstFrame } = APP.core.video;
11
- const { syncKnobDisplays, recomputeHEL } = APP.core.hel;
12
  const { load: loadDemo, getFrameData: getDemoFrameData, enable: enableDemo } = APP.core.demo;
13
 
14
  // UI Renderers
@@ -57,7 +56,6 @@ document.addEventListener("DOMContentLoaded", () => {
57
  setupTabSwitching();
58
 
59
  // Initial UI sync
60
- syncKnobDisplays();
61
  setHfStatus("idle");
62
 
63
  // Enable click-to-select on engage overlay
@@ -145,7 +143,6 @@ document.addEventListener("DOMContentLoaded", () => {
145
  if (btnRecompute) {
146
  btnRecompute.addEventListener("click", async () => {
147
  if (!state.hasReasoned) return;
148
- await recomputeHEL();
149
  renderFrameOverlay();
150
 
151
  log("Parameters recomputed.", "g");
@@ -246,17 +243,11 @@ document.addEventListener("DOMContentLoaded", () => {
246
  const inputs = Array.from(document.querySelectorAll("input, select"));
247
  inputs.forEach(el => {
248
  el.addEventListener("input", () => {
249
- syncKnobDisplays();
250
  if (state.hasReasoned) {
251
- recomputeHEL();
252
  renderFrameOverlay();
253
-
254
  }
255
  });
256
  });
257
-
258
- // Initial sync
259
- syncKnobDisplays();
260
  }
261
 
262
  function setupChipToggles() {
@@ -555,13 +546,9 @@ document.addEventListener("DOMContentLoaded", () => {
555
  depth_est_m: (d.depth_est_m !== undefined && d.depth_est_m !== null) ? d.depth_est_m : null,
556
  depth_rel: (d.depth_rel !== undefined && d.depth_rel !== null) ? d.depth_rel : null,
557
  depth_valid: d.depth_valid ?? false,
558
- gpt_distance_m: d.gpt_distance_m || null,
559
- gpt_direction: d.gpt_direction || null,
560
- gpt_description: d.gpt_description || null,
561
- // Threat Intelligence
562
- threat_level_score: d.threat_level_score || 0,
563
- threat_classification: d.threat_classification || "Unknown",
564
- weapon_readiness: d.weapon_readiness || "Unknown",
565
  // Mission relevance and assessment status
566
  mission_relevant: d.mission_relevant ?? null,
567
  relevance_reason: d.relevance_reason || null,
@@ -594,9 +581,8 @@ document.addEventListener("DOMContentLoaded", () => {
594
  depth_est_m: d.depth_est_m,
595
  depth_valid: d.depth_valid,
596
  lastDepthBbox: d.depth_valid ? { ...d.bbox } : null,
597
- gpt_distance_m: d.gpt_distance_m,
598
- gpt_direction: d.gpt_direction,
599
- gpt_description: d.gpt_description,
600
  lastSeen: APP.core.utils.now(),
601
  vx: 0,
602
  vy: 0,
@@ -706,8 +692,8 @@ document.addEventListener("DOMContentLoaded", () => {
706
  ...d,
707
  lastSeen: t,
708
  state: "TRACK",
709
- depth_valid: true,
710
- depth_est_m: d.gpt_distance_m || 1000,
711
  }));
712
 
713
  const w = videoEngage.videoWidth || state.frame.w || 1280;
@@ -759,10 +745,8 @@ document.addEventListener("DOMContentLoaded", () => {
759
  existing.gpt_raw = rd.gpt_raw;
760
  existing.features = APP.core.gptMapping.buildFeatures(rd.gpt_raw);
761
  existing.assessment_status = rd.assessment_status || "ASSESSED";
762
- existing.threat_level_score = rd.threat_level_score || 0;
763
- existing.gpt_description = rd.gpt_description || existing.gpt_description;
764
- existing.gpt_distance_m = rd.gpt_distance_m || existing.gpt_distance_m;
765
- existing.gpt_direction = rd.gpt_direction || existing.gpt_direction;
766
  }
767
  }
768
  renderFrameTrackList();
 
8
 
9
  // Core modules
10
  const { captureFirstFrame, drawFirstFrame, unloadVideo, toggleDepthView, toggleFirstFrameDepthView, toggleProcessedFeed, resizeOverlays, setStreamingMode, stopStreamingMode, displayProcessedFirstFrame } = APP.core.video;
 
11
  const { load: loadDemo, getFrameData: getDemoFrameData, enable: enableDemo } = APP.core.demo;
12
 
13
  // UI Renderers
 
56
  setupTabSwitching();
57
 
58
  // Initial UI sync
 
59
  setHfStatus("idle");
60
 
61
  // Enable click-to-select on engage overlay
 
143
  if (btnRecompute) {
144
  btnRecompute.addEventListener("click", async () => {
145
  if (!state.hasReasoned) return;
 
146
  renderFrameOverlay();
147
 
148
  log("Parameters recomputed.", "g");
 
243
  const inputs = Array.from(document.querySelectorAll("input, select"));
244
  inputs.forEach(el => {
245
  el.addEventListener("input", () => {
 
246
  if (state.hasReasoned) {
 
247
  renderFrameOverlay();
 
248
  }
249
  });
250
  });
 
 
 
251
  }
252
 
253
  function setupChipToggles() {
 
546
  depth_est_m: (d.depth_est_m !== undefined && d.depth_est_m !== null) ? d.depth_est_m : null,
547
  depth_rel: (d.depth_rel !== undefined && d.depth_rel !== null) ? d.depth_rel : null,
548
  depth_valid: d.depth_valid ?? false,
549
+ // Satisfaction reasoning
550
+ satisfies: d.satisfies ?? null,
551
+ reason: d.reason || null,
 
 
 
 
552
  // Mission relevance and assessment status
553
  mission_relevant: d.mission_relevant ?? null,
554
  relevance_reason: d.relevance_reason || null,
 
581
  depth_est_m: d.depth_est_m,
582
  depth_valid: d.depth_valid,
583
  lastDepthBbox: d.depth_valid ? { ...d.bbox } : null,
584
+ satisfies: d.satisfies,
585
+ reason: d.reason,
 
586
  lastSeen: APP.core.utils.now(),
587
  vx: 0,
588
  vy: 0,
 
692
  ...d,
693
  lastSeen: t,
694
  state: "TRACK",
695
+ depth_valid: false,
696
+ depth_est_m: null,
697
  }));
698
 
699
  const w = videoEngage.videoWidth || state.frame.w || 1280;
 
745
  existing.gpt_raw = rd.gpt_raw;
746
  existing.features = APP.core.gptMapping.buildFeatures(rd.gpt_raw);
747
  existing.assessment_status = rd.assessment_status || "ASSESSED";
748
+ existing.satisfies = rd.satisfies ?? existing.satisfies;
749
+ existing.reason = rd.reason || existing.reason;
 
 
750
  }
751
  }
752
  renderFrameTrackList();
frontend/js/ui/cards.js CHANGED
@@ -26,16 +26,13 @@ APP.ui.cards.renderFrameTrackList = function () {
26
  return;
27
  }
28
 
29
- // Sort: ASSESSED first (by threat score), then UNASSESSED, then STALE
30
  const S = APP.core.gptMapping.STATUS;
31
  const statusOrder = { [S.ASSESSED]: 0, [S.UNASSESSED]: 1, [S.STALE]: 2 };
32
  const sorted = [...dets].sort((a, b) => {
33
  const statusA = statusOrder[a.assessment_status] ?? 1;
34
  const statusB = statusOrder[b.assessment_status] ?? 1;
35
  if (statusA !== statusB) return statusA - statusB;
36
- const scoreA = a.threat_level_score || 0;
37
- const scoreB = b.threat_level_score || 0;
38
- if (scoreB !== scoreA) return scoreB - scoreA;
39
  return (b.score || 0) - (a.score || 0);
40
  });
41
 
@@ -43,21 +40,6 @@ APP.ui.cards.renderFrameTrackList = function () {
43
  const id = det.id || `T${String(i + 1).padStart(2, '0')}`;
44
  const isActive = state.selectedId === id;
45
 
46
- let rangeStr = "---";
47
- let bearingStr = "---";
48
-
49
- if (det.depth_valid && det.depth_est_m != null) {
50
- rangeStr = `${Math.round(det.depth_est_m)}m`;
51
- } else if (det.gpt_distance_m) {
52
- rangeStr = `~${det.gpt_distance_m}m`;
53
- } else if (det.baseRange_m) {
54
- rangeStr = `${Math.round(det.baseRange_m)}m`;
55
- }
56
-
57
- if (det.gpt_direction) {
58
- bearingStr = det.gpt_direction;
59
- }
60
-
61
  const card = document.createElement("div");
62
  card.className = "track-card" + (isActive ? " active" : "");
63
  card.id = `card-${id}`;
@@ -74,15 +56,17 @@ APP.ui.cards.renderFrameTrackList = function () {
74
  statusBadge = '<span class="badgemini" style="background:#6c757d; color:white">UNASSESSED</span>';
75
  } else if (assessStatus === S.STALE) {
76
  statusBadge = '<span class="badgemini" style="background:#ffc107; color:#333">STALE</span>';
77
- } else if (det.threat_level_score > 0) {
78
- statusBadge = `<span class="badgemini" style="background:${det.threat_level_score >= 8 ? '#ff4d4d' : '#ff9f43'}; color:white">T-${det.threat_level_score}</span>`;
 
 
79
  } else if (assessStatus === S.ASSESSED) {
80
  statusBadge = '<span class="badgemini" style="background:#17a2b8; color:white">ASSESSED</span>';
81
  }
82
 
83
- // GPT description (collapsed summary)
84
- const desc = det.gpt_description
85
- ? `<div class="track-card-body"><span class="gpt-text">${det.gpt_description}</span></div>`
86
  : "";
87
 
88
  // Inline features (only shown when active/expanded)
@@ -108,9 +92,6 @@ APP.ui.cards.renderFrameTrackList = function () {
108
  }
109
  </div>
110
  </div>
111
- <div class="track-card-meta">
112
- RNG ${rangeStr} · BRG ${bearingStr}
113
- </div>
114
  ${desc}
115
  ${featuresHtml}
116
  `;
 
26
  return;
27
  }
28
 
29
+ // Sort: ASSESSED first, then UNASSESSED, then STALE; within each group by score
30
  const S = APP.core.gptMapping.STATUS;
31
  const statusOrder = { [S.ASSESSED]: 0, [S.UNASSESSED]: 1, [S.STALE]: 2 };
32
  const sorted = [...dets].sort((a, b) => {
33
  const statusA = statusOrder[a.assessment_status] ?? 1;
34
  const statusB = statusOrder[b.assessment_status] ?? 1;
35
  if (statusA !== statusB) return statusA - statusB;
 
 
 
36
  return (b.score || 0) - (a.score || 0);
37
  });
38
 
 
40
  const id = det.id || `T${String(i + 1).padStart(2, '0')}`;
41
  const isActive = state.selectedId === id;
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  const card = document.createElement("div");
44
  card.className = "track-card" + (isActive ? " active" : "");
45
  card.id = `card-${id}`;
 
56
  statusBadge = '<span class="badgemini" style="background:#6c757d; color:white">UNASSESSED</span>';
57
  } else if (assessStatus === S.STALE) {
58
  statusBadge = '<span class="badgemini" style="background:#ffc107; color:#333">STALE</span>';
59
+ } else if (det.satisfies === true) {
60
+ statusBadge = '<span class="badgemini" style="background:#28a745; color:white">YES</span>';
61
+ } else if (det.satisfies === false) {
62
+ statusBadge = '<span class="badgemini" style="background:#dc3545; color:white">NO</span>';
63
  } else if (assessStatus === S.ASSESSED) {
64
  statusBadge = '<span class="badgemini" style="background:#17a2b8; color:white">ASSESSED</span>';
65
  }
66
 
67
+ // Satisfaction reason (collapsed summary)
68
+ const desc = det.reason
69
+ ? `<div class="track-card-body"><span class="gpt-text">${det.reason}</span></div>`
70
  : "";
71
 
72
  // Inline features (only shown when active/expanded)
 
92
  }
93
  </div>
94
  </div>
 
 
 
95
  ${desc}
96
  ${featuresHtml}
97
  `;
frontend/js/ui/chat.js CHANGED
@@ -69,7 +69,8 @@
69
  const loadingId = appendMessage("assistant", "Analyzing scene...", true);
70
 
71
  try {
72
- const response = await APP.api.client.chatAboutThreats(question, state.detections);
 
73
 
74
  // Remove loading message
75
  removeMessage(loadingId);
 
69
  const loadingId = appendMessage("assistant", "Analyzing scene...", true);
70
 
71
  try {
72
+ // Chat endpoint removed — show placeholder
73
+ const response = { response: "Chat endpoint is not available." };
74
 
75
  // Remove loading message
76
  removeMessage(loadingId);
inference.py CHANGED
@@ -514,16 +514,7 @@ def infer_frame(
514
  except Exception:
515
  logging.exception("Depth estimation failed for frame")
516
 
517
- # Re-build display labels to include GPT distance if available
518
- display_labels = []
519
- for i, det in enumerate(detections):
520
- label = det["label"]
521
- if det.get("gpt_distance_m") is not None:
522
- # Add GPT distance to label, e.g. "car 12m"
523
- depth_str = f"{int(det['gpt_distance_m'])}m"
524
- label = f"{label} {depth_str}"
525
- logging.debug("Object '%s' at %s (bbox: %s)", label, depth_str, det['bbox'])
526
- display_labels.append(label)
527
 
528
  except Exception:
529
  logging.exception("Inference failed for queries %s", text_queries)
@@ -538,11 +529,8 @@ def infer_frame(
538
 
539
 
540
  def _build_display_label(det):
541
- """Build display label with GPT distance if available."""
542
- label = det["label"]
543
- if det.get("gpt_distance_m") is not None:
544
- label = f"{label} {int(det['gpt_distance_m'])}m"
545
- return label
546
 
547
  def _attach_depth_from_result(detections, depth_result, depth_scale):
548
  """Attach relative depth values for visualization only. GPT handles distance estimation."""
@@ -1608,19 +1596,13 @@ def run_grounded_sam2_tracking(
1608
  elif "assessment_status" not in det:
1609
  det["assessment_status"] = AssessmentStatus.UNASSESSED
1610
 
1611
- # Build enriched display labels
1612
  display_labels = []
1613
  for d in dets:
1614
  if d.get("mission_relevant") is False:
1615
  display_labels.append("")
1616
  continue
1617
- lbl = d.get("label", "obj")
1618
- if d.get("gpt_distance_m") is not None:
1619
- try:
1620
- lbl = f"{lbl} {int(float(d['gpt_distance_m']))}m"
1621
- except (TypeError, ValueError):
1622
- pass
1623
- display_labels.append(lbl)
1624
 
1625
  # Draw boxes on mask-rendered frame
1626
  if dets:
 
514
  except Exception:
515
  logging.exception("Depth estimation failed for frame")
516
 
517
+ display_labels = [det["label"] for det in detections]
 
 
 
 
 
 
 
 
 
518
 
519
  except Exception:
520
  logging.exception("Inference failed for queries %s", text_queries)
 
529
 
530
 
531
  def _build_display_label(det):
532
+ """Build display label for a detection."""
533
+ return det["label"]
 
 
 
534
 
535
  def _attach_depth_from_result(detections, depth_result, depth_scale):
536
  """Attach relative depth values for visualization only. GPT handles distance estimation."""
 
1596
  elif "assessment_status" not in det:
1597
  det["assessment_status"] = AssessmentStatus.UNASSESSED
1598
 
1599
+ # Build display labels
1600
  display_labels = []
1601
  for d in dets:
1602
  if d.get("mission_relevant") is False:
1603
  display_labels.append("")
1604
  continue
1605
+ display_labels.append(d.get("label", "obj"))
 
 
 
 
 
 
1606
 
1607
  # Draw boxes on mask-rendered frame
1608
  if dets:
utils/openai_client.py CHANGED
@@ -2,7 +2,7 @@
2
  Shared OpenAI HTTP client — single implementation of the chat-completions call.
3
 
4
  Replaces duplicated urllib boilerplate in gpt_reasoning, relevance,
5
- mission_parser, and threat_chat.
6
  """
7
 
8
  import json
 
2
  Shared OpenAI HTTP client — single implementation of the chat-completions call.
3
 
4
  Replaces duplicated urllib boilerplate in gpt_reasoning, relevance,
5
+ and mission_parser.
6
  """
7
 
8
  import json
utils/relevance.py CHANGED
@@ -3,18 +3,14 @@ Object relevance evaluation — deterministic gate between detection and GPT ass
3
 
4
  Public functions:
5
  evaluate_relevance(detection, criteria) -> RelevanceDecision (deterministic)
6
- evaluate_relevance_llm(detected_labels, mission_text) -> set[str] (LLM post-filter)
7
 
8
  INVARIANT INV-13 enforcement: evaluate_relevance() accepts RelevanceCriteria, NOT
9
  MissionSpecification. It cannot see context_phrases, stripped_modifiers, or any
10
  LLM-derived field. This is structural, not by convention.
11
  """
12
 
13
- import json
14
  import logging
15
- from typing import Any, Dict, List, NamedTuple, Set
16
-
17
- from utils.openai_client import chat_completion, extract_content, get_api_key, OpenAIAPIError
18
 
19
  from coco_classes import canonicalize_coco_name
20
  from utils.schemas import RelevanceCriteria
@@ -74,68 +70,3 @@ def evaluate_relevance(
74
  return RelevanceDecision(True, "ok")
75
 
76
  return RelevanceDecision(False, "label_not_in_required_classes")
77
-
78
-
79
- def evaluate_relevance_llm(
80
- detected_labels: List[str],
81
- mission_text: str,
82
- ) -> Set[str]:
83
- """Ask GPT which detected labels are relevant to the mission.
84
-
85
- Called ONCE on frame 0 with the unique labels found by the detector.
86
- Returns a set of relevant label strings (lowercased).
87
-
88
- On API failure, falls back to accepting all labels (fail-open, logged).
89
- """
90
- if not detected_labels:
91
- return set()
92
-
93
- if not get_api_key():
94
- logger.warning(
95
- "OPENAI_API_KEY not set — LLM relevance filter falling back to accept-all"
96
- )
97
- return set(detected_labels)
98
-
99
- prompt = (
100
- f"Given this mission: \"{mission_text}\"\n\n"
101
- f"Which of these detected object classes are relevant to the mission?\n"
102
- f"{json.dumps(detected_labels)}\n\n"
103
- "Return JSON: {\"relevant_labels\": [...]}\n"
104
- "Only include labels from the provided list that are relevant to "
105
- "accomplishing the mission. Be inclusive — if in doubt, include it."
106
- )
107
-
108
- payload = {
109
- "model": "gpt-4o-mini",
110
- "temperature": 0.0,
111
- "max_tokens": 200,
112
- "response_format": {"type": "json_object"},
113
- "messages": [
114
- {"role": "system", "content": "You are a mission relevance filter. Return only JSON."},
115
- {"role": "user", "content": prompt},
116
- ],
117
- }
118
-
119
- try:
120
- resp_data = chat_completion(payload)
121
- content, _refusal = extract_content(resp_data)
122
- if not content:
123
- logger.warning("GPT returned empty content for relevance filter — accept-all")
124
- return set(detected_labels)
125
-
126
- result = json.loads(content)
127
- relevant = result.get("relevant_labels", detected_labels)
128
- relevant_set = {label.lower() for label in relevant}
129
-
130
- logger.info(
131
- "LLM relevance filter: mission=%r detected=%s relevant=%s",
132
- mission_text, detected_labels, relevant_set,
133
- )
134
- return relevant_set
135
-
136
- except OpenAIAPIError as e:
137
- logger.warning("LLM relevance API call failed: %s — accept-all fallback", e)
138
- return set(detected_labels)
139
- except (json.JSONDecodeError, KeyError, TypeError) as e:
140
- logger.warning("LLM relevance response parse failed: %s — accept-all fallback", e)
141
- return set(detected_labels)
 
3
 
4
  Public functions:
5
  evaluate_relevance(detection, criteria) -> RelevanceDecision (deterministic)
 
6
 
7
  INVARIANT INV-13 enforcement: evaluate_relevance() accepts RelevanceCriteria, NOT
8
  MissionSpecification. It cannot see context_phrases, stripped_modifiers, or any
9
  LLM-derived field. This is structural, not by convention.
10
  """
11
 
 
12
  import logging
13
+ from typing import Any, Dict, NamedTuple
 
 
14
 
15
  from coco_classes import canonicalize_coco_name
16
  from utils.schemas import RelevanceCriteria
 
70
  return RelevanceDecision(True, "ok")
71
 
72
  return RelevanceDecision(False, "label_not_in_required_classes")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
utils/tracker.py CHANGED
@@ -202,14 +202,10 @@ class KalmanFilter:
202
  MAX_STALE_FRAMES = 300
203
 
204
  GPT_SYNC_KEYS = frozenset({
205
- # Legacy / polyfilled fields (consumed by frontend cards)
206
- "gpt_distance_m", "gpt_direction", "gpt_description", "gpt_raw",
207
- "threat_level_score", "distance_m", "direction", "description",
208
- # Universal schema fields
209
- "object_type", "size", "visible_weapons", "weapon_readiness",
210
- "motion_status", "range_estimate", "bearing",
211
- "threat_level", "threat_classification", "tactical_intent",
212
- "dynamic_features",
213
  # Provenance and temporal validity
214
  "assessment_frame_index", "assessment_status",
215
  # Mission relevance
@@ -633,7 +629,7 @@ class ByteTracker:
633
  if meta:
634
  # Ensure assessment_frame_index is recorded
635
  if "assessment_frame_index" not in meta and any(
636
- k in meta for k in ("threat_level_score", "gpt_raw", "object_type")
637
  ):
638
  meta["assessment_frame_index"] = self.frame_id
639
  meta["assessment_status"] = AssessmentStatus.ASSESSED
 
202
  MAX_STALE_FRAMES = 300
203
 
204
  GPT_SYNC_KEYS = frozenset({
205
+ # GPT raw payload
206
+ "gpt_raw",
207
+ # Satisfaction reasoning fields
208
+ "satisfies", "reason",
 
 
 
 
209
  # Provenance and temporal validity
210
  "assessment_frame_index", "assessment_status",
211
  # Mission relevance
 
629
  if meta:
630
  # Ensure assessment_frame_index is recorded
631
  if "assessment_frame_index" not in meta and any(
632
+ k in meta for k in ("satisfies", "gpt_raw")
633
  ):
634
  meta["assessment_frame_index"] = self.frame_id
635
  meta["assessment_status"] = AssessmentStatus.ASSESSED