Spaces:
Running
Running
Zhen Ye
committed on
Commit
·
2c4431d
1
Parent(s):
374a0ef
Refine GPT enrichment logic, add relevance badges to UI, and fix minor model/logging issues
Browse files- app.py +12 -2
- frontend/js/api/client.js +30 -11
- frontend/js/ui/cards.js +6 -1
- models/detectors/grounding_dino.py +4 -1
- utils/gpt_reasoning.py +2 -2
app.py
CHANGED
|
@@ -112,6 +112,15 @@ async def _enrich_first_frame_gpt(
|
|
| 112 |
gpt_dets = [d for d in detections if d.get("mission_relevant", True)]
|
| 113 |
|
| 114 |
if not gpt_dets:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 115 |
return
|
| 116 |
|
| 117 |
# GPT threat assessment
|
|
@@ -131,8 +140,9 @@ async def _enrich_first_frame_gpt(
|
|
| 131 |
info = gpt_results[obj_id]
|
| 132 |
det.update(info)
|
| 133 |
det["gpt_raw"] = info
|
| 134 |
-
|
| 135 |
-
|
|
|
|
| 136 |
|
| 137 |
for det in detections:
|
| 138 |
if "assessment_status" not in det:
|
|
|
|
| 112 |
gpt_dets = [d for d in detections if d.get("mission_relevant", True)]
|
| 113 |
|
| 114 |
if not gpt_dets:
|
| 115 |
+
# All detections filtered as not relevant — mark ASSESSED and persist
|
| 116 |
+
for det in detections:
|
| 117 |
+
det["assessment_status"] = "ASSESSED"
|
| 118 |
+
storage = get_job_storage()
|
| 119 |
+
storage.update(
|
| 120 |
+
job_id,
|
| 121 |
+
first_frame_detections=detections,
|
| 122 |
+
)
|
| 123 |
+
logging.info("All detections non-relevant for job %s; marked ASSESSED", job_id)
|
| 124 |
return
|
| 125 |
|
| 126 |
# GPT threat assessment
|
|
|
|
| 140 |
info = gpt_results[obj_id]
|
| 141 |
det.update(info)
|
| 142 |
det["gpt_raw"] = info
|
| 143 |
+
# Mark ASSESSED regardless of whether GPT returned data for this object
|
| 144 |
+
det["assessment_frame_index"] = 0
|
| 145 |
+
det["assessment_status"] = "ASSESSED"
|
| 146 |
|
| 147 |
for det in detections:
|
| 148 |
if "assessment_status" not in det:
|
frontend/js/api/client.js
CHANGED
|
@@ -161,18 +161,36 @@ APP.api.client.pollAsyncJob = async function () {
|
|
| 161 |
|
| 162 |
// Check if GPT enrichment has updated first-frame detections
|
| 163 |
if (status.first_frame_detections && status.first_frame_detections.length > 0) {
|
| 164 |
-
const
|
| 165 |
-
|
| 166 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 167 |
state.hf._gptEnriched = true;
|
| 168 |
-
state.hf.firstFrameDetections =
|
| 169 |
-
// Merge GPT data into existing state.detections (preserve bbox/aim/etc)
|
| 170 |
-
const rawDets = status.first_frame_detections;
|
| 171 |
for (const rd of rawDets) {
|
| 172 |
const tid = rd.track_id || `T${String(rawDets.indexOf(rd) + 1).padStart(2, "0")}`;
|
| 173 |
const existing = (state.detections || []).find(d => d.id === tid);
|
| 174 |
if (existing && rd.gpt_raw) {
|
| 175 |
-
existing.features = {};
|
| 176 |
const g = rd.gpt_raw;
|
| 177 |
const rangeStr = g.range_estimate && g.range_estimate !== "Unknown"
|
| 178 |
? g.range_estimate + " (est.)" : "Unknown";
|
|
@@ -197,16 +215,17 @@ APP.api.client.pollAsyncJob = async function () {
|
|
| 197 |
existing.threat_level_score = rd.threat_level_score || g.threat_level_score || 0;
|
| 198 |
existing.threat_classification = rd.threat_classification || g.threat_classification || "Unknown";
|
| 199 |
existing.weapon_readiness = rd.weapon_readiness || g.weapon_readiness || "Unknown";
|
| 200 |
-
existing.assessment_status = rd.assessment_status || "ASSESSED";
|
| 201 |
existing.gpt_distance_m = rd.gpt_distance_m || null;
|
| 202 |
existing.gpt_direction = rd.gpt_direction || null;
|
|
|
|
| 203 |
}
|
| 204 |
}
|
| 205 |
-
if (APP.ui && APP.ui.cards && APP.ui.cards.renderFrameTrackList) {
|
| 206 |
-
APP.ui.cards.renderFrameTrackList();
|
| 207 |
-
}
|
| 208 |
log("Track cards updated with GPT assessment", "g");
|
| 209 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
| 210 |
}
|
| 211 |
}
|
| 212 |
|
|
|
|
| 161 |
|
| 162 |
// Check if GPT enrichment has updated first-frame detections
|
| 163 |
if (status.first_frame_detections && status.first_frame_detections.length > 0) {
|
| 164 |
+
const rawDets = status.first_frame_detections;
|
| 165 |
+
let needsRender = false;
|
| 166 |
+
|
| 167 |
+
// Phase A: Always sync assessment status fields (not gated on gpt_raw)
|
| 168 |
+
for (const rd of rawDets) {
|
| 169 |
+
const tid = rd.track_id || `T${String(rawDets.indexOf(rd) + 1).padStart(2, "0")}`;
|
| 170 |
+
const existing = (state.detections || []).find(d => d.id === tid);
|
| 171 |
+
if (existing) {
|
| 172 |
+
if (rd.assessment_status && existing.assessment_status !== rd.assessment_status) {
|
| 173 |
+
existing.assessment_status = rd.assessment_status;
|
| 174 |
+
needsRender = true;
|
| 175 |
+
}
|
| 176 |
+
if (rd.mission_relevant !== undefined && rd.mission_relevant !== null) {
|
| 177 |
+
existing.mission_relevant = rd.mission_relevant;
|
| 178 |
+
}
|
| 179 |
+
if (rd.relevance_reason) {
|
| 180 |
+
existing.relevance_reason = rd.relevance_reason;
|
| 181 |
+
}
|
| 182 |
+
}
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
// Phase B: One-shot GPT feature merge (gated on gpt_raw + _gptEnriched flag)
|
| 186 |
+
const hasGptData = rawDets.some(d => d.gpt_raw);
|
| 187 |
+
if (hasGptData && !state.hf._gptEnriched) {
|
| 188 |
state.hf._gptEnriched = true;
|
| 189 |
+
state.hf.firstFrameDetections = rawDets;
|
|
|
|
|
|
|
| 190 |
for (const rd of rawDets) {
|
| 191 |
const tid = rd.track_id || `T${String(rawDets.indexOf(rd) + 1).padStart(2, "0")}`;
|
| 192 |
const existing = (state.detections || []).find(d => d.id === tid);
|
| 193 |
if (existing && rd.gpt_raw) {
|
|
|
|
| 194 |
const g = rd.gpt_raw;
|
| 195 |
const rangeStr = g.range_estimate && g.range_estimate !== "Unknown"
|
| 196 |
? g.range_estimate + " (est.)" : "Unknown";
|
|
|
|
| 215 |
existing.threat_level_score = rd.threat_level_score || g.threat_level_score || 0;
|
| 216 |
existing.threat_classification = rd.threat_classification || g.threat_classification || "Unknown";
|
| 217 |
existing.weapon_readiness = rd.weapon_readiness || g.weapon_readiness || "Unknown";
|
|
|
|
| 218 |
existing.gpt_distance_m = rd.gpt_distance_m || null;
|
| 219 |
existing.gpt_direction = rd.gpt_direction || null;
|
| 220 |
+
needsRender = true;
|
| 221 |
}
|
| 222 |
}
|
|
|
|
|
|
|
|
|
|
| 223 |
log("Track cards updated with GPT assessment", "g");
|
| 224 |
}
|
| 225 |
+
|
| 226 |
+
if (needsRender && APP.ui && APP.ui.cards && APP.ui.cards.renderFrameTrackList) {
|
| 227 |
+
APP.ui.cards.renderFrameTrackList();
|
| 228 |
+
}
|
| 229 |
}
|
| 230 |
}
|
| 231 |
|
frontend/js/ui/cards.js
CHANGED
|
@@ -97,7 +97,12 @@ APP.ui.cards.renderFrameTrackList = function () {
|
|
| 97 |
<span>${id} · ${det.label}</span>
|
| 98 |
<div style="display:flex; gap:4px; align-items:center">
|
| 99 |
${statusBadge}
|
| 100 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
</div>
|
| 102 |
</div>
|
| 103 |
<div class="track-card-meta">
|
|
|
|
| 97 |
<span>${id} · ${det.label}</span>
|
| 98 |
<div style="display:flex; gap:4px; align-items:center">
|
| 99 |
${statusBadge}
|
| 100 |
+
${det.mission_relevant === true
|
| 101 |
+
? '<span class="badgemini" style="background:#28a745; color:white">RELEVANT</span>'
|
| 102 |
+
: det.mission_relevant === false
|
| 103 |
+
? '<span class="badgemini" style="background:#6c757d; color:white">N/R</span>'
|
| 104 |
+
: `<span class="badgemini" style="background:rgba(255,255,255,.08); color:rgba(255,255,255,.7)">${(det.score * 100).toFixed(0)}%</span>`
|
| 105 |
+
}
|
| 106 |
</div>
|
| 107 |
</div>
|
| 108 |
<div class="track-card-meta">
|
models/detectors/grounding_dino.py
CHANGED
|
@@ -23,7 +23,10 @@ class GroundingDinoDetector(ObjectDetector):
|
|
| 23 |
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 24 |
logging.info("Loading %s onto %s", self.MODEL_NAME, self.device)
|
| 25 |
self.processor = GroundingDinoProcessor.from_pretrained(self.MODEL_NAME)
|
| 26 |
-
self.model = GroundingDinoForObjectDetection.from_pretrained(
|
|
|
|
|
|
|
|
|
|
| 27 |
self.model.to(self.device)
|
| 28 |
self.model.eval()
|
| 29 |
|
|
|
|
| 23 |
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 24 |
logging.info("Loading %s onto %s", self.MODEL_NAME, self.device)
|
| 25 |
self.processor = GroundingDinoProcessor.from_pretrained(self.MODEL_NAME)
|
| 26 |
+
self.model = GroundingDinoForObjectDetection.from_pretrained(
|
| 27 |
+
self.MODEL_NAME,
|
| 28 |
+
low_cpu_mem_usage=False,
|
| 29 |
+
)
|
| 30 |
self.model.to(self.device)
|
| 31 |
self.model.eval()
|
| 32 |
|
utils/gpt_reasoning.py
CHANGED
|
@@ -141,7 +141,7 @@ def estimate_threat_gpt(
|
|
| 141 |
|
| 142 |
api_key = os.environ.get("OPENAI_API_KEY")
|
| 143 |
if not api_key:
|
| 144 |
-
logger.
|
| 145 |
return {}
|
| 146 |
|
| 147 |
# 1. Prepare detections summary for prompt
|
|
@@ -271,5 +271,5 @@ def estimate_threat_gpt(
|
|
| 271 |
return objects
|
| 272 |
|
| 273 |
except Exception as e:
|
| 274 |
-
logger.error(
|
| 275 |
return {}
|
|
|
|
| 141 |
|
| 142 |
api_key = os.environ.get("OPENAI_API_KEY")
|
| 143 |
if not api_key:
|
| 144 |
+
logger.error("OPENAI_API_KEY not set. Skipping GPT threat assessment.")
|
| 145 |
return {}
|
| 146 |
|
| 147 |
# 1. Prepare detections summary for prompt
|
|
|
|
| 271 |
return objects
|
| 272 |
|
| 273 |
except Exception as e:
|
| 274 |
+
logger.error("GPT API call failed: %s", e, exc_info=True)
|
| 275 |
return {}
|