Zhen Ye committed on
Commit
5a72443
·
1 Parent(s): d23c4ea

chore: remove dead code and unused frontend modules

Browse files
frontend/js/LaserPerception_original.js DELETED
The diff for this file is too large to render. See raw diff
 
frontend/js/core/video.js CHANGED
@@ -186,9 +186,7 @@ APP.core.video.unloadVideo = async function (options = {}) {
186
  APP.core.video.clearCanvas($("#engageOverlay"));
187
 
188
  // Re-render UI components
189
- if (APP.ui.radar.renderFrameRadar) APP.ui.radar.renderFrameRadar();
190
  if (APP.ui.cards.renderFrameTrackList) APP.ui.cards.renderFrameTrackList();
191
- if (APP.ui.trade.renderTrade) APP.ui.trade.renderTrade();
192
 
193
  setStatus("warn", "STANDBY · No video loaded");
194
  log("Video unloaded. Demo reset.", "w");
@@ -621,4 +619,3 @@ APP.core.video.displayFirstFrameWithDepth = function () {
621
  APP.core.video.drawFirstFrame();
622
  }
623
  };
624
-
 
186
  APP.core.video.clearCanvas($("#engageOverlay"));
187
 
188
  // Re-render UI components
 
189
  if (APP.ui.cards.renderFrameTrackList) APP.ui.cards.renderFrameTrackList();
 
190
 
191
  setStatus("warn", "STANDBY · No video loaded");
192
  log("Video unloaded. Demo reset.", "w");
 
619
  APP.core.video.drawFirstFrame();
620
  }
621
  };
 
frontend/js/ui/features.js DELETED
@@ -1 +0,0 @@
1
- APP.ui.features = {};
 
 
frontend/js/ui/intel.js DELETED
@@ -1,118 +0,0 @@
1
- // Intel Summary Module - Mission intel generation and display
2
- APP.ui.intel = {};
3
-
4
- APP.ui.intel.setIntelStatus = function (kind, text) {
5
- const { $ } = APP.core.utils;
6
- const intelStamp = $("#intelStamp");
7
- const intelDot = $("#intelDot");
8
-
9
- if (!intelStamp || !intelDot) return;
10
-
11
- intelStamp.innerHTML = text;
12
- intelDot.className = "dot" + (kind === "warn" ? " warn" : (kind === "bad" ? " bad" : ""));
13
- intelDot.style.width = "7px";
14
- intelDot.style.height = "7px";
15
- intelDot.style.boxShadow = "none";
16
- };
17
-
18
- APP.ui.intel.setIntelThumb = function (i, dataUrl) {
19
- const { $ } = APP.core.utils;
20
- const thumbs = [$("#intelThumb0"), $("#intelThumb1"), $("#intelThumb2")];
21
- const img = thumbs[i];
22
- if (!img) return;
23
- img.src = dataUrl || "";
24
- };
25
-
26
- APP.ui.intel.resetIntelUI = function () {
27
- const { $ } = APP.core.utils;
28
- const intelSummaryBox = $("#intelSummaryBox");
29
-
30
- if (!intelSummaryBox) return;
31
- intelSummaryBox.innerHTML = 'Upload a video, then click <b>Detect</b> to generate an unbiased scene summary.';
32
- APP.ui.intel.setIntelStatus("warn", "Idle");
33
- APP.ui.intel.setIntelThumb(0, "");
34
- APP.ui.intel.setIntelThumb(1, "");
35
- APP.ui.intel.setIntelThumb(2, "");
36
- };
37
-
38
- // External hook for intel summary (can be replaced by user)
39
- APP.ui.intel.externalIntel = async function (frames) {
40
- console.log("externalIntel called with", frames.length, "frames");
41
- return "Video processed. No external intel provider connected.";
42
- };
43
-
44
- APP.ui.intel.computeIntelSummary = async function () {
45
- const { state } = APP.core;
46
- const { $ } = APP.core.utils;
47
- const { log } = APP.ui.logging;
48
-
49
- const intelSummaryBox = $("#intelSummaryBox");
50
- const videoHidden = $("#videoHidden");
51
- const videoEngage = $("#videoEngage");
52
-
53
- if (!intelSummaryBox) return;
54
- if (!state.videoLoaded) {
55
- APP.ui.intel.resetIntelUI();
56
- return;
57
- }
58
- if (state.intelBusy) return;
59
-
60
- state.intelBusy = true;
61
- APP.ui.intel.setIntelStatus("warn", "Generating…");
62
- intelSummaryBox.textContent = "Sampling frames and running analysis…";
63
-
64
- try {
65
- const videoEl = videoHidden || videoEngage;
66
- const dur = videoEl ? (videoEl.duration || 0) : 0;
67
- const times = [0, dur ? dur * 0.33 : 1, dur ? dur * 0.66 : 2];
68
- const frames = [];
69
-
70
- for (let i = 0; i < times.length; i++) {
71
- await APP.core.video.seekTo(videoEl, times[i]);
72
-
73
- const canvas = document.createElement("canvas");
74
- canvas.width = 640;
75
- canvas.height = 360;
76
- const ctx = canvas.getContext("2d");
77
- ctx.drawImage(videoEl, 0, 0, canvas.width, canvas.height);
78
- const dataUrl = canvas.toDataURL("image/jpeg", 0.6);
79
- frames.push(dataUrl);
80
-
81
- try {
82
- APP.ui.intel.setIntelThumb(i, dataUrl);
83
- } catch (_) { }
84
- }
85
-
86
- const summary = await APP.ui.intel.externalIntel(frames);
87
-
88
- intelSummaryBox.textContent = summary;
89
- APP.ui.intel.setIntelStatus("good", `Updated · ${new Date().toLocaleTimeString()}`);
90
- } catch (err) {
91
- APP.ui.intel.setIntelStatus("bad", "Summary unavailable");
92
- intelSummaryBox.textContent = `Unable to generate summary: ${err.message}`;
93
- console.error(err);
94
- } finally {
95
- state.intelBusy = false;
96
- }
97
- };
98
-
99
- // Render mission context (if applicable)
100
- APP.ui.intel.renderMissionContext = function () {
101
- const { state } = APP.core;
102
- const { $ } = APP.core.utils;
103
-
104
- const missionClassesEl = $("#missionClasses");
105
- const missionIdEl = $("#missionId");
106
-
107
- if (missionClassesEl) {
108
- if (state.hf.queries && state.hf.queries.length > 0) {
109
- missionClassesEl.textContent = state.hf.queries.join(", ");
110
- } else {
111
- missionClassesEl.textContent = "All objects (no filter)";
112
- }
113
- }
114
-
115
- if (missionIdEl) {
116
- missionIdEl.textContent = state.hf.missionId || "—";
117
- }
118
- };
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
frontend/js/ui/radar.js DELETED
@@ -1,668 +0,0 @@
1
- APP.ui.radar = {};
2
-
3
- // Military color palette
4
- APP.ui.radar.colors = {
5
- background: "#0a0d0f",
6
- gridPrimary: "rgba(0, 255, 136, 0.3)",
7
- gridSecondary: "rgba(0, 255, 136, 0.12)",
8
- gridTertiary: "rgba(0, 255, 136, 0.06)",
9
- sweepLine: "rgba(0, 255, 136, 0.9)",
10
- sweepGlow: "rgba(0, 255, 136, 0.4)",
11
- sweepTrail: "rgba(0, 255, 136, 0.15)",
12
- text: "rgba(0, 255, 136, 0.9)",
13
- textDim: "rgba(0, 255, 136, 0.5)",
14
- ownship: "#00ff88",
15
- hostile: "#ff3344",
16
- neutral: "#ffaa00",
17
- friendly: "#00aaff",
18
- selected: "#ffffff",
19
- dataBox: "rgba(0, 20, 10, 0.92)",
20
- dataBorder: "rgba(0, 255, 136, 0.6)"
21
- };
22
-
23
- APP.ui.radar.render = function (canvasId, trackSource, options = {}) {
24
- const isStatic = options.static || false;
25
- const { state } = APP.core;
26
- const { clamp, now, $ } = APP.core.utils;
27
- const canvas = $(`#${canvasId}`);
28
- const colors = APP.ui.radar.colors;
29
-
30
- if (!canvas) return;
31
- const ctx = canvas.getContext("2d");
32
- const rect = canvas.getBoundingClientRect();
33
- const dpr = devicePixelRatio || 1;
34
-
35
- // Resize if needed
36
- const targetW = Math.max(1, Math.floor(rect.width * dpr));
37
- const targetH = Math.max(1, Math.floor(rect.height * dpr));
38
- if (canvas.width !== targetW || canvas.height !== targetH) {
39
- canvas.width = targetW;
40
- canvas.height = targetH;
41
- }
42
-
43
- const w = canvas.width, h = canvas.height;
44
- const cx = w * 0.5, cy = h * 0.5;
45
- const R = Math.min(w, h) * 0.44;
46
- const maxRangeM = 1500;
47
-
48
- ctx.clearRect(0, 0, w, h);
49
-
50
- // --- Control Knobs ---
51
- const histSlider = document.getElementById("radarHistoryLen");
52
- const futSlider = document.getElementById("radarFutureLen");
53
- if (histSlider && document.getElementById("radarHistoryVal")) {
54
- document.getElementById("radarHistoryVal").textContent = histSlider.value;
55
- }
56
- if (futSlider && document.getElementById("radarFutureVal")) {
57
- document.getElementById("radarFutureVal").textContent = futSlider.value;
58
- }
59
- const maxHist = histSlider ? parseInt(histSlider.value, 10) : 30;
60
- const maxFut = futSlider ? parseInt(futSlider.value, 10) : 30;
61
-
62
- // ===========================================
63
- // 1. BACKGROUND - Dark tactical display
64
- // ===========================================
65
- ctx.fillStyle = colors.background;
66
- ctx.fillRect(0, 0, w, h);
67
-
68
- // Subtle noise/static effect
69
- ctx.globalAlpha = 0.03;
70
- for (let i = 0; i < 100; i++) {
71
- const nx = Math.random() * w;
72
- const ny = Math.random() * h;
73
- ctx.fillStyle = "#00ff88";
74
- ctx.fillRect(nx, ny, 1, 1);
75
- }
76
- ctx.globalAlpha = 1;
77
-
78
- // Scanline effect
79
- ctx.strokeStyle = "rgba(0, 255, 136, 0.02)";
80
- ctx.lineWidth = 1;
81
- for (let y = 0; y < h; y += 3) {
82
- ctx.beginPath();
83
- ctx.moveTo(0, y);
84
- ctx.lineTo(w, y);
85
- ctx.stroke();
86
- }
87
-
88
- // ===========================================
89
- // 2. OUTER BEZEL / FRAME
90
- // ===========================================
91
- // Outer border ring
92
- ctx.strokeStyle = colors.gridPrimary;
93
- ctx.lineWidth = 2;
94
- ctx.beginPath();
95
- ctx.arc(cx, cy, R + 8, 0, Math.PI * 2);
96
- ctx.stroke();
97
-
98
- // Inner border ring
99
- ctx.strokeStyle = colors.gridSecondary;
100
- ctx.lineWidth = 1;
101
- ctx.beginPath();
102
- ctx.arc(cx, cy, R + 3, 0, Math.PI * 2);
103
- ctx.stroke();
104
-
105
- // Corner brackets
106
- const bracketSize = 15;
107
- const bracketOffset = R + 20;
108
- ctx.strokeStyle = colors.gridPrimary;
109
- ctx.lineWidth = 2;
110
-
111
- // Top-left
112
- ctx.beginPath();
113
- ctx.moveTo(cx - bracketOffset, cy - bracketOffset + bracketSize);
114
- ctx.lineTo(cx - bracketOffset, cy - bracketOffset);
115
- ctx.lineTo(cx - bracketOffset + bracketSize, cy - bracketOffset);
116
- ctx.stroke();
117
-
118
- // Top-right
119
- ctx.beginPath();
120
- ctx.moveTo(cx + bracketOffset - bracketSize, cy - bracketOffset);
121
- ctx.lineTo(cx + bracketOffset, cy - bracketOffset);
122
- ctx.lineTo(cx + bracketOffset, cy - bracketOffset + bracketSize);
123
- ctx.stroke();
124
-
125
- // Bottom-left
126
- ctx.beginPath();
127
- ctx.moveTo(cx - bracketOffset, cy + bracketOffset - bracketSize);
128
- ctx.lineTo(cx - bracketOffset, cy + bracketOffset);
129
- ctx.lineTo(cx - bracketOffset + bracketSize, cy + bracketOffset);
130
- ctx.stroke();
131
-
132
- // Bottom-right
133
- ctx.beginPath();
134
- ctx.moveTo(cx + bracketOffset - bracketSize, cy + bracketOffset);
135
- ctx.lineTo(cx + bracketOffset, cy + bracketOffset);
136
- ctx.lineTo(cx + bracketOffset, cy + bracketOffset - bracketSize);
137
- ctx.stroke();
138
-
139
- // ===========================================
140
- // 3. RANGE RINGS with labels
141
- // ===========================================
142
- const rangeRings = [
143
- { frac: 0.25, label: "375m" },
144
- { frac: 0.5, label: "750m" },
145
- { frac: 0.75, label: "1125m" },
146
- { frac: 1.0, label: "1500m" }
147
- ];
148
-
149
- rangeRings.forEach((ring, i) => {
150
- const ringR = R * ring.frac;
151
-
152
- // Ring line
153
- ctx.strokeStyle = i === 3 ? colors.gridPrimary : colors.gridSecondary;
154
- ctx.lineWidth = i === 3 ? 1.5 : 1;
155
- ctx.beginPath();
156
- ctx.arc(cx, cy, ringR, 0, Math.PI * 2);
157
- ctx.stroke();
158
-
159
- // Tick marks on outer ring
160
- if (i === 3) {
161
- for (let deg = 0; deg < 360; deg += 5) {
162
- const rad = (deg - 90) * Math.PI / 180;
163
- const tickLen = deg % 30 === 0 ? 8 : (deg % 10 === 0 ? 5 : 2);
164
- const x1 = cx + Math.cos(rad) * ringR;
165
- const y1 = cy + Math.sin(rad) * ringR;
166
- const x2 = cx + Math.cos(rad) * (ringR + tickLen);
167
- const y2 = cy + Math.sin(rad) * (ringR + tickLen);
168
-
169
- ctx.strokeStyle = deg % 30 === 0 ? colors.gridPrimary : colors.gridTertiary;
170
- ctx.lineWidth = deg % 30 === 0 ? 1.5 : 0.5;
171
- ctx.beginPath();
172
- ctx.moveTo(x1, y1);
173
- ctx.lineTo(x2, y2);
174
- ctx.stroke();
175
- }
176
- }
177
-
178
- // Range labels (on right side)
179
- ctx.font = "bold 9px 'Courier New', monospace";
180
- ctx.fillStyle = colors.textDim;
181
- ctx.textAlign = "left";
182
- ctx.textBaseline = "middle";
183
- ctx.fillText(ring.label, cx + ringR + 4, cy);
184
- });
185
-
186
- // ===========================================
187
- // 4. COMPASS ROSE / BEARING LINES
188
- // ===========================================
189
- // Cardinal directions with labels
190
- const cardinals = [
191
- { deg: 0, label: "N", primary: true },
192
- { deg: 45, label: "NE", primary: false },
193
- { deg: 90, label: "E", primary: true },
194
- { deg: 135, label: "SE", primary: false },
195
- { deg: 180, label: "S", primary: true },
196
- { deg: 225, label: "SW", primary: false },
197
- { deg: 270, label: "W", primary: true },
198
- { deg: 315, label: "NW", primary: false }
199
- ];
200
-
201
- cardinals.forEach(dir => {
202
- const rad = (dir.deg - 90) * Math.PI / 180;
203
- const x1 = cx + Math.cos(rad) * 12;
204
- const y1 = cy + Math.sin(rad) * 12;
205
- const x2 = cx + Math.cos(rad) * R;
206
- const y2 = cy + Math.sin(rad) * R;
207
-
208
- // Spoke line
209
- ctx.strokeStyle = dir.primary ? colors.gridSecondary : colors.gridTertiary;
210
- ctx.lineWidth = dir.primary ? 1 : 0.5;
211
- ctx.setLineDash(dir.primary ? [] : [2, 4]);
212
- ctx.beginPath();
213
- ctx.moveTo(x1, y1);
214
- ctx.lineTo(x2, y2);
215
- ctx.stroke();
216
- ctx.setLineDash([]);
217
-
218
- // Cardinal label
219
- const labelR = R + 18;
220
- const lx = cx + Math.cos(rad) * labelR;
221
- const ly = cy + Math.sin(rad) * labelR;
222
-
223
- ctx.font = dir.primary ? "bold 11px 'Courier New', monospace" : "9px 'Courier New', monospace";
224
- ctx.fillStyle = dir.primary ? colors.text : colors.textDim;
225
- ctx.textAlign = "center";
226
- ctx.textBaseline = "middle";
227
- ctx.fillText(dir.label, lx, ly);
228
- });
229
-
230
- // ===========================================
231
- // 5. SWEEP ANIMATION (skip for static mode)
232
- // ===========================================
233
- if (!isStatic) {
234
- const t = now() / 2000; // Slower sweep
235
- const sweepAng = (t * (Math.PI * 2)) % (Math.PI * 2);
236
-
237
- // Sweep trail (gradient arc)
238
- const trailLength = Math.PI * 0.4;
239
- const trailGrad = ctx.createConicGradient(sweepAng - trailLength + Math.PI / 2, cx, cy);
240
- trailGrad.addColorStop(0, "transparent");
241
- trailGrad.addColorStop(0.7, "rgba(0, 255, 136, 0.0)");
242
- trailGrad.addColorStop(1, "rgba(0, 255, 136, 0.12)");
243
-
244
- ctx.fillStyle = trailGrad;
245
- ctx.beginPath();
246
- ctx.arc(cx, cy, R, 0, Math.PI * 2);
247
- ctx.fill();
248
-
249
- // Sweep line with glow
250
- ctx.shadowBlur = 15;
251
- ctx.shadowColor = colors.sweepGlow;
252
- ctx.strokeStyle = colors.sweepLine;
253
- ctx.lineWidth = 2;
254
- ctx.beginPath();
255
- ctx.moveTo(cx, cy);
256
- ctx.lineTo(cx + Math.cos(sweepAng) * R, cy + Math.sin(sweepAng) * R);
257
- ctx.stroke();
258
- ctx.shadowBlur = 0;
259
- }
260
-
261
- // ===========================================
262
- // 6. OWNSHIP (Center)
263
- // ===========================================
264
- // Ownship symbol - aircraft shape
265
- ctx.fillStyle = colors.ownship;
266
- ctx.shadowBlur = 8;
267
- ctx.shadowColor = colors.ownship;
268
-
269
- ctx.beginPath();
270
- ctx.moveTo(cx, cy - 8); // Nose
271
- ctx.lineTo(cx + 5, cy + 4); // Right wing
272
- ctx.lineTo(cx + 2, cy + 2);
273
- ctx.lineTo(cx + 2, cy + 8); // Right tail
274
- ctx.lineTo(cx, cy + 5);
275
- ctx.lineTo(cx - 2, cy + 8); // Left tail
276
- ctx.lineTo(cx - 2, cy + 2);
277
- ctx.lineTo(cx - 5, cy + 4); // Left wing
278
- ctx.closePath();
279
- ctx.fill();
280
- ctx.shadowBlur = 0;
281
-
282
- // Ownship pulse ring (skip for static mode)
283
- if (!isStatic) {
284
- const pulsePhase = (now() / 1000) % 1;
285
- const pulseR = 10 + pulsePhase * 15;
286
- ctx.strokeStyle = `rgba(0, 255, 136, ${0.5 - pulsePhase * 0.5})`;
287
- ctx.lineWidth = 1;
288
- ctx.beginPath();
289
- ctx.arc(cx, cy, pulseR, 0, Math.PI * 2);
290
- ctx.stroke();
291
- } else {
292
- // Static ring for static mode
293
- ctx.strokeStyle = `rgba(0, 255, 136, 0.4)`;
294
- ctx.lineWidth = 1;
295
- ctx.beginPath();
296
- ctx.arc(cx, cy, 12, 0, Math.PI * 2);
297
- ctx.stroke();
298
- }
299
-
300
- // ===========================================
301
- // 7. RENDER TRACKS / TARGETS
302
- // ===========================================
303
- const source = trackSource || state.detections;
304
- const fovRad = (60 * Math.PI) / 180;
305
-
306
- if (source && source.length > 0) {
307
- source.forEach((det, idx) => {
308
- // Calculate range
309
- let rPx;
310
- let dist = 3000;
311
-
312
- if (det.depth_valid && det.depth_rel != null) {
313
- rPx = (det.depth_rel * 0.9 + 0.1) * R;
314
- dist = det.depth_est_m || 3000;
315
- } else {
316
- if (det.gpt_distance_m) {
317
- dist = det.gpt_distance_m;
318
- } else if (det.baseRange_m) {
319
- dist = det.baseRange_m;
320
- }
321
- rPx = (clamp(dist, 0, maxRangeM) / maxRangeM) * R;
322
- }
323
-
324
- // Calculate bearing from bbox
325
- const bx = det.bbox.x + det.bbox.w * 0.5;
326
- let tx = 0;
327
- if (bx <= 2.0) {
328
- tx = bx - 0.5;
329
- } else {
330
- const fw = state.frame.w || 1280;
331
- tx = (bx / fw) - 0.5;
332
- }
333
- const angle = (-Math.PI / 2) + (tx * fovRad);
334
-
335
- // Target position
336
- const px = cx + Math.cos(angle) * rPx;
337
- const py = cy + Math.sin(angle) * rPx;
338
-
339
- const isSelected = (state.selectedId === det.id) || (state.tracker.selectedTrackId === det.id);
340
-
341
- // Determine threat color
342
- let threatColor = colors.hostile; // Default hostile (red)
343
- const label = (det.label || "").toLowerCase();
344
- if (label.includes('person')) threatColor = colors.neutral;
345
- if (label.includes('friendly')) threatColor = colors.friendly;
346
- if (isSelected) threatColor = colors.selected;
347
-
348
- // Target glow for all targets
349
- ctx.shadowBlur = isSelected ? 15 : 8;
350
- ctx.shadowColor = threatColor;
351
-
352
- // ===========================================
353
- // TARGET SYMBOL - Military bracket style
354
- // ===========================================
355
- ctx.save();
356
- ctx.translate(px, py);
357
-
358
- // Rotation based on heading
359
- let rotation = -Math.PI / 2;
360
- if (det.angle_deg !== undefined) {
361
- rotation = det.angle_deg * (Math.PI / 180);
362
- }
363
- ctx.rotate(rotation);
364
-
365
- const size = isSelected ? 16 : 12;
366
-
367
- // Draw target - larger triangle shape
368
- ctx.strokeStyle = threatColor;
369
- ctx.fillStyle = threatColor;
370
- ctx.lineWidth = isSelected ? 2.5 : 2;
371
-
372
- // Triangle pointing in direction of travel
373
- ctx.beginPath();
374
- ctx.moveTo(size, 0); // Front tip
375
- ctx.lineTo(-size * 0.6, -size * 0.5); // Top left
376
- ctx.lineTo(-size * 0.4, 0); // Back indent
377
- ctx.lineTo(-size * 0.6, size * 0.5); // Bottom left
378
- ctx.closePath();
379
- ctx.fill();
380
-
381
- // Outline for better visibility
382
- ctx.strokeStyle = isSelected ? "#ffffff" : "rgba(0, 0, 0, 0.5)";
383
- ctx.lineWidth = 1;
384
- ctx.stroke();
385
-
386
- ctx.restore();
387
- ctx.shadowBlur = 0;
388
-
389
- // ===========================================
390
- // TARGET ID LABEL (always show)
391
- // ===========================================
392
- ctx.font = "bold 9px 'Courier New', monospace";
393
- ctx.fillStyle = threatColor;
394
- ctx.textAlign = "left";
395
- ctx.textBaseline = "middle";
396
- ctx.fillText(det.id, px + 12, py - 2);
397
-
398
- // ===========================================
399
- // SELECTED TARGET - Full data display
400
- // ===========================================
401
- if (isSelected) {
402
- // Targeting brackets around selected target
403
- const bracketS = 18;
404
- ctx.strokeStyle = colors.selected;
405
- ctx.lineWidth = 1.5;
406
-
407
- // Animated bracket expansion (static for static mode)
408
- const bracketPulse = isStatic ? 0 : Math.sin(now() / 200) * 2;
409
- const bOff = bracketS + bracketPulse;
410
-
411
- // Top-left bracket
412
- ctx.beginPath();
413
- ctx.moveTo(px - bOff, py - bOff + 6);
414
- ctx.lineTo(px - bOff, py - bOff);
415
- ctx.lineTo(px - bOff + 6, py - bOff);
416
- ctx.stroke();
417
-
418
- // Top-right bracket
419
- ctx.beginPath();
420
- ctx.moveTo(px + bOff - 6, py - bOff);
421
- ctx.lineTo(px + bOff, py - bOff);
422
- ctx.lineTo(px + bOff, py - bOff + 6);
423
- ctx.stroke();
424
-
425
- // Bottom-left bracket
426
- ctx.beginPath();
427
- ctx.moveTo(px - bOff, py + bOff - 6);
428
- ctx.lineTo(px - bOff, py + bOff);
429
- ctx.lineTo(px - bOff + 6, py + bOff);
430
- ctx.stroke();
431
-
432
- // Bottom-right bracket
433
- ctx.beginPath();
434
- ctx.moveTo(px + bOff - 6, py + bOff);
435
- ctx.lineTo(px + bOff, py + bOff);
436
- ctx.lineTo(px + bOff, py + bOff - 6);
437
- ctx.stroke();
438
-
439
- // Data callout box
440
- const boxX = px + 25;
441
- const boxY = py - 50;
442
- const boxW = 95;
443
- const boxH = det.speed_kph ? 52 : 32;
444
-
445
- // Line from target to box
446
- ctx.strokeStyle = colors.dataBorder;
447
- ctx.lineWidth = 1;
448
- ctx.setLineDash([2, 2]);
449
- ctx.beginPath();
450
- ctx.moveTo(px + 15, py - 10);
451
- ctx.lineTo(boxX, boxY + boxH / 2);
452
- ctx.stroke();
453
- ctx.setLineDash([]);
454
-
455
- // Box background
456
- ctx.fillStyle = colors.dataBox;
457
- ctx.fillRect(boxX, boxY, boxW, boxH);
458
-
459
- // Box border
460
- ctx.strokeStyle = colors.dataBorder;
461
- ctx.lineWidth = 1;
462
- ctx.strokeRect(boxX, boxY, boxW, boxH);
463
-
464
- // Corner accents
465
- ctx.strokeStyle = colors.text;
466
- ctx.lineWidth = 2;
467
- const cornerLen = 5;
468
-
469
- // Top-left
470
- ctx.beginPath();
471
- ctx.moveTo(boxX, boxY + cornerLen);
472
- ctx.lineTo(boxX, boxY);
473
- ctx.lineTo(boxX + cornerLen, boxY);
474
- ctx.stroke();
475
-
476
- // Top-right
477
- ctx.beginPath();
478
- ctx.moveTo(boxX + boxW - cornerLen, boxY);
479
- ctx.lineTo(boxX + boxW, boxY);
480
- ctx.lineTo(boxX + boxW, boxY + cornerLen);
481
- ctx.stroke();
482
-
483
- // Data text
484
- ctx.font = "bold 10px 'Courier New', monospace";
485
- ctx.fillStyle = colors.text;
486
- ctx.textAlign = "left";
487
-
488
- // Range
489
- ctx.fillText(`RNG: ${Math.round(dist)}m`, boxX + 6, boxY + 14);
490
-
491
- // Bearing
492
- const bearingDeg = Math.round((angle + Math.PI / 2) * 180 / Math.PI);
493
- ctx.fillText(`BRG: ${bearingDeg.toString().padStart(3, '0')}°`, boxX + 6, boxY + 28);
494
-
495
- // Speed (if available)
496
- if (det.speed_kph) {
497
- ctx.fillStyle = colors.neutral;
498
- ctx.fillText(`SPD: ${det.speed_kph.toFixed(0)} kph`, boxX + 6, boxY + 42);
499
- }
500
-
501
- // Trail rendering for selected target
502
- if (det.history && det.history.length > 0 && det.gpt_distance_m) {
503
- const currH = det.bbox.h;
504
- const currDist = det.gpt_distance_m;
505
-
506
- ctx.save();
507
- const available = det.history.length;
508
- const startIdx = Math.max(0, available - maxHist);
509
- const subset = det.history.slice(startIdx);
510
-
511
- let points = [];
512
- subset.forEach((hBox) => {
513
- let hH, hX;
514
- if (hBox[0] <= 2.0 && hBox[2] <= 2.0) {
515
- hH = hBox[3] - hBox[1];
516
- hX = (hBox[0] + hBox[2]) / 2;
517
- } else {
518
- const fw = state.frame.w || 1280;
519
- const fh = state.frame.h || 720;
520
- hH = (hBox[3] - hBox[1]) / fh;
521
- hX = ((hBox[0] + hBox[2]) / 2) / fw;
522
- }
523
- if (hH <= 0.001) return;
524
-
525
- let distHist = currDist * (det.bbox.h / hH);
526
- const rPxHist = (clamp(distHist, 0, maxRangeM) / maxRangeM) * R;
527
- const txHist = hX - 0.5;
528
- const angleHist = (-Math.PI / 2) + (txHist * fovRad);
529
- const pxHist = cx + Math.cos(angleHist) * rPxHist;
530
- const pyHist = cy + Math.sin(angleHist) * rPxHist;
531
- points.push({ x: pxHist, y: pyHist });
532
- });
533
-
534
- points.push({ x: px, y: py });
535
-
536
- ctx.lineWidth = 1.5;
537
- for (let i = 0; i < points.length - 1; i++) {
538
- const p1 = points[i];
539
- const p2 = points[i + 1];
540
- const age = points.length - 1 - i;
541
- const alpha = Math.max(0, 1.0 - (age / (maxHist + 1)));
542
-
543
- ctx.beginPath();
544
- ctx.moveTo(p1.x, p1.y);
545
- ctx.lineTo(p2.x, p2.y);
546
- ctx.strokeStyle = threatColor;
547
- ctx.globalAlpha = alpha * 0.6;
548
- ctx.stroke();
549
- }
550
- ctx.globalAlpha = 1;
551
- ctx.restore();
552
- }
553
-
554
- // Predicted path
555
- if (det.predicted_path && maxFut > 0) {
556
- ctx.save();
557
- const futSubset = det.predicted_path.slice(0, maxFut);
558
-
559
- if (futSubset.length > 0) {
560
- const currDist = det.gpt_distance_m || (det.depth_est_m || 2000);
561
- const fw = state.frame.w || 1280;
562
- const fh = state.frame.h || 720;
563
-
564
- let predPoints = [{ x: px, y: py }];
565
-
566
- futSubset.forEach((pt) => {
567
- const pX = pt[0] <= 2.0 ? pt[0] : (pt[0] / fw);
568
- const pY = pt[0] <= 2.0 ? pt[1] : (pt[1] / fh);
569
- const txP = pX - 0.5;
570
- const angP = (-Math.PI / 2) + (txP * fovRad);
571
- const cY = (det.bbox.y <= 2.0) ? (det.bbox.y + det.bbox.h / 2) : ((det.bbox.y + det.bbox.h / 2) / fh);
572
- let distP = currDist * (cY / Math.max(0.01, pY));
573
- const rPxP = (clamp(distP, 0, maxRangeM) / maxRangeM) * R;
574
- const pxP = cx + Math.cos(angP) * rPxP;
575
- const pyP = cy + Math.sin(angP) * rPxP;
576
- predPoints.push({ x: pxP, y: pyP });
577
- });
578
-
579
- ctx.lineWidth = 1.5;
580
- ctx.setLineDash([4, 4]);
581
-
582
- for (let i = 0; i < predPoints.length - 1; i++) {
583
- const p1 = predPoints[i];
584
- const p2 = predPoints[i + 1];
585
- const alpha = Math.max(0, 1.0 - (i / maxFut));
586
-
587
- ctx.beginPath();
588
- ctx.moveTo(p1.x, p1.y);
589
- ctx.lineTo(p2.x, p2.y);
590
- ctx.strokeStyle = threatColor;
591
- ctx.globalAlpha = alpha * 0.8;
592
- ctx.stroke();
593
- }
594
- ctx.setLineDash([]);
595
- ctx.globalAlpha = 1;
596
- }
597
- ctx.restore();
598
- }
599
- }
600
- });
601
- }
602
-
603
- // ===========================================
604
- // 8. STATUS OVERLAY - Top corners
605
- // ===========================================
606
- ctx.font = "bold 9px 'Courier New', monospace";
607
- ctx.fillStyle = colors.textDim;
608
- ctx.textAlign = "left";
609
- ctx.textBaseline = "top";
610
-
611
- // Top left - Mode
612
- ctx.fillText(isStatic ? "SNAPSHOT" : "TGT ACQUISITION", 8, 8);
613
-
614
- // Track count
615
- const trackCount = source ? source.length : 0;
616
- ctx.fillStyle = trackCount > 0 ? colors.text : colors.textDim;
617
- ctx.fillText(`TRACKS: ${trackCount}`, 8, 22);
618
-
619
- // Top right - Range setting
620
- ctx.textAlign = "right";
621
- ctx.fillStyle = colors.textDim;
622
- ctx.fillText(`MAX RNG: ${maxRangeM}m`, w - 8, 8);
623
-
624
- // Time
625
- const timeStr = new Date().toLocaleTimeString('en-US', { hour12: false });
626
- ctx.fillText(timeStr, w - 8, 22);
627
-
628
- // Bottom center - FOV indicator
629
- ctx.textAlign = "center";
630
- ctx.fillStyle = colors.textDim;
631
- ctx.fillText("FOV: 60°", cx, h - 12);
632
- };
633
-
634
- // Aliases for compatibility
635
- APP.ui.radar.renderFrameRadar = function () {
636
- const { state } = APP.core;
637
-
638
- // Only show tracks after first frame is processed
639
- if (!state.firstFrameReady) {
640
- APP.ui.radar.render("frameRadar", [], { static: true });
641
- return;
642
- }
643
-
644
- // In demo mode, use demo data for first frame (time=0) to match video radar initial state
645
- let trackSource = state.detections;
646
- if (APP.core.demo.active && APP.core.demo.data) {
647
- const demoTracks = APP.core.demo.getFrameData(0); // Get frame 0 data
648
- if (demoTracks && demoTracks.length > 0) {
649
- trackSource = demoTracks;
650
- }
651
- }
652
-
653
- // First frame radar is static - no sweep animation
654
- APP.ui.radar.render("frameRadar", trackSource, { static: true });
655
- };
656
-
657
- APP.ui.radar.renderLiveRadar = function () {
658
- const { state } = APP.core;
659
-
660
- // Only show tracks after Engage has been clicked (tracker running)
661
- if (!state.tracker.running) {
662
- APP.ui.radar.render("radarCanvas", [], { static: false });
663
- return;
664
- }
665
-
666
- // Live radar has sweep animation
667
- APP.ui.radar.render("radarCanvas", state.tracker.tracks, { static: false });
668
- };
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
frontend/js/ui/trade.js DELETED
@@ -1,291 +0,0 @@
1
- // Trade Space Visualization Module
2
- APP.ui.trade = {};
3
-
4
- APP.ui.trade.populateTradeTarget = function () {
5
- const { state } = APP.core;
6
- const { $ } = APP.core.utils;
7
- const tradeTarget = $("#tradeTarget");
8
-
9
- if (!tradeTarget) return;
10
-
11
- const sel = tradeTarget.value;
12
- tradeTarget.innerHTML = "";
13
-
14
- const ids = state.detections.map(d => d.id);
15
- if (!ids.length) {
16
- const opt = document.createElement("option");
17
- opt.value = "";
18
- opt.textContent = "No targets";
19
- tradeTarget.appendChild(opt);
20
- return;
21
- }
22
-
23
- ids.forEach(id => {
24
- const opt = document.createElement("option");
25
- opt.value = id;
26
- opt.textContent = id;
27
- tradeTarget.appendChild(opt);
28
- });
29
-
30
- if (sel && ids.includes(sel)) tradeTarget.value = sel;
31
- else tradeTarget.value = state.selectedId || ids[0];
32
- };
33
-
34
- APP.ui.trade.renderTrade = function () {
35
- const { state } = APP.core;
36
- const { $, clamp } = APP.core.utils;
37
- const { maxPowerAtTarget, requiredDwell, pkillFromMargin } = APP.core.hel;
38
-
39
- const tradeCanvas = $("#tradeCanvas");
40
- const tradeTarget = $("#tradeTarget");
41
- const rMin = $("#rMin");
42
- const rMax = $("#rMax");
43
- const showPk = $("#showPk");
44
-
45
- if (!tradeCanvas) return;
46
-
47
- const ctx = tradeCanvas.getContext("2d");
48
- const W = tradeCanvas.width, H = tradeCanvas.height;
49
- ctx.clearRect(0, 0, W, H);
50
-
51
- // Background
52
- ctx.fillStyle = "rgba(0,0,0,.32)";
53
- ctx.fillRect(0, 0, W, H);
54
-
55
- if (!state.detections.length) {
56
- ctx.fillStyle = "rgba(255,255,255,.75)";
57
- ctx.font = "14px " + getComputedStyle(document.body).fontFamily;
58
- ctx.fillText("Run Reason to populate trade-space curves.", 18, 34);
59
- return;
60
- }
61
-
62
- const id = (tradeTarget ? tradeTarget.value : null) || state.selectedId || state.detections[0].id;
63
- const d = state.detections.find(x => x.id === id) || state.detections[0];
64
-
65
- const r0 = Math.max(50, rMin ? +rMin.value : 200);
66
- const r1 = Math.max(r0 + 50, rMax ? +rMax.value : 6000);
67
-
68
- // Margins
69
- const padL = 64, padR = 18, padT = 18, padB = 52;
70
- const plotW = W - padL - padR;
71
- const plotH = H - padT - padB;
72
-
73
- // Compute sweep
74
- const N = 120;
75
- const xs = [];
76
- let maxY = 0;
77
- let minY = Infinity;
78
-
79
- for (let i = 0; i <= N; i++) {
80
- const r = r0 + (r1 - r0) * (i / N);
81
- const mp = maxPowerAtTarget(r).Ptar;
82
- const reqP = d.reqP_kW || 40;
83
- const reqD = requiredDwell(r, reqP, mp, d.baseDwell_s || 5);
84
-
85
- xs.push({ r, mp, reqP, reqD });
86
- maxY = Math.max(maxY, mp, reqP);
87
- minY = Math.min(minY, mp, reqP);
88
- }
89
-
90
- maxY = Math.max(maxY, 20);
91
- minY = Math.max(0, minY - 10);
92
-
93
- // Axes
94
- ctx.strokeStyle = "rgba(255,255,255,.14)";
95
- ctx.lineWidth = 1;
96
- ctx.beginPath();
97
- ctx.moveTo(padL, padT);
98
- ctx.lineTo(padL, padT + plotH);
99
- ctx.lineTo(padL + plotW, padT + plotH);
100
- ctx.stroke();
101
-
102
- // Grid lines
103
- ctx.strokeStyle = "rgba(255,255,255,.07)";
104
- for (let i = 1; i <= 5; i++) {
105
- const y = padT + plotH * (i / 5);
106
- ctx.beginPath(); ctx.moveTo(padL, y); ctx.lineTo(padL + plotW, y); ctx.stroke();
107
- }
108
- for (let i = 1; i <= 6; i++) {
109
- const x = padL + plotW * (i / 6);
110
- ctx.beginPath(); ctx.moveTo(x, padT); ctx.lineTo(x, padT + plotH); ctx.stroke();
111
- }
112
-
113
- // Helpers
114
- const xMap = (r) => padL + (r - r0) / (r1 - r0) * plotW;
115
- const yMap = (p) => padT + (1 - (p - minY) / (maxY - minY)) * plotH;
116
-
117
- // Curve: max power at target
118
- ctx.strokeStyle = "rgba(34,211,238,.95)";
119
- ctx.lineWidth = 2.5;
120
- ctx.beginPath();
121
- xs.forEach((pt, i) => {
122
- const x = xMap(pt.r);
123
- const y = yMap(pt.mp);
124
- if (i === 0) ctx.moveTo(x, y); else ctx.lineTo(x, y);
125
- });
126
- ctx.stroke();
127
-
128
- // Curve: required power
129
- ctx.strokeStyle = "rgba(239,68,68,.90)";
130
- ctx.lineWidth = 2.5;
131
- ctx.beginPath();
132
- xs.forEach((pt, i) => {
133
- const x = xMap(pt.r);
134
- const y = yMap(pt.reqP);
135
- if (i === 0) ctx.moveTo(x, y); else ctx.lineTo(x, y);
136
- });
137
- ctx.stroke();
138
-
139
- // Annotate margin zones
140
- ctx.fillStyle = "rgba(34,197,94,.08)";
141
- ctx.beginPath();
142
- xs.forEach((pt, i) => {
143
- const x = xMap(pt.r);
144
- const y = yMap(Math.max(pt.reqP, pt.mp));
145
- if (i === 0) ctx.moveTo(x, y); else ctx.lineTo(x, y);
146
- });
147
- for (let i = xs.length - 1; i >= 0; i--) {
148
- const x = xMap(xs[i].r);
149
- const y = yMap(Math.min(xs[i].reqP, xs[i].mp));
150
- ctx.lineTo(x, y);
151
- }
152
- ctx.closePath();
153
- ctx.fill();
154
-
155
- // Second axis for dwell (scaled)
156
- const dwellMax = Math.max(...xs.map(p => p.reqD));
157
- const yMapD = (dwell) => padT + (1 - (dwell / Math.max(1e-6, dwellMax))) * plotH;
158
-
159
- ctx.strokeStyle = "rgba(124,58,237,.85)";
160
- ctx.lineWidth = 2.2;
161
- ctx.beginPath();
162
- xs.forEach((pt, i) => {
163
- const x = xMap(pt.r);
164
- const y = yMapD(pt.reqD);
165
- if (i === 0) ctx.moveTo(x, y); else ctx.lineTo(x, y);
166
- });
167
- ctx.stroke();
168
-
169
- // Optional pkill band
170
- if (showPk && showPk.value === "on") {
171
- ctx.fillStyle = "rgba(245,158,11,.08)";
172
- ctx.beginPath();
173
- xs.forEach((pt, i) => {
174
- const x = xMap(pt.r);
175
- const mp = pt.mp;
176
- const margin = mp - pt.reqP;
177
- const pk = pkillFromMargin(margin, d.baseDwell_s || 5, pt.reqD);
178
- const y = padT + plotH * (1 - pk);
179
- if (i === 0) ctx.moveTo(x, y); else ctx.lineTo(x, y);
180
- });
181
- ctx.lineTo(padL + plotW, padT + plotH);
182
- ctx.lineTo(padL, padT + plotH);
183
- ctx.closePath();
184
- ctx.fill();
185
- }
186
-
187
- // Labels
188
- ctx.fillStyle = "rgba(255,255,255,.84)";
189
- ctx.font = "bold 14px " + getComputedStyle(document.body).fontFamily;
190
- ctx.fillText(`Target: ${id} (${d.label})`, padL, 16);
191
-
192
- ctx.fillStyle = "rgba(34,211,238,.95)";
193
- ctx.fillText("Max P@Target (kW)", padL + 10, padT + plotH + 30);
194
-
195
- ctx.fillStyle = "rgba(239,68,68,.92)";
196
- ctx.fillText("Required P@Target (kW)", padL + 190, padT + plotH + 30);
197
-
198
- ctx.fillStyle = "rgba(124,58,237,.90)";
199
- ctx.fillText(`Required Dwell (s, scaled)`, padL + 420, padT + plotH + 30);
200
-
201
- ctx.fillStyle = "rgba(255,255,255,.55)";
202
- ctx.font = "11px " + getComputedStyle(document.body).fontFamily;
203
- ctx.fillText(`Range (m)`, padL + plotW - 64, padT + plotH + 46);
204
-
205
- // Axis ticks
206
- ctx.fillStyle = "rgba(255,255,255,.55)";
207
- ctx.font = "11px " + getComputedStyle(document.body).fontFamily;
208
-
209
- for (let i = 0; i <= 5; i++) {
210
- const p = minY + (maxY - minY) * (1 - i / 5);
211
- const y = padT + plotH * (i / 5);
212
- ctx.fillText(p.toFixed(0), 12, y + 4);
213
- }
214
-
215
- for (let i = 0; i <= 6; i++) {
216
- const r = r0 + (r1 - r0) * (i / 6);
217
- const x = padL + plotW * (i / 6);
218
- ctx.fillText(r.toFixed(0), x - 14, padT + plotH + 18);
219
- }
220
-
221
- // Marker at baseline range
222
- const rangeBase = $("#rangeBase");
223
- const baseR = d.baseRange_m || (rangeBase ? +rangeBase.value : 1500);
224
- const xb = xMap(clamp(baseR, r0, r1));
225
- ctx.strokeStyle = "rgba(255,255,255,.28)";
226
- ctx.setLineDash([6, 6]);
227
- ctx.beginPath();
228
- ctx.moveTo(xb, padT);
229
- ctx.lineTo(xb, padT + plotH);
230
- ctx.stroke();
231
- ctx.setLineDash([]);
232
- };
233
-
234
- APP.ui.trade.updateHeadlines = function (sys, bestTarget) {
235
- const { $ } = APP.core.utils;
236
-
237
- const mMaxP = $("#m-maxp");
238
- const mReqP = $("#m-reqp");
239
- const mMargin = $("#m-margin");
240
- const mMaxPSub = $("#m-maxp-sub");
241
- const mPlanSub = $("#m-plan-sub");
242
-
243
- if (!mMaxP) return;
244
-
245
- if (mMaxP) mMaxP.textContent = sys.maxP ? `${sys.maxP} kW` : "—";
246
- if (mReqP) mReqP.textContent = sys.reqP ? `${sys.reqP} kW` : "—";
247
-
248
- if (mMargin) {
249
- const margin = sys.margin || 0;
250
- mMargin.textContent = `${margin > 0 ? "+" : ""}${margin} kW`;
251
- mMargin.style.color = margin >= 0 ? "rgba(34,197,94,.95)" : "rgba(239,68,68,.95)";
252
- }
253
-
254
- if (mMaxPSub) mMaxPSub.textContent = "Calculated by external HEL engine";
255
-
256
- if (bestTarget && bestTarget.pkill > 0) {
257
- const mPlan = $("#m-plan");
258
- if (mPlan) mPlan.textContent = `${bestTarget.id} → Engage`;
259
- if (mPlanSub) mPlanSub.textContent = "Highest P(kill) target";
260
- }
261
- };
262
-
263
- APP.ui.trade.snapshotTrade = function () {
264
- const { state } = APP.core;
265
- const { $, log } = APP.core.utils;
266
- const { log: uiLog } = APP.ui.logging;
267
-
268
- if (!state.detections.length) return;
269
-
270
- const tradeTarget = $("#tradeTarget");
271
- const helPower = $("#helPower");
272
- const atmVis = $("#atmVis");
273
- const atmCn2 = $("#atmCn2");
274
- const aoQ = $("#aoQ");
275
-
276
- const id = tradeTarget ? tradeTarget.value : state.selectedId;
277
- const d = state.detections.find(x => x.id === id) || state.detections[0];
278
-
279
- const snap = {
280
- target: id,
281
- helPower_kW: helPower ? +helPower.value : 0,
282
- vis_km: atmVis ? +atmVis.value : 0,
283
- cn2: atmCn2 ? +atmCn2.value : 0,
284
- ao: aoQ ? +aoQ.value : 0,
285
- baseRange_m: d.baseRange_m,
286
- reqP_kW: d.reqP_kW,
287
- baseDwell_s: d.baseDwell_s
288
- };
289
-
290
- uiLog("SNAPSHOT: " + JSON.stringify(snap), "t");
291
- };
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
inference.py CHANGED
@@ -9,7 +9,7 @@ import collections
9
  import logging
10
  import time
11
  from threading import Event, RLock, Thread
12
- from queue import Queue, PriorityQueue, Full, Empty
13
  from typing import Any, Dict, List, Optional, Sequence, Tuple
14
 
15
  import cv2
@@ -20,8 +20,7 @@ from models.detectors.base import ObjectDetector
20
  from models.model_loader import load_detector, load_detector_on_device
21
  from models.segmenters.model_loader import load_segmenter, load_segmenter_on_device
22
  from models.depth_estimators.model_loader import load_depth_estimator, load_depth_estimator_on_device
23
- from models.depth_estimators.base import DepthEstimator
24
- from utils.video import extract_frames, write_video, VideoReader, VideoWriter, StreamingVideoWriter
25
  from utils.relevance import evaluate_relevance
26
  from utils.enrichment import run_enrichment
27
  from utils.schemas import AssessmentStatus
@@ -538,83 +537,6 @@ def infer_frame(
538
  ), detections
539
 
540
 
541
- def infer_batch(
542
- frames: List[np.ndarray],
543
- frame_indices: List[int],
544
- queries: Sequence[str],
545
- detector_instance: ObjectDetector,
546
- depth_estimator_instance: Optional[DepthEstimator] = None,
547
- depth_scale: float = 1.0,
548
- depth_frame_stride: int = 3,
549
- ) -> List[Tuple[int, np.ndarray, List[Dict[str, Any]]]]:
550
- # Batch detection
551
- text_queries = list(queries) or ["object"]
552
- try:
553
- if detector_instance.supports_batch:
554
- with detector_instance.lock:
555
- det_results = detector_instance.predict_batch(frames, text_queries)
556
- else:
557
- # Fallback
558
- with detector_instance.lock:
559
- det_results = [detector_instance.predict(f, text_queries) for f in frames]
560
- except Exception:
561
- logging.exception("Batch detection failed")
562
- # Return empty for all
563
- return [(idx, f, []) for idx, f in zip(frame_indices, frames)]
564
-
565
- # Batch depth
566
- depth_map_results = {} # frame_idx -> depth_map
567
- depth_batch_inputs = []
568
- depth_batch_indices = []
569
-
570
- for idx, f in zip(frame_indices, frames):
571
- if idx % depth_frame_stride == 0:
572
- depth_batch_inputs.append(f)
573
- depth_batch_indices.append(idx)
574
-
575
- if depth_estimator_instance and depth_batch_inputs:
576
- try:
577
- with depth_estimator_instance.lock:
578
- if depth_estimator_instance.supports_batch:
579
- d_results = depth_estimator_instance.predict_batch(depth_batch_inputs)
580
- else:
581
- d_results = [depth_estimator_instance.predict(f) for f in depth_batch_inputs]
582
-
583
- for idx, res in zip(depth_batch_indices, d_results):
584
- depth_map_results[idx] = res
585
- except Exception:
586
- logging.exception("Batch depth estimation failed")
587
-
588
- # Post-process and merge
589
- outputs = []
590
- for i, (idx, frame, det_result) in enumerate(zip(frame_indices, frames, det_results)):
591
- detections = _build_detection_records(
592
- det_result.boxes, det_result.scores, det_result.labels, text_queries, det_result.label_names
593
- )
594
-
595
- if idx in depth_map_results:
596
- try:
597
- # existing _attach_depth_metrics expects detections and estimator name/instance
598
- # but we already computed depth. We need a helper or just modify logical flow.
599
- # Actually _attach_depth_metrics calls predict(). We want to skip predict.
600
- # Let's manually attach.
601
- d_res = depth_map_results[idx]
602
- # We need to manually invoke the attachment logic using the precomputed result.
603
- # Refactoring _attach_depth_metrics to accept result would be cleaner, but for now:
604
- # Copy-paste logic or use a trick.
605
-
606
- # Let's extract logic from _attach_depth_metrics essentially.
607
- # Wait, _attach_depth_metrics does the box checking.
608
- _attach_depth_from_result(detections, d_res, depth_scale)
609
- except Exception:
610
- logging.warning("Failed to attach depth for frame %d", idx)
611
-
612
- display_labels = [_build_display_label(d) for d in detections]
613
- processed = draw_boxes(frame, det_result.boxes, label_names=display_labels)
614
- outputs.append((idx, processed, detections))
615
-
616
- return outputs
617
-
618
  def _build_display_label(det):
619
  """Build display label with GPT distance if available."""
620
  label = det["label"]
@@ -713,46 +635,6 @@ def extract_first_frame(video_path: str) -> Tuple[np.ndarray, float, int, int]:
713
  return frame, fps, width, height
714
 
715
 
716
- def compute_depth_per_detection(
717
- depth_map: np.ndarray,
718
- detections: List[Dict],
719
- depth_scale: float = 1.0
720
- ) -> List[Dict]:
721
- """Sample depth for each detection bbox, compute relative distances."""
722
- depths = []
723
- for det in detections:
724
- x1, y1, x2, y2 = det["bbox"]
725
- # Sample central 50% region for robustness (avoids edge artifacts)
726
- cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
727
- hw, hh = max(1, (x2 - x1) // 4), max(1, (y2 - y1) // 4)
728
- y_start, y_end = max(0, cy - hh), min(depth_map.shape[0], cy + hh)
729
- x_start, x_end = max(0, cx - hw), min(depth_map.shape[1], cx + hw)
730
- region = depth_map[y_start:y_end, x_start:x_end]
731
-
732
- valid = region[np.isfinite(region)]
733
- if len(valid) >= 10:
734
- det["depth_est_m"] = float(np.median(valid)) * depth_scale
735
- det["depth_valid"] = True
736
- depths.append(det["depth_est_m"])
737
- else:
738
- det["depth_est_m"] = None
739
- det["depth_valid"] = False
740
- det["depth_rel"] = None
741
-
742
- # Per-frame relative normalization
743
- if depths:
744
- min_d, max_d = min(depths), max(depths)
745
- span = max_d - min_d + 1e-6
746
- for det in detections:
747
- if det.get("depth_valid"):
748
- det["depth_rel"] = (det["depth_est_m"] - min_d) / span
749
- elif len(detections) == 1 and detections[0].get("depth_valid"):
750
- # Single detection: assign neutral relative distance
751
- detections[0]["depth_rel"] = 0.5
752
-
753
- return detections
754
-
755
-
756
  def process_first_frame(
757
  video_path: str,
758
  queries: List[str],
@@ -1319,239 +1201,6 @@ def run_inference(
1319
 
1320
  logging.info("Inference complete. Output: %s", output_video_path)
1321
  return output_video_path, sorted_detections
1322
-
1323
-
1324
-
1325
- def run_segmentation(
1326
- input_video_path: str,
1327
- output_video_path: str,
1328
- queries: List[str],
1329
- max_frames: Optional[int] = None,
1330
- segmenter_name: Optional[str] = None,
1331
- job_id: Optional[str] = None,
1332
- stream_queue: Optional[Queue] = None,
1333
- ) -> str:
1334
- # 1. Setup Reader
1335
- try:
1336
- reader = AsyncVideoReader(input_video_path)
1337
- except ValueError:
1338
- logging.exception("Failed to open video at %s", input_video_path)
1339
- raise
1340
-
1341
- fps = reader.fps
1342
- width = reader.width
1343
- height = reader.height
1344
- total_frames = reader.total_frames
1345
-
1346
- if max_frames is not None:
1347
- total_frames = min(total_frames, max_frames)
1348
-
1349
- active_segmenter = segmenter_name or "gsam2_large"
1350
- logging.info("Using segmenter: %s with queries: %s", active_segmenter, queries)
1351
-
1352
- # 2. Load Segmenters (Parallel)
1353
-
1354
- # DEBUG: Log current state
1355
- logging.info(f"[DEBUG] Segmentation PID: {os.getpid()}")
1356
- logging.info(f"[DEBUG] CUDA_VISIBLE_DEVICES before clear: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
1357
-
1358
-
1359
- # if "CUDA_VISIBLE_DEVICES" in os.environ:
1360
- # logging.info("[DEBUG] Deleting CUDA_VISIBLE_DEVICES from env (segmentation)")
1361
- # del os.environ["CUDA_VISIBLE_DEVICES"]
1362
-
1363
- num_gpus = torch.cuda.device_count()
1364
- logging.info(f"[DEBUG] num_gpus: {num_gpus}")
1365
- segmenters = []
1366
-
1367
- if num_gpus > 0:
1368
- logging.info("Detected %d GPUs. Loading segmenters...", num_gpus)
1369
- def load_seg(gpu_id: int):
1370
- device_str = f"cuda:{gpu_id}"
1371
- seg = load_segmenter_on_device(active_segmenter, device_str)
1372
- seg.lock = RLock()
1373
- return (gpu_id, seg)
1374
-
1375
- with ThreadPoolExecutor(max_workers=num_gpus) as loader:
1376
- futures = [loader.submit(load_seg, i) for i in range(num_gpus)]
1377
- results = [f.result() for f in futures]
1378
- results.sort(key=lambda x: x[0])
1379
- segmenters = [r[1] for r in results]
1380
- else:
1381
- seg = load_segmenter(active_segmenter)
1382
- seg.lock = RLock()
1383
- segmenters.append(seg)
1384
-
1385
- # 3. Processing
1386
- queue_in = Queue(maxsize=16)
1387
- queue_out = Queue(maxsize=max(32, len(segmenters)*4))
1388
-
1389
- writer_finished = False # Robustness
1390
-
1391
- def worker_seg(gpu_idx: int):
1392
- seg = segmenters[gpu_idx]
1393
- batch_size = seg.max_batch_size if seg.supports_batch else 1
1394
- batch_accum = []
1395
-
1396
- def flush_batch():
1397
- if not batch_accum: return
1398
- indices = [i for i, _ in batch_accum]
1399
- frames = [f for _, f in batch_accum]
1400
-
1401
- try:
1402
- # 1. Inference
1403
- if seg.supports_batch:
1404
- with seg.lock:
1405
- results = seg.predict_batch(frames, queries)
1406
- else:
1407
- with seg.lock:
1408
- results = [seg.predict(f, queries) for f in frames]
1409
-
1410
- # 2. Post-process loop
1411
- for idx, frm, res in zip(indices, frames, results):
1412
- labels = queries or []
1413
- if len(labels) == 1:
1414
- masks = res.masks if res.masks is not None else []
1415
- labels = [labels[0] for _ in range(len(masks))]
1416
- processed = draw_masks(frm, res.masks, labels=labels)
1417
-
1418
- while True:
1419
- try:
1420
- queue_out.put((idx, processed), timeout=1.0)
1421
- break
1422
- except Full:
1423
- if writer_finished:
1424
- raise RuntimeError("Writer thread died")
1425
- if job_id: _check_cancellation(job_id)
1426
-
1427
- except Exception as e:
1428
- logging.error("Batch seg failed: %s", e)
1429
- # Fallback: Emit failed frames to prevent writer stall
1430
- for idx, frm in batch_accum:
1431
- while True:
1432
- try:
1433
- # Return original frame without mask
1434
- queue_out.put((idx, frm), timeout=1.0)
1435
- break
1436
- except Full:
1437
- if writer_finished: break
1438
- if job_id: _check_cancellation(job_id)
1439
- batch_accum.clear()
1440
-
1441
- while True:
1442
- item = queue_in.get()
1443
- try:
1444
- if item is None:
1445
- flush_batch()
1446
- break
1447
-
1448
- idx, frame = item
1449
- batch_accum.append(item)
1450
- if idx % 30 == 0:
1451
- logging.debug("Seg frame %d (GPU %d)", idx, gpu_idx)
1452
-
1453
- if len(batch_accum) >= batch_size:
1454
- flush_batch()
1455
- finally:
1456
- queue_in.task_done()
1457
-
1458
- workers = []
1459
- for i in range(len(segmenters)):
1460
- t = Thread(target=worker_seg, args=(i,), daemon=True)
1461
- t.start()
1462
- workers.append(t)
1463
-
1464
- # Writer
1465
- # writer_finished moved up for closure scope match
1466
-
1467
-
1468
- # Writer
1469
- # Writer
1470
- # writer_finished defined earlier
1471
-
1472
-
1473
- def writer_loop():
1474
- nonlocal writer_finished
1475
- next_idx = 0
1476
- buffer = {}
1477
-
1478
- try:
1479
- with StreamingVideoWriter(output_video_path, fps, width, height) as writer:
1480
- while next_idx < total_frames:
1481
- try:
1482
- while next_idx not in buffer:
1483
- if len(buffer) > 128:
1484
- logging.warning("Writer reorder buffer too large (%d), applying backpressure (waiting for frame %d)...", len(buffer), next_idx)
1485
- time.sleep(0.05)
1486
-
1487
- idx, frm = queue_out.get(timeout=1.0)
1488
- buffer[idx] = frm
1489
-
1490
- frm = buffer.pop(next_idx)
1491
- writer.write(frm)
1492
-
1493
- if stream_queue:
1494
- try:
1495
- stream_queue.put_nowait(frm)
1496
- except:
1497
- pass
1498
-
1499
- next_idx += 1
1500
- except Exception:
1501
- if job_id and _check_cancellation(job_id): pass
1502
- if not any(w.is_alive() for w in workers) and queue_out.empty():
1503
- break
1504
- continue
1505
- finally:
1506
- writer_finished = True
1507
-
1508
- w_thread = Thread(target=writer_loop, daemon=True)
1509
- w_thread.start()
1510
-
1511
- # Feeder
1512
- try:
1513
- reader_iter = iter(reader)
1514
- frames_fed = 0
1515
- while True:
1516
- _check_cancellation(job_id)
1517
- if max_frames is not None and frames_fed >= max_frames:
1518
- break
1519
-
1520
- try:
1521
- frame = next(reader_iter)
1522
- except StopIteration:
1523
- break
1524
-
1525
- queue_in.put((frames_fed, frame))
1526
- frames_fed += 1
1527
-
1528
- # Update total_frames to actual count
1529
- if frames_fed != total_frames:
1530
- logging.info("Updating total_frames from %d to %d (actual fed)", total_frames, frames_fed)
1531
- total_frames = frames_fed
1532
-
1533
- for _ in workers:
1534
- try: queue_in.put(None, timeout=5.0)
1535
- except Full: pass
1536
-
1537
- queue_in.join()
1538
-
1539
- except Exception:
1540
- logging.exception("Segmentation loop failed")
1541
- for _ in workers:
1542
- try: queue_in.put_nowait(None)
1543
- except Full: pass
1544
- raise
1545
- finally:
1546
- reader.close()
1547
-
1548
- w_thread.join()
1549
-
1550
- logging.info("Segmented video written to: %s", output_video_path)
1551
- return output_video_path
1552
-
1553
-
1554
-
1555
  def _gsam2_render_frame(
1556
  frame_dir: str,
1557
  frame_names: List[str],
@@ -1566,8 +1215,6 @@ def _gsam2_render_frame(
1566
  When *masks_only* is True, skip box rendering so the writer thread can
1567
  draw boxes later with enriched (GPT) labels.
1568
  """
1569
- from models.segmenters.grounded_sam2 import ObjectInfo
1570
-
1571
  frame_path = os.path.join(frame_dir, frame_names[frame_idx])
1572
  frame = cv2.imread(frame_path)
1573
  if frame is None:
@@ -1577,7 +1224,6 @@ def _gsam2_render_frame(
1577
  return frame
1578
 
1579
  masks_list: List[np.ndarray] = []
1580
- mask_labels: List[str] = []
1581
  boxes_list: List[List[int]] = []
1582
  box_labels: List[str] = []
1583
 
@@ -1596,7 +1242,6 @@ def _gsam2_render_frame(
1596
  interpolation=cv2.INTER_NEAREST,
1597
  ).astype(bool)
1598
  masks_list.append(mask_np)
1599
- mask_labels.append(label)
1600
 
1601
  has_box = not (
1602
  obj_info.x1 == 0 and obj_info.y1 == 0
@@ -2489,256 +2134,6 @@ def run_grounded_sam2_tracking(
2489
  logging.warning("Failed to clean up temp frame dir: %s", frame_dir)
2490
 
2491
 
2492
- def run_depth_inference(
2493
- input_video_path: str,
2494
- output_video_path: str,
2495
- detections: Optional[List[List[Dict[str, Any]]]] = None,
2496
- max_frames: Optional[int] = None,
2497
- depth_estimator_name: str = "depth",
2498
- first_frame_depth_path: Optional[str] = None,
2499
- job_id: Optional[str] = None,
2500
- stream_queue: Optional[Queue] = None,
2501
- ) -> str:
2502
- # 1. Setup Reader
2503
- try:
2504
- reader = AsyncVideoReader(input_video_path)
2505
- except ValueError:
2506
- logging.exception("Failed to open video at %s", input_video_path)
2507
- raise
2508
-
2509
- fps = reader.fps
2510
- width = reader.width
2511
- height = reader.height
2512
- total_frames = reader.total_frames
2513
-
2514
- if max_frames is not None:
2515
- total_frames = min(total_frames, max_frames)
2516
-
2517
- logging.info("Using depth estimator: %s", depth_estimator_name)
2518
-
2519
- # 2. Load Estimators (Parallel)
2520
- num_gpus = torch.cuda.device_count()
2521
- estimators = []
2522
-
2523
- # if "CUDA_VISIBLE_DEVICES" in os.environ:
2524
- # del os.environ["CUDA_VISIBLE_DEVICES"]
2525
-
2526
- if num_gpus > 0:
2527
- logging.info("Detected %d GPUs. Loading depth estimators...", num_gpus)
2528
- def load_est(gpu_id: int):
2529
- device_str = f"cuda:{gpu_id}"
2530
- est = load_depth_estimator_on_device(depth_estimator_name, device_str)
2531
- est.lock = RLock()
2532
- return (gpu_id, est)
2533
-
2534
- with ThreadPoolExecutor(max_workers=num_gpus) as loader:
2535
- futures = [loader.submit(load_est, i) for i in range(num_gpus)]
2536
- results = [f.result() for f in futures]
2537
- results.sort(key=lambda x: x[0])
2538
- estimators = [r[1] for r in results]
2539
- else:
2540
- est = load_depth_estimator(depth_estimator_name)
2541
- est.lock = RLock()
2542
- estimators.append(est)
2543
-
2544
- # 3. Incremental Depth Stats (replaces expensive pre-scan)
2545
- depth_stats = IncrementalDepthStats(warmup_frames=30)
2546
-
2547
- # 4. Phase 2: Streaming Inference
2548
- logging.info("Starting Phase 2: Streaming...")
2549
-
2550
- queue_in = Queue(maxsize=16)
2551
- queue_out_max = max(32, (len(estimators) if estimators else 1) * 4)
2552
- queue_out = Queue(maxsize=queue_out_max)
2553
-
2554
- writer_finished = False
2555
-
2556
- def worker_depth(gpu_idx: int):
2557
- est = estimators[gpu_idx]
2558
- batch_size = est.max_batch_size if est.supports_batch else 1
2559
- batch_accum = []
2560
-
2561
- def flush_batch():
2562
- if not batch_accum: return
2563
- indices = [i for i, _ in batch_accum]
2564
- frames = [f for _, f in batch_accum]
2565
-
2566
- try:
2567
- # 1. Inference
2568
- if est.supports_batch:
2569
- with est.lock:
2570
- results = est.predict_batch(frames)
2571
- else:
2572
- with est.lock:
2573
- results = [est.predict(f) for f in frames]
2574
-
2575
- # Update incremental depth stats
2576
- for res in results:
2577
- if res and res.depth_map is not None:
2578
- depth_stats.update(res.depth_map)
2579
-
2580
- # 2. Post-process loop
2581
- for idx, frm, res in zip(indices, frames, results):
2582
- depth_map = res.depth_map
2583
- ds_min, ds_max = depth_stats.range
2584
- colored = colorize_depth_map(depth_map, ds_min, ds_max)
2585
-
2586
- # Overlay Detections
2587
- if detections and idx < len(detections):
2588
- frame_dets = detections[idx]
2589
- if frame_dets:
2590
- boxes = []
2591
- labels = []
2592
- for d in frame_dets:
2593
- boxes.append(d.get("bbox"))
2594
- lbl = d.get("label", "obj")
2595
- if d.get("gpt_distance_m"):
2596
- lbl = f"{lbl} {int(d['gpt_distance_m'])}m"
2597
- labels.append(lbl)
2598
- colored = draw_boxes(colored, boxes=boxes, label_names=labels)
2599
-
2600
- while True:
2601
- try:
2602
- queue_out.put((idx, colored), timeout=1.0)
2603
- break
2604
- except Full:
2605
- if writer_finished:
2606
- raise RuntimeError("Writer died")
2607
- if job_id: _check_cancellation(job_id)
2608
-
2609
- except Exception as e:
2610
- logging.error("Batch depth failed: %s", e)
2611
- # Fallback: Emit original frames (no depth map)
2612
- for idx, frm in batch_accum:
2613
- while True:
2614
- try:
2615
- queue_out.put((idx, frm), timeout=1.0)
2616
- break
2617
- except Full:
2618
- if writer_finished: break
2619
- if job_id: _check_cancellation(job_id)
2620
- batch_accum.clear()
2621
-
2622
- while True:
2623
- item = queue_in.get()
2624
- try:
2625
- if item is None:
2626
- flush_batch()
2627
- break
2628
-
2629
- idx, frame = item
2630
- batch_accum.append(item)
2631
-
2632
- if idx % 30 == 0:
2633
- logging.info("Depth frame %d (GPU %d)", idx, gpu_idx)
2634
-
2635
- if len(batch_accum) >= batch_size:
2636
- flush_batch()
2637
- finally:
2638
- queue_in.task_done()
2639
-
2640
- # Workers
2641
- workers = []
2642
- for i in range(len(estimators)):
2643
- t = Thread(target=worker_depth, args=(i,), daemon=True)
2644
- t.start()
2645
- workers.append(t)
2646
-
2647
- # Writer
2648
- # Writer
2649
- # writer_finished defined earlier
2650
-
2651
- first_frame_saved = False
2652
-
2653
- def writer_loop():
2654
- nonlocal writer_finished, first_frame_saved
2655
- next_idx = 0
2656
- buffer = {}
2657
- processed_frames_subset = [] # Keep first frame for saving if needed
2658
-
2659
- try:
2660
- with StreamingVideoWriter(output_video_path, fps, width, height) as writer:
2661
- while next_idx < total_frames:
2662
- try:
2663
- while next_idx not in buffer:
2664
- if len(buffer) > 128:
2665
- logging.warning("Writer reorder buffer too large (%d), applying backpressure (waiting for frame %d)...", len(buffer), next_idx)
2666
- time.sleep(0.05)
2667
- idx, frm = queue_out.get(timeout=1.0)
2668
- buffer[idx] = frm
2669
-
2670
- frm = buffer.pop(next_idx)
2671
- writer.write(frm)
2672
-
2673
- if stream_queue:
2674
- try:
2675
- stream_queue.put_nowait(frm)
2676
- except:
2677
- pass
2678
-
2679
-
2680
- if first_frame_depth_path and not first_frame_saved and next_idx == 0:
2681
- cv2.imwrite(first_frame_depth_path, frm)
2682
- first_frame_saved = True
2683
-
2684
- next_idx += 1
2685
- if next_idx % 30 == 0:
2686
- logging.debug("Wrote depth frame %d/%d", next_idx, total_frames)
2687
- except Exception:
2688
- if job_id and _check_cancellation(job_id): pass
2689
- if not any(w.is_alive() for w in workers) and queue_out.empty():
2690
- break
2691
- continue
2692
- finally:
2693
- writer_finished = True
2694
-
2695
- w_thread = Thread(target=writer_loop, daemon=True)
2696
- w_thread.start()
2697
-
2698
- # Feeder
2699
- try:
2700
- reader_iter = iter(reader)
2701
- frames_fed = 0
2702
- while True:
2703
- _check_cancellation(job_id)
2704
- if max_frames is not None and frames_fed >= max_frames:
2705
- break
2706
-
2707
- try:
2708
- frame = next(reader_iter)
2709
- except StopIteration:
2710
- break
2711
-
2712
- queue_in.put((frames_fed, frame))
2713
- frames_fed += 1
2714
-
2715
- # Update total_frames to actual count
2716
- if frames_fed != total_frames:
2717
- logging.info("Updating total_frames from %d to %d (actual fed)", total_frames, frames_fed)
2718
- total_frames = frames_fed
2719
-
2720
- for _ in workers:
2721
- try: queue_in.put(None, timeout=5.0)
2722
- except Full: pass
2723
-
2724
- queue_in.join()
2725
-
2726
- except Exception:
2727
- logging.exception("Depth loop failed")
2728
- for _ in workers:
2729
- try: queue_in.put_nowait(None)
2730
- except Full: pass
2731
- raise
2732
-
2733
- finally:
2734
- reader.close()
2735
-
2736
- w_thread.join()
2737
-
2738
- return output_video_path
2739
-
2740
-
2741
-
2742
  def colorize_depth_map(
2743
  depth_map: np.ndarray,
2744
  global_min: float,
 
9
  import logging
10
  import time
11
  from threading import Event, RLock, Thread
12
+ from queue import Queue, Full, Empty
13
  from typing import Any, Dict, List, Optional, Sequence, Tuple
14
 
15
  import cv2
 
20
  from models.model_loader import load_detector, load_detector_on_device
21
  from models.segmenters.model_loader import load_segmenter, load_segmenter_on_device
22
  from models.depth_estimators.model_loader import load_depth_estimator, load_depth_estimator_on_device
23
+ from utils.video import StreamingVideoWriter
 
24
  from utils.relevance import evaluate_relevance
25
  from utils.enrichment import run_enrichment
26
  from utils.schemas import AssessmentStatus
 
537
  ), detections
538
 
539
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
540
  def _build_display_label(det):
541
  """Build display label with GPT distance if available."""
542
  label = det["label"]
 
635
  return frame, fps, width, height
636
 
637
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
638
  def process_first_frame(
639
  video_path: str,
640
  queries: List[str],
 
1201
 
1202
  logging.info("Inference complete. Output: %s", output_video_path)
1203
  return output_video_path, sorted_detections
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1204
  def _gsam2_render_frame(
1205
  frame_dir: str,
1206
  frame_names: List[str],
 
1215
  When *masks_only* is True, skip box rendering so the writer thread can
1216
  draw boxes later with enriched (GPT) labels.
1217
  """
 
 
1218
  frame_path = os.path.join(frame_dir, frame_names[frame_idx])
1219
  frame = cv2.imread(frame_path)
1220
  if frame is None:
 
1224
  return frame
1225
 
1226
  masks_list: List[np.ndarray] = []
 
1227
  boxes_list: List[List[int]] = []
1228
  box_labels: List[str] = []
1229
 
 
1242
  interpolation=cv2.INTER_NEAREST,
1243
  ).astype(bool)
1244
  masks_list.append(mask_np)
 
1245
 
1246
  has_box = not (
1247
  obj_info.x1 == 0 and obj_info.y1 == 0
 
2134
  logging.warning("Failed to clean up temp frame dir: %s", frame_dir)
2135
 
2136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2137
  def colorize_depth_map(
2138
  depth_map: np.ndarray,
2139
  global_min: float,
models/depth_estimators/base.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import NamedTuple, Sequence, List
2
 
3
  import numpy as np
4
 
 
1
+ from typing import NamedTuple, Sequence
2
 
3
  import numpy as np
4
 
models/detectors/drone_yolo.py CHANGED
@@ -1,5 +1,4 @@
1
  import logging
2
- import os
3
  from typing import List, Sequence
4
 
5
  import numpy as np
 
1
  import logging
 
2
  from typing import List, Sequence
3
 
4
  import numpy as np
models/segmenters/base.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import NamedTuple, Optional, Sequence, List
2
 
3
  import numpy as np
4
 
 
1
+ from typing import NamedTuple, Optional, Sequence
2
 
3
  import numpy as np
4
 
utils/gpt_reasoning.py CHANGED
@@ -1,11 +1,10 @@
1
- import os
2
  import re
3
  import json
4
  import base64
5
  import logging
6
  from typing import List, Dict, Any, Optional
7
- from utils.schemas import FrameThreatAnalysis, AssessmentStatus
8
- from utils.openai_client import chat_completion, extract_content, get_api_key, OpenAIAPIError
9
 
10
  logger = logging.getLogger(__name__)
11
 
 
 
1
  import re
2
  import json
3
  import base64
4
  import logging
5
  from typing import List, Dict, Any, Optional
6
+ from utils.schemas import AssessmentStatus
7
+ from utils.openai_client import chat_completion, extract_content, get_api_key
8
 
9
  logger = logging.getLogger(__name__)
10
 
utils/profiler.py CHANGED
@@ -9,7 +9,7 @@ import logging
9
  import statistics
10
  import time
11
  from dataclasses import dataclass, field
12
- from typing import Dict, List, Optional, Sequence
13
 
14
  import cv2
15
  import numpy as np
 
9
  import statistics
10
  import time
11
  from dataclasses import dataclass, field
12
+ from typing import List, Optional, Sequence
13
 
14
  import cv2
15
  import numpy as np
utils/schemas.py CHANGED
@@ -1,49 +1,5 @@
1
  from pydantic import BaseModel, Field
2
- from typing import List, Optional, Literal
3
-
4
-
5
- class DynamicFeature(BaseModel):
6
- """A single domain-specific key-value observation chosen by the analyst."""
7
- key: str = Field(..., description="Feature name (e.g., 'wake_description', 'deck_activity', 'camouflage').")
8
- value: str = Field(..., description="Observed value for this feature.")
9
-
10
-
11
- class ThreatAssessment(BaseModel):
12
- """
13
- Universal tactical threat assessment for a detected object.
14
- Works across NAVAL, GROUND, AERIAL, URBAN, and GENERIC domains.
15
- 10 predefined fields + up to 5 analyst-chosen dynamic features.
16
- """
17
- # 1. Classification
18
- object_type: str = Field(..., description="Broad category (e.g., 'Warship', 'APC', 'Sedan', 'Rotary-wing Aircraft', 'Person').")
19
- size: str = Field("Unknown", description="Relative size estimate (e.g., 'Large', 'Medium', 'Small', '~50m length').")
20
-
21
- # 2. Capabilities & Weapons
22
- visible_weapons: List[str] = Field(default_factory=list, description="Visible weaponry (e.g., 'Deck Gun', 'Turret-mounted MG', 'Rifle').")
23
- weapon_readiness: str = Field("Unknown", description="State of visible weapons (e.g., 'Stowed/PEACE', 'Trained/Aiming', 'Firing/HOSTILE').")
24
-
25
- # 3. Kinematics
26
- motion_status: str = Field("Unknown", description="Movement status (e.g., 'Stationary', 'Moving Slow', 'Moving Fast', 'Hovering').")
27
-
28
- # 4. Spatial / Geometry
29
- range_estimate: str = Field("Unknown", description="Estimated range as free text (e.g., '~500m', '~2NM', '~1km').")
30
- bearing: str = Field("Unknown", description="Relative bearing (e.g., '12 o\\'clock', 'NNE', '045°').")
31
-
32
- # 5. Threat Assessment
33
- threat_level: int = Field(..., ge=1, le=10, description="1-10 Threat Score (1=Benign, 10=Imminent Attack).")
34
- threat_classification: Literal["Friendly", "Neutral", "Suspect", "Hostile"] = Field(..., description="Tactical classification.")
35
- tactical_intent: str = Field(..., description="Inferred intent (e.g., 'Transit', 'Patrol', 'Attack Profile', 'Surveillance').")
36
-
37
- # 6. Dynamic Features (analyst-chosen, domain-specific)
38
- dynamic_features: List[DynamicFeature] = Field(
39
- default_factory=list, max_length=5,
40
- description="Up to 5 additional domain-specific observations chosen by the analyst."
41
- )
42
-
43
-
44
- class FrameThreatAnalysis(BaseModel):
45
- objects: dict[str, ThreatAssessment] = Field(..., description="Map of Object ID (e.g., 'T01') to its assessment.")
46
-
47
 
48
  # --- Mission-Driven Abstractions ---
49
 
@@ -157,4 +113,3 @@ class AssessmentStatus:
157
  ERROR = "ERROR"
158
  NO_RESPONSE = "NO_RESPONSE"
159
  STALE = "STALE"
160
-
 
1
  from pydantic import BaseModel, Field
2
+ from typing import List, Literal
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
  # --- Mission-Driven Abstractions ---
5
 
 
113
  ERROR = "ERROR"
114
  NO_RESPONSE = "NO_RESPONSE"
115
  STALE = "STALE"
 
utils/tiling.py CHANGED
@@ -1,7 +1,6 @@
1
  import numpy as np
2
  import torch
3
- import logging
4
- from typing import List, Tuple, Dict, Any, Optional
5
 
6
  def get_slice_bboxes(
7
  image_height: int,
 
1
  import numpy as np
2
  import torch
3
+ from typing import List
 
4
 
5
  def get_slice_bboxes(
6
  image_height: int,
utils/video.py CHANGED
@@ -54,29 +54,6 @@ def extract_frames_to_jpeg_dir(
54
  return frame_names, fps, width, height
55
 
56
 
57
- def extract_frames(video_path: str) -> Tuple[List[np.ndarray], float, int, int]:
58
- cap = cv2.VideoCapture(video_path)
59
- if not cap.isOpened():
60
- raise ValueError("Unable to open video.")
61
-
62
- fps = cap.get(cv2.CAP_PROP_FPS) or 0.0
63
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
64
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
65
-
66
- frames: List[np.ndarray] = []
67
- success, frame = cap.read()
68
- while success:
69
- frames.append(frame)
70
- success, frame = cap.read()
71
-
72
- cap.release()
73
-
74
- if not frames:
75
- raise ValueError("Video decode produced zero frames.")
76
-
77
- return frames, fps, width, height
78
-
79
-
80
  def _transcode_with_ffmpeg(src_path: str, dst_path: str) -> None:
81
  cmd = [
82
  "ffmpeg",
@@ -102,31 +79,6 @@ def _transcode_with_ffmpeg(src_path: str, dst_path: str) -> None:
102
  logging.info("ffmpeg success")
103
 
104
 
105
- def write_video(frames: List[np.ndarray], output_path: str, fps: float, width: int, height: int) -> None:
106
- if not frames:
107
- raise ValueError("No frames available for writing.")
108
- temp_fd, temp_path = tempfile.mkstemp(prefix="raw_", suffix=".mp4")
109
- os.close(temp_fd)
110
- writer = cv2.VideoWriter(temp_path, cv2.VideoWriter_fourcc(*"mp4v"), fps or 1.0, (width, height))
111
- if not writer.isOpened():
112
- os.remove(temp_path)
113
- raise ValueError("Failed to open VideoWriter.")
114
-
115
- for frame in frames:
116
- writer.write(frame)
117
-
118
- writer.release()
119
- try:
120
- _transcode_with_ffmpeg(temp_path, output_path)
121
- logging.debug("Transcoded video to H.264 for browser compatibility.")
122
- os.remove(temp_path)
123
- except FileNotFoundError:
124
- logging.warning("ffmpeg not found; serving fallback MP4V output.")
125
- shutil.move(temp_path, output_path)
126
- except RuntimeError as exc:
127
- logging.warning("ffmpeg transcode failed (%s); serving fallback MP4V output.", exc)
128
- shutil.move(temp_path, output_path)
129
-
130
  class VideoReader:
131
  def __init__(self, video_path: str):
132
  self.video_path = video_path
@@ -163,94 +115,6 @@ class VideoReader:
163
  self.close()
164
 
165
 
166
- class AsyncVideoReader:
167
- """
168
- Async video reader that decodes frames in a background thread.
169
-
170
- This prevents GPU starvation on multi-GPU systems by prefetching frames
171
- while the main thread is busy dispatching work to GPUs.
172
- """
173
-
174
- def __init__(self, video_path: str, prefetch_size: int = 32):
175
- """
176
- Initialize async video reader.
177
-
178
- Args:
179
- video_path: Path to video file
180
- prefetch_size: Number of frames to prefetch (default 32)
181
- """
182
- from queue import Queue
183
- from threading import Thread
184
-
185
- self.video_path = video_path
186
- self.prefetch_size = prefetch_size
187
-
188
- # Open video to get metadata
189
- self._cap = cv2.VideoCapture(video_path)
190
- if not self._cap.isOpened():
191
- raise ValueError(f"Unable to open video: {video_path}")
192
-
193
- self.fps = self._cap.get(cv2.CAP_PROP_FPS) or 30.0
194
- self.width = int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH))
195
- self.height = int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
196
- self.total_frames = int(self._cap.get(cv2.CAP_PROP_FRAME_COUNT))
197
-
198
- # Prefetch queue
199
- self._queue: Queue = Queue(maxsize=prefetch_size)
200
- self._error: Exception = None
201
- self._finished = False
202
-
203
- # Start decoder thread
204
- self._thread = Thread(target=self._decode_loop, daemon=True)
205
- self._thread.start()
206
-
207
- def _decode_loop(self):
208
- """Background thread that continuously decodes frames."""
209
- try:
210
- while True:
211
- success, frame = self._cap.read()
212
- if not success:
213
- break
214
- self._queue.put(frame) # Blocks when queue is full (backpressure)
215
- except Exception as e:
216
- self._error = e
217
- logging.error(f"AsyncVideoReader decode error: {e}")
218
- finally:
219
- self._cap.release()
220
- self._queue.put(None) # Sentinel to signal end
221
- self._finished = True
222
-
223
- def __iter__(self):
224
- return self
225
-
226
- def __next__(self) -> np.ndarray:
227
- if self._error:
228
- raise self._error
229
-
230
- frame = self._queue.get()
231
- if frame is None:
232
- raise StopIteration
233
- return frame
234
-
235
- def close(self):
236
- """Stop the decoder thread and release resources."""
237
- # Signal thread to stop by releasing cap (if not already done)
238
- if self._cap.isOpened():
239
- self._cap.release()
240
- # Drain queue to unblock thread if it's waiting on put()
241
- while not self._queue.empty():
242
- try:
243
- self._queue.get_nowait()
244
- except:
245
- break
246
-
247
- def __enter__(self):
248
- return self
249
-
250
- def __exit__(self, exc_type, exc_val, exc_tb):
251
- self.close()
252
-
253
-
254
  class VideoWriter:
255
  def __init__(self, output_path: str, fps: float, width: int, height: int):
256
  self.output_path = output_path
 
54
  return frame_names, fps, width, height
55
 
56
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  def _transcode_with_ffmpeg(src_path: str, dst_path: str) -> None:
58
  cmd = [
59
  "ffmpeg",
 
79
  logging.info("ffmpeg success")
80
 
81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
  class VideoReader:
83
  def __init__(self, video_path: str):
84
  self.video_path = video_path
 
115
  self.close()
116
 
117
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
  class VideoWriter:
119
  def __init__(self, output_path: str, fps: float, width: int, height: int):
120
  self.output_path = output_path