Spaces:
Sleeping
Sleeping
/* =========================
 * Weapon-Grade Demo Engine
 * - Tab 1: first-frame perception + reasoning
 * - Tab 2: closed-loop tracking + dynamic dwell update
 * - Tab 3: trade-space console
 * ========================= */
| (() => { | |
| const API_CONFIG = window.API_CONFIG || {}; | |
| const BACKEND_BASE = (() => { | |
| const raw = (API_CONFIG.BACKEND_BASE || API_CONFIG.BASE_URL || "").trim(); | |
| if (raw) return raw.replace(/\/$/, ""); | |
| const origin = (window.location && window.location.origin) || ""; | |
| if (origin && origin !== "null") return origin; | |
| return ""; | |
| })(); | |
| const $ = (sel, root = document) => root.querySelector(sel); | |
| const $$ = (sel, root = document) => Array.from(root.querySelectorAll(sel)); | |
| const clamp = (x, a, b) => Math.min(b, Math.max(a, x)); | |
| const lerp = (a, b, t) => a + (b - a) * t; | |
| const now = () => performance.now(); | |
| const ENABLE_KILL = false; | |
| const state = { | |
| videoUrl: null, | |
| videoFile: null, | |
| videoLoaded: false, | |
| useProcessedFeed: false, | |
| useDepthFeed: false, // Flag for depth view (Tab 2 video) | |
| useFrameDepthView: false, // Flag for first frame depth view (Tab 1) | |
| hasReasoned: false, | |
| isReasoning: false, // Flag to prevent concurrent Reason executions | |
| hf: { | |
| baseUrl: BACKEND_BASE, | |
| detector: "auto", | |
| asyncJobId: null, // Current job ID from /detect/async | |
| asyncPollInterval: null, // Polling timer handle | |
| firstFrameUrl: null, // First frame preview URL | |
| firstFrameDetections: null, // First-frame detections from backend | |
| statusUrl: null, // Status polling URL | |
| videoUrl: null, // Final video URL | |
| asyncStatus: "idle", // "idle"|"processing"|"completed"|"failed" | |
| asyncProgress: null, // Progress data from status endpoint | |
| queries: [], // Mission objective used as query | |
| processedUrl: null, | |
| processedBlob: null, | |
| depthVideoUrl: null, // Depth video URL | |
| depthFirstFrameUrl: null, // First frame depth URL | |
| depthBlob: null, // Depth video blob | |
| depthFirstFrameBlob: null, // Depth first frame blob | |
| summary: null, | |
| busy: false, | |
| lastError: null | |
| }, | |
| detector: { | |
| mode: "coco", | |
| kind: "object", | |
| loaded: false, | |
| model: null, | |
| loading: false, | |
| cocoBlocked: false, | |
| hfTrackingWarned: false | |
| }, | |
| tracker: { | |
| mode: "iou", | |
| tracks: [], | |
| nextId: 1, | |
| lastDetTime: 0, | |
| running: false, | |
| selectedTrackId: null, | |
| beamOn: false, | |
| lastFrameTime: 0 | |
| }, | |
| frame: { | |
| w: 1280, | |
| h: 720, | |
| bitmap: null | |
| }, | |
| detections: [], // from Tab 1 | |
| selectedId: null, | |
| intelBusy: false, | |
| ui: { | |
| cursorMode: "on", | |
| agentCursor: { x: 0.65, y: 0.28, vx: 0, vy: 0, visible: false, target: null, mode: "idle", t0: 0 } | |
| } | |
| }; | |
| // Config: Update track reasoning every 30 frames | |
| const REASON_INTERVAL = 30; | |
| // ========= Elements ========= | |
| const sysDot = $("#sys-dot"); | |
| const sysStatus = $("#sys-status"); | |
| const sysLog = $("#sysLog"); | |
| const telemetry = $("#telemetry"); | |
| const videoFile = $("#videoFile"); | |
| const btnEject = $("#btnEject"); | |
| const detectorSelect = $("#detectorSelect"); | |
| const trackerSelect = $("#trackerSelect"); | |
| function getDetectorSelection() { | |
| const opt = detectorSelect?.options?.[detectorSelect.selectedIndex]; | |
| return { | |
| value: detectorSelect?.value || "coco", | |
| kind: opt?.dataset?.kind || "object", | |
| label: (opt?.textContent || "").trim() | |
| }; | |
| } | |
| const helPower = $("#helPower"); | |
| const helAperture = $("#helAperture"); | |
| const helM2 = $("#helM2"); | |
| const helJitter = $("#helJitter"); | |
| const helDuty = $("#helDuty"); | |
| const helMode = $("#helMode"); | |
| const atmVis = $("#atmVis"); | |
| const atmCn2 = $("#atmCn2"); | |
| const seaSpray = $("#seaSpray"); | |
| const aoQ = $("#aoQ"); | |
| const rangeBase = $("#rangeBase"); | |
| const detHz = $("#detHz"); | |
| const policyMode = $("#policyMode"); | |
| const assessWindow = $("#assessWindow"); | |
| const cursorMode = $("#cursorMode"); | |
| const btnReason = $("#btnReason"); | |
| const btnCancelReason = $("#btnCancelReason"); | |
| const btnRecompute = $("#btnRecompute"); | |
| const btnClear = $("#btnClear"); | |
| const frameCanvas = $("#frameCanvas"); | |
| const frameOverlay = $("#frameOverlay"); | |
| const frameRadar = $("#frameRadar"); | |
| const frameEmpty = $("#frameEmpty"); | |
| const frameNote = $("#frameNote"); | |
| // const objList = $("#objList"); // Removed | |
| // const objList = $("#objList"); // Removed | |
| const objCount = $("#objCount"); | |
| const featureTable = $("#featureTable"); | |
| const selId = $("#selId"); | |
| const checkEnableGPT = $("#enableGPTToggle"); | |
| const trackCount = $("#trackCount"); | |
| const frameTrackList = $("#frameTrackList"); | |
| // Removed old summary references | |
| const videoHidden = $("#videoHidden"); | |
| const videoEngage = $("#videoEngage"); | |
| const engageOverlay = $("#engageOverlay"); | |
| const engageEmpty = $("#engageEmpty"); | |
| const engageNote = $("#engageNote"); | |
| // Mission-driven (HF Space) backend controls | |
| const missionText = $("#missionText"); | |
| const hfBackendStatus = $("#hfBackendStatus"); | |
| const intelSummaryBox = $("#intelSummaryBox"); | |
| const intelStamp = $("#intelStamp"); | |
| const intelDot = $("#intelDot"); | |
| const btnIntelRefresh = $("#btnIntelRefresh"); | |
| const intelThumbs = [$("#intelThumb0"), $("#intelThumb1"), $("#intelThumb2")]; | |
| const missionClassesEl = $("#missionClasses"); | |
| const missionIdEl = $("#missionId"); | |
| const chipFeed = $("#chipFeed"); | |
| const btnEngage = $("#btnEngage"); | |
| // Debug hook for console inspection | |
| window.__LP_STATE__ = state; | |
| const btnPause = $("#btnPause"); | |
| const btnReset = $("#btnReset"); | |
| const btnToggleSidebar = $("#btnToggleSidebar"); | |
| const chipPolicy = $("#chipPolicy"); | |
| const chipTracks = $("#chipTracks"); | |
| const chipBeam = $("#chipBeam"); | |
| const chipHz = $("#chipHz"); | |
| const chipDepth = $("#chipDepth"); | |
| const chipFrameDepth = $("#chipFrameDepth"); | |
| const dwellText = $("#dwellText"); | |
| const dwellBar = $("#dwellBar"); | |
| const radarCanvas = $("#radarCanvas"); | |
| const trackList = $("#trackList"); | |
| const liveStamp = $("#liveStamp"); | |
| const tradeCanvas = $("#tradeCanvas"); | |
| const tradeTarget = $("#tradeTarget"); | |
| const rMin = $("#rMin"); | |
| const rMax = $("#rMax"); | |
| const showPk = $("#showPk"); | |
| const btnReplot = $("#btnReplot"); | |
| const btnSnap = $("#btnSnap"); | |
| // ========= UI: knobs display ========= | |
| function syncKnobDisplays() { | |
| $("#helPowerVal").textContent = helPower.value; | |
| $("#helApertureVal").textContent = (+helAperture.value).toFixed(2); | |
| $("#helM2Val").textContent = (+helM2.value).toFixed(1); | |
| $("#helJitterVal").textContent = (+helJitter.value).toFixed(1); | |
| $("#helDutyVal").textContent = helDuty.value; | |
| $("#atmVisVal").textContent = atmVis.value; | |
| $("#atmCn2Val").textContent = atmCn2.value; | |
| $("#seaSprayVal").textContent = seaSpray.value; | |
| $("#aoQVal").textContent = aoQ.value; | |
| $("#rangeBaseVal").textContent = rangeBase.value; | |
| $("#detHzVal").textContent = detHz.value; | |
| $("#assessWindowVal").textContent = (+assessWindow.value).toFixed(1); | |
| chipPolicy.textContent = `POLICY:${policyMode.value.toUpperCase()}`; | |
| chipHz.textContent = `DET:${detHz.value}Hz`; | |
| telemetry.textContent = `HEL=${helPower.value}kW · VIS=${atmVis.value}km · Cn²=${atmCn2.value}/10 · AO=${aoQ.value}/10 · DET=${detHz.value}Hz`; | |
| } | |
| $$("input,select").forEach(el => el.addEventListener("input", () => { | |
| syncKnobDisplays(); | |
| if (state.hasReasoned) { | |
| // keep it responsive: recompute power/dwell numerics even without rerunning detection | |
| recomputeHEL(); // async but we don't await here for UI responsiveness | |
| renderFrameOverlay(); | |
| renderTrade(); | |
| } | |
| })); | |
| syncKnobDisplays(); | |
| renderMissionContext(); | |
| setHfStatus("idle"); | |
| const detInit = getDetectorSelection(); | |
| state.detector.mode = detInit.value; | |
| state.detector.kind = detInit.kind; | |
| state.hf.detector = detInit.value; | |
| // Toggle RAW vs HF feed | |
| chipFeed.addEventListener("click", async () => { | |
| if (!state.videoLoaded) return; | |
| if (!state.hf.processedUrl) { | |
| log("HF processed feed not ready yet. Run Reason (HF mode) and wait for backend.", "w"); | |
| return; | |
| } | |
| await setEngageFeed(!state.useProcessedFeed); | |
| log(`Engage feed set to: ${state.useProcessedFeed ? "HF" : "RAW"}`, "t"); | |
| }); | |
| // Toggle depth view | |
| chipDepth.addEventListener("click", async () => { | |
| if (!state.videoLoaded) return; | |
| if (!state.hf.depthVideoUrl) { | |
| log("Depth video not ready yet. Run Reason and wait for depth processing.", "w"); | |
| return; | |
| } | |
| await toggleDepthView(); | |
| log(`View set to: ${state.useDepthFeed ? "DEPTH" : "DEFAULT"}`, "t"); | |
| }); | |
| // Toggle first frame depth view (Tab 1) | |
| if (chipFrameDepth) { | |
| chipFrameDepth.addEventListener("click", () => { | |
| if (!state.videoLoaded) return; | |
| if (!state.hf.depthFirstFrameUrl) { | |
| log("First frame depth not ready yet. Run Reason and wait for depth processing.", "w"); | |
| return; | |
| } | |
| toggleFirstFrameDepthView(); | |
| log(`First frame view set to: ${state.useFrameDepthView ? "DEPTH" : "DEFAULT"}`, "t"); | |
| }); | |
| } | |
| // Refresh intel summary (unbiased) | |
| if (btnIntelRefresh) { | |
| btnIntelRefresh.addEventListener("click", async () => { | |
| if (!state.videoLoaded) return; | |
| log("Refreshing mission intel summary (unbiased)…", "t"); | |
| await computeIntelSummary(); | |
| }); | |
| } | |
| // ========= Logging ========= | |
| function log(msg, level = "t") { | |
| const ts = new Date().toLocaleTimeString(); | |
| const prefix = level === "e" ? "[ERR]" : (level === "w" ? "[WARN]" : (level === "g" ? "[OK]" : "[SYS]")); | |
| const line = `${ts} ${prefix} ${msg}\n`; | |
| const span = document.createElement("span"); | |
| span.className = level; | |
| span.textContent = line; | |
| sysLog.appendChild(span); | |
| sysLog.scrollTop = sysLog.scrollHeight; | |
| } | |
| function setStatus(kind, text) { | |
| sysStatus.textContent = text; | |
| sysDot.className = "dot" + (kind === "warn" ? " warn" : (kind === "bad" ? " bad" : "")); | |
| } | |
| // ========= Mission Intel Summary (unbiased, no location) ========= | |
| function setIntelStatus(kind, text) { | |
| if (!intelStamp || !intelDot) return; | |
| intelStamp.innerHTML = text; | |
| intelDot.className = "dot" + (kind === "warn" ? " warn" : (kind === "bad" ? " bad" : "")); | |
| intelDot.style.width = "7px"; | |
| intelDot.style.height = "7px"; | |
| intelDot.style.boxShadow = "none"; | |
| } | |
| function setIntelThumb(i, dataUrl) { | |
| const img = intelThumbs?.[i]; | |
| if (!img) return; | |
| img.src = dataUrl || ""; | |
| } | |
| function resetIntelUI() { | |
| if (!intelSummaryBox) return; | |
| intelSummaryBox.innerHTML = 'Upload a video, then click <b>Reason</b> to generate an unbiased scene summary.'; | |
| setIntelStatus("warn", "Idle"); | |
| setIntelThumb(0, ""); | |
| setIntelThumb(1, ""); | |
| setIntelThumb(2, ""); | |
| } | |
| function pluralize(label, n) { | |
| if (n === 1) return label; | |
| if (label.endsWith("s")) return label; | |
| return label + "s"; | |
| } | |
| // [Deleted] inferSceneDescriptor | |
| async function computeIntelSummary() { | |
| if (!intelSummaryBox) return; | |
| if (!state.videoLoaded) { resetIntelUI(); return; } | |
| if (state.intelBusy) return; | |
| state.intelBusy = true; | |
| setIntelStatus("warn", "Generating…"); | |
| intelSummaryBox.textContent = "Sampling frames and running analysis…"; | |
| try { | |
| const dur = (videoHidden?.duration || videoEngage?.duration || 0); | |
| const times = [0, dur ? dur * 0.33 : 1, dur ? dur * 0.66 : 2]; | |
| const frames = []; | |
| // Sample frames | |
| for (let i = 0; i < times.length; i++) { | |
| await seekTo(videoHidden, times[i]); | |
| const bmp = await frameToBitmap(videoHidden); | |
| // Draw to temp canvas to get dataURL | |
| const c = document.createElement("canvas"); | |
| c.width = 640; c.height = 360; // downscale | |
| const ctx = c.getContext("2d"); | |
| ctx.drawImage(bmp, 0, 0, c.width, c.height); | |
| const dataUrl = c.toDataURL("image/jpeg", 0.6); | |
| frames.push(dataUrl); | |
| // update thumb | |
| try { setIntelThumb(i, dataUrl); } catch (_) { } | |
| } | |
| // Call external hook | |
| const summary = await externalIntel(frames); | |
| intelSummaryBox.textContent = summary; | |
| setIntelStatus("good", `Updated · ${new Date().toLocaleTimeString()}`); | |
| } catch (err) { | |
| setIntelStatus("bad", "Summary unavailable"); | |
| intelSummaryBox.textContent = `Unable to generate summary: ${err.message}`; | |
| console.error(err); | |
| } finally { | |
| state.intelBusy = false; | |
| } | |
| } | |
| // ========= Tabs ========= | |
| $$(".tabbtn").forEach(btn => { | |
| btn.addEventListener("click", () => { | |
| $$(".tabbtn").forEach(b => b.classList.remove("active")); | |
| btn.classList.add("active"); | |
| const tab = btn.dataset.tab; | |
| $$(".tab").forEach(t => t.classList.remove("active")); | |
| $(`#tab-${tab}`).classList.add("active"); | |
| if (tab === "trade") renderTrade(); | |
| if (tab === "engage") { | |
| resizeOverlays(); | |
| renderRadar(); | |
| renderTrackCards(); | |
| } | |
| }); | |
| }); | |
| // ========= Video load / unload ========= | |
| async function unloadVideo(options = {}) { | |
| const preserveInput = !!options.preserveInput; | |
| // Stop polling if running | |
| if (state.hf.asyncPollInterval) { | |
| clearInterval(state.hf.asyncPollInterval); | |
| state.hf.asyncPollInterval = null; | |
| } | |
| if (state.videoUrl && state.videoUrl.startsWith("blob:")) { | |
| URL.revokeObjectURL(state.videoUrl); | |
| } | |
| if (state.hf.processedUrl && state.hf.processedUrl.startsWith("blob:")) { | |
| try { URL.revokeObjectURL(state.hf.processedUrl); } catch (_) { } | |
| } | |
| if (state.hf.depthVideoUrl && state.hf.depthVideoUrl.startsWith("blob:")) { | |
| try { URL.revokeObjectURL(state.hf.depthVideoUrl); } catch (_) { } | |
| } | |
| if (state.hf.depthFirstFrameUrl && state.hf.depthFirstFrameUrl.startsWith("blob:")) { | |
| try { URL.revokeObjectURL(state.hf.depthFirstFrameUrl); } catch (_) { } | |
| } | |
| state.videoUrl = null; | |
| state.videoFile = null; | |
| state.videoLoaded = false; | |
| state.useProcessedFeed = false; | |
| state.useDepthFeed = false; | |
| state.useFrameDepthView = false; | |
| state.hf.missionId = null; | |
| state.hf.plan = null; | |
| state.hf.processedUrl = null; | |
| state.hf.processedBlob = null; | |
| state.hf.depthVideoUrl = null; | |
| state.hf.depthBlob = null; | |
| state.hf.depthFirstFrameUrl = null; | |
| state.hf.depthFirstFrameBlob = null; | |
| state.hf.summary = null; | |
| state.hf.busy = false; | |
| state.hf.lastError = null; | |
| state.hf.asyncJobId = null; | |
| state.hf.asyncStatus = "idle"; | |
| setHfStatus("idle"); | |
| renderMissionContext(); | |
| resetIntelUI(); | |
| state.hasReasoned = false; | |
| state.isReasoning = false; // Reset reasoning lock | |
| // Reset Reason button state | |
| btnReason.disabled = false; | |
| btnReason.style.opacity = "1"; | |
| btnReason.style.cursor = "pointer"; | |
| btnCancelReason.style.display = "none"; | |
| btnEngage.disabled = true; | |
| state.detections = []; | |
| state.selectedId = null; | |
| state.tracker.tracks = []; | |
| state.tracker.nextId = 1; | |
| state.tracker.running = false; | |
| state.tracker.selectedTrackId = null; | |
| state.tracker.beamOn = false; | |
| videoHidden.removeAttribute("src"); | |
| videoEngage.removeAttribute("src"); | |
| videoHidden.load(); | |
| videoEngage.load(); | |
| if (!preserveInput) { | |
| videoFile.value = ""; | |
| } | |
| if (!preserveInput) { | |
| $("#videoMeta").textContent = "No file"; | |
| } | |
| frameEmpty.style.display = "flex"; | |
| engageEmpty.style.display = "flex"; | |
| frameNote.textContent = "Awaiting video"; | |
| engageNote.textContent = "Awaiting video"; | |
| clearCanvas(frameCanvas); | |
| clearCanvas(frameOverlay); | |
| clearCanvas(engageOverlay); | |
| renderRadar(); | |
| renderFrameTrackList(); | |
| // renderSummary(); | |
| renderFeatures(null); | |
| renderTrade(); | |
| setStatus("warn", "STANDBY · No video loaded"); | |
| log("Video unloaded. Demo reset.", "w"); | |
| } | |
| btnEject.addEventListener("click", async () => { | |
| await unloadVideo(); | |
| }); | |
| videoFile.addEventListener("change", async (e) => { | |
| const file = e.target.files && e.target.files[0]; | |
| if (!file) return; | |
| const pendingFile = file; | |
| await unloadVideo({ preserveInput: true }); | |
| state.videoFile = pendingFile; | |
| const nullOrigin = (window.location && window.location.origin) === "null"; | |
| if (nullOrigin) { | |
| state.videoUrl = await readFileAsDataUrl(pendingFile); | |
| } else { | |
| state.videoUrl = URL.createObjectURL(pendingFile); | |
| } | |
| // STOP any existing async polling | |
| stopAsyncPolling(); | |
| // reset HF backend state for this new upload | |
| if (state.hf.processedUrl && state.hf.processedUrl.startsWith("blob:")) { | |
| try { URL.revokeObjectURL(state.hf.processedUrl); } catch (_) { } | |
| } | |
| if (state.hf.depthVideoUrl && state.hf.depthVideoUrl.startsWith("blob:")) { | |
| try { URL.revokeObjectURL(state.hf.depthVideoUrl); } catch (_) { } | |
| } | |
| if (state.hf.depthFirstFrameUrl && state.hf.depthFirstFrameUrl.startsWith("blob:")) { | |
| try { URL.revokeObjectURL(state.hf.depthFirstFrameUrl); } catch (_) { } | |
| } | |
| state.hf.processedUrl = null; | |
| state.hf.processedBlob = null; | |
| state.hf.depthVideoUrl = null; | |
| state.hf.depthBlob = null; | |
| state.hf.depthFirstFrameUrl = null; | |
| state.hf.depthFirstFrameBlob = null; | |
| state.hf.asyncJobId = null; | |
| state.hf.firstFrameUrl = null; | |
| state.hf.firstFrameDetections = null; | |
| state.hf.statusUrl = null; | |
| state.hf.videoUrl = null; | |
| state.hf.asyncStatus = "idle"; | |
| state.hf.asyncProgress = null; | |
| state.hf.queries = []; | |
| state.hf.summary = null; | |
| state.hf.lastError = null; | |
| state.hf.busy = false; | |
| state.useProcessedFeed = false; | |
| state.useDepthFeed = false; | |
| state.useFrameDepthView = false; | |
| setHfStatus("idle"); | |
| renderMissionContext(); | |
| videoHidden.src = state.videoUrl; | |
| videoEngage.removeAttribute("src"); | |
| videoEngage.load(); | |
| // Initialize with no engage feed until processed video is ready | |
| videoEngage.setAttribute("data-processed", "false"); | |
| btnEngage.disabled = true; | |
| setStatus("warn", "LOADING · Parsing video metadata"); | |
| log(`Video selected: ${pendingFile.name} (${Math.round(pendingFile.size / 1024 / 1024)} MB)`, "t"); | |
| await Promise.all([ | |
| waitVideoReady(videoHidden), | |
| waitVideoReady(videoHidden) | |
| ]); | |
| const dur = videoHidden.duration || 0; | |
| const w = videoHidden.videoWidth || 1280; | |
| const h = videoHidden.videoHeight || 720; | |
| state.videoLoaded = true; | |
| state.frame.w = w; | |
| state.frame.h = h; | |
| $("#videoMeta").textContent = `${pendingFile.name} · ${dur.toFixed(1)}s · ${w}×${h}`; | |
| frameEmpty.style.display = "none"; | |
| engageEmpty.style.display = "none"; | |
| frameNote.textContent = `${w}×${h} · First frame only`; | |
| engageNote.textContent = `${dur.toFixed(1)}s · paused`; | |
| setStatus("warn", "READY · Video loaded (run Reason)"); | |
| log("Video loaded. Ready for first-frame reasoning.", "g"); | |
| resizeOverlays(); | |
| await captureFirstFrame(); | |
| drawFirstFrame(); | |
| renderFrameOverlay(); | |
| renderRadar(); | |
| renderTrade(); | |
| }); | |
| function displayAsyncFirstFrame() { | |
| if (!state.hf.firstFrameUrl) return; | |
| log(`Fetching HF first frame: ${state.hf.firstFrameUrl}`, "t"); | |
| // Display first frame with detections overlaid (segmentation masks or bounding boxes) | |
| const img = new Image(); | |
| img.crossOrigin = "anonymous"; | |
| img.src = `${state.hf.firstFrameUrl}?t=${Date.now()}`; // Cache bust | |
| img.onload = () => { | |
| frameCanvas.width = img.width; | |
| frameCanvas.height = img.height; | |
| frameOverlay.width = img.width; | |
| frameOverlay.height = img.height; | |
| const ctx = frameCanvas.getContext("2d"); | |
| ctx.clearRect(0, 0, img.width, img.height); | |
| ctx.drawImage(img, 0, 0); | |
| frameEmpty.style.display = "none"; | |
| log(`✓ HF first frame displayed (${img.width}×${img.height})`, "g"); | |
| }; | |
| img.onerror = (err) => { | |
| console.error("Failed to load first frame:", err); | |
| log("✗ HF first frame load failed - check CORS or URL", "e"); | |
| }; | |
| } | |
| async function waitVideoReady(v) { | |
| return new Promise((resolve) => { | |
| const onReady = () => { v.removeEventListener("loadedmetadata", onReady); resolve(); }; | |
| if (v.readyState >= 1 && v.videoWidth) { resolve(); return; } | |
| v.addEventListener("loadedmetadata", onReady); | |
| v.load(); | |
| }); | |
| } | |
| function readFileAsDataUrl(file) { | |
| return new Promise((resolve, reject) => { | |
| const reader = new FileReader(); | |
| reader.onload = () => resolve(String(reader.result || "")); | |
| reader.onerror = () => reject(new Error("Failed to read file")); | |
| reader.readAsDataURL(file); | |
| }); | |
| } | |
| async function captureFirstFrame() { | |
| if (!state.videoLoaded) return; | |
| await seekTo(videoHidden, 0.0); | |
| const bmp = await frameToBitmap(videoHidden); | |
| state.frame.bitmap = bmp; | |
| log("Captured first frame for Tab 1 reasoning.", "t"); | |
| } | |
| async function seekTo(v, timeSec) { | |
| return new Promise((resolve, reject) => { | |
| const onSeeked = () => { v.removeEventListener("seeked", onSeeked); resolve(); }; | |
| const onError = () => { v.removeEventListener("error", onError); reject(new Error("Video seek error")); }; | |
| v.addEventListener("seeked", onSeeked); | |
| v.addEventListener("error", onError); | |
| try { | |
| v.currentTime = clamp(timeSec, 0, Math.max(0, v.duration - 0.05)); | |
| } catch (err) { | |
| v.removeEventListener("seeked", onSeeked); | |
| v.removeEventListener("error", onError); | |
| reject(err); | |
| } | |
| }); | |
| } | |
| async function frameToBitmap(videoEl) { | |
| const w = videoEl.videoWidth || 1280; | |
| const h = videoEl.videoHeight || 720; | |
| const c = document.createElement("canvas"); | |
| c.width = w; c.height = h; | |
| const ctx = c.getContext("2d"); | |
| ctx.drawImage(videoEl, 0, 0, w, h); | |
| if ("createImageBitmap" in window) { | |
| return await createImageBitmap(c); | |
| } | |
| return c; // fallback | |
| } | |
| function clearCanvas(canvas) { | |
| const ctx = canvas.getContext("2d"); | |
| ctx.clearRect(0, 0, canvas.width, canvas.height); | |
| } | |
| // ========= Detector loading ========= | |
| // ========= Mission-driven HF Space backend ========= | |
| const ALL_CLASSES_PROMPT = "No mission objective provided. Run full-class object detection across all supported classes. Detect and label every object you can with bounding boxes; do not filter by mission."; | |
| const DEFAULT_QUERY_CLASSES = [ | |
| "person", | |
| "car", | |
| "truck", | |
| "motorcycle", | |
| "bicycle", | |
| "bus", | |
| "train", | |
| "airplane" | |
| ]; | |
| function missionPromptOrAll() { | |
| const t = (missionText?.value || "").trim(); | |
| return t || ALL_CLASSES_PROMPT; | |
| } | |
| const HF_SPACE_DETECTORS = new Set([ | |
| "hf_yolov8", | |
| "detr_resnet50", | |
| "grounding_dino", | |
| "sam3", | |
| "drone_yolo", | |
| ]); | |
| // Backend currently requires latitude/longitude form fields. We send neutral defaults (no UI, no location in outputs). | |
| const DEFAULT_LAT = "0"; | |
| const DEFAULT_LON = "0"; | |
| function isHfMode(mode) { | |
| return !["coco", "external"].includes(mode); | |
| } | |
| function isHfSpaceDetector(det) { | |
| return HF_SPACE_DETECTORS.has(det); | |
| } | |
| function isSpaceUrl(value) { | |
| return /^https?:\/\/huggingface\.co\/spaces\//.test(String(value || "")); | |
| } | |
| function isHfInferenceModel(value) { | |
| const v = String(value || ""); | |
| return v.includes("/") && !isSpaceUrl(v); | |
| } | |
| function setHfStatus(msg) { | |
| if (!hfBackendStatus) return; | |
| const statusPrefix = state.hf.asyncStatus !== "idle" | |
| ? `[${state.hf.asyncStatus.toUpperCase()}] ` | |
| : ""; | |
| const normalized = String(msg || "").toLowerCase(); | |
| let display = msg; | |
| if (normalized.includes("ready")) { | |
| display = "MISSION PACKAGE READY"; | |
| } else if (normalized.includes("idle")) { | |
| display = "STANDBY"; | |
| } else if (normalized.includes("completed")) { | |
| display = "PROCESS COMPLETE"; | |
| } else if (normalized.includes("processing")) { | |
| display = "ACTIVE SCAN"; | |
| } else if (normalized.includes("cancelled")) { | |
| display = "STAND-DOWN"; | |
| } else if (normalized.includes("error")) { | |
| display = "FAULT CONDITION"; | |
| } | |
| hfBackendStatus.textContent = `HF Backend: ${statusPrefix}${display}`; | |
| // Color coding | |
| if (msg.includes("error")) { | |
| hfBackendStatus.style.color = "var(--bad)"; | |
| } else if (msg.includes("ready") || msg.includes("completed")) { | |
| hfBackendStatus.style.color = "var(--good)"; | |
| } else { | |
| hfBackendStatus.style.color = "var(--warn)"; | |
| } | |
| } | |
| function renderMissionContext() { | |
| const queries = state.hf.queries || []; | |
| if (missionClassesEl) missionClassesEl.textContent = queries.length ? queries.join(", ") : "—"; | |
| if (missionIdEl) missionIdEl.textContent = `Mission: ${state.hf.missionId || "—"}`; | |
| if (chipFeed) { | |
| chipFeed.textContent = state.useProcessedFeed ? "FEED:HF" : "FEED:RAW"; | |
| } | |
| updateDepthChip(); | |
| } | |
| function normalizeToken(s) { | |
| return String(s || "") | |
| .toLowerCase() | |
| .replace(/[_\-]+/g, " ") | |
| .replace(/[^a-z0-9\s]/g, "") | |
| .trim(); | |
| } | |
| function missionSynonyms(tokens) { | |
| const out = new Set(); | |
| tokens.forEach(t => { | |
| const v = normalizeToken(t); | |
| if (!v) return; | |
| out.add(v); | |
| // broad synonyms / fallbacks for common mission terms | |
| if (v.includes("drone") || v.includes("uav") || v.includes("quad") || v.includes("small uav")) { | |
| ["airplane", "bird", "kite"].forEach(x => out.add(x)); | |
| } | |
| if (v.includes("aircraft") || v.includes("fixed wing") || v.includes("jet")) { | |
| ["airplane", "bird"].forEach(x => out.add(x)); | |
| } | |
| if (v.includes("boat") || v.includes("ship") || v.includes("vessel") || v.includes("usv")) { | |
| ["boat"].forEach(x => out.add(x)); | |
| } | |
| if (v.includes("person") || v.includes("diver") || v.includes("swimmer")) { | |
| ["person"].forEach(x => out.add(x)); | |
| } | |
| if (v.includes("vehicle") || v.includes("truck") || v.includes("car")) { | |
| ["car", "truck"].forEach(x => out.add(x)); | |
| } | |
| }); | |
| return out; | |
| } | |
| function filterPredsByMission(preds) { | |
| // Mission filtering is now handled by the backend | |
| // Local detectors (COCO) will show all results | |
| return preds; | |
| } | |
| function isMissionFocusLabel(label) { | |
| // Mission focus detection is now handled by the backend | |
| // All detections from HF backend are considered mission-relevant | |
| return false; | |
| } | |
| // ========= HF Async Detection Pipeline ========= | |
| async function hfDetectAsync() { | |
| const sel = getDetectorSelection(); | |
| const detector = sel.value; | |
| const kind = sel.kind; | |
| const videoFile = state.videoFile; | |
| // Reset State & UI for new run | |
| state.detections = []; | |
| state.selectedId = null; | |
| state.tracker.tracks = []; // Clear tracking state too | |
| // Clear cached backend results so they don't reappear | |
| state.hf.firstFrameDetections = null; | |
| // Explicitly clear UI using standard renderers | |
| renderFrameTrackList(); | |
| renderFrameOverlay(); | |
| // Force a clear of the radar canvas (renderFrameRadar loop will pick up empty state next frame) | |
| if (frameRadar) { | |
| const ctx = frameRadar.getContext("2d"); | |
| ctx.clearRect(0, 0, frameRadar.width, frameRadar.height); | |
| } | |
| // Clear counts | |
| if (trackCount) trackCount.textContent = "0"; | |
| if (objCount) objCount.textContent = "0"; | |
| // Show loading state in list manually if needed, or let renderFrameTrackList handle it (it shows "No objects tracked") | |
| // But we want "Computing..." | |
| if (frameTrackList) frameTrackList.innerHTML = '<div style="font-style:italic; color:var(--text-dim); text-align:center; margin-top:20px;">Computing...</div>'; | |
| renderFeatures(null); // Clear feature panel | |
| if (!videoFile) { | |
| throw new Error("No video loaded"); | |
| } | |
| // Determine mode based on kind | |
| let mode; | |
| if (kind === "segmentation") { | |
| mode = "segmentation"; | |
| } else if (kind === "drone") { | |
| mode = "drone_detection"; | |
| } else { | |
| mode = "object_detection"; | |
| } | |
| // Use mission objective directly as detector input | |
| const missionObjective = (missionText?.value || "").trim(); | |
| let queries = ""; | |
| if (missionObjective) { | |
| // Use mission objective text directly - let backend interpret it | |
| queries = missionObjective; | |
| state.hf.queries = [missionObjective]; | |
| log(`Using mission objective: "${queries}"`); | |
| } else { | |
| if (mode === "drone_detection") { | |
| // Drone mode defaults on backend; omit queries entirely. | |
| queries = ""; | |
| state.hf.queries = []; | |
| log("No mission objective specified - using drone defaults"); | |
| } else { | |
| // No mission objective - use predefined classes | |
| queries = DEFAULT_QUERY_CLASSES.join(", "); | |
| state.hf.queries = DEFAULT_QUERY_CLASSES.slice(); | |
| log("No mission objective specified - using default classes"); | |
| } | |
| } | |
| // Build FormData | |
| const form = new FormData(); | |
| form.append("video", videoFile); | |
| form.append("mode", mode); | |
| if (queries) { | |
| form.append("queries", queries); | |
| } | |
| // Add detector for object_detection mode | |
| if (mode === "object_detection" && detector) { | |
| form.append("detector", detector); | |
| } | |
| if (mode === "segmentation") { | |
| form.append("segmenter", "sam3"); | |
| } | |
| // drone_detection uses drone_yolo automatically | |
| // Add depth_estimator parameter for depth processing | |
| const enableDepthToggle = document.getElementById("enableDepthToggle"); | |
| const useLegacyDepth = enableDepthToggle && enableDepthToggle.checked; | |
| const useGPT = checkEnableGPT && checkEnableGPT.checked; | |
| form.append("depth_estimator", useLegacyDepth ? "depth" : ""); | |
| form.append("enable_depth", useLegacyDepth ? "true" : "false"); | |
| form.append("enable_gpt", useGPT ? "true" : "false"); | |
| // Submit async job | |
| setHfStatus(`submitting ${mode} job...`); | |
| log(`Submitting ${mode} to ${state.hf.baseUrl || "(same-origin)"} (detector=${detector || "n/a"})`, "t"); | |
| const resp = await fetch(`${state.hf.baseUrl}/detect/async`, { | |
| method: "POST", | |
| body: form | |
| }); | |
| if (!resp.ok) { | |
| const err = await resp.json().catch(() => ({ detail: resp.statusText })); | |
| throw new Error(err.detail || "Async detection submission failed"); | |
| } | |
| const data = await resp.json(); | |
| // Store job info | |
| state.hf.asyncJobId = data.job_id; | |
| state.hf.firstFrameUrl = `${state.hf.baseUrl}${data.first_frame_url}`; | |
| state.hf.firstFrameDetections = Array.isArray(data.first_frame_detections) | |
| ? data.first_frame_detections | |
| : null; | |
| state.hf.statusUrl = `${state.hf.baseUrl}${data.status_url}`; | |
| state.hf.videoUrl = `${state.hf.baseUrl}${data.video_url}`; | |
| state.hf.asyncStatus = data.status; | |
| // Store depth URLs if provided | |
| if (data.depth_video_url) { | |
| state.hf.depthVideoUrl = `${state.hf.baseUrl}${data.depth_video_url}`; | |
| log("Depth video URL received", "t"); | |
| } | |
| if (data.first_frame_depth_url) { | |
| state.hf.depthFirstFrameUrl = `${state.hf.baseUrl}${data.first_frame_depth_url}`; | |
| log("First frame depth URL received (will fetch when ready)", "t"); | |
| } | |
| // Start Streaming if available | |
| if (data.stream_url) { | |
| log("Activating live stream...", "t"); | |
| const streamUrl = `${state.hf.baseUrl}${data.stream_url}`; | |
| setStreamingMode(streamUrl); | |
| // NOTE: Auto-switch removed to allow viewing First Frame on Tab 1 | |
| log("Live view available in 'Engage' tab.", "g"); | |
| setStatus("warn", "Live processing... View in Engage tab"); | |
| // Trigger resize/render (background setup) | |
| resizeOverlays(); | |
| renderRadar(); | |
| renderTrackCards(); | |
| } | |
| // Display first frame immediately (if object detection, segmentation, or drone) | |
| if ((mode === "object_detection" || mode === "segmentation" || mode === "drone_detection") && state.hf.firstFrameUrl) { | |
| const count = Array.isArray(data.first_frame_detections) ? data.first_frame_detections.length : null; | |
| if (count != null) { | |
| log(`First frame: ${count} detections`); | |
| } else { | |
| log("First frame ready (no detections payload)", "t"); | |
| } | |
| displayAsyncFirstFrame(); | |
| // Populate state.detections with backend results so Radar and Cards work | |
| if (state.hf.firstFrameDetections) { | |
| state.detections = state.hf.firstFrameDetections.map((d, i) => { | |
| const id = `T${String(i + 1).padStart(2, '0')}`; | |
| const [x1, y1, x2, y2] = d.bbox || [0, 0, 0, 0]; | |
| const w = x2 - x1; | |
| const h = y2 - y1; | |
| const ap = defaultAimpoint(d.label); // Ensure defaultAimpoint is accessible | |
| return { | |
| id, | |
| label: d.label, | |
| score: d.score, | |
| bbox: { x: x1, y: y1, w: w, h: h }, | |
| aim: { ...ap }, | |
| features: null, | |
| baseRange_m: d.gpt_distance_m || null, // GPT is sole source of distance | |
| baseAreaFrac: null, | |
| baseDwell_s: null, | |
| reqP_kW: null, | |
| maxP_kW: null, | |
| pkill: null, | |
| // GPT properties - sole source of distance estimation | |
| gpt_distance_m: d.gpt_distance_m, | |
| gpt_direction: d.gpt_direction, | |
| gpt_description: d.gpt_description, | |
| // Depth visualization only (not for distance) | |
| depth_rel: d.depth_rel | |
| }; | |
| }); | |
| // Update UI components | |
| log(`Populating UI with ${state.detections.length} tracked objects`, "t"); | |
| renderFrameTrackList(); | |
| renderFrameRadar(); | |
| renderFeatures(null); | |
| renderTrade(); | |
| renderFrameOverlay(); | |
| } | |
| } | |
| log(`Backend job ID: ${data.job_id} (polling every 3s)`, "t"); | |
| setHfStatus(`job ${data.job_id.substring(0, 8)}: processing...`); | |
| // Start polling | |
| await pollAsyncJob(); | |
| } | |
// Best-effort cancellation of a backend detection job via DELETE.
// Returns a small result object describing the outcome; never throws.
async function cancelBackendJob(jobId, source = "user") {
  if (!jobId) return;
  const shortId = jobId.substring(0, 8);
  // HF Space replicas do not share a job store, so DELETE may 404 spuriously.
  if ((state.hf.baseUrl || "").includes("hf.space")) {
    log("Cancel request suppressed for HF Space (replica job store can return 404).", "w");
    return { status: "skipped", message: "Cancel disabled for HF Space" };
  }
  // Terminal jobs need no cancellation.
  const current = state.hf.asyncStatus;
  if (current === "completed" || current === "failed") {
    log(`Backend job ${shortId}: already ${current}, skipping cancel`, "t");
    return { status: current, message: `Job already ${current}` };
  }
  log(`Sending DELETE to /detect/job/${shortId}... (${source})`, "t");
  try {
    const response = await fetch(`${state.hf.baseUrl}/detect/job/${jobId}`, { method: "DELETE" });
    if (response.ok) {
      const result = await response.json();
      log(`✓ Backend job ${shortId}: ${result.message || "cancelled"} (status: ${result.status})`, "g");
      return result;
    }
    if (response.status === 404) {
      // Job already expired or cleaned up server-side.
      const detail = await response.json().catch(() => ({ detail: "Job not found" }));
      log(`⚠ Backend job ${shortId}: ${detail.detail || "not found or already cleaned up"}`, "w");
      return { status: "not_found", message: detail.detail };
    }
    const errorText = await response.text().catch(() => "Unknown error");
    log(`✗ Backend job ${shortId}: cancel failed (${response.status}) - ${errorText}`, "e");
    return { status: "error", message: errorText };
  } catch (err) {
    log(`✗ Backend job ${shortId}: cancel error - ${err.message}`, "e");
    return { status: "error", message: err.message };
  }
}
// Show the MJPEG live stream by overlaying an <img> on top of the engage video.
// Creates the overlay element lazily on first use.
function setStreamingMode(url) {
  let streamView = $("#streamView");
  if (!streamView) {
    streamView = document.createElement("img");
    streamView.id = "streamView";
    Object.assign(streamView.style, {
      width: "100%",
      height: "100%",
      objectFit: "contain",
      position: "absolute",
      top: "0",
      left: "0",
      zIndex: "10", // stacked above the <video>
      backgroundColor: "#000"
    });
    // Mount as a sibling of the engage video so it covers it.
    if (videoEngage && videoEngage.parentNode) {
      videoEngage.parentNode.appendChild(streamView);
      // An absolutely-positioned overlay needs a positioned ancestor.
      if (getComputedStyle(videoEngage.parentNode).position === "static") {
        videoEngage.parentNode.style.position = "relative";
      }
    }
  }
  if (streamView) {
    streamView.src = url;
    streamView.style.display = "block";
    if (videoEngage) videoEngage.style.display = "none";
    // Hide the empty-state placeholder as well.
    if (engageEmpty) engageEmpty.style.display = "none";
  }
}
// Tear down the live-stream overlay and restore the engage <video> element.
function stopStreamingMode() {
  const streamView = $("#streamView");
  if (streamView) {
    // Clearing src drops the MJPEG connection.
    streamView.src = "";
    streamView.style.display = "none";
  }
  if (videoEngage) videoEngage.style.display = "block";
}
// User-initiated abort of the reasoning pipeline: stop polling, drop the
// live stream, best-effort cancel the backend job, and reset the UI.
function cancelReasoning() {
  if (state.hf.asyncPollInterval) {
    clearInterval(state.hf.asyncPollInterval);
    state.hf.asyncPollInterval = null;
    log("HF polling stopped.", "w");
  }
  stopStreamingMode();
  // Fire-and-forget cancel of the backend job (capture id before clearing it).
  const jobId = state.hf.asyncJobId;
  if (jobId) cancelBackendJob(jobId, "cancel button");
  // Reset pipeline state.
  state.isReasoning = false;
  state.hf.busy = false;
  state.hf.asyncJobId = null;
  state.hf.asyncStatus = "cancelled";
  // Restore the Reason button, hide Cancel.
  btnReason.disabled = false;
  btnReason.style.opacity = "1";
  btnReason.style.cursor = "pointer";
  btnCancelReason.style.display = "none";
  setStatus("warn", "CANCELLED · Reasoning stopped");
  setHfStatus("cancelled (stopped by user)");
  log("Reasoning cancelled by user.", "w");
}
// Polls the backend status endpoint every 3 s until the job completes, fails,
// 404s (expired), errors, or times out. Resolves only after the processed
// artifacts have been fetched; otherwise rejects with a descriptive Error.
async function pollAsyncJob() {
  const pollInterval = 3000; // 3 seconds
  const maxAttempts = 200; // 10 minutes max (3s × 200)
  let attempts = 0;
  // Guards the "completed" branch: because the interval callback awaits
  // network calls, a new tick can fire while a video fetch from a previous
  // tick is still in flight. Cleared again only on VIDEO_PENDING retry.
  let fetchingVideo = false;
  return new Promise((resolve, reject) => {
    state.hf.asyncPollInterval = setInterval(async () => {
      attempts++;
      try {
        const resp = await fetch(state.hf.statusUrl, { cache: "no-store" });
        if (!resp.ok) {
          if (resp.status === 404) {
            // Job record is gone server-side; no point polling further.
            clearInterval(state.hf.asyncPollInterval);
            reject(new Error("Job expired or not found"));
            return;
          }
          throw new Error(`Status check failed: ${resp.statusText}`);
        }
        const status = await resp.json();
        state.hf.asyncStatus = status.status;
        state.hf.asyncProgress = status;
        if (status.status === "completed") {
          if (fetchingVideo) return; // a fetch is already underway
          fetchingVideo = true;
          const completedJobId = state.hf.asyncJobId;
          log(`✓ Backend job ${completedJobId.substring(0, 8)}: completed successfully`, "g");
          setHfStatus("job completed, fetching video...");
          try {
            await fetchProcessedVideo();
            await fetchDepthVideo();
            await fetchDepthFirstFrame();
            clearInterval(state.hf.asyncPollInterval);
            // Clear job ID to prevent cancel attempts after completion
            state.hf.asyncJobId = null;
            setHfStatus("ready");
            stopStreamingMode();
            resolve();
          } catch (err) {
            // Backend may report "completed" before the output file is fully
            // written; VIDEO_PENDING means "retry on the next poll tick".
            if (err && err.code === "VIDEO_PENDING") {
              setHfStatus("job completed, finalizing video...");
              fetchingVideo = false;
              return;
            }
            clearInterval(state.hf.asyncPollInterval);
            state.hf.asyncJobId = null; // Clear on error too
            stopStreamingMode();
            reject(err);
          }
        } else if (status.status === "failed") {
          clearInterval(state.hf.asyncPollInterval);
          const errMsg = status.error || "Processing failed";
          log(`✗ Backend job ${state.hf.asyncJobId.substring(0, 8)}: failed - ${errMsg}`, "e");
          // Clear job ID to prevent cancel attempts after failure
          state.hf.asyncJobId = null;
          setHfStatus(`error: ${errMsg}`);
          stopStreamingMode();
          reject(new Error(errMsg));
        } else {
          // Still processing
          setHfStatus(`job ${state.hf.asyncJobId.substring(0, 8)}: ${status.status}... (${attempts})`);
        }
        if (attempts >= maxAttempts) {
          clearInterval(state.hf.asyncPollInterval);
          reject(new Error("Polling timeout (10 minutes)"));
        }
      } catch (err) {
        // Any unexpected error (network, JSON parse) aborts polling entirely.
        clearInterval(state.hf.asyncPollInterval);
        reject(err);
      }
    }, pollInterval);
  });
}
// Download the finished annotated video. Throws a VIDEO_PENDING-coded error
// on HTTP 202 so the poller knows to retry; enables the Engage button on success.
async function fetchProcessedVideo() {
  const resp = await fetch(state.hf.videoUrl, { cache: "no-store" });
  if (!resp.ok) {
    if (resp.status === 202) {
      // Accepted but not flushed yet — tell the caller to retry.
      const pending = new Error("Video still processing");
      pending.code = "VIDEO_PENDING";
      throw pending;
    }
    throw new Error(`Failed to fetch video: ${resp.statusText}`);
  }
  // Under file:// the origin is the string "null"; blob: URLs would be
  // unusable there, so stream straight from the backend URL instead.
  if ((window.location && window.location.origin) === "null") {
    state.hf.processedBlob = null;
    state.hf.processedUrl = `${state.hf.videoUrl}?t=${Date.now()}`;
    btnEngage.disabled = false;
    log("Processed video ready (streaming URL)");
    return;
  }
  const blob = await resp.blob();
  // Release the previous object URL before replacing it.
  if (state.hf.processedUrl && state.hf.processedUrl.startsWith("blob:")) {
    URL.revokeObjectURL(state.hf.processedUrl);
  }
  state.hf.processedBlob = blob;
  state.hf.processedUrl = URL.createObjectURL(blob);
  btnEngage.disabled = false;
  log(`Processed video ready (${(blob.size / 1024 / 1024).toFixed(1)} MB)`);
}
/**
 * Download the optional depth-visualization video and swap
 * state.hf.depthVideoUrl over to a local blob URL.
 * Depth is a best-effort feature: errors are logged, never thrown.
 * Fixes vs. original: removed the unused `originalUrl` local, and revoke a
 * previously-created blob URL before replacing it (mirrors
 * fetchProcessedVideo; avoids leaking object URLs across jobs).
 */
async function fetchDepthVideo() {
  if (!state.hf.depthVideoUrl) {
    log("No depth video URL available", "w");
    return;
  }
  try {
    const resp = await fetch(state.hf.depthVideoUrl, { cache: "no-store" });
    if (!resp.ok) {
      if (resp.status === 202) {
        log("Depth video still processing", "w");
        return;
      }
      throw new Error(`Failed to fetch depth video: ${resp.statusText}`);
    }
    const nullOrigin = (window.location && window.location.origin) === "null";
    if (nullOrigin) {
      // file:// context: blob: URLs would have origin "null"; stream directly.
      state.hf.depthBlob = null;
      state.hf.depthVideoUrl = `${state.hf.depthVideoUrl}?t=${Date.now()}`;
      log("Depth video ready (streaming URL)");
      return;
    }
    const blob = await resp.blob();
    // Release any previous blob URL before replacing it.
    if (state.hf.depthVideoUrl.startsWith("blob:")) {
      URL.revokeObjectURL(state.hf.depthVideoUrl);
    }
    state.hf.depthBlob = blob;
    state.hf.depthVideoUrl = URL.createObjectURL(blob);
    log(`Depth video ready (${(blob.size / 1024 / 1024).toFixed(1)} MB) - Click VIEW chip to toggle`, "g");
    updateDepthChip();
  } catch (err) {
    log(`Error fetching depth video: ${err.message}`, "e");
  }
}
/**
 * Download the optional depth-colored first frame and replace
 * state.hf.depthFirstFrameUrl with a local blob URL
 * (displayFirstFrameWithDepth checks for the blob: prefix to know it's ready).
 * Errors are logged, never thrown.
 * Fixes vs. original: removed the unused `originalUrl` local, and revoke a
 * previously-created blob URL before replacing it (leak guard, consistent
 * with fetchProcessedVideo).
 */
async function fetchDepthFirstFrame() {
  if (!state.hf.depthFirstFrameUrl) {
    log("No depth first frame URL available", "w");
    return;
  }
  try {
    const resp = await fetch(state.hf.depthFirstFrameUrl, { cache: "no-store" });
    if (!resp.ok) {
      if (resp.status === 202) {
        log("Depth first frame still processing", "w");
        return;
      }
      throw new Error(`Failed to fetch depth first frame: ${resp.statusText}`);
    }
    const blob = await resp.blob();
    // Release any previous blob URL before replacing it.
    if (state.hf.depthFirstFrameUrl.startsWith("blob:")) {
      URL.revokeObjectURL(state.hf.depthFirstFrameUrl);
    }
    state.hf.depthFirstFrameBlob = blob;
    state.hf.depthFirstFrameUrl = URL.createObjectURL(blob);
    log(`✓ Depth first frame ready (${(blob.size / 1024).toFixed(1)} KB) - Click VIEW chip on Tab 1 to toggle`, "g");
    updateFirstFrameDepthChip();
  } catch (err) {
    log(`Error fetching depth first frame: ${err.message}`, "e");
  }
}
// Idempotently stop the async job poll timer, if one is running.
function stopAsyncPolling() {
  const handle = state.hf.asyncPollInterval;
  if (!handle) return;
  clearInterval(handle);
  state.hf.asyncPollInterval = null;
}
// Switch the Engage tab between the raw upload and the HF-processed video,
// preserving the playback position and play/pause state across the swap.
async function setEngageFeed(useProcessed) {
  state.useProcessedFeed = Boolean(useProcessed);
  renderMissionContext();
  if (!state.videoLoaded) return;
  const targetSrc = (state.useProcessedFeed && state.hf.processedUrl) ? state.hf.processedUrl : null;
  if (!targetSrc) return;
  // Capture playback state before touching the element.
  const resumePlayback = !videoEngage.paused;
  const resumeAt = videoEngage.currentTime || 0;
  try { videoEngage.pause(); } catch (_) { }
  if (videoEngage.src !== targetSrc) {
    videoEngage.src = targetSrc;
    // Mark the element so styling/logic can distinguish processed content.
    videoEngage.setAttribute('data-processed', state.useProcessedFeed ? 'true' : 'false');
    log(`Video feed switched to: ${state.useProcessedFeed ? 'HF processed' : 'raw'} (data-processed=${state.useProcessedFeed})`, "t");
    videoEngage.load();
    await waitVideoReady(videoEngage);
    // Re-seek, clamped to the new clip's duration.
    try { videoEngage.currentTime = Math.min(resumeAt, (videoEngage.duration || resumeAt)); } catch (_) { }
  }
  resizeOverlays();
  if (resumePlayback) {
    try { await videoEngage.play(); } catch (_) { }
  }
}
// Flip Tab 2 between the depth visualization and the default feed,
// preserving playback position/state across the source swap.
async function toggleDepthView() {
  state.useDepthFeed = !state.useDepthFeed;
  updateDepthChip();
  if (!state.videoLoaded) return;
  const resumePlayback = !videoEngage.paused;
  const resumeAt = videoEngage.currentTime || 0;
  try { videoEngage.pause(); } catch (_) { }
  // Source priority: depth (when toggled on and fetched) > processed > raw.
  let targetSrc;
  if (state.useDepthFeed && state.hf.depthVideoUrl) {
    targetSrc = state.hf.depthVideoUrl;
  } else if (state.useProcessedFeed && state.hf.processedUrl) {
    targetSrc = state.hf.processedUrl;
  } else {
    targetSrc = state.videoUrl;
  }
  if (videoEngage.src !== targetSrc) {
    videoEngage.src = targetSrc;
    videoEngage.setAttribute('data-depth', state.useDepthFeed ? 'true' : 'false');
    log(`Video view switched to: ${state.useDepthFeed ? 'depth' : 'default'}`, "t");
    videoEngage.load();
    await waitVideoReady(videoEngage);
    try { videoEngage.currentTime = Math.min(resumeAt, (videoEngage.duration || resumeAt)); } catch (_) { }
  }
  resizeOverlays();
  if (resumePlayback) {
    try { await videoEngage.play(); } catch (_) { }
  }
}
// Reflect the current Tab 2 view mode on the depth chip label.
function updateDepthChip() {
  if (!chipDepth) return;
  chipDepth.textContent = state.useDepthFeed ? "VIEW:DEPTH" : "VIEW:DEFAULT";
}
// Flip Tab 1 between the depth-colored and default first frame,
// then refresh the chip label and re-render the frame.
function toggleFirstFrameDepthView() {
  state.useFrameDepthView = !state.useFrameDepthView;
  updateFirstFrameDepthChip();
  displayFirstFrameWithDepth();
}
// Reflect the Tab 1 first-frame view mode on its chip label.
function updateFirstFrameDepthChip() {
  if (!chipFrameDepth) return;
  chipFrameDepth.textContent = state.useFrameDepthView ? "VIEW:DEPTH" : "VIEW:DEFAULT";
}
// Render the Tab 1 first frame (depth-colored or default) onto frameCanvas,
// falling back to the default frame whenever the depth frame is unavailable
// or fails to load. Called again recursively from onerror for the fallback.
function displayFirstFrameWithDepth() {
  // Determine which URL to use based on state
  let frameUrl;
  if (state.useFrameDepthView && state.hf.depthFirstFrameUrl) {
    // Check if we have a blob URL (starts with 'blob:');
    // fetchDepthFirstFrame swaps the server URL for a blob: URL once fetched.
    if (state.hf.depthFirstFrameUrl.startsWith('blob:')) {
      frameUrl = state.hf.depthFirstFrameUrl;
    } else {
      log("Depth first frame not ready yet. Please wait for processing to complete.", "w");
      state.useFrameDepthView = false; // Revert to default view
      updateFirstFrameDepthChip();
      frameUrl = state.hf.firstFrameUrl;
    }
  } else if (state.hf.firstFrameUrl) {
    frameUrl = state.hf.firstFrameUrl;
  } else {
    log("No first frame URL available", "w");
    return;
  }
  // Guards the not-ready fallback above, where firstFrameUrl may itself be unset.
  if (!frameUrl) {
    log("No valid frame URL to display", "w");
    return;
  }
  log(`Displaying ${state.useFrameDepthView ? 'depth' : 'default'} first frame`, "t");
  // Load and display the frame
  const img = new Image();
  // Request CORS so drawing a cross-origin frame does not taint the canvas.
  img.crossOrigin = "anonymous";
  img.src = frameUrl;
  img.onload = () => {
    // Size both the image canvas and the overlay canvas to the frame.
    frameCanvas.width = img.width;
    frameCanvas.height = img.height;
    frameOverlay.width = img.width;
    frameOverlay.height = img.height;
    const ctx = frameCanvas.getContext("2d");
    ctx.clearRect(0, 0, img.width, img.height);
    ctx.drawImage(img, 0, 0);
    frameEmpty.style.display = "none";
    log(`✓ ${state.useFrameDepthView ? 'Depth' : 'Default'} first frame displayed (${img.width}×${img.height})`, "g");
  };
  img.onerror = (err) => {
    console.error(`Failed to load ${state.useFrameDepthView ? 'depth' : 'default'} first frame:`, err);
    log(`✗ ${state.useFrameDepthView ? 'Depth' : 'Default'} first frame load failed - reverting to default view`, "e");
    // If depth frame fails, revert to default (recursion terminates because
    // useFrameDepthView is cleared before the retry).
    if (state.useFrameDepthView) {
      state.useFrameDepthView = false;
      updateFirstFrameDepthChip();
      displayFirstFrameWithDepth(); // Retry with default view
    }
  };
}
// Launch backend (HF) processing in the background. Single-flight via
// state.hf.busy; returns immediately so the UI stays responsive.
async function startHfPipeline() {
  if (state.hf.busy) {
    log("HF pipeline already running");
    return;
  }
  const { kind } = getDetectorSelection();
  // Local/external detectors never hit the HF backend.
  if (kind === "local" || kind === "external") {
    log("Skipping HF pipeline (not using HF detector)");
    return;
  }
  state.hf.busy = true;
  state.hf.lastError = null;
  // Fire-and-forget worker; errors are surfaced via status + log.
  (async () => {
    try {
      // Run async detection (mission text will be used directly as queries if no manual labels provided)
      await hfDetectAsync();
      // Prefer the processed feed automatically once it is available.
      if (state.hf.processedUrl && !state.useProcessedFeed) {
        log("Auto-switching to HF processed video feed (segmentation/detection overlays)", "g");
        await setEngageFeed(true);
      }
    } catch (err) {
      console.error("HF pipeline error:", err);
      state.hf.lastError = err.message;
      setHfStatus(`error: ${err.message}`);
      log(`⚠ HF error: ${err.message}`);
    } finally {
      state.hf.busy = false;
    }
  })();
}
// Detector selection drives both the local detector state and the detector
// sent with HF backend requests.
detectorSelect.addEventListener("change", () => {
  const sel = getDetectorSelection();
  state.detector.mode = sel.value;
  state.detector.kind = sel.kind;
  state.hf.detector = sel.value;
  // local detector is still used for aimpoint math + tracking; HF Space provides mission-driven video detection.
  ensureCocoDetector();
  renderMissionContext();
  log(`Detector mode set to: ${state.detector.mode}`, "t");
});
// Tracker selection only affects the client-side tracking mode.
trackerSelect.addEventListener("change", () => {
  state.tracker.mode = trackerSelect.value;
  log(`Tracker mode set to: ${state.tracker.mode}`, "t");
});
/**
 * Lazily load the in-browser COCO-SSD detector (TF.js from CDN) when the
 * "coco" detector mode is selected. Safe to call repeatedly: no-ops while
 * loading, once loaded, or in other modes. On failure the app stays usable
 * with the External/HF detectors.
 */
async function ensureCocoDetector() {
  if (state.detector.loaded || state.detector.loading) return;
  if (state.detector.mode !== "coco") return;
  state.detector.loading = true;
  setStatus("warn", "LOADING · Detector model");
  log("Loading COCO-SSD detector (browser model).", "t");
  try {
    await loadScriptOnce("tfjs", "https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@4.16.0/dist/tf.min.js");
    await loadScriptOnce("coco-ssd", "https://cdn.jsdelivr.net/npm/@tensorflow-models/coco-ssd@2.2.2/dist/coco-ssd.min.js");
    if (!window.cocoSsd || !window.tf) throw new Error("TF.js or cocoSsd not available.");
    state.detector.model = await window.cocoSsd.load();
    state.detector.loaded = true;
    log("COCO-SSD detector loaded.", "g");
    // Fix: the original `state.videoLoaded ? "warn" : "warn"` returned "warn"
    // on both branches (dead ternary); collapsed to a constant level.
    // NOTE(review): confirm whether the level was meant to differ with
    // videoLoaded (e.g. an "ok" level once a video is present).
    setStatus("warn", state.videoLoaded ? "READY · Video loaded (run Reason)" : "STANDBY · No video loaded");
  } catch (err) {
    log(`Detector load failed: ${err.message}. Switch to External detector or use HF models.`, "w");
    setStatus("warn", "READY · Detector not loaded (use External or HF)");
    state.detector.loaded = false;
    state.detector.model = null;
  } finally {
    state.detector.loading = false;
  }
}
// Cache of in-flight/settled script-load promises keyed by logical name.
const loadedScripts = new Map();
/**
 * Inject an external <script> at most once and resolve when it has loaded.
 * Concurrent and repeat callers share a single memoized promise instead of
 * the previous 50 ms setInterval busy-wait (fix: no polling, no dangling
 * timer). A failed load is evicted from the cache so a later call can retry,
 * matching the original "failed" semantics.
 * @param {string} key logical cache key (e.g. "tfjs")
 * @param {string} src script URL
 * @returns {Promise<void>} resolves on load, rejects if the script errors
 */
function loadScriptOnce(key, src) {
  let pending = loadedScripts.get(key);
  if (!pending) {
    pending = new Promise((resolve, reject) => {
      const s = document.createElement("script");
      s.src = src;
      s.async = true;
      s.onload = () => resolve();
      s.onerror = () => reject(new Error(`Failed to load ${src}`));
      document.head.appendChild(s);
    }).catch((err) => {
      // Allow a retry on the next call after a failure.
      loadedScripts.delete(key);
      throw err;
    });
    loadedScripts.set(key, pending);
  }
  return pending;
}
// Start loading detector opportunistically if selected.
// Fire-and-forget: failures are caught and logged inside ensureCocoDetector.
ensureCocoDetector();
// ========= Core physics-lite model =========
// NOTE(review): this declaration is shadowed by a byte-identical
// re-declaration of getKnobs() later in this file (function declarations
// hoist and the later one wins), so this copy is dead code — consider
// deleting one of the two.
// Snapshots the current HEL / atmosphere / range knob values from the UI.
function getKnobs() {
  const PkW = +helPower.value;           // laser power, kW (per name)
  const aperture = +helAperture.value;
  const M2 = +helM2.value;               // beam quality factor
  const jitter_urad = +helJitter.value;  // jitter, µrad (per name)
  const duty = (+helDuty.value) / 100;   // percent → fraction
  const mode = helMode.value;
  const vis_km = +atmVis.value;          // visibility, km (per name)
  const cn2 = +atmCn2.value;
  const spray = +seaSpray.value;
  const ao = +aoQ.value;
  const baseRange = +rangeBase.value;
  return { PkW, aperture, M2, jitter_urad, duty, mode, vis_km, cn2, spray, ao, baseRange };
}
| // ========= External Hooks (API Integration Points) ========= | |
/**
 * Hook: Object Detection
 * @param {Object} input { canvas, width, height }
 * @returns {Promise<Array>} [{ bbox:[x,y,w,h], class:"label", score:0.95 }, ...]
 */
async function externalDetect(input) {
  // Integration point: wire your object-detection endpoint here.
  // Stub behavior: log the call and report no detections.
  console.log("externalDetect called", input);
  return [];
}
/**
 * Hook: Feature Extraction
 * @param {Array} detections Array of detection objects
 * @param {Object} frameInfo { width, height }
 * @returns {Promise<Object>} Map of { "id": { reflectivity:0.5, ... } }
 */
async function externalFeatures(detections, frameInfo) {
  // Integration point: wire your feature-extraction endpoint here.
  // Stub behavior: log the call and return an empty feature map.
  console.log("externalFeatures called for", detections.length, "objects");
  return {};
}
/**
 * Hook: HEL synthesis
 * @param {Array} detections Array of detection objects
 * @param {Object} knobs HEL/atmosphere knobs
 * @returns {Promise<Object>} { targets: {id: {...}}, system: {...} }
 */
async function externalHEL(detections, knobs) {
  // Integration point: wire your HEL model/service here.
  console.log("externalHEL called for", detections.length, "objects", knobs);
  // Minimal well-formed payload so the UI keeps rendering.
  const system = { maxP_kW: 0, reqP_kW: 0, margin_kW: 0, medianRange_m: 0 };
  return { targets: {}, system };
}
/**
 * Hook: External Tracker
 * @param {HTMLVideoElement} videoEl
 * @returns {Promise<Array>} [{ bbox:[x,y,w,h], class:"label", score:0.95 }, ...]
 */
async function externalTrack(videoEl) {
  // Integration point: wire your external tracker / vision system here.
  // Stub behavior: log the call and report no tracks.
  console.log("externalTrack called");
  return [];
}
/**
 * Hook: Mission Intel Summary
 * @param {Array} frames Array of dataURLs or canvases
 * @returns {Promise<String>} Summary text
 */
async function externalIntel(frames) {
  // Integration point: wire your VLM / intel-summary endpoint here.
  console.log("externalIntel called with", frames.length, "frames");
  // Stub summary shown until a provider is connected.
  return "Video processed. No external intel provider connected.";
}
| // ========= Core Physics & Logic Adapters ========= | |
// Snapshot all HEL / atmosphere / engagement knob values from the UI controls.
// NOTE(review): an identical getKnobs() is declared earlier in this file;
// this later declaration is the one in effect — consider removing one copy.
function getKnobs() {
  return {
    PkW: +helPower.value,
    aperture: +helAperture.value,
    M2: +helM2.value,
    jitter_urad: +helJitter.value,
    duty: (+helDuty.value) / 100, // percent → fraction
    mode: helMode.value,
    vis_km: +atmVis.value,
    cn2: +atmCn2.value,
    spray: +seaSpray.value,
    ao: +aoQ.value,
    baseRange: +rangeBase.value
  };
}
// ========= Safe Stubs for Client-Side Visualization (Tab 2 / Tab 3) =========
// These functions were removed to allow backend control, but are mocked here
// to prevent UI crashes in the Engagement/Trade tabs until you wire them up.
// Power budget at target — placeholder returns an all-zero breakdown.
function maxPowerAtTarget(range_m) {
  return { Ptar: 0, Pout: 0, trans: 0, turb: 0, beam: 0 };
}
// Required power from target features — safe constant default.
function requiredPowerFromFeatures(feat) {
  return 10;
}
// Required dwell time — safe constant default.
function requiredDwell(range_m, reqP, maxP, baseDwell) {
  return 1.0;
}
// Probability-of-kill from power margin — placeholder returns 0.
function pkillFromMargin(margin_kW, dwell_s, reqDwell_s) {
  return 0;
}
// ========= Aimpoint rules (default) =========
// Heuristic aimpoint per target class: relative (x, y) within the bbox plus
// a human-readable aimpoint label. Matching is case-insensitive substring;
// unknown labels fall back to center-mass.
function defaultAimpoint(label) {
  const name = (label || "object").toLowerCase();
  // Ordered rules: the first rule with a matching keyword wins.
  const rules = [
    { keys: ["airplane", "drone", "uav", "kite", "bird"], aim: { relx: 0.62, rely: 0.55, label: "engine" } },
    { keys: ["helicopter"], aim: { relx: 0.50, rely: 0.45, label: "rotor_hub" } },
    { keys: ["boat", "ship"], aim: { relx: 0.60, rely: 0.55, label: "bridge/engine" } },
    { keys: ["truck", "car"], aim: { relx: 0.55, rely: 0.62, label: "engine_block" } }
  ];
  for (const { keys, aim } of rules) {
    if (keys.some((k) => name.includes(k))) return { ...aim };
  }
  return { relx: 0.50, rely: 0.55, label: "center_mass" };
}
| // ========= Feature generation (hookable) ========= | |
| // (Merged into externalFeatures above) | |
| // [Deleted] synthFeatures, hashString, mulberry32, pick | |
// ========= Detector hook =========
// (This block was merged into externalDetect above; the old declaration was removed.)
// Promisified wrapper around Canvas.toBlob that produces a JPEG blob.
// Rejects when toBlob is unsupported or produces no blob.
function canvasToBlob(canvas, quality = 0.88) {
  return new Promise((resolve, reject) => {
    if (!canvas.toBlob) {
      reject(new Error("Canvas.toBlob not supported"));
      return;
    }
    canvas.toBlob((blob) => {
      if (blob) {
        resolve(blob);
      } else {
        reject(new Error("Canvas toBlob failed"));
      }
    }, "image/jpeg", quality);
  });
}
/**
 * Run single-frame object detection against either the configured proxy
 * (multipart POST to /detect) or the HF Inference API directly (raw JPEG
 * body with bearer token).
 * @param {string} modelId HF model identifier
 * @param {HTMLCanvasElement} canvas frame to run detection on
 * @returns {Promise<Array>} normalized [{ bbox:[x,y,w,h], class, score }]
 * @throws {Error} on missing token, HTTP failure, or malformed response
 */
async function callHfObjectDetection(modelId, canvas) {
  const proxyBase = (API_CONFIG.PROXY_URL || "").trim();
  if (proxyBase) {
    // Proxy path: proxy is expected to return already-normalized detections.
    const blob = await canvasToBlob(canvas);
    const form = new FormData();
    form.append("model", modelId);
    form.append("image", blob, "frame.jpg");
    const resp = await fetch(`${proxyBase.replace(/\/$/, "")}/detect`, {
      method: "POST",
      body: form
    });
    if (!resp.ok) {
      let detail = `Proxy inference failed (${resp.status})`;
      try {
        const err = await resp.json();
        detail = err.detail || err.error || detail;
      } catch (_) { }
      throw new Error(detail);
    }
    const payload = await resp.json();
    if (!Array.isArray(payload)) throw new Error("Unexpected proxy response format.");
    return payload;
  }
  // Direct path: requires an HF token from config.js.
  const token = API_CONFIG.HF_TOKEN;
  if (!token) throw new Error("HF token missing (config.js).");
  const blob = await canvasToBlob(canvas);
  const base = (API_CONFIG.HF_INFERENCE_BASE || "https://router.huggingface.co/hf-inference/models").replace(/\/$/, "");
  const resp = await fetch(`${base}/${modelId}`, {
    method: "POST",
    headers: { Authorization: `Bearer ${token}` },
    body: blob
  });
  if (!resp.ok) {
    let detail = `HF inference failed (${resp.status})`;
    try {
      const err = await resp.json();
      detail = err.error || err.detail || detail;
    } catch (_) { }
    throw new Error(detail);
  }
  const payload = await resp.json();
  if (!Array.isArray(payload)) throw new Error("Unexpected HF response format.");
  // Normalize the {xmin,ymin,xmax,ymax} / {x,y,w,h} box variants to [x,y,w,h].
  return payload.map(p => {
    const b = p.box || p.bbox || p.bounding_box || {};
    const xmin = b.xmin ?? b.x ?? 0;
    const ymin = b.ymin ?? b.y ?? 0;
    // Fix: the old `b.xmax ?? (b.x + (b.w || 0)) ?? 0` produced NaN when b.x
    // was undefined, and its trailing `?? 0` was dead code (arithmetic never
    // yields null/undefined). Derive the max edge from the resolved min edge.
    const xmax = b.xmax ?? (xmin + (b.w ?? 0));
    const ymax = b.ymax ?? (ymin + (b.h ?? 0));
    return {
      bbox: [xmin, ymin, Math.max(1, xmax - xmin), Math.max(1, ymax - ymin)],
      class: p.label || p.class || "object",
      score: p.score ?? p.confidence ?? 0
    };
  });
}
// Run the in-browser COCO-SSD model on a frame/canvas and normalize its
// output; optionally pre-filters predictions to the mission's classes.
async function detectWithCoco(inputForModel, applyMissionFilter) {
  await ensureCocoDetector();
  if (!state.detector.model) {
    log("Detector model not available in this browser. Switch to External detector or use HF models.", "w");
    return [];
  }
  let preds = await state.detector.model.detect(inputForModel);
  if (applyMissionFilter) preds = filterPredsByMission(preds);
  // Keep confident hits only, capped at 14 tracks.
  const confident = preds
    .filter((p) => p.score >= 0.45)
    .slice(0, 14)
    .map((p) => ({ bbox: p.bbox, class: p.class, score: p.score }));
  if (confident.length === 0) {
    log("Detector returned no confident objects for this frame.", "w");
  }
  return confident;
}
// Poll (100 ms interval) until the backend first-frame detections array is
// present in state, or the timeout elapses. Returns the array, or null.
async function waitForBackendDetections(timeoutMs = 2000) {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    const dets = state.hf.firstFrameDetections;
    if (Array.isArray(dets)) return dets;
    await new Promise((resolve) => setTimeout(resolve, 100));
  }
  return null;
}
| async function runDetectOnFrame() { | |
| const w = state.frame.w, h = state.frame.h; | |
| const inputForModel = frameCanvas; // canvas contains the first frame | |
| const sel = getDetectorSelection(); | |
| const mode = sel.value; | |
| const kind = sel.kind; | |
| if (mode === "coco") { | |
| return await detectWithCoco(inputForModel, false); | |
| } | |
| if (mode === "external") { | |
| try { | |
| const res = await externalDetect({ canvas: frameCanvas, width: w, height: h }); | |
| if (Array.isArray(res)) return res; | |
| log("External detector returned invalid response.", "w"); | |
| return []; | |
| } catch (err) { | |
| log(`External detector failed: ${err.message}`, "w"); | |
| return []; | |
| } | |
| } | |
| if (kind === "segmentation") { | |
| // For segmentation, we don't have instant local inference | |
| // User needs to process full video via HF async endpoint | |
| log("Segmentation requires full video processing via HF backend"); | |
| return []; | |
| } | |
| if (kind === "drone") { | |
| const backendDets = await waitForBackendDetections(); | |
| if (Array.isArray(backendDets) && backendDets.length) { | |
| return backendDets.map(d => { | |
| const bbox = Array.isArray(d.bbox) ? d.bbox : [0, 0, 1, 1]; | |
| const x1 = bbox[0] || 0; | |
| const y1 = bbox[1] || 0; | |
| const x2 = bbox[2] || 0; | |
| const y2 = bbox[3] || 0; | |
| const depthRel = Number.isFinite(d.depth_rel) ? d.depth_rel : null; | |
| return { | |
| bbox: [x1, y1, Math.max(1, x2 - x1), Math.max(1, y2 - y1)], | |
| class: d.label || "drone", | |
| score: d.score ?? 0, | |
| depth_rel: depthRel // Visualization only, GPT handles distance | |
| }; | |
| }); | |
| } | |
| // Same for drone detection | |
| log("Drone detection requires full video processing via HF backend"); | |
| return []; | |
| } | |
| if (kind === "object") { | |
| // HF object detection models | |
| if (["hf_yolov8", "detr_resnet50", "grounding_dino"].includes(mode)) { | |
| const backendDets = await waitForBackendDetections(); | |
| if (Array.isArray(backendDets) && backendDets.length) { | |
| return backendDets.map(d => { | |
| const bbox = Array.isArray(d.bbox) ? d.bbox : [0, 0, 1, 1]; | |
| const x1 = bbox[0] || 0; | |
| const y1 = bbox[1] || 0; | |
| const x2 = bbox[2] || 0; | |
| const y2 = bbox[3] || 0; | |
| const depthRel = Number.isFinite(d.depth_rel) ? d.depth_rel : null; | |
| return { | |
| bbox: [x1, y1, Math.max(1, x2 - x1), Math.max(1, y2 - y1)], | |
| class: d.label || "object", | |
| score: d.score ?? 0, | |
| depth_rel: depthRel // Visualization only, GPT handles distance | |
| }; | |
| }); | |
| } | |
| // For first-frame detection, we can show a placeholder or skip | |
| // The actual detections come from the async endpoint | |
| log(`${mode} requires backend async processing`); | |
| return []; | |
| } else { | |
| // Fallback to COCO if unknown | |
| return await detectWithCoco(inputForModel, false); | |
| } | |
| } | |
| return []; | |
| } | |
| // ========= Render first frame ======== | |
| function drawFirstFrame() { | |
| const ctx = frameCanvas.getContext("2d"); | |
| const w = state.frame.w, h = state.frame.h; | |
| frameCanvas.width = w; frameCanvas.height = h; | |
| frameOverlay.width = w; frameOverlay.height = h; | |
| ctx.clearRect(0, 0, w, h); | |
| // Check if we have HF processed first frame (segmentation or object detection with overlays) | |
| if (state.hf.firstFrameUrl) { | |
| // HF backend will draw the processed frame via displayAsyncFirstFrame() | |
| // Don't draw dark background - let the processed image show through | |
| log("Waiting for HF processed first frame to display...", "t"); | |
| return; | |
| } | |
| // For local detection: show dark background, no original frame | |
| ctx.fillStyle = "#0b1026"; | |
| ctx.fillRect(0, 0, w, h); | |
| if (!state.frame.bitmap) { | |
| ctx.fillStyle = "rgba(255,255,255,.65)"; | |
| ctx.font = "16px " + getComputedStyle(document.body).fontFamily; | |
| ctx.fillText("No frame available", 18, 28); | |
| return; | |
| } | |
| // Original frame bitmap is NOT drawn for local detection - only processed results will be displayed | |
| // ctx.drawImage(state.frame.bitmap, 0, 0, w, h); | |
| } | |
| // ========= Agent cursor (optional, purely visual) ========= | |
| function ensureAgentCursorOverlay() { | |
| if ($("#agentCursor")) return; | |
| const el = document.createElement("div"); | |
| el.id = "agentCursor"; | |
| el.style.position = "fixed"; | |
| el.style.zIndex = "9999"; | |
| el.style.width = "14px"; | |
| el.style.height = "14px"; | |
| el.style.borderRadius = "999px"; | |
| el.style.pointerEvents = "none"; | |
| el.style.background = "radial-gradient(circle at 30% 30%, rgba(34,211,238,.95), rgba(124,58,237,.65))"; | |
| el.style.boxShadow = "0 0 18px rgba(34,211,238,.55), 0 0 46px rgba(124,58,237,.25)"; | |
| el.style.border = "1px solid rgba(255,255,255,.25)"; | |
| el.style.opacity = "0"; | |
| document.body.appendChild(el); | |
| } | |
| function setCursorVisible(v) { | |
| ensureAgentCursorOverlay(); | |
| const el = $("#agentCursor"); | |
| el.style.opacity = v ? "1" : "0"; | |
| state.ui.agentCursor.visible = v; | |
| } | |
| function moveCursorToRect(rect, mode = "glide") { | |
| state.ui.agentCursor.target = rect; | |
| state.ui.agentCursor.mode = mode; | |
| state.ui.agentCursor.t0 = now(); | |
| setCursorVisible(state.ui.cursorMode === "on"); | |
| } | |
| function tickAgentCursor() { | |
| const el = $("#agentCursor"); | |
| if (!el || state.ui.cursorMode !== "on" || !state.ui.agentCursor.visible) return; | |
| const c = state.ui.agentCursor; | |
| if (!c.target) return; | |
| const tx = c.target.left + c.target.width * 0.72; | |
| const ty = c.target.top + c.target.height * 0.50; | |
| // smooth spring | |
| const ease = 0.12; | |
| const dx = tx - (c.x * window.innerWidth); | |
| const dy = ty - (c.y * window.innerHeight); | |
| c.vx = (c.vx + dx * 0.0018) * 0.85; | |
| c.vy = (c.vy + dy * 0.0018) * 0.85; | |
| const px = (c.x * window.innerWidth) + c.vx * 18; | |
| const py = (c.y * window.innerHeight) + c.vy * 18; | |
| c.x = clamp(px / window.innerWidth, 0.02, 0.98); | |
| c.y = clamp(py / window.innerHeight, 0.02, 0.98); | |
| el.style.transform = `translate(${c.x * window.innerWidth}px, ${c.y * window.innerHeight}px)`; | |
| // hide after settle | |
| const settle = Math.hypot(dx, dy); | |
| if (settle < 6 && (now() - c.t0) > 650) { | |
| // keep visible but soften | |
| el.style.opacity = "0.75"; | |
| } | |
| } | |
| cursorMode.addEventListener("change", () => { | |
| state.ui.cursorMode = cursorMode.value; | |
| if (state.ui.cursorMode === "off") setCursorVisible(false); | |
| }); | |
  // ========= Reason pipeline (Tab 1) =========
  // Full first-frame perception pass: locks the UI, clears any previous
  // processed-video output, kicks off the HF backend pipeline in the
  // background, runs detection on the first frame, attaches features and HEL
  // metrics, then seeds Tab 2 tracks. The finally-block always unlocks the UI.
  btnReason.addEventListener("click", async () => {
    if (!state.videoLoaded) {
      log("No video loaded. Upload a video first.", "w");
      setStatus("warn", "READY · Upload a video");
      return;
    }
    // Prevent concurrent executions
    if (state.isReasoning) {
      log("Reason already in progress. Please wait for it to complete.", "w");
      return;
    }
    // Lock the Reason process (flag + disabled/greyed-out button)
    state.isReasoning = true;
    btnReason.disabled = true;
    btnReason.style.opacity = "0.5";
    btnReason.style.cursor = "not-allowed";
    // Show Cancel button
    btnCancelReason.style.display = "inline-block";
    // Reset previous processed video output before new run
    // (revoke the old blob URL to avoid leaking it)
    if (state.hf.processedUrl && state.hf.processedUrl.startsWith("blob:")) {
      try { URL.revokeObjectURL(state.hf.processedUrl); } catch (_) { }
    }
    state.hf.processedUrl = null;
    state.hf.processedBlob = null;
    state.useProcessedFeed = false;
    btnEngage.disabled = true;
    videoEngage.removeAttribute("src");
    videoEngage.load();
    // Clear previous detections before running new detection
    state.detections = [];
    state.selectedId = null;
    renderFrameTrackList();
    renderFrameOverlay();
    // renderSummary(); // Removed
    renderFeatures(null);
    renderTrade();
    setStatus("warn", "REASONING · Running perception pipeline");
    // Start mission-driven HF backend (planning → video detection) in parallel.
    // Intentionally NOT awaited — the local pipeline below proceeds while the
    // backend processes the full video in the background.
    startHfPipeline();
    log("Reason started: detection → features → HEL synthesis.", "t");
    // a little agent cursor flair
    if (state.ui.cursorMode === "on") {
      moveCursorToRect(btnReason.getBoundingClientRect());
      setTimeout(() => moveCursorToRect(frameCanvas.getBoundingClientRect()), 260);
      setTimeout(() => moveCursorToRect(frameTrackList.getBoundingClientRect()), 560);
      // setTimeout(() => moveCursorToRect(summaryTable.getBoundingClientRect()), 880);
    }
    try {
      // Mission objective is optional:
      // - If blank: run unbiased detection across all classes immediately (no server wait).
      // - If provided: still show immediate first-frame results, while HF computes mission focus in the background.
      const missionPromptRaw = (missionText?.value || "").trim();
      if (!missionPromptRaw) {
        state.hf.plan = null;
        state.hf.missionId = null;
        renderMissionContext();
        setHfStatus("processing (all objects, background)…");
      } else {
        // Mission objective will be used directly by the detector
        setHfStatus("processing (mission-focused, background)…");
      }
      await captureFirstFrame();
      drawFirstFrame();
      const dets = await runDetectOnFrame();
      // Wrap raw detections into full target records (T01, T02, ...) with
      // aimpoints and placeholders for features/HEL metrics.
      state.detections = dets.map((d, i) => {
        const id = `T${String(i + 1).padStart(2, "0")}`;
        const ap = defaultAimpoint(d.class);
        return {
          id,
          label: d.class,
          score: d.score,
          bbox: normBBox(d.bbox, state.frame.w, state.frame.h),
          aim: { ...ap }, // rel inside bbox
          features: null,
          baseRange_m: null,
          baseAreaFrac: null,
          baseDwell_s: null,
          reqP_kW: null,
          maxP_kW: null,
          pkill: null,
          // Depth visualization only
          depth_rel: Number.isFinite(d.depth_rel) ? d.depth_rel : null,
          // Bind GPT reasoning fields from backend
          gpt_distance_m: d.gpt_distance_m || null,
          gpt_direction: d.gpt_direction || null,
          gpt_description: d.gpt_description || null
        };
      });
      // range estimate calibration
      // [Deleted] calibrateRanges();
      // feature generation (external hook; may return null when uninstalled)
      const featureMap = await externalFeatures(state.detections, { width: state.frame.w, height: state.frame.h });
      if (featureMap) {
        state.detections.forEach(d => {
          const f = featureMap[d.id] || featureMap[d.label] || null;
          if (f) d.features = f;
        });
        log("Features populated from external hook.", "g");
      } else {
        // Fallback if no external features: empty object
        state.detections.forEach(d => d.features = {});
        log("No external features provided.", "t");
      }
      // If external features provide aimpoint label, align aimpoint marker
      state.detections.forEach(d => {
        if (d.features && d.features.aimpoint_label) {
          const apLabel = String(d.features.aimpoint_label);
          d.aim.label = apLabel;
          // keep rel location but slightly adjust by label type
          const ap = aimpointByLabel(apLabel);
          d.aim.relx = ap.relx;
          d.aim.rely = ap.rely;
        }
      });
      // compute HEL synthesis (now async)
      await recomputeHEL();
      // pick default selection
      state.selectedId = state.detections[0]?.id || null;
      renderFrameTrackList();
      renderFrameOverlay();
      // renderSummary(); // Removed
      renderFeatures(getSelected());
      renderTrade();
      state.hasReasoned = true;
      setStatus("good", "READY · Reason complete (you can Engage)");
      log("Reason complete.", "g");
      // Pre-seed tracks for Tab 2 so radar shows targets immediately
      seedTracksFromTab1();
      renderRadar();
      // Generate intel summary (async; intentionally fire-and-forget)
      computeIntelSummary();
    } catch (err) {
      setStatus("bad", "ERROR · Reason failed");
      log(`Reason failed: ${err.message}`, "e");
      console.error(err);
    } finally {
      // Always unlock the Reason process
      state.isReasoning = false;
      btnReason.disabled = false;
      btnReason.style.opacity = "1";
      btnReason.style.cursor = "pointer";
      // Hide Cancel button
      btnCancelReason.style.display = "none";
    }
  });
  // Cancel button handler — delegates to cancelReasoning() (defined elsewhere
  // in this file) to abort the in-flight Reason pipeline.
  btnCancelReason.addEventListener("click", () => {
    cancelReasoning();
  });
| btnRecompute.addEventListener("click", () => { | |
| if (!state.hasReasoned) return; | |
| recomputeHEL(); | |
| // renderSummary(); | |
| renderFrameOverlay(); | |
| renderTrade(); | |
| log("Recomputed HEL metrics using current knobs (no new detection).", "t"); | |
| }); | |
| btnClear.addEventListener("click", () => { | |
| state.detections = []; | |
| state.selectedId = null; | |
| state.hasReasoned = false; | |
| state.isReasoning = false; // Reset reasoning lock | |
| btnReason.disabled = false; // Re-enable button if it was locked | |
| btnReason.style.opacity = "1"; | |
| btnReason.style.cursor = "pointer"; | |
| btnCancelReason.style.display = "none"; // Hide Cancel button | |
| renderFrameTrackList(); | |
| renderFrameOverlay(); | |
| // renderSummary(); | |
| renderFeatures(null); | |
| renderTrade(); | |
| log("Cleared Tab 1 outputs.", "w"); | |
| setStatus("warn", state.videoLoaded ? "READY · Video loaded (run Reason)" : "STANDBY · No video loaded"); | |
| }); | |
| function aimpointByLabel(label) { | |
| const l = String(label || "").toLowerCase(); | |
| if (l.includes("engine") || l.includes("fuel")) return { relx: 0.64, rely: 0.58, label: label }; | |
| if (l.includes("wing")) return { relx: 0.42, rely: 0.52, label: label }; | |
| if (l.includes("nose") || l.includes("sensor")) return { relx: 0.28, rely: 0.48, label: label }; | |
| if (l.includes("rotor")) return { relx: 0.52, rely: 0.42, label: label }; | |
| return { relx: 0.50, rely: 0.55, label: label || "center_mass" }; | |
| } | |
| function normBBox(bbox, w, h) { | |
| const [x, y, bw, bh] = bbox; | |
| return { | |
| x: clamp(x, 0, w - 1), | |
| y: clamp(y, 0, h - 1), | |
| w: clamp(bw, 1, w), | |
| h: clamp(bh, 1, h) | |
| }; | |
| } | |
  // [Deleted] calibrateRanges
  // Recompute HEL metrics for every detection via the external HEL engine,
  // then refresh the headline stats (max/required power, margin) and the
  // simple engagement plan. On engine failure the error is logged to the
  // console and the UI keeps its previous numbers; refreshTradeTargets()
  // always runs afterward.
  async function recomputeHEL() {
    if (!state.detections.length) return;
    const knobs = getKnobs();
    // summaryStamp.textContent = "Computing...";
    try {
      // externalHEL resolves to { targets: {id -> per-target metrics}, system: {...} }.
      const result = await externalHEL(state.detections, knobs);
      const metrics = result.targets || {};
      const sys = result.system || {};
      // Copy per-target metrics onto the detection records (0 when missing).
      state.detections.forEach(d => {
        const r = metrics[d.id] || {};
        d.maxP_kW = r.maxP || 0;
        d.reqP_kW = r.reqP || 0;
        d.baseDwell_s = r.dwell || 0;
        d.pkill = r.pkill || 0;
      });
      // Update system headline stats
      mMaxP.textContent = sys.maxP ? `${sys.maxP} kW` : "—";
      mReqP.textContent = sys.reqP ? `${sys.reqP} kW` : "—";
      const margin = sys.margin || 0;
      mMargin.textContent = `${margin > 0 ? "+" : ""}${margin} kW`;
      // Green when there is power margin, red when over budget.
      mMargin.style.color = margin >= 0 ? "rgba(34,197,94,.95)" : "rgba(239,68,68,.95)";
      mMaxPSub.textContent = "Calculated by external HEL engine";
      // Simple ranking for plan: engage the highest-P(kill) target.
      const ranked = state.detections.slice().sort((a, b) => (b.pkill || 0) - (a.pkill || 0));
      if (ranked.length && ranked[0].pkill > 0) {
        mPlan.textContent = `${ranked[0].id} → Engage`;
        mPlanSub.textContent = "Highest P(kill) target";
      } else {
        mPlan.textContent = "—";
        mPlanSub.textContent = "No viable targets";
      }
    } catch (err) {
      console.error("HEL recompute failed", err);
    }
    // summaryStamp.textContent = new Date().toLocaleTimeString();
    // renderSummary();
    refreshTradeTargets();
  }
| function getSelected() { | |
| return state.detections.find(d => d.id === state.selectedId) || null; | |
| } | |
| // ========= Rendering: Object list, features, summary table ========= | |
| // ========= Track Cards & Interaction ========= | |
| function selectObject(id) { | |
| state.selectedId = id; | |
| // Highlight Card | |
| $$(".track-card").forEach(el => el.classList.remove("active")); | |
| const card = document.getElementById("card-" + id); | |
| if (card) { | |
| // card.scrollIntoView({ behavior: "smooth", block: "nearest" }); | |
| card.classList.add("active"); | |
| } | |
| // Highlight BBox (via Overlay) | |
| renderFrameOverlay(); | |
| // Highlight Radar uses state.selectedId, loops automatically | |
| } | |
| function renderFrameTrackList() { | |
| if (!frameTrackList || !trackCount) return; | |
| frameTrackList.innerHTML = ""; | |
| const dets = state.detections || []; | |
| trackCount.textContent = dets.length; | |
| if (dets.length === 0) { | |
| frameTrackList.innerHTML = '<div style="font-style:italic; color:var(--text-dim); text-align:center; margin-top:20px;">No objects tracked.</div>'; | |
| return; | |
| } | |
| dets.forEach((det, i) => { | |
| // ID: T01, T02... | |
| // Ensure ID exists | |
| const id = det.id || `T${String(i + 1).padStart(2, '0')}`; | |
| // Resolve Range/Bearing | |
| let rangeStr = "---"; | |
| let bearingStr = "---"; | |
| if (det.gpt_distance_m) { | |
| rangeStr = `${det.gpt_distance_m}m (GPT)`; | |
| } | |
| // No depth_est_m fallback - GPT is the sole source of distance | |
| if (det.gpt_direction) { | |
| bearingStr = det.gpt_direction; | |
| } | |
| const card = document.createElement("div"); | |
| card.className = "track-card"; | |
| if (state.selectedId === id) card.classList.add("active"); | |
| card.id = `card-${id}`; | |
| card.onclick = () => selectObject(id); | |
| const desc = det.gpt_description | |
| ? `<div class="track-card-body"><span class="gpt-text">${det.gpt_description}</span></div>` | |
| : ""; // No description, hide body | |
| const gptBadge = (det.gpt_distance_m || det.gpt_description) | |
| ? `<span class="gpt-badge">GPT</span>` | |
| : ""; | |
| card.innerHTML = ` | |
| <div class="track-card-header"> | |
| <span>${id} · ${det.label}</span> | |
| <span class="badgemini">${(det.score * 100).toFixed(0)}%</span> | |
| </div> | |
| <div class="track-card-meta"> | |
| RANGE: ${rangeStr} | BEARING: ${bearingStr} | |
| </div> | |
| ${desc} | |
| `; | |
| frameTrackList.appendChild(card); | |
| }); | |
| } | |
| function renderFeatures(det) { | |
| selId.textContent = det ? det.id : "—"; | |
| const tbody = featureTable.querySelector("tbody"); | |
| tbody.innerHTML = ""; | |
| if (!det) { | |
| tbody.innerHTML = `<tr><td class="k">—</td><td class="mini">No target selected</td></tr>`; | |
| return; | |
| } | |
| const feats = det.features || {}; | |
| const keys = Object.keys(feats); | |
| const show = keys.slice(0, 12); | |
| show.forEach(k => { | |
| const tr = document.createElement("tr"); | |
| tr.innerHTML = `<td class="k">${escapeHtml(k)}</td><td>${escapeHtml(String(feats[k]))}</td>`; | |
| tbody.appendChild(tr); | |
| }); | |
| if (show.length < 10) { | |
| for (let i = show.length; i < 10; i++) { | |
| const tr = document.createElement("tr"); | |
| tr.innerHTML = `<td class="k">—</td><td class="mini">awaiting additional expert outputs</td>`; | |
| tbody.appendChild(tr); | |
| } | |
| } | |
| } | |
| // renderSummary removed | |
| function escapeHtml(s) { | |
| return s.replace(/[&<>"']/g, m => ({ "&": "&", "<": "<", ">": ">", '"': """, "'": "'" }[m])); | |
| } | |
  // ========= Frame overlay rendering =========
  // Draw detection boxes, glow, and aimpoint markers on the Tab 1 overlay
  // canvas, then (re)install the click-to-select handler on it.
  function renderFrameOverlay() {
    const ctx = frameOverlay.getContext("2d");
    const w = frameOverlay.width, h = frameOverlay.height;
    ctx.clearRect(0, 0, w, h);
    if (!state.detections.length) return;
    // subtle scanning effect (vertical bar oscillating across the frame)
    const t = now() / 1000;
    const scanX = (Math.sin(t * 0.65) * 0.5 + 0.5) * w;
    ctx.fillStyle = "rgba(34,211,238,.06)";
    ctx.fillRect(scanX - 8, 0, 16, h);
    state.detections.forEach((d, idx) => {
      const isSel = d.id === state.selectedId;
      const b = d.bbox;
      const pad = 2;
      // box — cyan for selected/mission-focus targets, violet otherwise
      ctx.lineWidth = isSel ? 3 : 2;
      const isFocus = isMissionFocusLabel(d.label);
      ctx.strokeStyle = isSel ? "rgba(34,211,238,.95)" : (isFocus ? "rgba(34,211,238,.70)" : "rgba(124,58,237,.55)");
      ctx.shadowColor = isSel ? "rgba(34,211,238,.40)" : "rgba(124,58,237,.25)";
      ctx.shadowBlur = isSel ? 18 : 10;
      roundRect(ctx, b.x, b.y, b.w, b.h, 10, false, true);
      // pseudo mask glow (for segmentation-like effect)
      ctx.shadowBlur = 0;
      const g = ctx.createRadialGradient(b.x + b.w * 0.5, b.y + b.h * 0.5, 10, b.x + b.w * 0.5, b.y + b.h * 0.5, Math.max(b.w, b.h) * 0.75);
      g.addColorStop(0, isSel ? "rgba(34,211,238,.16)" : "rgba(124,58,237,.10)");
      g.addColorStop(1, "rgba(0,0,0,0)");
      ctx.fillStyle = g;
      ctx.fillRect(b.x, b.y, b.w, b.h);
      // aimpoint marker (red circle + crosshair) at the bbox-relative aim point
      const ax = b.x + b.w * d.aim.relx;
      const ay = b.y + b.h * d.aim.rely;
      drawAimpoint(ctx, ax, ay, isSel);
      // no text overlay on first-frame view
    });
    // click-to-select on canvas (manual aimpoint override can be added later)
    frameOverlay.style.pointerEvents = "auto";
    frameOverlay.onclick = (ev) => {
      // Map the click from CSS pixels to canvas pixels.
      const rect = frameOverlay.getBoundingClientRect();
      const sx = frameOverlay.width / rect.width;
      const sy = frameOverlay.height / rect.height;
      const x = (ev.clientX - rect.left) * sx;
      const y = (ev.clientY - rect.top) * sy;
      // Pick the smallest bbox containing the click (innermost target wins).
      const hit = state.detections
        .map(d => ({ d, inside: x >= d.bbox.x && x <= d.bbox.x + d.bbox.w && y >= d.bbox.y && y <= d.bbox.y + d.bbox.h }))
        .filter(o => o.inside)
        .sort((a, b) => (a.d.bbox.w * a.d.bbox.h) - (b.d.bbox.w * b.d.bbox.h))[0];
      if (hit) {
        selectObject(hit.d.id);
        renderFeatures(hit.d);
        renderTrade();
      }
    };
  }
| function roundRect(ctx, x, y, w, h, r, fill, stroke) { | |
| if (w < 2 * r) r = w / 2; | |
| if (h < 2 * r) r = h / 2; | |
| ctx.beginPath(); | |
| ctx.moveTo(x + r, y); | |
| ctx.arcTo(x + w, y, x + w, y + h, r); | |
| ctx.arcTo(x + w, y + h, x, y + h, r); | |
| ctx.arcTo(x, y + h, x, y, r); | |
| ctx.arcTo(x, y, x + w, y, r); | |
| ctx.closePath(); | |
| if (fill) ctx.fill(); | |
| if (stroke) ctx.stroke(); | |
| } | |
| function drawAimpoint(ctx, x, y, isSel) { | |
| ctx.save(); | |
| ctx.shadowBlur = isSel ? 18 : 12; | |
| ctx.shadowColor = "rgba(239,68,68,.45)"; | |
| ctx.strokeStyle = "rgba(239,68,68,.95)"; | |
| ctx.lineWidth = isSel ? 3 : 2; | |
| ctx.beginPath(); | |
| ctx.arc(x, y, isSel ? 10 : 9, 0, Math.PI * 2); | |
| ctx.stroke(); | |
| ctx.shadowBlur = 0; | |
| ctx.strokeStyle = "rgba(255,255,255,.70)"; | |
| ctx.lineWidth = 1.5; | |
| ctx.beginPath(); | |
| ctx.moveTo(x - 14, y); ctx.lineTo(x - 4, y); | |
| ctx.moveTo(x + 4, y); ctx.lineTo(x + 14, y); | |
| ctx.moveTo(x, y - 14); ctx.lineTo(x, y - 4); | |
| ctx.moveTo(x, y + 4); ctx.lineTo(x, y + 14); | |
| ctx.stroke(); | |
| ctx.fillStyle = "rgba(239,68,68,.95)"; | |
| ctx.beginPath(); | |
| ctx.arc(x, y, 2.5, 0, Math.PI * 2); | |
| ctx.fill(); | |
| ctx.restore(); | |
| } | |
  // ========= Engage tab: tracking + dynamic dwell =========
  // Start the closed-loop engagement: play the processed video, turn on the
  // tracker and beam, seed tracks if none exist, then start the render loop.
  btnEngage.addEventListener("click", async () => {
    if (!state.videoLoaded) { log("No video loaded for Engage.", "w"); return; }
    if (!state.hf.processedUrl) { log("Processed video not ready yet. Wait for completion.", "w"); return; }
    if (!state.hasReasoned) { log("Run Reason first to initialize baseline dwell and aimpoints.", "w"); return; }
    if (videoEngage.paused) {
      try {
        await videoEngage.play();
      } catch (err) {
        // Browser autoplay policies can reject play() without a user gesture.
        log("Video play failed (browser policy). Click inside the page then try Engage again.", "w");
        return;
      }
    }
    state.tracker.running = true;
    state.tracker.beamOn = true;
    state.tracker.lastDetTime = 0;
    state.tracker.lastFrameTime = now();
    state.tracker.frameCount = 0;
    engageNote.textContent = "Running";
    chipBeam.textContent = "BEAM:ON";
    log("Engage started: tracking enabled, dwell accumulation active.", "g");
    // Initialize tracks:
    // - Prefer Tab 1 detections if available (same first-frame context)
    // - Otherwise, seed from the current video frame (actual detector output)
    if (!state.tracker.tracks.length) {
      if (state.detections && state.detections.length) {
        seedTracksFromTab1();
      } else {
        const dets = await detectOnVideoFrame();
        if (dets && dets.length) {
          seedTracksFromDetections(dets);
          log(`Seeded ${state.tracker.tracks.length} tracks from video-frame detections.`, "t");
        } else {
          log("No detections available to seed tracks yet. Tracks will appear as detections arrive.", "w");
        }
      }
    }
    resizeOverlays();
    startLoop();
  });
| btnPause.addEventListener("click", () => { | |
| if (!state.videoLoaded) return; | |
| if (!videoEngage.paused) { | |
| videoEngage.pause(); | |
| log("Video paused.", "t"); | |
| } | |
| state.tracker.beamOn = false; | |
| chipBeam.textContent = "BEAM:OFF"; | |
| }); | |
| btnReset.addEventListener("click", async () => { | |
| if (!state.videoLoaded) return; | |
| videoEngage.pause(); | |
| await seekTo(videoEngage, 0); | |
| state.tracker.tracks.forEach(t => { t.dwellAccum = 0; t.killed = false; t.state = "TRACK"; }); | |
| state.tracker.selectedTrackId = null; | |
| state.tracker.beamOn = false; | |
| state.tracker.running = false; | |
| dwellBar.style.width = "0%"; | |
| dwellText.textContent = "—"; | |
| engageNote.textContent = "paused"; | |
| chipBeam.textContent = "BEAM:OFF"; | |
| log("Engage reset: video rewound, dwell cleared.", "w"); | |
| renderRadar(); | |
| renderTrackCards(); | |
| renderEngageOverlay(); | |
| }); | |
| // Toggle sidebar (radar + live tracks) for fullscreen video | |
| btnToggleSidebar.addEventListener("click", () => { | |
| const engageGrid = $(".engage-grid"); | |
| const isCollapsed = engageGrid.classList.contains("sidebar-collapsed"); | |
| if (isCollapsed) { | |
| engageGrid.classList.remove("sidebar-collapsed"); | |
| btnToggleSidebar.textContent = "◀ Hide Sidebar"; | |
| log("Sidebar expanded.", "t"); | |
| } else { | |
| engageGrid.classList.add("sidebar-collapsed"); | |
| btnToggleSidebar.textContent = "▶ Show Sidebar"; | |
| log("Sidebar collapsed - video fullscreen.", "t"); | |
| } | |
| }); | |
| function seedTracksFromTab1() { | |
| state.tracker.tracks = state.detections.map(d => { | |
| const t = { | |
| id: d.id, | |
| label: d.label, | |
| bbox: { ...d.bbox }, | |
| score: d.score, | |
| aimRel: { relx: d.aim.relx, rely: d.aim.rely, label: d.aim.label }, | |
| baseAreaFrac: d.baseAreaFrac || ((d.bbox.w * d.bbox.h) / (state.frame.w * state.frame.h)), | |
| baseRange_m: d.baseRange_m || +rangeBase.value, | |
| baseDwell_s: d.baseDwell_s || 4.0, | |
| reqP_kW: d.reqP_kW || 35, | |
| // Depth visualization (keep for depth view toggle) | |
| depth_rel: Number.isFinite(d.depth_rel) ? d.depth_rel : null, | |
| // GPT properties - the sole source of distance estimation | |
| gpt_distance_m: d.gpt_distance_m || null, | |
| gpt_direction: d.gpt_direction || null, | |
| gpt_description: d.gpt_description || null, | |
| // Track state | |
| lastSeen: now(), | |
| vx: 0, vy: 0, | |
| dwellAccum: 0, | |
| killed: false, | |
| state: "TRACK", // TRACK -> SETTLE -> FIRE -> ASSESS -> KILL | |
| assessT: 0 | |
| }; | |
| return t; | |
| }); | |
| state.tracker.nextId = state.detections.length + 1; | |
| log(`Seeded ${state.tracker.tracks.length} tracks from Tab 1 detections.`, "t"); | |
| } | |
| function seedTracksFromDetections(dets) { | |
| const w = videoEngage.videoWidth || state.frame.w; | |
| const h = videoEngage.videoHeight || state.frame.h; | |
| state.tracker.tracks = dets.slice(0, 12).map((d, i) => { | |
| const id = `T${String(i + 1).padStart(2, "0")}`; | |
| const ap = defaultAimpoint(d.class); | |
| const bb = normBBox(d.bbox, w, h); | |
| return { | |
| id, | |
| label: d.class, | |
| bbox: { ...bb }, | |
| score: d.score, | |
| aimRel: { relx: ap.relx, rely: ap.rely, label: ap.label }, | |
| baseAreaFrac: (bb.w * bb.h) / (w * h), | |
| baseRange_m: +rangeBase.value, | |
| baseDwell_s: 5.0, | |
| reqP_kW: 40, | |
| // Depth visualization only, GPT handles distance | |
| depth_rel: Number.isFinite(d.depth_rel) ? d.depth_rel : null, | |
| // GPT properties | |
| gpt_distance_m: d.gpt_distance_m || null, | |
| gpt_direction: d.gpt_direction || null, | |
| gpt_description: d.gpt_description || null, | |
| // Track state | |
| lastSeen: now(), | |
| vx: 0, vy: 0, | |
| dwellAccum: 0, | |
| killed: false, | |
| state: "TRACK", | |
| assessT: 0 | |
| }; | |
| }); | |
| state.tracker.nextId = state.tracker.tracks.length + 1; | |
| } | |
| function iou(a, b) { | |
| const ax2 = a.x + a.w, ay2 = a.y + a.h; | |
| const bx2 = b.x + b.w, by2 = b.y + b.h; | |
| const ix1 = Math.max(a.x, b.x), iy1 = Math.max(a.y, b.y); | |
| const ix2 = Math.min(ax2, bx2), iy2 = Math.min(ay2, by2); | |
| const iw = Math.max(0, ix2 - ix1), ih = Math.max(0, iy2 - iy1); | |
| const inter = iw * ih; | |
| const ua = a.w * a.h + b.w * b.h - inter; | |
| return ua <= 0 ? 0 : inter / ua; | |
| } | |
| async function externalTrack(videoEl) { | |
| // Hook for user tracking: should return predictions similar to detector output | |
| if (typeof window.__HEL_TRACK__ === "function") { | |
| return await window.__HEL_TRACK__(videoEl); | |
| } | |
| throw new Error("External tracker hook is not installed."); | |
| } | |
| async function detectOnVideoFrame() { | |
| const mode = state.detector.mode; | |
| if (mode === "external") { | |
| try { return await externalTrack(videoEngage); } | |
| catch (e) { log(`External tracker failed: ${e.message}`, "w"); return []; } | |
| } | |
| if (state.detector.cocoBlocked) { | |
| return []; | |
| } | |
| if (isHfMode(mode)) { | |
| // In HF mode, we DO NOT fall back to local COCO for tracking. | |
| // Why? Because local COCO will overwrite high-quality labels (e.g. "drone") | |
| // with generic ones ("airplane"), breaking the user experience. | |
| // Instead, we rely on the tracker's predictive coasting (predictTracks) | |
| // or wait for sparse updates if we implement backend streaming later. | |
| // For now, return empty to prevent label pollution. | |
| // The tracker will maintain existing tracks via coasting/prediction | |
| // until they time out or we get a new "Reason" update. | |
| return []; | |
| } | |
| if (mode === "coco") { | |
| await ensureCocoDetector(); | |
| if (state.detector.model) { | |
| try { | |
| let preds = await state.detector.model.detect(videoEngage); | |
| return preds | |
| .filter(p => p.score >= 0.45) | |
| .slice(0, 18) | |
| .map(p => ({ bbox: p.bbox, class: p.class, score: p.score })); | |
| } catch (err) { | |
| if (err && err.name === "SecurityError") { | |
| state.detector.cocoBlocked = true; | |
| log("Local COCO tracking blocked by tainted video. Use External tracker or RAW feed.", "w"); | |
| return []; | |
| } | |
| throw err; | |
| } | |
| } | |
| return []; | |
| } | |
| return []; | |
| } | |
// Associates fresh detections with existing tracks (greedy best-IoU match),
// smooths matched tracks, spawns new tracks for unmatched detections, and
// prunes tracks that have not been seen recently.
// @param dets  detector output: [{ bbox, class, score, depth_rel? }]
// @param dtSec elapsed seconds since the previous detection pass (velocity basis)
function matchAndUpdateTracks(dets, dtSec) {
  // Convert detections to bbox in video coordinates
  const w = videoEngage.videoWidth || state.frame.w;
  const h = videoEngage.videoHeight || state.frame.h;
  const detObjs = dets.map(d => ({
    bbox: normBBox(d.bbox, w, h),
    label: d.class,
    score: d.score,
    depth_rel: Number.isFinite(d.depth_rel) ? d.depth_rel : null // Visualization only
  }));
  // Greedy association: each track claims its best-overlap, not-yet-used detection.
  const tracks = state.tracker.tracks;
  const used = new Set();
  for (const tr of tracks) {
    let best = null;
    let bestI = 0.0;
    let bestIdx = -1;
    for (let i = 0; i < detObjs.length; i++) {
      if (used.has(i)) continue;
      const IoU = iou(tr.bbox, detObjs[i].bbox);
      if (IoU > bestI) {
        bestI = IoU;
        best = detObjs[i];
        bestIdx = i;
      }
    }
    // Strict matching threshold (IoU >= 0.25) to avoid identity swaps
    if (best && bestI >= 0.25) {
      used.add(bestIdx);
      // Velocity with Exponential Moving Average (EMA) for smoothing;
      // velocity is measured between box centers, in px/sec.
      const cx0 = tr.bbox.x + tr.bbox.w * 0.5;
      const cy0 = tr.bbox.y + tr.bbox.h * 0.5;
      const cx1 = best.bbox.x + best.bbox.w * 0.5;
      const cy1 = best.bbox.y + best.bbox.h * 0.5;
      const rawVx = (cx1 - cx0) / Math.max(1e-3, dtSec);
      const rawVy = (cy1 - cy0) / Math.max(1e-3, dtSec);
      // Alpha of 0.3 means 30% new value, 70% history
      tr.vx = tr.vx * 0.7 + rawVx * 0.3;
      tr.vy = tr.vy * 0.7 + rawVy * 0.3;
      // Smooth bbox update (position follows faster than size)
      tr.bbox.x = lerp(tr.bbox.x, best.bbox.x, 0.7);
      tr.bbox.y = lerp(tr.bbox.y, best.bbox.y, 0.7);
      tr.bbox.w = lerp(tr.bbox.w, best.bbox.w, 0.6);
      tr.bbox.h = lerp(tr.bbox.h, best.bbox.h, 0.6);
      // Logic: Only update label if the new detection is highly confident
      // AND the current track doesn't have a "premium" label (like 'drone').
      // This prevents COCO's 'airplane' from overwriting a custom 'drone' label.
      const protectedLabels = ["drone", "uav", "missile"];
      const isProtected = protectedLabels.some(l => (tr.label || "").toLowerCase().includes(l));
      if (!isProtected || (best.label && protectedLabels.some(l => best.label.toLowerCase().includes(l)))) {
        tr.label = best.label || tr.label;
      }
      tr.score = best.score || tr.score;
      // Update depth visualization (not used for distance estimation)
      if (Number.isFinite(best.depth_rel)) {
        tr.depth_rel = best.depth_rel;
      }
      tr.lastSeen = now();
    } else {
      // Decay velocity if not seen to prevent "coasting" into infinity
      tr.vx *= 0.9;
      tr.vy *= 0.9;
    }
  }
  // Add unmatched detections as new tracks.
  // Capped at 50 live tracks to prevent memory leaks / UI lag.
  if (tracks.length < 50) {
    for (let i = 0; i < detObjs.length; i++) {
      if (used.has(i)) continue;
      // Create new track only if big enough (avoid clutter from tiny boxes)
      const a = detObjs[i].bbox.w * detObjs[i].bbox.h;
      if (a < (w * h) * 0.0025) continue;
      const newId = `T${String(state.tracker.nextId++).padStart(2, "0")}`;
      const ap = defaultAimpoint(detObjs[i].label);
      tracks.push({
        id: newId,
        label: detObjs[i].label,
        bbox: { ...detObjs[i].bbox },
        score: detObjs[i].score,
        aimRel: { relx: ap.relx, rely: ap.rely, label: ap.label },
        baseAreaFrac: (detObjs[i].bbox.w * detObjs[i].bbox.h) / (w * h),
        baseRange_m: +rangeBase.value,
        baseDwell_s: 5.5,
        reqP_kW: 42,
        // Depth visualization only, GPT handles distance
        depth_rel: detObjs[i].depth_rel,
        // GPT properties (will be populated by updateTracksWithGPT)
        gpt_distance_m: null,
        gpt_direction: null,
        gpt_description: null,
        // Track state
        lastSeen: now(),
        vx: 0, vy: 0,
        dwellAccum: 0,
        killed: false,
        state: "TRACK",
        assessT: 0
      });
      log(`New track created: ${newId} (${detObjs[i].label})`, "t");
    }
  }
  // Prune tracks unseen for >1.5s; killed tracks are kept for display.
  const tNow = now();
  state.tracker.tracks = tracks.filter(tr => (tNow - tr.lastSeen) < 1500 || tr.killed);
}
// Coast unmatched tracks forward along their smoothed velocity so boxes keep
// moving between detector passes. Positions are clamped to the frame bounds.
function predictTracks(dtSec) {
  const frameW = videoEngage.videoWidth || state.frame.w;
  const frameH = videoEngage.videoHeight || state.frame.h;
  const step = dtSec * 0.12; // heavily damped so coasting stays conservative
  for (const tr of state.tracker.tracks) {
    if (tr.killed) continue;
    tr.bbox.x = clamp(tr.bbox.x + tr.vx * step, 0, frameW - 1);
    tr.bbox.y = clamp(tr.bbox.y + tr.vy * step, 0, frameH - 1);
  }
}
// True when the item carries a finite relative-depth value.
// Used only to gate the depth VIEW toggle, never as a distance source.
function hasValidDepth(item) {
  if (!item) return item; // preserve falsy passthrough of the short-circuit form
  return Number.isFinite(item.depth_rel);
}
// Resolve the range to display for a detection/track.
// GPT is the ONLY authoritative source of distance; anything else falls back
// to the caller-supplied (area-based) estimate.
// Uses Number.isFinite rather than a truthy test so a legitimate 0 m reading
// is not discarded and non-numeric backend junk is rejected (matches the
// check used by renderRadar).
function getDisplayRange(item, fallbackRange) {
  if (item && Number.isFinite(item.gpt_distance_m)) {
    return { range: item.gpt_distance_m, source: "GPT" };
  }
  return { range: fallbackRange, source: "area" };
}
// Relative depth (for UI badges only); null when unavailable or non-finite.
function getDisplayRel(item) {
  const rel = item ? item.depth_rel : undefined;
  return Number.isFinite(rel) ? rel : null;
}
// [DELETED] "calculated" depth removed per user request.
// Constant placeholder range, used only until GPT returns a real distance.
function rangeFromArea(track) {
  const FALLBACK_RANGE_M = 1000;
  return FALLBACK_RANGE_M;
}
// Sends a JPEG snapshot of the current engage-video frame plus the live
// track list to the backend /reason/track endpoint, then merges GPT's
// per-track distance and description back into state. Fire-and-forget:
// failures are logged and otherwise ignored.
async function updateTracksWithGPT() {
  const activeTracks = state.tracker.tracks.filter(t => !t.killed);
  if (!activeTracks.length) return;
  // Take a snapshot of the current video frame at intrinsic resolution.
  const c = document.createElement("canvas");
  c.width = videoEngage.videoWidth || state.frame.w;
  c.height = videoEngage.videoHeight || state.frame.h;
  const ctx = c.getContext("2d");
  ctx.drawImage(videoEngage, 0, 0, c.width, c.height);
  // JPEG at quality 0.85 keeps the upload small.
  const blob = await new Promise(r => c.toBlob(r, 'image/jpeg', 0.85));
  // Prepare tracks payload.
  // Backend expects: [{"id":..., "bbox":[x,y,w,h], "label":...}]
  const tracksPayload = activeTracks.map(t => ({
    id: t.id,
    bbox: [Math.round(t.bbox.x), Math.round(t.bbox.y), Math.round(t.bbox.w), Math.round(t.bbox.h)],
    label: t.label
  }));
  const fd = new FormData();
  fd.append("frame", blob, "scan.jpg");
  fd.append("tracks", JSON.stringify(tracksPayload));
  log(`Requesting GPT reasoning for ${activeTracks.length} tracks...`, "t");
  try {
    const res = await fetch(`${state.hf.baseUrl}/reason/track`, {
      method: "POST",
      body: fd
    });
    if (res.ok) {
      const data = await res.json(); // { "T01": { "distance_m": 450, "description": "..." }, ... }
      let updatedCount = 0;
      // Merge into state. Tracks may have been pruned while we awaited the
      // response, so look each id up again rather than reusing activeTracks.
      Object.keys(data).forEach(tid => {
        const info = data[tid];
        const track = state.tracker.tracks.find(t => t.id === tid);
        if (track) {
          if (info.distance_m) track.gpt_distance_m = info.distance_m;
          if (info.description) track.gpt_description = info.description;
          updatedCount++;
        }
      });
      log(`GPT updated ${updatedCount} tracks.`, "g");
      renderTrackCards(); // Force refresh UI
    } else {
      console.warn("GPT reason failed", res.status);
    }
  } catch (e) {
    console.error("GPT reason error", e);
  }
}
// Resolve the display range for a live track.
// GPT is the ONLY source of distance estimation; there is deliberately no
// geometric fallback here — callers decide what to do with a null range.
// Uses Number.isFinite rather than a truthy test so a legitimate 0 m reading
// is not dropped and non-numeric backend junk is rejected (matches the check
// used by renderRadar).
function getTrackDisplayRange(track) {
  if (Number.isFinite(track.gpt_distance_m)) {
    return { range: track.gpt_distance_m, source: "GPT" };
  }
  return { range: null, source: null };
}
// Required beam dwell (seconds) for a track at the given range, scaled from
// the track's Tab-1 baseline by deliverable power at the target.
function dwellFromRange(track, range_m) {
  const { Ptar } = maxPowerAtTarget(range_m);
  const refPower_kW = track.reqP_kW || 40;
  const refDwell_s = track.baseDwell_s || 5;
  return requiredDwell(range_m, refPower_kW, Ptar, refDwell_s);
}
// Auto-policy target selection: pick the live track that maximizes power
// margin per second of required dwell, i.e. (maxP - reqP) / dwell.
// Returns the winning track id, or null when no live tracks exist.
function chooseTargetAuto() {
  let best = null;
  for (const tr of state.tracker.tracks) {
    if (tr.killed) continue;
    const range = getTrackDisplayRange(tr).range || 1000;
    const margin = maxPowerAtTarget(range).Ptar - (tr.reqP_kW || 0);
    const dwell = dwellFromRange(tr, range);
    const score = margin / Math.max(0.8, dwell); // dwell floored to avoid blowups
    if (!best || score > best.score) {
      best = { id: tr.id, score, margin, dwell };
    }
  }
  return best ? best.id : null;
}
// Drives the engagement state machine for the selected target and refreshes
// the dwell-progress UI.
// State flow: TRACK -> SETTLE -> FIRE -> ASSESS -> KILL.
// KILL only fires when ENABLE_KILL is set; otherwise ASSESS re-arms forever.
// Fixes vs. original: removed a duplicated `if (!tr || tr.killed) return;`
// guard, and the display range is now computed once and reused for the UI
// label instead of calling getTrackDisplayRange a second time.
function updateEngagementState(dtSec) {
  const assessS = +assessWindow.value;
  let targetId = state.tracker.selectedTrackId;
  if (policyMode.value === "auto") {
    targetId = chooseTargetAuto();
    state.tracker.selectedTrackId = targetId;
  }
  if (!state.tracker.beamOn || !targetId) return;
  const tr = state.tracker.tracks.find(t => t.id === targetId);
  if (!tr || tr.killed) return;
  const disp = getTrackDisplayRange(tr);
  const range = disp.range || 1000; // fallback range while GPT has not replied
  const reqD = dwellFromRange(tr, range);
  // State machine
  if (tr.state === "TRACK") {
    tr.state = "SETTLE";
    tr.assessT = 0;
  }
  if (tr.state === "SETTLE") {
    tr.assessT += dtSec;
    if (tr.assessT >= 0.25) { tr.state = "FIRE"; tr.assessT = 0; }
  } else if (tr.state === "FIRE") {
    tr.dwellAccum += dtSec;
    if (tr.dwellAccum >= reqD) {
      tr.state = "ASSESS";
      tr.assessT = 0;
    }
  } else if (tr.state === "ASSESS") {
    tr.assessT += dtSec;
    if (tr.assessT >= assessS) {
      if (ENABLE_KILL) {
        tr.killed = true;
        tr.state = "KILL";
        state.tracker.beamOn = false; // stop beam after kill to make it dramatic
        chipBeam.textContent = "BEAM:OFF";
        log(`Target ${tr.id} assessed neutralized.`, "g");
      } else {
        // Kill disabled: remain in ASSESS and restart the assessment window.
        tr.assessT = 0;
      }
    }
  }
  // Dwell-progress bar + status line (reuses `disp` computed above).
  const pct = clamp(tr.dwellAccum / Math.max(0.001, reqD), 0, 1) * 100;
  const rangeLabel = Number.isFinite(disp.range)
    ? `${Math.round(disp.range)}m (${disp.source})`
    : "—";
  dwellBar.style.width = `${pct.toFixed(0)}%`;
  dwellText.textContent = `${tr.id} · ${tr.state} · ${(tr.dwellAccum).toFixed(1)}s / ${reqD.toFixed(1)}s · R=${rangeLabel}`;
}
// Hit-test live tracks at (x, y) in intrinsic video coordinates.
// Returns the smallest-area hit (innermost box wins), or null when nothing
// is under the point.
function pickTrackAt(x, y) {
  const contains = (t) =>
    x >= t.bbox.x && x <= t.bbox.x + t.bbox.w &&
    y >= t.bbox.y && y <= t.bbox.y + t.bbox.h;
  const candidates = state.tracker.tracks.filter(t => !t.killed && contains(t));
  candidates.sort((a, b) => (a.bbox.w * a.bbox.h) - (b.bbox.w * b.bbox.h));
  return candidates[0] || null;
}
// Main loop
let rafId = null;
// Starts (or restarts) the Tab-2 engagement loop. Each tick:
//   1. advances the agent-cursor animation,
//   2. runs the detector at the configured rate (capped at 5 Hz in HF mode)
//      or coasts tracks via prediction between detection frames,
//   3. advances the engagement state machine and redraws Tab-2 UI,
//   4. every REASON_INTERVAL frames, fires a GPT reasoning request.
// NOTE(review): tick is async and re-scheduled via requestAnimationFrame
// before awaiting the detector, so a slow detectOnVideoFrame() could overlap
// the next tick; the detection-period gate makes this rare but does not
// strictly serialize it — confirm if detector re-entrancy matters.
async function startLoop() {
  if (rafId) cancelAnimationFrame(rafId);
  async function tick() {
    rafId = requestAnimationFrame(tick);
    tickAgentCursor();
    if (!state.tracker.running) return;
    const tNow = now();
    const dtSec = (tNow - state.tracker.lastFrameTime) / 1000;
    state.tracker.lastFrameTime = tNow;
    // Detection schedule: user-selected rate, capped at 5 Hz in HF fallback
    // mode to save resources.
    let hz = +detHz.value;
    if (isHfMode(state.detector.mode)) {
      hz = Math.min(hz, 5);
    }
    const period = 1000 / Math.max(1, hz);
    if ((tNow - state.tracker.lastDetTime) >= period) {
      state.tracker.lastDetTime = tNow;
      const dets = await detectOnVideoFrame();
      matchAndUpdateTracks(dets, Math.max(0.016, dtSec)); // dt floored to ~1 frame
    } else {
      predictTracks(Math.max(0.016, dtSec));
    }
    updateEngagementState(Math.max(0.016, dtSec));
    renderEngageOverlay();
    renderRadar();
    renderTrackCards();
    chipTracks.textContent = `TRACKS:${state.tracker.tracks.filter(t => !t.killed).length}`;
    liveStamp.textContent = new Date().toLocaleTimeString();
    // GPT update loop: fire-and-forget; errors are logged, not rethrown.
    state.tracker.frameCount++;
    if (state.tracker.frameCount % REASON_INTERVAL === 0) {
      updateTracksWithGPT().catch(e => console.error(e));
    }
  }
  tick();
}
// Draws the Tab-2 engagement overlay: track boxes, aimpoints, dwell-progress
// rings, engagement-state strips, and the beam line to the selected target.
// Coordinates are video-intrinsic; a wrapper installed later in this file
// reassigns renderEngageOverlay to set the ctx transform that maps intrinsic
// coordinates onto the scaled overlay canvas before delegating here.
// Fixes vs. original: removed unused locals (`tNow`, `mp`, `margin`) and the
// empty killed-marker else branch; drawing output is unchanged.
function renderEngageOverlay() {
  if (engageOverlay.style.display === "none") {
    return;
  }
  const ctx = engageOverlay.getContext("2d");
  const w = engageOverlay.width, h = engageOverlay.height;
  ctx.clearRect(0, 0, w, h);
  if (!state.videoLoaded) return;
  // Dark backdrop instead of the raw video frame (only overlays are shown).
  ctx.fillStyle = "#0b1026";
  ctx.fillRect(0, 0, w, h);
  state.tracker.tracks.forEach(tr => {
    const isSel = tr.id === state.tracker.selectedTrackId;
    const killed = tr.killed;
    const b = tr.bbox;
    const ax = b.x + b.w * tr.aimRel.relx;
    const ay = b.y + b.h * tr.aimRel.rely;
    // Required dwell drives the progress ring below.
    const range = getTrackDisplayRange(tr).range || 1000;
    const reqD = dwellFromRange(tr, range);
    const color = killed ? "rgba(34,197,94,.55)" : (isSel ? "rgba(34,211,238,.95)" : "rgba(124,58,237,.65)");
    // Bounding box with glow (stronger when selected).
    ctx.lineWidth = isSel ? 3 : 2;
    ctx.strokeStyle = color;
    ctx.shadowBlur = isSel ? 16 : 10;
    ctx.shadowColor = color;
    roundRect(ctx, b.x, b.y, b.w, b.h, 10, false, true);
    ctx.shadowBlur = 0;
    // Aimpoint + dwell-progress ring for live tracks (no text overlays here).
    if (!killed) {
      drawAimpoint(ctx, ax, ay, isSel);
      const pct = clamp(tr.dwellAccum / Math.max(0.001, reqD), 0, 1);
      ctx.beginPath();
      ctx.strokeStyle = "rgba(34,197,94,.85)";
      ctx.lineWidth = 3;
      ctx.arc(ax, ay, 16, -Math.PI / 2, -Math.PI / 2 + Math.PI * 2 * pct);
      ctx.stroke();
    }
    // Engagement-state strip under the bbox, colored by state.
    const st = tr.state || "TRACK";
    const stColor = st === "FIRE" ? "rgba(239,68,68,.92)" : (st === "ASSESS" ? "rgba(245,158,11,.92)" : (st === "KILL" ? "rgba(34,197,94,.92)" : "rgba(34,211,238,.92)"));
    ctx.fillStyle = stColor;
    ctx.globalAlpha = 0.85;
    ctx.fillRect(b.x, b.y + b.h + 4, clamp(b.w * 0.55, 70, b.w), 5);
    ctx.globalAlpha = 1;
    // Beam line from the emitter (bottom center) to the selected aimpoint.
    if (state.tracker.beamOn && isSel && !killed) {
      ctx.strokeStyle = "rgba(239,68,68,.45)";
      ctx.lineWidth = 2;
      ctx.setLineDash([6, 6]);
      ctx.beginPath();
      ctx.moveTo(w * 0.5, h * 0.98);
      ctx.lineTo(ax, ay);
      ctx.stroke();
      ctx.setLineDash([]);
    }
  });
}
// Rebuilds the Tab-2 track-card list (one card per live track) and wires up
// manual-selection clicks. Called every frame from the main loop, so the DOM
// work is kept simple (full rebuild via innerHTML).
function renderTrackCards() {
  trackList.innerHTML = "";
  const alive = state.tracker.tracks.filter(t => !t.killed);
  if (!alive.length) {
    const div = document.createElement("div");
    div.className = "mini";
    div.style.padding = "8px";
    div.textContent = "No live tracks. Run Engage or adjust detector.";
    trackList.appendChild(div);
    return;
  }
  alive.forEach(tr => {
    // Range / dwell / power-margin / kill-probability figures for the badges.
    const displayRange = getTrackDisplayRange(tr);
    const range = displayRange.range || 1000;
    const rangeTxt = Number.isFinite(displayRange.range)
      ? `${Math.round(displayRange.range)}m (${displayRange.source})`
      : "—";
    const relVal = getDisplayRel(tr);
    const relTxt = relVal != null ? relVal.toFixed(2) : "—";
    const reqD = dwellFromRange(tr, range);
    const mp = maxPowerAtTarget(range);
    const margin = mp.Ptar - (tr.reqP_kW || 0);
    const pk = pkillFromMargin(margin, tr.dwellAccum, reqD);
    const div = document.createElement("div");
    div.className = "obj" + (tr.id === state.tracker.selectedTrackId ? " active" : "");
    // Labels are escaped before interpolation into innerHTML.
    div.innerHTML = `
<div class="top">
<div>
<div class="id">${tr.id}</div>
<div class="cls">${escapeHtml(tr.label)}</div>
</div>
<div class="badge"><span class="dot" style="background:${margin >= 0 ? "var(--good)" : "var(--bad)"};box-shadow:none"></span><span>${margin >= 0 ? "+" : ""}${margin.toFixed(1)}kW</span></div>
</div>
<div class="meta">
<span class="badge">R:${rangeTxt}</span>
<span class="badge">REL:${relTxt}</span>
<span class="badge">DW:${reqD.toFixed(1)}s</span>
<span class="badge">Pk:${Math.round(pk * 100)}%</span>
<span class="badge">AP:${escapeHtml(tr.aimRel.label)}</span>
<span class="badge">STATE:${tr.state}</span>
</div>
`;
    // Clicking a card selects the track (manual policy only) and arms the beam.
    div.addEventListener("click", () => {
      if (policyMode.value !== "manual") return;
      state.tracker.selectedTrackId = tr.id;
      state.tracker.beamOn = true;
      chipBeam.textContent = "BEAM:ON";
      renderTrackCards();
    });
    trackList.appendChild(div);
  });
}
| // ========= Tab 1 Radar ========= | |
// Renders the Tab-1 radar: tactical grid, rotating sweep, ownship marker, and
// one blip per first-frame detection. Self-scheduling via
// requestAnimationFrame (also kicked off immediately below).
// Bearing is derived from the detection's horizontal position in the frame
// assuming a ~60° camera FOV; range comes from GPT, else the blip is plotted
// at the "unknown / far" edge.
// Fixes vs. original: removed the dead `rangeVal` local and the dead initial
// `dist = 1000`, and switched the GPT-distance check to Number.isFinite so a
// legitimate 0 m value is honored (consistent with renderRadar).
function renderFrameRadar() {
  if (!frameRadar) return;
  const ctx = frameRadar.getContext("2d");
  const rect = frameRadar.getBoundingClientRect();
  const dpr = devicePixelRatio || 1;
  // Resize the backing store to CSS size * devicePixelRatio when needed.
  const targetW = Math.max(1, Math.floor(rect.width * dpr));
  const targetH = Math.max(1, Math.floor(rect.height * dpr));
  if (frameRadar.width !== targetW || frameRadar.height !== targetH) {
    frameRadar.width = targetW;
    frameRadar.height = targetH;
  }
  const w = frameRadar.width, h = frameRadar.height;
  const cx = w * 0.5, cy = h * 0.5;
  const R = Math.min(w, h) * 0.45; // Max radius
  ctx.clearRect(0, 0, w, h);
  // --- 1. Background (Tactical Grid) ---
  ctx.fillStyle = "#0a0f22"; // Matches --panel2
  ctx.fillRect(0, 0, w, h);
  // Concentric grid rings.
  ctx.strokeStyle = "rgba(34, 211, 238, 0.1)"; // Cyan faint
  ctx.lineWidth = 1;
  for (let i = 1; i <= 4; i++) {
    ctx.beginPath();
    ctx.arc(cx, cy, R * (i / 4), 0, Math.PI * 2);
    ctx.stroke();
  }
  // Cardinal spokes only (diagonals were judged too busy).
  ctx.beginPath();
  ctx.moveTo(cx - R, cy); ctx.lineTo(cx + R, cy);
  ctx.moveTo(cx, cy - R); ctx.lineTo(cx, cy + R);
  ctx.stroke();
  // --- 2. Sweep Animation ---
  const t = now() / 1500; // Slower, more deliberate sweep
  const ang = (t * (Math.PI * 2)) % (Math.PI * 2);
  const grad = ctx.createConicGradient(ang + Math.PI / 2, cx, cy); // Offset to start at 0
  grad.addColorStop(0, "transparent");
  grad.addColorStop(0.1, "transparent");
  grad.addColorStop(0.8, "rgba(34, 211, 238, 0.0)");
  grad.addColorStop(1, "rgba(34, 211, 238, 0.15)"); // Trailing edge
  ctx.fillStyle = grad;
  ctx.beginPath();
  ctx.arc(cx, cy, R, 0, Math.PI * 2);
  ctx.fill();
  // Scan line.
  ctx.strokeStyle = "rgba(34, 211, 238, 0.6)";
  ctx.lineWidth = 1.5;
  ctx.beginPath();
  ctx.moveTo(cx, cy);
  ctx.lineTo(cx + Math.cos(ang) * R, cy + Math.sin(ang) * R);
  ctx.stroke();
  // --- 3. Ownship (Center) ---
  ctx.fillStyle = "#22d3ee"; // Cyan
  ctx.beginPath();
  ctx.arc(cx, cy, 3, 0, Math.PI * 2);
  ctx.fill();
  // Ring around ownship
  ctx.strokeStyle = "rgba(34, 211, 238, 0.5)";
  ctx.lineWidth = 1;
  ctx.beginPath();
  ctx.arc(cx, cy, 6, 0, Math.PI * 2);
  ctx.stroke();
  // --- 4. Render Detections ---
  if (state.detections) {
    state.detections.forEach(det => {
      // Range: GPT distance when available; otherwise plot at 3000 m,
      // i.e. "unknown, far out".
      const dist = Number.isFinite(det.gpt_distance_m) ? det.gpt_distance_m : 3000;
      // Linear scale: 0 m -> center, 1500 m -> outer ring.
      const maxRangeM = 1500;
      const rPx = (clamp(dist, 0, maxRangeM) / maxRangeM) * R;
      // Bearing: box-center x mapped across an assumed ~60° FOV, centered on
      // "up" (-PI/2 in canvas angle convention, where 0 rad points right).
      const bx = det.bbox.x + det.bbox.w * 0.5;
      const fw = state.frame.w || 1280;
      const tx = (bx / fw) - 0.5; // -0.5 (left) to 0.5 (right)
      const fovRad = (60 * Math.PI) / 180;
      const angle = (-Math.PI / 2) + (tx * fovRad);
      // --- Draw Blip ---
      const px = cx + Math.cos(angle) * rPx;
      const py = cy + Math.sin(angle) * rPx;
      const isSelected = (state.selectedId === det.id);
      // Amber glow for the selected blip only.
      ctx.shadowBlur = isSelected ? 10 : 0;
      if (isSelected) ctx.shadowColor = "#f59e0b";
      // Blip color by class; white when selected.
      let col = "#7c3aed"; // Default violet
      if (det.label === 'person') col = "#ef4444"; // Red
      if (det.label === 'airplane') col = "#f59e0b"; // Amber
      if (isSelected) col = "#ffffff";
      ctx.fillStyle = col;
      ctx.beginPath();
      ctx.arc(px, py, isSelected ? 5 : 3.5, 0, Math.PI * 2);
      ctx.fill();
      // ID + targeting line + range label for the selected blip only
      // (labelling every blip would clutter the scope).
      if (isSelected) {
        ctx.fillStyle = "#fff";
        ctx.font = "bold 11px monospace";
        ctx.fillText(det.id, px + 8, py + 3);
        // Dashed targeting line from ownship to the blip.
        ctx.strokeStyle = "rgba(255, 255, 255, 0.4)";
        ctx.lineWidth = 1;
        ctx.setLineDash([2, 2]);
        ctx.beginPath();
        ctx.moveTo(cx, cy);
        ctx.lineTo(px, py);
        ctx.stroke();
        ctx.setLineDash([]);
        // Range label at the line midpoint, on a dark chip.
        const mx = (cx + px) * 0.5;
        const my = (cy + py) * 0.5;
        const distStr = `${Math.round(dist)}m`;
        ctx.font = "10px monospace";
        const tw = ctx.measureText(distStr).width;
        const th = 10;
        ctx.fillStyle = "rgba(10, 15, 34, 0.85)";
        ctx.fillRect(mx - tw / 2 - 3, my - th / 2 - 2, tw + 6, th + 4);
        ctx.fillStyle = "#22d3ee"; // Cyan
        ctx.textAlign = "center";
        ctx.textBaseline = "middle";
        ctx.fillText(distStr, mx, my);
        // Restore default text settings for subsequent draws.
        ctx.textAlign = "start";
        ctx.textBaseline = "alphabetic";
      }
      ctx.shadowBlur = 0; // reset
    });
  }
  requestAnimationFrame(renderFrameRadar);
}
// Start loop immediately
requestAnimationFrame(renderFrameRadar);
| // ========= Radar rendering (Tab 2) - Aligned with Tab 1 Scale/FOV ========= | |
// Renders the Tab-2 live-tracking radar. Visual style, scale (1500 m full
// radius) and bearing model (60° FOV centered on "up") intentionally match
// the Tab-1 radar so the two scopes read consistently. Called once per frame
// from the main loop (no self-scheduling, unlike renderFrameRadar).
function renderRadar() {
  const ctx = radarCanvas.getContext("2d");
  const rect = radarCanvas.getBoundingClientRect();
  const dpr = devicePixelRatio || 1;
  // Resize the backing store to CSS size * devicePixelRatio when needed.
  const targetW = Math.max(1, Math.floor(rect.width * dpr));
  const targetH = Math.max(1, Math.floor(rect.height * dpr));
  if (radarCanvas.width !== targetW || radarCanvas.height !== targetH) {
    radarCanvas.width = targetW;
    radarCanvas.height = targetH;
  }
  const w = radarCanvas.width, h = radarCanvas.height;
  ctx.clearRect(0, 0, w, h);
  // Background (Matches Tab 1)
  ctx.fillStyle = "#0a0f22";
  ctx.fillRect(0, 0, w, h);
  const cx = w * 0.5, cy = h * 0.5;
  const R = Math.min(w, h) * 0.45; // Match Tab 1 Radius factor
  // Rings (Matches Tab 1 style)
  ctx.strokeStyle = "rgba(34, 211, 238, 0.1)";
  ctx.lineWidth = 1;
  for (let i = 1; i <= 4; i++) {
    ctx.beginPath();
    ctx.arc(cx, cy, R * (i / 4), 0, Math.PI * 2);
    ctx.stroke();
  }
  // Cross
  ctx.beginPath();
  ctx.moveTo(cx - R, cy); ctx.lineTo(cx + R, cy);
  ctx.moveTo(cx, cy - R); ctx.lineTo(cx, cy + R);
  ctx.stroke();
  // Sweep Animation
  const t = now() / 1500; // Match Tab 1 speed (slower)
  const ang = (t * (Math.PI * 2)) % (Math.PI * 2);
  // Gradient Sweep
  const grad = ctx.createConicGradient(ang + Math.PI / 2, cx, cy);
  grad.addColorStop(0, "transparent");
  grad.addColorStop(0.1, "transparent");
  grad.addColorStop(0.8, "rgba(34, 211, 238, 0.0)");
  grad.addColorStop(1, "rgba(34, 211, 238, 0.15)");
  ctx.fillStyle = grad;
  ctx.beginPath();
  ctx.arc(cx, cy, R, 0, Math.PI * 2);
  ctx.fill();
  // Scan Line
  ctx.strokeStyle = "rgba(34, 211, 238, 0.6)";
  ctx.lineWidth = 1.5;
  ctx.beginPath();
  ctx.moveTo(cx, cy);
  ctx.lineTo(cx + Math.cos(ang) * R, cy + Math.sin(ang) * R);
  ctx.stroke();
  // Ownship (Center)
  ctx.fillStyle = "#22d3ee";
  ctx.beginPath();
  ctx.arc(cx, cy, 3, 0, Math.PI * 2);
  ctx.fill();
  ctx.strokeStyle = "rgba(34, 211, 238, 0.5)";
  ctx.lineWidth = 1;
  ctx.beginPath();
  ctx.arc(cx, cy, 6, 0, Math.PI * 2);
  ctx.stroke();
  // Render Tracks (Tab 2 data source, Tab 1 plotting logic)
  const tracks = state.tracker.tracks;
  tracks.forEach(tr => {
    // Range Logic (Matches Tab 1): GPT distance when finite, else plot at
    // the "unknown, far out" distance until GPT returns.
    const displayRange = getTrackDisplayRange(tr);
    let dist = 3000;
    if (Number.isFinite(displayRange.range)) dist = displayRange.range;
    // Scale: 0 -> 1500m (Matches Tab 1)
    const maxRangeM = 1500;
    const rPx = (clamp(dist, 0, maxRangeM) / maxRangeM) * R;
    // Bearing Logic (Matches Tab 1, FOV = 60°).
    // Tracks store pixel coordinates at current video-frame scale, so
    // normalize the box-center x to -0.5..0.5 before mapping to an angle.
    const vw = videoEngage.videoWidth || state.frame.w || 1280;
    const bx = tr.bbox.x + tr.bbox.w * 0.5;
    const tx = (bx / vw) - 0.5; // -0.5 (left) to 0.5 (right)
    const fovRad = (60 * Math.PI) / 180;
    const angle = (-Math.PI / 2) + (tx * fovRad);
    const px = cx + Math.cos(angle) * rPx;
    const py = cy + Math.sin(angle) * rPx;
    // Styling based on engagement state / selection.
    const isSelected = (state.tracker.selectedTrackId === tr.id);
    const killed = tr.killed;
    const col = killed ? "rgba(148,163,184,.65)" :
      (tr.state === "FIRE" ? "rgba(239,68,68,.9)" :
        (tr.state === "ASSESS" ? "rgba(245,158,11,.9)" :
          (isSelected ? "#f59e0b" : "rgba(34, 211, 238, 0.9)"))); // Cyan default
    if (isSelected) {
      ctx.shadowBlur = 10;
      ctx.shadowColor = col;
    } else {
      ctx.shadowBlur = 0;
    }
    ctx.fillStyle = col;
    ctx.beginPath();
    ctx.arc(px, py, 5, 0, Math.PI * 2);
    ctx.fill();
    // Label: always for the selection, otherwise only when the scope is
    // uncluttered (fewer than 5 tracks).
    if (!killed && (isSelected || tracks.length < 5)) {
      ctx.fillStyle = "rgba(255,255,255,.75)";
      ctx.font = "11px " + getComputedStyle(document.body).fontFamily;
      ctx.fillText(tr.id, px + 8, py + 4);
    }
  });
  // Legend
  ctx.shadowBlur = 0;
  ctx.fillStyle = "rgba(255,255,255,.55)";
  ctx.font = "11px " + getComputedStyle(document.body).fontFamily;
  ctx.fillText("LIVE TRACKING: 60° FOV, 1500m SCALE", 10, 18);
}
| // ========= Resizing overlays to match video viewports ========= | |
// Keeps the engage-overlay canvas sized to the displayed video element:
// backing store in device pixels, CSS size in layout pixels.
// Track bboxes stay in intrinsic video coordinates throughout; the render
// wrapper maps them onto this canvas via ctx.setTransform each frame.
function resizeOverlays() {
  const rect = videoEngage.getBoundingClientRect();
  if (rect.width > 0 && rect.height > 0) {
    engageOverlay.width = Math.round(rect.width * devicePixelRatio);
    engageOverlay.height = Math.round(rect.height * devicePixelRatio);
    engageOverlay.style.width = `${rect.width}px`;
    engageOverlay.style.height = `${rect.height}px`;
  }
  // The frame overlay uses intrinsic coords with CSS scaling; nothing to do.
}
window.addEventListener("resize", resizeOverlays);
// Adjust engage overlay transform for drawing in intrinsic coordinates.
// Wraps the base renderEngageOverlay (which draws in video-intrinsic pixels)
// with a ctx transform mapping intrinsic coords onto the device-pixel overlay
// canvas, then restores the identity transform afterwards. Relies on
// renderEngageOverlay being a mutable function-declaration binding.
const _renderEngageOverlay = renderEngageOverlay;
renderEngageOverlay = function () {
  const ctx = engageOverlay.getContext("2d");
  const rect = videoEngage.getBoundingClientRect();
  const vw = videoEngage.videoWidth || state.frame.w;
  const vh = videoEngage.videoHeight || state.frame.h;
  const pxW = engageOverlay.width;
  const pxH = engageOverlay.height;
  ctx.setTransform(1, 0, 0, 1, 0, 0);
  ctx.clearRect(0, 0, pxW, pxH);
  if (!rect.width || !rect.height) return; // overlay not laid out yet
  // Scale intrinsic video coordinates to overlay device pixels.
  const sx = pxW / vw;
  const sy = pxH / vh;
  ctx.setTransform(sx, 0, 0, sy, 0, 0);
  _renderEngageOverlay();
  ctx.setTransform(1, 0, 0, 1, 0, 0);
};
// Manual target selection: translate a click on the (CSS-scaled) overlay back
// into intrinsic video coordinates — the inverse of the render transform —
// and select the track under the cursor, arming the beam.
engageOverlay.addEventListener("click", (ev) => {
  if (!state.videoLoaded) return;
  if (policyMode.value !== "manual") return;
  const rect = engageOverlay.getBoundingClientRect();
  const vw = videoEngage.videoWidth || state.frame.w;
  const vh = videoEngage.videoHeight || state.frame.h;
  // Click position in canvas device pixels...
  const deviceX = (ev.clientX - rect.left) * devicePixelRatio;
  const deviceY = (ev.clientY - rect.top) * devicePixelRatio;
  // ...then back into intrinsic video coordinates.
  const hit = pickTrackAt(
    deviceX * (vw / engageOverlay.width),
    deviceY * (vh / engageOverlay.height)
  );
  if (hit) {
    state.tracker.selectedTrackId = hit.id;
    state.tracker.beamOn = true;
    chipBeam.textContent = "BEAM:ON";
    log(`Manual target selected: ${hit.id}`, "t");
    renderTrackCards();
  }
}, { passive: true });
| // ========= Trade-space rendering ========= | |
// Rebuilds the trade-space target <select> from current detections, keeping
// the previous selection when it is still present.
// Fix vs. original: the fallback now verifies state.selectedId is actually an
// option before assigning it; previously a stale id left the <select> showing
// an empty value.
function refreshTradeTargets() {
  const prev = tradeTarget.value;
  tradeTarget.innerHTML = "";
  const ids = state.detections.map(d => d.id);
  if (!ids.length) {
    const opt = document.createElement("option");
    opt.value = "";
    opt.textContent = "No targets";
    tradeTarget.appendChild(opt);
    return;
  }
  ids.forEach(id => {
    const opt = document.createElement("option");
    opt.value = id;
    opt.textContent = id;
    tradeTarget.appendChild(opt);
  });
  if (prev && ids.includes(prev)) {
    tradeTarget.value = prev;
  } else if (state.selectedId && ids.includes(state.selectedId)) {
    tradeTarget.value = state.selectedId;
  } else {
    tradeTarget.value = ids[0];
  }
}
// Trade-space controls: re-plot on demand and whenever the target changes.
btnReplot.addEventListener("click", renderTrade);
tradeTarget.addEventListener("change", renderTrade);
// Snapshot the current trade-space inputs for the selected target into the
// mission log (falls back to the first detection if the id is not found).
btnSnap.addEventListener("click", () => {
  if (!state.detections.length) return;
  const id = tradeTarget.value;
  const d = state.detections.find(x => x.id === id) || state.detections[0];
  const snap = {
    target: id,
    helPower_kW: +helPower.value,
    vis_km: +atmVis.value,
    cn2: +atmCn2.value,
    ao: +aoQ.value,
    baseRange_m: d.baseRange_m,
    reqP_kW: d.reqP_kW,
    baseDwell_s: d.baseDwell_s
  };
  log("SNAPSHOT: " + JSON.stringify(snap), "t");
});
/**
 * Draw the Tab-3 trade-space chart onto tradeCanvas for the selected target:
 *  - cyan curve: max deliverable power at target vs. range
 *  - red curve: required power at target
 *  - violet curve: required dwell (scaled onto its own implicit axis)
 *  - green fill: band between required and deliverable power (margin zone)
 *  - optional amber fill: Pk band derived from the power margin
 * Reads outer-scope controls (tradeTarget, rMin, rMax, showPk, rangeBase) and
 * state.detections; shows only a hint message until Reason has produced
 * detections. No return value; all output is canvas drawing.
 */
function renderTrade() {
  const ctx = tradeCanvas.getContext("2d");
  const W = tradeCanvas.width;
  const H = tradeCanvas.height;
  const fam = getComputedStyle(document.body).fontFamily;

  // Backdrop.
  ctx.clearRect(0, 0, W, H);
  ctx.fillStyle = "rgba(0,0,0,.32)";
  ctx.fillRect(0, 0, W, H);

  // Placeholder until detections exist.
  if (!state.detections.length) {
    ctx.fillStyle = "rgba(255,255,255,.75)";
    ctx.font = "14px " + fam;
    ctx.fillText("Run Reason to populate trade-space curves.", 18, 34);
    return;
  }

  // Resolve which detection's curves to plot.
  const targetId = tradeTarget.value || state.selectedId || state.detections[0].id;
  const det = state.detections.find((t) => t.id === targetId) || state.detections[0];

  // Range sweep bounds (metres), sanitized so rHi > rLo always holds.
  const rLo = Math.max(50, +rMin.value || 200);
  const rHi = Math.max(rLo + 50, +rMax.value || 6000);

  // Plot margins.
  const padL = 64;
  const padR = 18;
  const padT = 18;
  const padB = 52;
  const plotW = W - padL - padR;
  const plotH = H - padT - padB;

  // Sample the range sweep and track the power-axis extent.
  const STEPS = 120;
  const sweep = [];
  let pHi = 0;
  let pLo = Infinity;
  for (let i = 0; i <= STEPS; i++) {
    const r = rLo + (rHi - rLo) * (i / STEPS);
    const mp = maxPowerAtTarget(r).Ptar; // deliverable power at range r (kW)
    const reqP = det.reqP_kW || 40;      // required power, 40 kW fallback
    const reqD = requiredDwell(r, reqP, mp, det.baseDwell_s || 5);
    sweep.push({ r, mp, reqP, reqD });
    pHi = Math.max(pHi, mp, reqP);
    pLo = Math.min(pLo, mp, reqP);
  }
  pHi = Math.max(pHi, 20);     // never squash the power axis below 20 kW
  pLo = Math.max(0, pLo - 10); // pad the bottom, clamped at zero

  // Axes frame (left + bottom edges).
  ctx.strokeStyle = "rgba(255,255,255,.14)";
  ctx.lineWidth = 1;
  ctx.beginPath();
  ctx.moveTo(padL, padT);
  ctx.lineTo(padL, padT + plotH);
  ctx.lineTo(padL + plotW, padT + plotH);
  ctx.stroke();

  // Faint grid: 5 horizontal, 6 vertical divisions.
  ctx.strokeStyle = "rgba(255,255,255,.07)";
  for (let i = 1; i <= 5; i++) {
    const gy = padT + plotH * (i / 5);
    ctx.beginPath();
    ctx.moveTo(padL, gy);
    ctx.lineTo(padL + plotW, gy);
    ctx.stroke();
  }
  for (let i = 1; i <= 6; i++) {
    const gx = padL + plotW * (i / 6);
    ctx.beginPath();
    ctx.moveTo(gx, padT);
    ctx.lineTo(gx, padT + plotH);
    ctx.stroke();
  }

  // Data -> pixel mapping for the range (x) and power (y) axes.
  const xOf = (r) => padL + ((r - rLo) / (rHi - rLo)) * plotW;
  const yOf = (p) => padT + (1 - (p - pLo) / (pHi - pLo)) * plotH;

  // Stroke one polyline across the sweep; yFn picks the vertical coordinate.
  const strokeCurve = (color, width, yFn) => {
    ctx.strokeStyle = color;
    ctx.lineWidth = width;
    ctx.beginPath();
    for (let i = 0; i < sweep.length; i++) {
      const x = xOf(sweep[i].r);
      const y = yFn(sweep[i]);
      if (i === 0) ctx.moveTo(x, y);
      else ctx.lineTo(x, y);
    }
    ctx.stroke();
  };

  strokeCurve("rgba(34,211,238,.95)", 2.5, (p) => yOf(p.mp));   // max P @ target
  strokeCurve("rgba(239,68,68,.90)", 2.5, (p) => yOf(p.reqP));  // required P

  // Margin zone: fill between the upper and lower of the two power curves.
  ctx.fillStyle = "rgba(34,197,94,.08)";
  ctx.beginPath();
  for (let i = 0; i < sweep.length; i++) {
    const x = xOf(sweep[i].r);
    const y = yOf(Math.max(sweep[i].reqP, sweep[i].mp));
    if (i === 0) ctx.moveTo(x, y);
    else ctx.lineTo(x, y);
  }
  for (let i = sweep.length - 1; i >= 0; i--) {
    ctx.lineTo(xOf(sweep[i].r), yOf(Math.min(sweep[i].reqP, sweep[i].mp)));
  }
  ctx.closePath();
  ctx.fill();

  // Required-dwell curve, normalized by its own peak (second implicit axis).
  const dwellPeak = sweep.reduce((m, p) => Math.max(m, p.reqD), -Infinity);
  const yDwell = (v) => padT + (1 - v / Math.max(1e-6, dwellPeak)) * plotH;
  strokeCurve("rgba(124,58,237,.85)", 2.2, (p) => yDwell(p.reqD));

  // Optional Pk band derived from the power margin at each range.
  if (showPk.value === "on") {
    ctx.fillStyle = "rgba(245,158,11,.08)";
    ctx.beginPath();
    for (let i = 0; i < sweep.length; i++) {
      const p = sweep[i];
      const pk = pkillFromMargin(p.mp - p.reqP, det.baseDwell_s || 5, p.reqD);
      const x = xOf(p.r);
      const y = padT + plotH * (1 - pk); // Pk plotted 0..1 over full plot height
      if (i === 0) ctx.moveTo(x, y);
      else ctx.lineTo(x, y);
    }
    ctx.lineTo(padL + plotW, padT + plotH);
    ctx.lineTo(padL, padT + plotH);
    ctx.closePath();
    ctx.fill();
  }

  // Legend / labels.
  ctx.fillStyle = "rgba(255,255,255,.84)";
  ctx.font = "bold 14px " + fam;
  ctx.fillText(`Target: ${targetId} (${det.label})`, padL, 16);
  ctx.fillStyle = "rgba(34,211,238,.95)";
  ctx.fillText("Max P@Target (kW)", padL + 10, padT + plotH + 30);
  ctx.fillStyle = "rgba(239,68,68,.92)";
  ctx.fillText("Required P@Target (kW)", padL + 190, padT + plotH + 30);
  ctx.fillStyle = "rgba(124,58,237,.90)";
  ctx.fillText("Required Dwell (s, scaled)", padL + 420, padT + plotH + 30);
  ctx.fillStyle = "rgba(255,255,255,.55)";
  ctx.font = "11px " + fam;
  ctx.fillText("Range (m)", padL + plotW - 64, padT + plotH + 46);

  // Axis tick labels.
  ctx.fillStyle = "rgba(255,255,255,.55)";
  ctx.font = "11px " + fam;
  for (let i = 0; i <= 5; i++) {
    const pVal = pLo + (pHi - pLo) * (1 - i / 5);
    ctx.fillText(pVal.toFixed(0), 12, padT + plotH * (i / 5) + 4);
  }
  for (let i = 0; i <= 6; i++) {
    const rVal = rLo + (rHi - rLo) * (i / 6);
    ctx.fillText(rVal.toFixed(0), padL + plotW * (i / 6) - 14, padT + plotH + 18);
  }

  // Dashed vertical marker at the detection's baseline range.
  const baseR = det.baseRange_m || +rangeBase.value;
  const xb = xOf(clamp(baseR, rLo, rHi));
  ctx.strokeStyle = "rgba(255,255,255,.28)";
  ctx.setLineDash([6, 6]);
  ctx.beginPath();
  ctx.moveTo(xb, padT);
  ctx.lineTo(xb, padT + plotH);
  ctx.stroke();
  ctx.setLineDash([]);
}
// ========= Helpers: keep drawing when idle =========
// Self-rescheduling rAF loop so the agent cursor keeps animating even with
// no video playing. The next frame is requested BEFORE ticking, so the loop
// survives even if tickAgentCursor() throws on a frame.
(function idleLoop() {
  requestAnimationFrame(idleLoop);
  tickAgentCursor();
})();
// ========= Init =========
// Start from a clean, no-video state (unloadVideo resets any previously
// loaded feed), then post the startup hint to the console log.
// NOTE(review): the second log() argument "t" matches the tag used by other
// log() calls in this file — presumably a channel/category tag; confirm.
unloadVideo();
log("Console initialized. Upload a video to begin.", "t");
| })(); | |