Spaces:
Paused
Paused
Zhen Ye
committed on
Commit
·
2fb805f
1
Parent(s):
8094b21
chore(forensics): finalize verification and trace validation

- Fix unreachable background reasoning in inference.py
- Enable GPT reasoning by default in frontend
- Verify correct data mapping for 15 naval fields
- Clean up legacy code paths and ensure backward compatibility
Browse files
- frontend/index.html +1 -1
- inference.py +4 -7
frontend/index.html
CHANGED
|
@@ -97,7 +97,7 @@
|
|
| 97 |
<span>Enable Legacy Depth Map (Slow)</span>
|
| 98 |
</label>
|
| 99 |
<label class="checkbox-row" for="enableGPTToggle" style="margin-top: 4px;">
|
| 100 |
-
<input type="checkbox" id="enableGPTToggle">
|
| 101 |
<span style="color: var(--accent-light);">Enable GPT Reasoning</span>
|
| 102 |
</label>
|
| 103 |
<label class="checkbox-row" for="enableStreamToggle" style="margin-top: 4px;">
|
|
|
|
| 97 |
<span>Enable Legacy Depth Map (Slow)</span>
|
| 98 |
</label>
|
| 99 |
<label class="checkbox-row" for="enableGPTToggle" style="margin-top: 4px;">
|
| 100 |
+
<input type="checkbox" id="enableGPTToggle" checked>
|
| 101 |
<span style="color: var(--accent-light);">Enable GPT Reasoning</span>
|
| 102 |
</label>
|
| 103 |
<label class="checkbox-row" for="enableStreamToggle" style="margin-top: 4px;">
|
inference.py
CHANGED
|
@@ -758,12 +758,9 @@ def process_first_frame(
|
|
| 758 |
det["depth_rel"] = None
|
| 759 |
det["depth_valid"] = False
|
| 760 |
|
| 761 |
-
return processed, detections, depth_map
|
| 762 |
-
|
| 763 |
-
|
| 764 |
# 2. GPT-based Distance/Direction Estimation (Explicitly enabled)
|
| 765 |
if enable_gpt:
|
| 766 |
-
# We need to save the frame temporarily to pass to GPT (or refactor
|
| 767 |
# For now, write to temp file
|
| 768 |
try:
|
| 769 |
with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp_img:
|
|
@@ -776,7 +773,7 @@ def process_first_frame(
|
|
| 776 |
# GPT returns { "T01": { "distance_m": ..., "direction": ... } }
|
| 777 |
# Detections are list of dicts. We assume T01 maps to index 0, T02 to index 1...
|
| 778 |
for i, det in enumerate(detections):
|
| 779 |
-
# ID format matches what we constructed in
|
| 780 |
obj_id = f"T{str(i+1).zfill(2)}"
|
| 781 |
if obj_id in gpt_results:
|
| 782 |
info = gpt_results[obj_id]
|
|
@@ -791,9 +788,9 @@ def process_first_frame(
|
|
| 791 |
det["gpt_raw"] = info
|
| 792 |
|
| 793 |
except Exception as e:
|
| 794 |
-
logging.error(f"GPT
|
| 795 |
|
| 796 |
-
return processed, detections
|
| 797 |
|
| 798 |
|
| 799 |
def run_inference(
|
|
|
|
| 758 |
det["depth_rel"] = None
|
| 759 |
det["depth_valid"] = False
|
| 760 |
|
|
|
|
|
|
|
|
|
|
| 761 |
# 2. GPT-based Distance/Direction Estimation (Explicitly enabled)
|
| 762 |
if enable_gpt:
|
| 763 |
+
# We need to save the frame temporarily to pass to GPT (or refactor gpt_reasoning to take buffer)
|
| 764 |
# For now, write to temp file
|
| 765 |
try:
|
| 766 |
with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp_img:
|
|
|
|
| 773 |
# GPT returns { "T01": { "distance_m": ..., "direction": ... } }
|
| 774 |
# Detections are list of dicts. We assume T01 maps to index 0, T02 to index 1...
|
| 775 |
for i, det in enumerate(detections):
|
| 776 |
+
# ID format matches what we constructed in gpt_reasoning.py
|
| 777 |
obj_id = f"T{str(i+1).zfill(2)}"
|
| 778 |
if obj_id in gpt_results:
|
| 779 |
info = gpt_results[obj_id]
|
|
|
|
| 788 |
det["gpt_raw"] = info
|
| 789 |
|
| 790 |
except Exception as e:
|
| 791 |
+
logging.error(f"GPT Threat estimation failed: {e}")
|
| 792 |
|
| 793 |
+
return processed, detections, depth_map
|
| 794 |
|
| 795 |
|
| 796 |
def run_inference(
|