Spaces:
Paused
Paused
fix: add missing /detect/verdicts endpoint and clean up imports
The frontend was calling GET /detect/verdicts/{job_id} but no route
existed — verdicts were computed but never reached the UI. Also
consolidates inline storage imports to top-level and removes stale
design-note comments.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
app.py
CHANGED
|
@@ -53,6 +53,9 @@ from jobs.storage import (
|
|
| 53 |
get_job_directory,
|
| 54 |
get_job_storage,
|
| 55 |
get_output_video_path,
|
|
|
|
|
|
|
|
|
|
| 56 |
)
|
| 57 |
from models.segmenters.model_loader import get_segmenter_detector
|
| 58 |
from pydantic import BaseModel
|
|
@@ -506,8 +509,6 @@ async def detect_status(job_id: str):
|
|
| 506 |
@app.get("/detect/tracks/{job_id}/summary")
|
| 507 |
async def get_track_summary_endpoint(job_id: str):
|
| 508 |
"""Return per-frame detection counts for timeline heatmap."""
|
| 509 |
-
from jobs.storage import get_track_summary, get_job_storage
|
| 510 |
-
import cv2
|
| 511 |
|
| 512 |
job = get_job_storage().get(job_id)
|
| 513 |
if not job:
|
|
@@ -549,17 +550,19 @@ async def get_frame_tracks(job_id: str, frame_idx: int):
|
|
| 549 |
# Let's use a global cache in memory for active jobs?
|
| 550 |
# See inference.py: 'all_detections_map' is local to that function.
|
| 551 |
|
| 552 |
-
# BETTER APPROACH for this demo:
|
| 553 |
-
# Use a simple shared dictionary in jobs/storage.py or app.py used by inference.
|
| 554 |
-
# We will pass a callback or shared dict to run_inference.
|
| 555 |
-
|
| 556 |
-
# For now, let's just return 404 if not implemented, but I need to implement it.
|
| 557 |
-
# I'll add a cache in app.py for active job tracks?
|
| 558 |
-
from jobs.storage import get_track_data
|
| 559 |
data = get_track_data(job_id, frame_idx)
|
| 560 |
return data or []
|
| 561 |
|
| 562 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 563 |
@app.delete("/detect/job/{job_id}")
|
| 564 |
async def cancel_job(job_id: str):
|
| 565 |
"""Cancel a running job."""
|
|
|
|
| 53 |
get_job_directory,
|
| 54 |
get_job_storage,
|
| 55 |
get_output_video_path,
|
| 56 |
+
get_track_data,
|
| 57 |
+
get_track_summary,
|
| 58 |
+
get_verdicts,
|
| 59 |
)
|
| 60 |
from models.segmenters.model_loader import get_segmenter_detector
|
| 61 |
from pydantic import BaseModel
|
|
|
|
| 509 |
@app.get("/detect/tracks/{job_id}/summary")
|
| 510 |
async def get_track_summary_endpoint(job_id: str):
|
| 511 |
"""Return per-frame detection counts for timeline heatmap."""
|
|
|
|
|
|
|
| 512 |
|
| 513 |
job = get_job_storage().get(job_id)
|
| 514 |
if not job:
|
|
|
|
| 550 |
# Let's use a global cache in memory for active jobs?
|
| 551 |
# See inference.py: 'all_detections_map' is local to that function.
|
| 552 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 553 |
data = get_track_data(job_id, frame_idx)
|
| 554 |
return data or []
|
| 555 |
|
| 556 |
|
| 557 |
+
@app.get("/detect/verdicts/{job_id}")
async def get_verdicts_endpoint(job_id: str):
    """Return all assessment verdicts for a job, keyed by track_id.

    Responds 404 when the job id is unknown or has expired; otherwise
    forwards whatever the verdict store holds for this job.
    """
    # Guard: the job must still exist in storage before we expose verdicts.
    if not get_job_storage().get(job_id):
        raise HTTPException(status_code=404, detail="Job not found or expired.")
    return get_verdicts(job_id)
|
| 564 |
+
|
| 565 |
+
|
| 566 |
@app.delete("/detect/job/{job_id}")
|
| 567 |
async def cancel_job(job_id: str):
|
| 568 |
"""Cancel a running job."""
|