Spaces:
Paused
Paused
Zhen Ye
committed on
Commit
·
ecbbe4e
1
Parent(s):
4977275
Jobs: forward cached GPT results, reduce stream buffer to 60, wire up first_frame_gpt_results
Browse files- app.py +2 -1
- jobs/background.py +1 -0
- jobs/models.py +1 -0
- jobs/streaming.py +1 -1
app.py
CHANGED
|
@@ -405,7 +405,7 @@ async def detect_async_endpoint(
|
|
| 405 |
active_depth = depth_estimator if enable_depth else None
|
| 406 |
|
| 407 |
try:
|
| 408 |
-
processed_frame, detections, depth_map = process_first_frame(
|
| 409 |
str(input_path),
|
| 410 |
query_list,
|
| 411 |
mode=mode,
|
|
@@ -455,6 +455,7 @@ async def detect_async_endpoint(
|
|
| 455 |
enable_gpt=enable_gpt,
|
| 456 |
mission_spec=mission_spec,
|
| 457 |
mission_mode=mission_mode,
|
|
|
|
| 458 |
)
|
| 459 |
get_job_storage().create(job)
|
| 460 |
asyncio.create_task(process_video_async(job_id))
|
|
|
|
| 405 |
active_depth = depth_estimator if enable_depth else None
|
| 406 |
|
| 407 |
try:
|
| 408 |
+
processed_frame, detections, depth_map, first_frame_gpt_results = process_first_frame(
|
| 409 |
str(input_path),
|
| 410 |
query_list,
|
| 411 |
mode=mode,
|
|
|
|
| 455 |
enable_gpt=enable_gpt,
|
| 456 |
mission_spec=mission_spec,
|
| 457 |
mission_mode=mission_mode,
|
| 458 |
+
first_frame_gpt_results=first_frame_gpt_results,
|
| 459 |
)
|
| 460 |
get_job_storage().create(job)
|
| 461 |
asyncio.create_task(process_video_async(job_id))
|
jobs/background.py
CHANGED
|
@@ -53,6 +53,7 @@ async def process_video_async(job_id: str) -> None:
|
|
| 53 |
job.enable_gpt,
|
| 54 |
stream_queue,
|
| 55 |
job.mission_spec, # Forward mission spec for relevance gating
|
|
|
|
| 56 |
)
|
| 57 |
detection_path, detections_list = result_pkg
|
| 58 |
|
|
|
|
| 53 |
job.enable_gpt,
|
| 54 |
stream_queue,
|
| 55 |
job.mission_spec, # Forward mission spec for relevance gating
|
| 56 |
+
job.first_frame_gpt_results, # Avoid duplicate GPT call on frame 0
|
| 57 |
)
|
| 58 |
detection_path, detections_list = result_pkg
|
| 59 |
|
jobs/models.py
CHANGED
|
@@ -37,3 +37,4 @@ class JobInfo:
|
|
| 37 |
# Mission specification (None = LEGACY mode)
|
| 38 |
mission_spec: Optional[Any] = None # utils.schemas.MissionSpecification
|
| 39 |
mission_mode: str = "LEGACY" # "MISSION" or "LEGACY"
|
|
|
|
|
|
| 37 |
# Mission specification (None = LEGACY mode)
|
| 38 |
mission_spec: Optional[Any] = None # utils.schemas.MissionSpecification
|
| 39 |
mission_mode: str = "LEGACY" # "MISSION" or "LEGACY"
|
| 40 |
+
first_frame_gpt_results: Optional[Dict[str, Any]] = None # Cached GPT results from process_first_frame
|
jobs/streaming.py
CHANGED
|
@@ -13,7 +13,7 @@ def create_stream(job_id: str) -> queue.Queue:
|
|
| 13 |
# standard Queue, thread-safe
|
| 14 |
# maxsize to prevent memory explosion if consumer is slow
|
| 15 |
# Buffer increased to 300 (approx 10s at 30fps) for smooth streaming
|
| 16 |
-
q = queue.Queue(maxsize=300)
|
| 17 |
_STREAMS[job_id] = q
|
| 18 |
return q
|
| 19 |
|
|
|
|
| 13 |
# standard Queue, thread-safe
|
| 14 |
# maxsize to prevent memory explosion if consumer is slow
|
| 15 |
# Buffer reduced to 60 (approx 2s at 30fps) to cap latency while streaming
|
| 16 |
+
q = queue.Queue(maxsize=60)
|
| 17 |
_STREAMS[job_id] = q
|
| 18 |
return q
|
| 19 |
|