Spaces:
Running on Zero
Running on Zero
Add ZeroGPU support for HF Spaces deployment
Browse files
- Add spaces>=0.30.0 to requirements
- Add conditional @spaces.GPU decorator that works on both local and HF Spaces
- Decorate analyze_streaming (180s) and transcribe_audio (60s) for GPU access
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
- requirements.txt +1 -0
- ui/callbacks.py +20 -0
requirements.txt
CHANGED
|
@@ -9,3 +9,4 @@ numpy>=1.24.0
|
|
| 9 |
scipy>=1.10.0
|
| 10 |
json-repair>=0.30.0
|
| 11 |
llm-output-parser>=0.3.0
|
|
|
|
|
|
| 9 |
scipy>=1.10.0
|
| 10 |
json-repair>=0.30.0
|
| 11 |
llm-output-parser>=0.3.0
|
| 12 |
+
spaces>=0.30.0
|
ui/callbacks.py
CHANGED
|
@@ -9,6 +9,22 @@ from PIL import Image
|
|
| 9 |
from agents.graph import stream_pipeline
|
| 10 |
from config import DEMO_CASES_DIR, ENABLE_MEDASR
|
| 11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
logger = logging.getLogger(__name__)
|
| 13 |
|
| 14 |
# Agent display info
|
|
@@ -88,10 +104,12 @@ DEMO_CASES = {
|
|
| 88 |
}
|
| 89 |
|
| 90 |
|
|
|
|
| 91 |
def analyze_streaming(image: Image.Image | None, diagnosis: str, context: str, modality: str):
|
| 92 |
"""
|
| 93 |
Generator: run pipeline and yield single HTML output after each agent step.
|
| 94 |
Each agent's output appears inline below its progress header.
|
|
|
|
| 95 |
"""
|
| 96 |
if image is None:
|
| 97 |
yield '<div class="pipeline-error">Please upload a medical image.</div>'
|
|
@@ -371,12 +389,14 @@ def _format_consultant(state: dict) -> str:
|
|
| 371 |
return "".join(parts)
|
| 372 |
|
| 373 |
|
|
|
|
| 374 |
def transcribe_audio(audio, existing_context: str = ""):
|
| 375 |
"""
|
| 376 |
Transcribe audio input using MedASR.
|
| 377 |
|
| 378 |
Generator that yields (context_text, status_html) for streaming UI feedback.
|
| 379 |
Appends transcribed text to any existing context.
|
|
|
|
| 380 |
"""
|
| 381 |
def _status_html(cls: str, text: str) -> str:
|
| 382 |
return f'<div class="voice-status {cls}">{text}</div>'
|
|
|
|
| 9 |
from agents.graph import stream_pipeline
|
| 10 |
from config import DEMO_CASES_DIR, ENABLE_MEDASR
|
| 11 |
|
| 12 |
+
# ZeroGPU support: use @spaces.GPU when running on HF Spaces, no-op locally
try:
    import spaces
except ImportError:
    # Local / non-Spaces environment: the `spaces` package is absent.
    SPACES_AVAILABLE = False
else:
    SPACES_AVAILABLE = True


def gpu_decorator(duration: int = 180):
    """Decorator that uses @spaces.GPU on HF Spaces, no-op locally.

    Args:
        duration: Maximum GPU allocation time in seconds, forwarded to
            ``spaces.GPU`` when running on Hugging Face Spaces.

    Returns:
        A decorator that wraps the target callable with ``spaces.GPU``
        when the package is importable, and otherwise returns the
        callable unchanged.
    """
    def decorator(fn):
        # Without the spaces package there is nothing to wrap.
        if not SPACES_AVAILABLE:
            return fn
        return spaces.GPU(duration=duration)(fn)
    return decorator
|
| 27 |
+
|
| 28 |
logger = logging.getLogger(__name__)
|
| 29 |
|
| 30 |
# Agent display info
|
|
|
|
| 104 |
}
|
| 105 |
|
| 106 |
|
| 107 |
+
@gpu_decorator(duration=180)
|
| 108 |
def analyze_streaming(image: Image.Image | None, diagnosis: str, context: str, modality: str):
|
| 109 |
"""
|
| 110 |
Generator: run pipeline and yield single HTML output after each agent step.
|
| 111 |
Each agent's output appears inline below its progress header.
|
| 112 |
+
Uses @spaces.GPU on HF Spaces for ZeroGPU support.
|
| 113 |
"""
|
| 114 |
if image is None:
|
| 115 |
yield '<div class="pipeline-error">Please upload a medical image.</div>'
|
|
|
|
| 389 |
return "".join(parts)
|
| 390 |
|
| 391 |
|
| 392 |
+
@gpu_decorator(duration=60)
|
| 393 |
def transcribe_audio(audio, existing_context: str = ""):
|
| 394 |
"""
|
| 395 |
Transcribe audio input using MedASR.
|
| 396 |
|
| 397 |
Generator that yields (context_text, status_html) for streaming UI feedback.
|
| 398 |
Appends transcribed text to any existing context.
|
| 399 |
+
Uses @spaces.GPU on HF Spaces for ZeroGPU support.
|
| 400 |
"""
|
| 401 |
def _status_html(cls: str, text: str) -> str:
|
| 402 |
return f'<div class="voice-status {cls}">{text}</div>'
|