Spaces:
Running
Running
Update src/ai_processor.py
Browse files · src/ai_processor.py +3 -3
src/ai_processor.py
CHANGED
|
@@ -29,7 +29,7 @@ def _log_kv(prefix: str, kv: Dict):
|
|
| 29 |
# --- Spaces GPU decorator (REQUIRED) ---
|
| 30 |
from spaces import GPU as _SPACES_GPU
|
| 31 |
|
| 32 |
-
@_SPACES_GPU(enable_queue=False)
|
| 33 |
def smartheal_gpu_stub(ping: int = 0) -> str:
|
| 34 |
return "ready"
|
| 35 |
|
|
@@ -152,7 +152,7 @@ def _vlm_infer_gpu(messages, model_id: str, max_new_tokens: int, token: Optional
|
|
| 152 |
pipe = pipeline(
|
| 153 |
task="image-text-to-text",
|
| 154 |
model=model_id,
|
| 155 |
-
device_map="auto",
|
| 156 |
token=token,
|
| 157 |
trust_remote_code=True,
|
| 158 |
model_kwargs={"low_cpu_mem_usage": True},
|
|
@@ -217,7 +217,7 @@ def load_yolo_model():
|
|
| 217 |
|
| 218 |
def load_segmentation_model():
|
| 219 |
load_model = _import_tf_loader()
|
| 220 |
-
return load_model(SEG_MODEL_PATH, compile=False)
|
| 221 |
|
| 222 |
def load_classification_pipeline():
|
| 223 |
pipe = _import_hf_cls()
|
|
|
|
| 29 |
# --- Spaces GPU decorator (REQUIRED) ---
|
| 30 |
from spaces import GPU as _SPACES_GPU
|
| 31 |
|
| 32 |
+
@_SPACES_GPU(enable_queue=True)
|
| 33 |
def smartheal_gpu_stub(ping: int = 0) -> str:
|
| 34 |
return "ready"
|
| 35 |
|
|
|
|
| 152 |
pipe = pipeline(
|
| 153 |
task="image-text-to-text",
|
| 154 |
model=model_id,
|
| 155 |
+
device_map="cpu", # CUDA init happens here, safely in GPU worker
|
| 156 |
token=token,
|
| 157 |
trust_remote_code=True,
|
| 158 |
model_kwargs={"low_cpu_mem_usage": True},
|
|
|
|
| 217 |
|
| 218 |
def load_segmentation_model():
|
| 219 |
load_model = _import_tf_loader()
|
| 220 |
+
return load_model(SEG_MODEL_PATH, compile=False, custom_objects={'InputLayer': tf.keras.layers.InputLayer})
|
| 221 |
|
| 222 |
def load_classification_pipeline():
|
| 223 |
pipe = _import_hf_cls()
|