Spaces: Running on Zero
URA is now 0.3.1 with torch 2.9
app.py CHANGED

@@ -3,7 +3,6 @@
 from __future__ import annotations
 
 import sys
-import threading
 from pathlib import Path
 
 # Allow importing unreflectanything when run from gradio_space (e.g. HF Space with root dir)
@@ -67,29 +66,27 @@ def _get_sample_images():
 # Single model instance; loaded in background at app start or on first inference.
 _cached_ura_model = None
 _cached_device = None
-_model_load_lock = threading.Lock()
 
 
 def _get_model(device: str):
     """Return the pretrained model, loading it once and reusing. Ensures weights exist (downloads if missing)."""
     global _cached_ura_model, _cached_device
     weights_path, config_path = _ensure_weights()
-
-    if _cached_ura_model is not None and _cached_device == device:
-        return _cached_ura_model
-    from unreflectanything import model
-
-    _cached_ura_model = model(
-        pretrained=True,
-        # weights_path=os.path.join(os.path.dirname(__file__), ".cache", "weights", "full_model_weights.pt"),
-        # config_path=os.path.join(os.path.dirname(__file__), ".cache", "configs", "pretrained_config.yaml"),
-        weights_path=weights_path,
-        config_path=config_path,
-        device=device,
-        verbose=False,
-    )
-    _cached_device = device
+    if _cached_ura_model is not None and _cached_device == device:
+        return _cached_ura_model
+    from unreflectanything import model
+
+    _cached_ura_model = model(
+        pretrained=True,
+        # weights_path=os.path.join(os.path.dirname(__file__), ".cache", "weights", "full_model_weights.pt"),
+        # config_path=os.path.join(os.path.dirname(__file__), ".cache", "configs", "pretrained_config.yaml"),
+        weights_path=weights_path,
+        config_path=config_path,
+        device=device,
+        verbose=False,
+    )
+    _cached_device = device
     return _cached_ura_model
 
 
 def build_ui():
@@ -97,7 +94,8 @@ def build_ui():
 
     device = "cuda" if torch.cuda.is_available() else "cpu"
     # Start loading the model in the background so it is ready (or nearly ready) by first use.
-
+    print(f"Initializing model on {device}...")
+    _get_model(device)
 
     def run_inference(image: np.ndarray | None) -> np.ndarray | None:
         """Run reflection removal using the cached model. Returns RGB numpy [H,W,3] in 0–255 or None."""
@@ -211,7 +209,6 @@ def _launch_allowed_paths():
 
 if __name__ == "__main__":
     demo.launch(
-        share=True,
     allowed_paths=_launch_allowed_paths(),
         theme=gr.themes.Soft(primary_hue="orange", secondary_hue="blue"),
     )
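The substantive change is easy to miss in the diff: the commit trades lock-guarded lazy loading for an eager warm-up. _get_model(device) now runs once inside build_ui(), before demo.launch() starts serving requests, so the module-level cache is already populated by the time run_inference can fire, and the removed threading.Lock has nothing left to guard. A minimal sketch of that warm-then-reuse pattern, with hypothetical names (load_heavy_model and get_model are stand-ins, not URA's API):

# Sketch of the warm-then-reuse singleton pattern (hypothetical names).
_cached_model = None
_cached_device = None


def load_heavy_model(device: str):
    """Stand-in for an expensive constructor such as a torch model load."""
    return {"device": device}  # placeholder for the real model object


def get_model(device: str):
    """Return the cached model, loading it once per device."""
    global _cached_model, _cached_device
    if _cached_model is not None and _cached_device == device:
        return _cached_model
    _cached_model = load_heavy_model(device)
    _cached_device = device
    return _cached_model


# Warm the cache at startup, before any request handler can run;
# subsequent calls are cheap cache hits and need no lock.
get_model("cpu")

The trade-off: startup now blocks on the full weight download and load, but first-request latency and the first-use race both disappear.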
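The demo.launch() change is the same kind of trimming: share=True only matters for local runs, where Gradio opens a temporary public tunnel, and a Space is already served at a public URL, so the flag is redundant there. A minimal sketch of the resulting launch configuration, with a hypothetical app (the ./sample_images path is an assumption, not the Space's actual directory; the sketch sets the theme on the Blocks constructor, where current Gradio expects it):

import gradio as gr

# Hypothetical stand-in app mirroring the final launch configuration.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange", secondary_hue="blue")) as demo:
    gr.Markdown("placeholder UI")

if __name__ == "__main__":
    demo.launch(
        # No share=True: Spaces already exposes the app publicly.
        allowed_paths=["./sample_images"],  # dirs Gradio may serve files from
    )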