Update app.py
app.py CHANGED
@@ -1,4 +1,3 @@
-# %%writefile app.py
 import os
 import torch
 from PIL import Image, ImageOps
@@ -10,20 +9,25 @@ import sys
 import traceback
 from datetime import datetime
 
-APP_ROOT = "
+APP_ROOT = "."
 OUTPUT_DIR = os.path.join(APP_ROOT, "outputs")
 os.makedirs(OUTPUT_DIR, exist_ok=True)
 print(f"--- Output directory set to: {OUTPUT_DIR} ---")
+
 GROUNDING_DINO_LOCAL_PATH = os.path.join(APP_ROOT, "groundingdino_local")
 if os.path.exists(GROUNDING_DINO_LOCAL_PATH) and GROUNDING_DINO_LOCAL_PATH not in sys.path:
     sys.path.insert(0, GROUNDING_DINO_LOCAL_PATH)
     print(f"✅ Added vendorized GroundingDINO to PYTHONPATH: {GROUNDING_DINO_LOCAL_PATH}")
+
 from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler
 from transformers import pipeline as hf_pipeline
 try:
     from groundingdino.util.inference import load_model as load_gdino_model, predict as predict_gdino
     import groundingdino.datasets.transforms as T
-except ImportError as e:
+except ImportError as e:
+    print("Could not import GroundingDINO. Make sure the 'groundingdino_local' directory is in your repository.")
+    raise e
+
 HF_USERNAME = "Nightfury16"
 BASE_SD_MODEL = "runwayml/stable-diffusion-v1-5"
 CONTROLNET_INPAINT_REPO = f"{HF_USERNAME}/virtual-staging-controlnet"
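The rewritten import block above follows a common pattern for vendorizing a package inside a Space: put the vendored directory on sys.path before the first import, then fail with an actionable message if the import still breaks. A minimal standalone sketch of that pattern (the directory name comes from the diff; everything else is generic):

    import os
    import sys

    # Vendorized package shipped inside the repo (name taken from the diff above).
    VENDOR_DIR = os.path.join(".", "groundingdino_local")

    # Make the vendored copy importable before the first `import groundingdino`.
    if os.path.isdir(VENDOR_DIR) and VENDOR_DIR not in sys.path:
        sys.path.insert(0, VENDOR_DIR)

    try:
        import groundingdino  # resolved from VENDOR_DIR if not installed via pip
    except ImportError:
        # Fail with a hint instead of a bare traceback.
        print("Could not import GroundingDINO; check that 'groundingdino_local' is in the repo.")
        raise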
@@ -39,6 +43,7 @@ def box_cxcywh_to_xyxy(x: torch.Tensor, width: int, height: int) -> torch.Tensor
     if x.nelement() == 0: return x
     x_c, y_c, w, h = x.unbind(1); b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
     b = torch.stack(b, dim=1); b[:, [0, 2]] *= width; b[:, [1, 3]] *= height; return b
+
 def resize_and_pad(image: Image.Image, target_size: tuple[int, int], background_color: tuple[int, int, int] = (0, 0, 0)) -> tuple[Image.Image, tuple[int, int, int, int]]:
     original_width, original_height = image.size; target_width, target_height = target_size
     ratio_w, ratio_h = target_width / original_width, target_height / original_height
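For reference, box_cxcywh_to_xyxy converts GroundingDINO's normalized center-format boxes into pixel-space corner coordinates. A worked example of the same arithmetic, assuming a 640x480 image (values chosen for easy checking):

    import torch

    # One box in normalized (cx, cy, w, h) form, as GroundingDINO predicts.
    boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])
    width, height = 640, 480

    x_c, y_c, w, h = boxes.unbind(1)
    b = torch.stack([x_c - 0.5 * w, y_c - 0.5 * h, x_c + 0.5 * w, y_c + 0.5 * h], dim=1)
    b[:, [0, 2]] *= width   # scale x coordinates to pixels
    b[:, [1, 3]] *= height  # scale y coordinates to pixels

    print(b)  # tensor([[256., 144., 384., 336.]])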
@@ -49,6 +54,7 @@ def resize_and_pad(image: Image.Image, target_size: tuple[int, int], background_
     paste_x, paste_y = (target_width - new_width) // 2, (target_height - new_height) // 2
     new_image.paste(image, (paste_x, paste_y)); crop_box = (paste_x, paste_y, paste_x + new_width, paste_y + new_height)
     return new_image, crop_box
+
 class SAMModel:
     def __init__(self, device: str = 'cuda:0'): self.device, self.model = device, None
     def load(self, model_path: str = SAM_CHECKPOINT):
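resize_and_pad letterboxes the input to the model's working resolution and returns the crop box needed to undo the padding later. The same geometry as a self-contained sketch, assuming an 800x600 input and a 512x512 target:

    from PIL import Image

    image = Image.new("RGB", (800, 600), "gray")
    target_w, target_h = 512, 512

    # Scale by the limiting ratio so the image fits entirely inside the target.
    ratio = min(target_w / image.width, target_h / image.height)           # 0.64
    new_w, new_h = int(image.width * ratio), int(image.height * ratio)     # 512 x 384

    canvas = Image.new("RGB", (target_w, target_h), (0, 0, 0))
    paste_x, paste_y = (target_w - new_w) // 2, (target_h - new_h) // 2    # (0, 64)
    canvas.paste(image.resize((new_w, new_h)), (paste_x, paste_y))

    # crop_box recovers the un-padded region from the model's output later.
    crop_box = (paste_x, paste_y, paste_x + new_w, paste_y + new_h)
    print(crop_box)  # (0, 64, 512, 448)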
@@ -61,6 +67,7 @@ class SAMModel:
         final_mask = np.zeros((image.height, image.width), dtype=np.uint8)
         for mask_data in results[0].masks.data: final_mask = np.maximum(final_mask, mask_data.cpu().numpy().astype(np.uint8) * 255)
         return final_mask
+
 class DinoSamGrounding:
     def __init__(self, device: str = 'cuda:0'):
         if predict_gdino is None: raise ImportError("GroundingDINO not accessible.")
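In SAMModel, every per-instance mask is folded into one binary mask via np.maximum, which amounts to a pixel-wise union. The same operation in isolation, on two toy masks:

    import numpy as np

    # Two toy instance masks (0 = background, 255 = object), as in the loop above.
    mask_a = np.array([[255, 0], [0, 0]], dtype=np.uint8)
    mask_b = np.array([[0, 0], [0, 255]], dtype=np.uint8)

    final_mask = np.zeros((2, 2), dtype=np.uint8)
    for m in (mask_a, mask_b):
        final_mask = np.maximum(final_mask, m)  # union: keep a pixel if any mask has it

    print(final_mask)  # [[255   0]
                       #  [  0 255]]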
@@ -89,7 +96,6 @@ try:
 except Exception as e:
     print(f"FATAL ERROR during model loading: {e}"); global_models["loading_error"] = str(e)
 
-
 def run_virtual_staging(
     input_image: Image.Image, prompt: str, negative_prompt: str, use_canny: bool, use_depth: bool, use_lora: bool, seed: int, progress=gr.Progress()
 ):
@@ -138,6 +144,7 @@ def run_virtual_staging(
         error_message = traceback.format_exc(); print(f"!!! AN ERROR OCCURRED !!!\n{error_message}")
         raise gr.Error(f"An error occurred: {e}")
 
+
 with gr.Blocks(css="footer {display: none !important}") as demo:
     gr.Markdown("# Virtual Staging AI")
     gr.Markdown("All models are pre-loaded. Configure your generation and click 'Generate Staging'.")
@@ -171,4 +178,4 @@ with gr.Blocks(css="footer {display: none !important}") as demo:
         inputs=[input_image, prompt, negative_prompt, use_canny, use_depth, use_lora, seed_input]
     )
 
-demo.queue().launch(
+demo.queue().launch()
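The final hunk replaces the parameterized launch call (truncated above) with a bare demo.queue().launch(); queue() keeps long GPU generations from timing out HTTP requests, and raising gr.Error inside the handler is how run_virtual_staging surfaces failures in the UI. A minimal sketch of the same Blocks/queue/error pattern, with a placeholder handler standing in for the staging pipeline:

    import traceback
    import gradio as gr

    def handler(text):
        try:
            if not text:
                raise ValueError("empty input")  # placeholder for the real pipeline
            return text.upper()
        except Exception as e:
            # Log the full traceback server-side, surface a short message in the UI.
            print(traceback.format_exc())
            raise gr.Error(f"An error occurred: {e}")

    with gr.Blocks() as demo:
        inp = gr.Textbox(label="Input")
        out = gr.Textbox(label="Output")
        gr.Button("Run").click(handler, inputs=inp, outputs=out)

    demo.queue().launch()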