Fix painting clip
Browse files
- backend/runner/config.py +1 -1
- backend/runner/inference.py +13 -3
backend/runner/config.py
CHANGED
|
@@ -74,7 +74,7 @@ MODELS_DIR = DATA_READ_ROOT / "models"
|
|
| 74 |
MARKER_DIR = DATA_READ_ROOT / "marker_output"
|
| 75 |
|
| 76 |
# Model directories
|
| 77 |
-
PAINTINGCLIP_MODEL_DIR = MODELS_DIR / "paintingclip"
|
| 78 |
|
| 79 |
# Writable directories (outside repo)
|
| 80 |
OUTPUTS_DIR = WRITE_ROOT / "outputs"
|
|
|
|
| 74 |
MARKER_DIR = DATA_READ_ROOT / "marker_output"
|
| 75 |
|
| 76 |
# Model directories
|
| 77 |
+
PAINTINGCLIP_MODEL_DIR = MODELS_DIR / "PaintingClip" # Note the capital C
|
| 78 |
|
| 79 |
# Writable directories (outside repo)
|
| 80 |
OUTPUTS_DIR = WRITE_ROOT / "outputs"
|
backend/runner/inference.py
CHANGED
|
@@ -59,7 +59,7 @@ MODEL_CONFIG = {
|
|
| 59 |
"paintingclip": {
|
| 60 |
"model_id": "openai/clip-vit-base-patch32",
|
| 61 |
"use_lora": True,
|
| 62 |
-
"lora_dir": PAINTINGCLIP_MODEL_DIR,
|
| 63 |
},
|
| 64 |
}
|
| 65 |
|
|
@@ -179,9 +179,19 @@ def _initialize_pipeline():
|
|
| 179 |
processor = CLIPProcessor.from_pretrained(config["model_id"], use_fast=False)
|
| 180 |
base_model = CLIPModel.from_pretrained(config["model_id"])
|
| 181 |
|
| 182 |
-
# Apply LoRA adapter if configured
|
| 183 |
if config["use_lora"] and config["lora_dir"]:
|
| 184 |
-
[removed line content lost in extraction — presumably the unconditional adapter load, e.g. `model = PeftModel.from_pretrained(base_model, config["lora_dir"])`; verify against the repository]
| 185 |
else:
|
| 186 |
model = base_model
|
| 187 |
|
|
|
|
| 59 |
"paintingclip": {
|
| 60 |
"model_id": "openai/clip-vit-base-patch32",
|
| 61 |
"use_lora": True,
|
| 62 |
+
"lora_dir": PAINTINGCLIP_MODEL_DIR, # This should now point to the correct path
|
| 63 |
},
|
| 64 |
}
|
| 65 |
|
|
|
|
| 179 |
processor = CLIPProcessor.from_pretrained(config["model_id"], use_fast=False)
|
| 180 |
base_model = CLIPModel.from_pretrained(config["model_id"])
|
| 181 |
|
| 182 |
+
# Apply LoRA adapter if configured and available
|
| 183 |
if config["use_lora"] and config["lora_dir"]:
|
| 184 |
+
lora_path = Path(config["lora_dir"])
|
| 185 |
+
adapter_config_path = lora_path / "adapter_config.json"
|
| 186 |
+
|
| 187 |
+
if adapter_config_path.exists():
|
| 188 |
+
print(f"✅ Loading LoRA adapter from {lora_path}")
|
| 189 |
+
model = PeftModel.from_pretrained(base_model, str(lora_path))
|
| 190 |
+
else:
|
| 191 |
+
print(f"⚠️ LoRA adapter not found at {lora_path}")
|
| 192 |
+
print(f"⚠️ Missing file: {adapter_config_path}")
|
| 193 |
+
print(f"⚠️ Falling back to base CLIP model without LoRA adapter")
|
| 194 |
+
model = base_model
|
| 195 |
else:
|
| 196 |
model = base_model
|
| 197 |
|