Spaces:
Runtime error
Runtime error
Fix: Force CPU mode for LaMa + robust JSON parsing
Browse files
app.py
CHANGED
|
@@ -39,24 +39,39 @@ executor = ThreadPoolExecutor(max_workers=min(NUM_CPUS, 4))
|
|
| 39 |
# MODEL PRELOADING (Load once at startup, not per-request)
|
| 40 |
# ============================================================
|
| 41 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 42 |
print("🚀 BubbleScribe starting up...")
|
| 43 |
print(f" CPU Threads: {NUM_CPUS}")
|
|
|
|
| 44 |
|
| 45 |
# Global LaMa model (loaded once)
|
| 46 |
_lama_model = None
|
| 47 |
_lama_lock = threading.Lock()
|
| 48 |
|
| 49 |
def get_lama_model():
|
| 50 |
-
"""Get or initialize LaMa model (singleton pattern)."""
|
| 51 |
global _lama_model
|
| 52 |
if _lama_model is None:
|
| 53 |
with _lama_lock:
|
| 54 |
if _lama_model is None:
|
| 55 |
-
print("📦 Loading LaMa inpainting model...")
|
| 56 |
try:
|
| 57 |
from simple_lama_inpainting import SimpleLama
|
| 58 |
-
_lama_model = SimpleLama()
|
| 59 |
-
print("✅ LaMa model loaded!")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
except Exception as e:
|
| 61 |
print(f"⚠️ LaMa failed to load: {e}")
|
| 62 |
_lama_model = "fallback"
|
|
@@ -152,6 +167,90 @@ def scale_bbox(bbox: list, original_size: tuple, processed_size: tuple) -> list:
|
|
| 152 |
int(bbox[3] * scale_y)
|
| 153 |
]
|
| 154 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 155 |
# ============================================================
|
| 156 |
# DETECTION & TRANSLATION
|
| 157 |
# ============================================================
|
|
@@ -222,11 +321,10 @@ Important:
|
|
| 222 |
elif hasattr(msg, 'reasoning_content') and msg.reasoning_content:
|
| 223 |
result_text = msg.reasoning_content
|
| 224 |
|
| 225 |
-
# Parse JSON from response
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
# Scale bboxes back to original size if needed
|
| 231 |
if original_size != processed_size:
|
| 232 |
for det in detections:
|
|
|
|
# ============================================================
# MODEL PRELOADING (Load once at startup, not per-request)
# ============================================================

# Force CPU mode (no GPU available).
# NOTE(review): CUDA_VISIBLE_DEVICES only takes effect if torch has not
# already been imported earlier in this process — confirm import order.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import torch
torch.set_default_device('cpu')

print("🚀 BubbleScribe starting up...")
print(f" CPU Threads: {NUM_CPUS}")
print(" Device: CPU (forced)")  # plain string: the original f-string had no placeholders

# Global LaMa model (loaded once, lazily, by get_lama_model)
_lama_model = None

# Guards one-time initialization of _lama_model across worker threads.
_lama_lock = threading.Lock()
def get_lama_model():
    """Get or initialize the LaMa inpainting model (singleton) — CPU only.

    Sets the module-level ``_lama_model`` to a ``SimpleLama`` instance on
    success, or to the sentinel string ``"fallback"`` when loading fails.
    """
    global _lama_model
    if _lama_model is None:
        with _lama_lock:
            # Double-checked locking: re-test under the lock so only one
            # thread performs the (slow) model load.
            if _lama_model is None:
                print("📦 Loading LaMa inpainting model (CPU mode)...")
                try:
                    from simple_lama_inpainting import SimpleLama
                    try:
                        # Newer releases accept an explicit device argument.
                        _lama_model = SimpleLama(device=torch.device('cpu'))
                        print("✅ LaMa model loaded on CPU!")
                    except TypeError:
                        # Older releases have no `device` parameter.
                        # (Original code re-imported SimpleLama here and
                        # duplicated the whole fallback handler — redundant.)
                        _lama_model = SimpleLama()
                        print("✅ LaMa model loaded!")
                except Exception as e:
                    print(f"⚠️ LaMa failed to load: {e}")
                    _lama_model = "fallback"
|
|
|
| 167 |
int(bbox[3] * scale_y)
|
| 168 |
]
|
| 169 |
|
# ============================================================
# JSON REPAIR (Handle malformed model responses)
# ============================================================

def repair_json(text: str) -> str:
    """Attempt to repair common JSON issues in LLM responses.

    Applies a series of regex heuristics; the result is *more likely* to
    parse as JSON but is not guaranteed to.
    """
    # Strip markdown code fences (```json ... ```).
    text = re.sub(r'```json\s*', '', text)
    text = re.sub(r'```\s*', '', text)

    # Escape raw newlines that (heuristically) fall inside string literals.
    # BUGFIX: the replacement must be r'\\n' (a backslash + 'n'); the
    # original passed '\\n', which re.sub interprets as a literal newline
    # in the replacement string, making this step a no-op.
    text = re.sub(r'(?<!\\)\n(?=[^"]*"[^"]*(?:"[^"]*"[^"]*)*$)', r'\\n', text)

    # Drop trailing commas before a closing bracket/brace.
    text = re.sub(r',\s*([}\]])', r'\1', text)

    # Insert missing commas between adjacent objects.
    text = re.sub(r'\}\s*\{', '},{', text)

    # Rough heuristic: turn Japanese corner quotes into escaped ASCII
    # quotes so they survive inside JSON string values.
    for jp_quote in ('「', '」', '『', '』'):
        text = text.replace(jp_quote, '\\"')

    return text
def safe_parse_json(text: str) -> list:
    """Safely parse a JSON array of detections with fallback strategies.

    Tries, in order: direct parse of the first ``[...]`` span,
    repair-then-parse, salvaging individual ``{...}`` objects, and finally
    raw regex field extraction. Returns ``[]`` when nothing is recoverable.
    """
    # Strategy 1: direct parse of the first [...] span.
    try:
        json_match = re.search(r'\[[\s\S]*\]', text)
        if json_match:
            return json.loads(json_match.group())
    except json.JSONDecodeError:
        pass

    # Strategy 2: repair common issues, then parse.
    try:
        repaired = repair_json(text)
        json_match = re.search(r'\[[\s\S]*\]', repaired)
        if json_match:
            return json.loads(json_match.group())
    except json.JSONDecodeError:
        pass

    # Strategy 3: salvage individual {...} objects that contain a bbox.
    try:
        results = []
        for obj in re.findall(r'\{[^{}]*\}', text):
            try:
                parsed = json.loads(repair_json(obj))
                if 'bbox' in parsed:
                    results.append(parsed)
            except Exception:  # was bare except: — don't swallow KeyboardInterrupt
                continue
        if results:
            return results
    except Exception:  # was bare except:
        pass

    # Strategy 4: manual field extraction with regexes.
    try:
        results = []
        bbox_matches = re.findall(
            r'"bbox"\s*:\s*\[\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\]',
            text,
        )
        original_matches = re.findall(r'"original"\s*:\s*"([^"]*)"', text)
        translated_matches = re.findall(r'"translated"\s*:\s*"([^"]*)"', text)

        for i, bbox in enumerate(bbox_matches):
            results.append({
                "bbox": [int(v) for v in bbox],
                "original": original_matches[i] if i < len(original_matches) else "",
                "translated": translated_matches[i] if i < len(translated_matches) else "",
            })

        if results:
            return results
    except Exception:  # was bare except:
        pass

    return []
| 254 |
# ============================================================
|
| 255 |
# DETECTION & TRANSLATION
|
| 256 |
# ============================================================
|
|
|
|
| 321 |
elif hasattr(msg, 'reasoning_content') and msg.reasoning_content:
|
| 322 |
result_text = msg.reasoning_content
|
| 323 |
|
| 324 |
+
# Parse JSON from response with robust error handling
|
| 325 |
+
detections = safe_parse_json(result_text)
|
| 326 |
+
|
| 327 |
+
if detections:
|
|
|
|
| 328 |
# Scale bboxes back to original size if needed
|
| 329 |
if original_size != processed_size:
|
| 330 |
for det in detections:
|