Spaces: Runtime error

Update app.py

app.py CHANGED
@@ -516,24 +516,52 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     """
     VRAM optimized generation with enhanced error reporting
     """
+    print("=" * 80)
+    print("🔍 FUNCTION CALLED: run_lora")
+    print(f"📋 Inputs received:")
+    print(f"  - face_image: {type(face_image)} - {face_image.size if face_image else 'None'}")
+    print(f"  - prompt: '{prompt}'")
+    print(f"  - selected_state: {type(selected_state)} - {selected_state}")
+    print(f"  - custom_lora: {custom_lora}")
+    print(f"  - use_multiple_faces: {use_multiple_faces}")
+    print("=" * 80)
+
     try:
-        print("=" * 80)
         print("Starting generation...")
         print("Custom LoRA:", custom_lora)
         custom_lora_path = custom_lora[0] if custom_lora else None
-
+
+        # Extract index from selected_state (handle Gradio SelectData object)
+        if selected_state:
+            print(f"  selected_state exists: {selected_state}")
+            print(f"  selected_state type: {type(selected_state)}")
+            print(f"  selected_state dir: {dir(selected_state)}")
+            if hasattr(selected_state, 'index'):
+                selected_state_index = selected_state.index
+                print(f"  ✅ Extracted index: {selected_state_index}")
+            else:
+                selected_state_index = -1
+                print(f"  ⚠️ No index attribute, using -1")
+        else:
+            selected_state_index = -1
+            print(f"  ⚠️ selected_state is None or False")
+
+        print(f"🔍 VALIDATION CHECK:")
+        print(f"  - selected_state_index: {selected_state_index}")
+        print(f"  - custom_lora_path: {custom_lora_path}")
+        print(f"  - len(sdxl_loras): {len(sdxl_loras)}")
 
         # Validate selection immediately
-        if
+        if (selected_state_index is None or selected_state_index < 0) and not custom_lora_path:
             error_msg = "❌ You must select a style before generating"
             print(error_msg)
-            return
+            return gr.update(), gr.update(visible=False), gr.update(visible=True, value=error_msg)
 
-        # Validate selected_state_index is valid
-        if not custom_lora_path and selected_state_index >= len(sdxl_loras):
+        # Validate selected_state_index is valid (only check positive indices)
+        if not custom_lora_path and selected_state_index >= 0 and selected_state_index >= len(sdxl_loras):
             error_msg = f"❌ Invalid style selection (index: {selected_state_index}, available: {len(sdxl_loras)})"
             print(error_msg)
-            return
+            return gr.update(), gr.update(visible=False), gr.update(visible=True, value=error_msg)
 
         st = time.time()
 
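The index-extraction block added above is there because, when a handler is bound to a Gradio gallery's .select event, Gradio passes it a gr.SelectData whose .index field holds the clicked position; routed any other way (for example through gr.State), the value may arrive as None, hence the hasattr check and the -1 fallback. A minimal sketch of that mechanism, with hypothetical component names, since the Space's actual wiring is not part of this diff:

import gradio as gr

# Minimal sketch: Gradio fills `evt` via its type annotation when the
# handler is bound to a .select event; evt.index is the position of the
# clicked gallery item. Component names and image paths are placeholders.
def on_select(evt: gr.SelectData):
    print(f"selected index: {evt.index}, value: {evt.value}")
    return evt.index

with gr.Blocks() as demo:
    gallery = gr.Gallery(value=["a.png", "b.png"], label="styles")
    chosen = gr.Number(label="selected index")
    gallery.select(on_select, None, chosen)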
@@ -602,7 +630,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
         if repo_name not in state_dicts:
             error_msg = f"❌ LoRA not loaded: {repo_name}\nAvailable: {list(state_dicts.keys())[:5]}"
             print(error_msg)
-            return
+            return gr.update(), gr.update(visible=False), gr.update(visible=True, value=error_msg)
         full_path_lora = state_dicts[repo_name]["saved_name"]
 
         repo_name = repo_name.rstrip("/").lower()
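Each failure path in this commit returns the same three-slot tuple instead of a bare return: gr.update() with no arguments leaves its output component untouched, while gr.update(visible=...) toggles the other two. That shape only makes sense if run_lora is registered with exactly three outputs, which this diff does not show; the sketch below assumes a (result, extras row, error box) layout with invented names:

import gradio as gr

# Illustrative only: a handler whose three return slots map onto
# (result, extras, error_box), mirroring the diff's return shape.
def handler(ok):
    if not ok:
        # Leave `result` unchanged, hide the extras, surface the error.
        return gr.update(), gr.update(visible=False), gr.update(visible=True, value="Something went wrong")
    return "done", gr.update(visible=True), gr.update(visible=False)

with gr.Blocks() as demo:
    ok_box = gr.Checkbox(value=True, label="succeed?")
    result = gr.Textbox(label="result")
    extras = gr.Row(visible=False)
    error_box = gr.Markdown(visible=False)
    gr.Button("Run").click(handler, ok_box, [result, extras, error_box])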
@@ -651,7 +679,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
             print(error_msg)
             print("=" * 80)
             torch.cuda.empty_cache()
-            return
+            return gr.update(), gr.update(visible=False), gr.update(visible=True, value=error_msg)
     except RuntimeError as e:
         if "out of memory" in str(e).lower():
             error_msg = (
@@ -668,7 +696,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
             print(error_msg)
             print("=" * 80)
             torch.cuda.empty_cache()
-            return
+            return gr.update(), gr.update(visible=False), gr.update(visible=True, value=error_msg)
     except Exception as e:
         error_msg = f"Generation failed: {str(e)}\n\nFull error:\n{traceback.format_exc()}"
         print("=" * 80)
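The except RuntimeError branch matches on the message text because PyTorch raises CUDA out-of-memory as a plain RuntimeError whose text contains "out of memory"; torch.cuda.empty_cache() afterwards returns the allocator's cached blocks to the driver so a later, smaller attempt can actually reuse the freed VRAM. A condensed sketch of that pattern, with pipe as a stand-in callable rather than the Space's actual pipeline:

import torch

def generate_safely(pipe, **kwargs):
    # `pipe` is a placeholder callable (e.g. a diffusers pipeline),
    # not the Space's actual API.
    try:
        return pipe(**kwargs)
    except RuntimeError as e:
        if "out of memory" not in str(e).lower():
            raise
        # Release cached allocator blocks so a retry (say, at a lower
        # resolution or batch size) has a chance to fit in VRAM.
        torch.cuda.empty_cache()
        raise MemoryError("GPU out of memory; reduce image size or batch") from e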
@@ -676,7 +704,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
         print(error_msg)
         print("=" * 80)
         torch.cuda.empty_cache()
-        return
+        return gr.update(), gr.update(visible=False), gr.update(visible=True, value=error_msg)
 
 
 def generate_image_inline(prompt, negative, face_emb, face_image, face_kps, image_strength,