Husr committed
Commit bd7033b · Parent(s): 70e4b9d
Files changed (1): app.py (+33, -5)
app.py CHANGED
@@ -160,6 +160,7 @@ attention_backend_error: str | None = None
 aoti_error: str | None = None
 transformer_compiled: bool = False
 transformer_compile_attempted: bool = False
+compile_error: str | None = None
 inductor_configured: bool = False
 
 SCHEDULERS = {"FlowMatch Euler": FlowMatchEulerDiscreteScheduler}
@@ -413,14 +414,19 @@ def configure_inductor_for_compile() -> None:
 
 
 def maybe_compile_transformer() -> None:
-    global transformer_compiled, transformer_compile_attempted
+    global transformer_compiled, transformer_compile_attempted, compile_error
     if not ENABLE_COMPILE or transformer_compile_attempted:
         return
     if pipe is None or getattr(pipe, "transformer", None) is None:
         return
 
     transformer_compile_attempted = True
+    compile_error = None
     configure_inductor_for_compile()
+    try:
+        torch._dynamo.config.suppress_errors = True
+    except Exception:  # noqa: BLE001
+        pass
 
     try:
         if getattr(pipe, "vae", None) is not None and hasattr(pipe.vae, "disable_tiling"):
@@ -434,6 +440,7 @@ def maybe_compile_transformer() -> None:
         transformer_compiled = True
     except Exception as exc:  # noqa: BLE001
         transformer_compiled = False
+        compile_error = str(exc)
        print(f"torch.compile failed (continuing without compile): {exc}")
 
 
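The runtime fallback added to `generate_image` below leans on two torch.compile behaviors. A minimal sketch, not from this commit (a plain `nn.Linear` stands in for the pipeline transformer):

```python
# Sketch of the two behaviors the commit relies on; `model` is a stand-in.
import torch

model = torch.nn.Linear(4, 4)
compiled = torch.compile(model)

# torch.compile on an nn.Module returns an OptimizedModule wrapper that
# keeps the original eager module reachable at `_orig_mod`.
assert hasattr(compiled, "_orig_mod") and compiled._orig_mod is model

# suppress_errors makes Dynamo fall back to eager when *tracing* fails,
# instead of raising; errors raised while running an already-compiled graph
# still surface, which is why generate_image adds its own fallback below.
torch._dynamo.config.suppress_errors = True
```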
@@ -513,7 +520,6 @@ def generate_image(
     max_shift: float,
 ) -> Tuple[torch.Tensor, int]:
     width, height = parse_resolution(resolution)
-    generator = torch.Generator("cuda").manual_seed(seed)
     set_scheduler(
         pipeline,
         str(scheduler_name),
@@ -533,8 +539,9 @@ def generate_image(
     except Exception as exc:  # noqa: BLE001
         print(f"LoRA scale update failed (continuing without changing LoRA state): {exc}")
 
-    with torch.inference_mode():
-        image = pipeline(
+    def run_pipeline() -> torch.Tensor:
+        generator = torch.Generator("cuda").manual_seed(seed)
+        return pipeline(
             prompt=prompt,
             height=height,
             width=width,
@@ -543,6 +550,25 @@ def generate_image(
             generator=generator,
             max_sequence_length=int(max_sequence_length),
         ).images[0]
+
+    try:
+        with torch.inference_mode():
+            image = run_pipeline()
+    except Exception as exc:  # noqa: BLE001
+        transformer = getattr(pipeline, "transformer", None)
+        message = str(exc)
+        is_dynamo_error = isinstance(exc, AssertionError) or "torch._dynamo" in message or "ConstantVariable" in message
+        if transformer is not None and hasattr(transformer, "_orig_mod") and is_dynamo_error:
+            global transformer_compiled, transformer_compile_attempted, compile_error
+            compile_error = message
+            transformer_compiled = False
+            transformer_compile_attempted = True
+            pipeline.transformer = transformer._orig_mod
+            print(f"torch.compile runtime failed; falling back to eager: {exc}")
+            with torch.inference_mode():
+                image = run_pipeline()
+        else:
+            raise
     return image, seed
 
 
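Detached from the app, the retry pattern reduces to the sketch below (hedged; `call_with_eager_fallback` and the Linear module are illustrative stand-ins). Note that creating the `torch.Generator` inside `run_pipeline` means the eager retry re-seeds from scratch, so both attempts start from the same noise for a given seed.

```python
# Standalone sketch of the runtime fallback above (names are illustrative).
import torch

def call_with_eager_fallback(module: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
    try:
        return module(x)
    except Exception as exc:  # noqa: BLE001
        # Heuristic mirroring the commit: treat AssertionErrors and messages
        # mentioning torch._dynamo as compile-related failures.
        if hasattr(module, "_orig_mod") and (
            isinstance(exc, AssertionError) or "torch._dynamo" in str(exc)
        ):
            return module._orig_mod(x)  # retry with the original eager module
        raise

compiled = torch.compile(torch.nn.Linear(4, 4))
out = call_with_eager_fallback(compiled, torch.randn(2, 4))
```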
@@ -709,7 +735,7 @@ with gr.Blocks(title="Z-Image + LoRA") as demo:
     gr.Markdown(
         """<div align="center">
 
-# Z-Image Generation (No SD fallback)
+# Z-Image with LoRA
 
 </div>"""
     )
@@ -728,6 +754,8 @@ with gr.Blocks(title="Z-Image + LoRA") as demo:
         details_md_blocks.append(f"**Attention backend error**\n```\n{attention_backend_error}\n```")
     if aoti_error:
         details_md_blocks.append(f"**AoTI error**\n```\n{aoti_error}\n```")
+    if compile_error:
+        details_md_blocks.append(f"**torch.compile error**\n```\n{compile_error}\n```")
 
     with gr.Column(elem_id="floating_status_panel"):
         with gr.Accordion("Status / Debug", open=False):
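For context, the status panel these lines feed follows the pattern sketched below (hedged; the `compile_error` value is a made-up example, not an actual log):

```python
# Sketch of the status-panel wiring; the error string is a hypothetical stand-in.
import gradio as gr

compile_error = "Dynamo failed while tracing transformer.forward"  # stand-in
details_md_blocks: list[str] = []
if compile_error:
    details_md_blocks.append(f"**torch.compile error**\n```\n{compile_error}\n```")

with gr.Blocks() as demo:
    with gr.Column(elem_id="floating_status_panel"):
        with gr.Accordion("Status / Debug", open=False):
            gr.Markdown("\n\n".join(details_md_blocks) or "No errors recorded.")
```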
 