rahul7star committed on
Commit
ecc6da9
·
verified ·
1 Parent(s): 32ac096

Update app_lora.py

Browse files
Files changed (1) hide show
  1. app_lora.py +52 -51
app_lora.py CHANGED
@@ -688,69 +688,70 @@ def generate_image_all_latents(prompt, height, width, steps, seed, guidance_scal
688
 
689
  @spaces.GPU
690
  def generate_image(prompt, height, width, steps, seed, guidance_scale=7.5):
 
 
 
 
691
  LOGS = []
692
- device = "cuda"
693
- generator = torch.Generator(device).manual_seed(int(seed))
694
-
695
  placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
696
  latent_gallery = []
 
 
 
 
 
 
 
 
 
 
 
 
 
697
 
698
  try:
699
- # -------------------
700
- # Prepare latents
701
- # -------------------
702
- batch_size = 1
703
- if hasattr(pipe, "vae") and hasattr(pipe.vae, "config"):
704
- num_channels = pipe.vae.config.latent_channels
705
- else:
706
- num_channels = 4
707
-
708
- latents = torch.randn(
709
- (batch_size, num_channels, height // 8, width // 8),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
710
  generator=generator,
711
- device=device,
712
  )
713
-
714
- # -------------------
715
- # Encode prompt
716
- # -------------------
717
- text_embeddings = pipe._encode_prompt(prompt)
718
-
719
- # -------------------
720
- # Scheduler loop
721
- # -------------------
722
- num_previews = min(10, steps)
723
- preview_indices = torch.linspace(0, steps - 1, num_previews).long()
724
-
725
- for t_idx in range(steps):
726
- t = pipe.scheduler.timesteps[t_idx]
727
-
728
- with torch.no_grad():
729
- latents = pipe.unet(latents, t, encoder_hidden_states=text_embeddings).sample
730
- latents = pipe.scheduler.step(latents, t, latents).prev_sample
731
-
732
- if t_idx in preview_indices:
733
- try:
734
- decoded = pipe.decode_latents(latents)
735
- latent_gallery.append(decoded)
736
- except Exception as e:
737
- LOGS.append(f"⚠️ Preview decode failed: {e}")
738
- latent_gallery.append(placeholder)
739
-
740
- yield None, latent_gallery[-5:], LOGS
741
-
742
- # -------------------
743
- # Final image
744
- # -------------------
745
- final_img = pipe.decode_latents(latents)
746
  latent_gallery.append(final_img)
747
- LOGS.append("✅ Pipeline generation completed successfully.")
748
- yield final_img, latent_gallery[-5:] + [final_img], LOGS
 
749
 
750
  except Exception as e:
751
  LOGS.append(f"❌ Generation failed: {e}")
752
  latent_gallery.append(placeholder)
753
- yield placeholder, latent_gallery[-5:] + [placeholder], LOGS
 
754
 
755
 
756
  def generate_image1(prompt, height, width, steps, seed, guidance_scale=0.0):
 
688
 
689
@spaces.GPU
def generate_image(prompt, height, width, steps, seed, guidance_scale=7.5):
    """
    Generate an image with the pipeline, applying the most recently loaded
    LoRA adapter (if any), and stream step previews followed by the final image.

    Yields tuples of (final_image_or_None, preview_gallery, LOGS) so the UI
    can update live while previews are produced.

    Parameters:
        prompt: text prompt for the pipeline.
        height, width: output image size in pixels.
        steps: number of inference steps for the final image.
        seed: RNG seed (coerced to int) for reproducible generation.
        guidance_scale: classifier-free guidance scale (default 7.5).
    """
    LOGS = []
    generator = torch.Generator("cuda").manual_seed(int(seed))

    # White placeholder shown in the gallery when a step or the run fails.
    placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
    latent_gallery = []

    # ------------------------------------------------------------------
    # Determine active LoRA adapter.
    # `loaded_loras` stores both adapter entries and "<name>_strength"
    # bookkeeping entries, so filter the strength keys out before picking
    # the most recently added adapter. (Previously `keys()[-1]` could pick
    # a "*_strength" key by mistake.)
    # ------------------------------------------------------------------
    adapter_names = [k for k in loaded_loras if not k.endswith("_strength")]
    if adapter_names:
        active_adapter = adapter_names[-1]
        # Single lookup with a default; a stored strength of 0.0 is now
        # respected instead of being silently replaced by 1.0.
        active_strength = loaded_loras.get(active_adapter + "_strength", 1.0)
        pipe.set_adapters([active_adapter], [active_strength])
        LOGS.append(f"🧩 Using LoRA adapter: {active_adapter} (strength={active_strength})")
    else:
        pipe.set_adapters([], [])
        LOGS.append("⚡ No LoRA applied")

    try:
        # --------------------------------------------------------------
        # Low-resolution preview passes (at most 5), each run with an
        # increasing step count, upscaled to the target size for display.
        # Yield after every preview so the gallery updates live.
        # --------------------------------------------------------------
        num_preview_steps = min(5, steps)
        for i in range(num_preview_steps):
            step = i + 1
            try:
                preview_output = pipe(
                    prompt=prompt,
                    height=height // 4,  # small preview for speed
                    width=width // 4,
                    num_inference_steps=step,
                    guidance_scale=guidance_scale,
                    generator=generator,
                )
                img = preview_output.images[0].resize((width, height))
                latent_gallery.append(img)
            except Exception as e:
                LOGS.append(f"⚠️ Preview step {step} failed: {e}")
                latent_gallery.append(placeholder)
            # Stream progress to the UI (no final image yet).
            yield None, latent_gallery, LOGS

        # --------------------------------------------------------------
        # Final full-resolution image.
        # --------------------------------------------------------------
        output = pipe(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=steps,
            guidance_scale=guidance_scale,
            generator=generator,
        )
        final_img = output.images[0]
        latent_gallery.append(final_img)
        LOGS.append("✅ Image generation completed.")

        yield final_img, latent_gallery, LOGS

    except Exception as e:
        LOGS.append(f"❌ Generation failed: {e}")
        latent_gallery.append(placeholder)
        yield placeholder, latent_gallery, LOGS
755
 
756
 
757
  def generate_image1(prompt, height, width, steps, seed, guidance_scale=0.0):