linoyts HF Staff committed on
Commit
af81fa5
·
verified ·
1 Parent(s): a442327

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -4
app.py CHANGED
@@ -10,16 +10,25 @@ pipe = ZImagePipeline.from_pretrained(
10
  torch_dtype=torch.bfloat16,
11
  low_cpu_mem_usage=False,
12
  )
 
 
 
 
 
13
  pipe.load_lora_weights("bdsqlsz/qinglong_DetailedEyes_Z-Image", weight_name="qinglong_detailedeye_z-imageV2(comfy).safetensors", adapter_name="lora")
14
- pipe.set_adapters(["lora",], adapter_weights=[1.1])
15
- pipe.fuse_lora(adapter_names=["lora"], lora_scale=1.)
16
  pipe.unload_lora_weights()
17
  pipe.to("cuda")
 
18
 
19
  # ======== AoTI compilation + FA3 ========
20
  pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
21
  spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
22
 
 
 
 
23
  print("Pipeline loaded!")
24
 
25
  @spaces.GPU
@@ -37,8 +46,17 @@ def generate_image(prompt, height, width, num_inference_steps, seed, randomize_s
37
  guidance_scale=0.0, # Guidance should be 0 for Turbo models
38
  generator=generator,
39
  ).images[0]
 
 
 
 
 
 
 
 
 
40
 
41
- return image, seed
42
 
43
 
44
  # Example prompts
@@ -109,7 +127,7 @@ with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
109
  generate_btn = gr.Button("🚀 Generate", variant="primary", size="lg")
110
 
111
  with gr.Column(scale=1):
112
- output_image = gr.Image(
113
  label="Generated Image",
114
  type="pil",
115
  )
 
10
  torch_dtype=torch.bfloat16,
11
  low_cpu_mem_usage=False,
12
  )
13
+ pipe_no_lora = ZImagePipeline.from_pretrained(
14
+ "Tongyi-MAI/Z-Image-Turbo",
15
+ torch_dtype=torch.bfloat16,
16
+ low_cpu_mem_usage=False,
17
+ )
18
  pipe.load_lora_weights("bdsqlsz/qinglong_DetailedEyes_Z-Image", weight_name="qinglong_detailedeye_z-imageV2(comfy).safetensors", adapter_name="lora")
19
+ pipe.set_adapters(["lora",], adapter_weights=[1.])
20
+ pipe.fuse_lora(adapter_names=["lora"], lora_scale=1.15)
21
  pipe.unload_lora_weights()
22
  pipe.to("cuda")
23
+ pipe_no_lora.to("cuda")
24
 
25
  # ======== AoTI compilation + FA3 ========
26
  pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
27
  spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
28
 
29
+ pipe_no_lora.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
30
+ spaces.aoti_blocks_load(pipe_no_lora.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
31
+
32
  print("Pipeline loaded!")
33
 
34
  @spaces.GPU
 
46
  guidance_scale=0.0, # Guidance should be 0 for Turbo models
47
  generator=generator,
48
  ).images[0]
49
+
50
+ image_no_lora = pipe_no_lora(
51
+ prompt=prompt,
52
+ height=int(height),
53
+ width=int(width),
54
+ num_inference_steps=int(num_inference_steps),
55
+ guidance_scale=0.0, # Guidance should be 0 for Turbo models
56
+ generator=generator,
57
+ ).images[0]
58
 
59
+ return (image_no_lora,image), seed
60
 
61
 
62
  # Example prompts
 
127
  generate_btn = gr.Button("🚀 Generate", variant="primary", size="lg")
128
 
129
  with gr.Column(scale=1):
130
+ output_image = gr.ImageSlider(
131
  label="Generated Image",
132
  type="pil",
133
  )