prithivMLmods committed (verified)
Commit 3c16ea1 · Parent(s): 504f85b

Update app.py

Files changed (1):
  1. app.py (+2, -15)

app.py:
```diff
@@ -6,8 +6,6 @@ import torch
 import random
 from PIL import Image
 from typing import Iterable
-
-# --- Gradio Theme ---
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes
 
@@ -67,7 +65,6 @@ class SteelBlueTheme(Soft):
 
 steel_blue_theme = SteelBlueTheme()
 
-# --- Model Loading ---
 from diffusers import FlowMatchEulerDiscreteScheduler
 # from optimization import optimize_pipeline_ # Assuming this is a custom file, if not available, comment out the call
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
@@ -88,7 +85,6 @@ pipe = QwenImageEditPlusPipeline.from_pretrained(
     torch_dtype=dtype
 ).to(device)
 
-# Load all LoRA adapters
 pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
                        weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
                        adapter_name="anime")
@@ -105,7 +101,6 @@ pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight",
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 MAX_SEED = np.iinfo(np.int32).max
 
-# --- Main Inference Function (Corrected) ---
 @spaces.GPU
 def infer(
     input_image,
@@ -120,7 +115,6 @@ def infer(
     if input_image is None:
         raise gr.Error("Please upload an image to edit.")
 
-    # Dynamically set the adapter
     if lora_adapter == "Photo-to-Anime":
         pipe.set_adapters(["anime"], adapter_weights=[1.0])
     elif lora_adapter == "Multiple-Angles":
@@ -136,7 +130,6 @@ def infer(
     generator = torch.Generator(device=device).manual_seed(seed)
     negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
 
-    # *** FIX: Get dimensions directly from the input image to preserve aspect ratio ***
     original_image = input_image.convert("RGB")
     width, height = original_image.size
 
@@ -153,18 +146,15 @@
 
     return result, seed
 
-# --- Wrapper for Examples (Corrected) ---
 @spaces.GPU
 def infer_example(input_image, prompt, lora_adapter):
     input_pil = input_image.convert("RGB")
-    # Set reasonable default values for example inference
     guidance_scale = 1.0
     steps = 4
-    # Call the main infer function
     result, seed = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps)
     return result, seed
 
-# --- UI Layout ---
+
 css="""
 #col-container {
     margin: 0 auto;
@@ -226,13 +216,10 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
         label="Examples"
     )
 
-    # --- Event Handlers (Corrected) ---
     run_button.click(
         fn=infer,
         inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
         outputs=[output_image, seed]
     )
 
-# No longer need the upload handler for dimensions
-
-demo.launch()
+demo.launch(mcp_server=True, ssr_mode=False, show_error=True)
```
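The only functional change in this commit is the final `demo.launch(...)` call; the remaining hunks just delete section comments. Below is a minimal sketch of what the new flags do, assuming a recent Gradio release with MCP support installed (`pip install "gradio[mcp]"`); the `greet` interface is a hypothetical stand-in for the Space's actual `gr.Blocks` app:

```python
import gradio as gr

def greet(name: str) -> str:
    """Typed, documented functions become MCP tools when mcp_server=True."""
    return f"Hello, {name}!"

demo = gr.Interface(fn=greet, inputs="text", outputs="text")

# mcp_server=True  -> also serve the app as an MCP server, so LLM clients can
#                     call its functions as tools (requires the gradio[mcp] extra)
# ssr_mode=False   -> disable server-side rendering of the frontend
# show_error=True  -> surface Python exceptions in the browser UI
demo.launch(mcp_server=True, ssr_mode=False, show_error=True)
```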
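For context on the unchanged lines surrounding the hunks: the app follows diffusers' load-once, switch-per-request LoRA pattern. A hedged sketch of that pattern (repo, weight, and adapter names copied from the diff; `pipe` is the Space's `QwenImageEditPlusPipeline`, and the calls shown are diffusers' standard PEFT LoRA integration):

```python
# At startup: register each LoRA under a named adapter.
pipe.load_lora_weights(
    "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
    weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
    adapter_name="anime",
)
# ... the diff shows further adapters, e.g. "dx8152/Qwen-Image-Edit-2509-Relight" ...

# Per request: activate exactly one adapter at full strength before inference.
pipe.set_adapters(["anime"], adapter_weights=[1.0])
```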