linoyts HF Staff commited on
Commit
09a6cb1
·
verified ·
1 Parent(s): d23b320

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +294 -77
app.py CHANGED
@@ -1,87 +1,304 @@
1
- with gr.Blocks(css=css, theme=gr.themes.Citrus()) as demo:
2
- gr.HTML("<h1 style='text-align: center'>Qwen-Image with InstantX Inpainting ControlNet</style>")
3
- gr.Markdown(
4
- "Generate images with the [InstantX/Qwen-Image-ControlNet-Inpainting](https://huggingface.co/InstantX/Qwen-Image-ControlNet-Inpainting) that takes depth, pose and canny conditionings"
5
- )
6
- with gr.Row():
7
- with gr.Column():
8
- edit_image = gr.ImageEditor(
9
- label='Upload and draw mask for inpainting',
10
- type='pil',
11
- sources=["upload", "webcam"],
12
- image_mode='RGB',
13
- layers=False,
14
- brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"),
15
- height=600
16
- )
17
- prompt = gr.Text(
18
- label="Prompt",
19
- show_label=False,
20
- max_lines=1,
21
- placeholder="Enter your prompt (e.g., 'change the hat to red')",
22
- container=False,
23
- )
24
- negative_prompt = gr.Text(
25
- label="Negative Prompt",
26
- show_label=True,
27
- max_lines=1,
28
- placeholder="Enter what you don't want (optional)",
29
- container=False,
30
- value="",
31
- visible=False
32
- )
33
- run_button = gr.Button("Run")
34
-
35
- with gr.Column():
36
- result = gr.ImageSlider(label="Result", show_label=False, interactive=False)
37
- use_as_input_button = gr.Button("🔄 Use as Input Image", visible=False, variant="secondary")
38
-
39
- with gr.Accordion("Advanced Settings", open=False):
40
-
41
- seed = gr.Slider(
42
- label="Seed",
43
- minimum=0,
44
- maximum=MAX_SEED,
45
- step=1,
46
- value=42,
 
 
 
47
  )
48
 
49
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
50
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  with gr.Row():
53
- strength = gr.Slider(
54
- label="Conditioning Scale",
55
- minimum=0.0,
56
- maximum=1.0,
57
- step=0.1,
58
- value=1.0,
59
- info="Controls how much the inpainted region should change"
60
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
- true_cfg_scale = gr.Slider(
63
- label="True CFG Scale",
64
- minimum=1.0,
65
- maximum=10.0,
66
- step=0.5,
67
- value=4.0,
68
- info="Classifier-free guidance scale"
69
- )
70
-
71
- num_inference_steps = gr.Slider(
72
- label="Number of inference steps",
73
- minimum=1,
74
- maximum=50,
75
  step=1,
76
- value=30,
77
  )
78
 
79
- rewrite_prompt = gr.Checkbox(
80
- label="Enhance prompt (using HF Inference)",
81
- value=True
82
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
- # Event handlers for reuse functionality (MUST be inside gr.Blocks context with 4 spaces)
85
  use_as_input_button.click(
86
  fn=use_output_as_input,
87
  inputs=[result],
@@ -97,9 +314,9 @@ with gr.Blocks(css=css, theme=gr.themes.Citrus()) as demo:
97
  outputs=result,
98
  show_api=False
99
  ).then(
100
- fn=infer,
101
- inputs=[edit_image, prompt, negative_prompt, seed, randomize_seed, strength, num_inference_steps, true_cfg_scale, rewrite_prompt],
102
- outputs=[result, seed]
103
  ).then(
104
  fn=lambda: gr.update(visible=True),
105
  inputs=None,
 
1
# Standard library
import json  # added: json.loads is used in polish_prompt_hf but was never imported
import math
import os
import random

# Third-party
import gradio as gr
import numpy as np
import spaces
import torch
from PIL import Image

# from diffusers import QwenImageEditInpaintPipeline
from diffusers import QwenImageControlNetModel, QwenImageControlNetInpaintPipeline
from diffusers.utils import load_image
from huggingface_hub import InferenceClient

# Local
from optimization import optimize_pipeline_

# Set environment variable for parallel loading
# os.environ["HF_ENABLE_PARALLEL_LOADING"] = "YES"
21
+
22
+ # --- Prompt Enhancement using Hugging Face InferenceClient ---
23
def polish_prompt_hf(original_prompt, system_prompt):
    """
    Rewrite `original_prompt` with a hosted LLM via the Hugging Face InferenceClient.

    Enhancement is strictly best-effort: if HF_TOKEN is missing or the remote
    call fails for any reason, the original prompt is returned unchanged so
    generation can always proceed.

    Args:
        original_prompt: The user's raw edit instruction (sent as the user message).
        system_prompt: Instructions for the rewriter model (sent as the system message).

    Returns:
        The rewritten prompt (single line, stripped), or `original_prompt` on failure.
    """
    # Without a token we cannot call the inference API at all.
    api_key = os.environ.get("HF_TOKEN")
    if not api_key:
        print("Warning: HF_TOKEN not set. Falling back to original prompt.")
        return original_prompt

    try:
        client = InferenceClient(
            provider="cerebras",
            api_key=api_key,
        )

        # Chat-completions payload: system instructions + user prompt.
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": original_prompt}
        ]

        completion = client.chat.completions.create(
            model="Qwen/Qwen3-235B-A22B-Instruct-2507",
            messages=messages,
        )

        result = completion.choices[0].message.content

        # Some models reply with a JSON wrapper like {"Rewritten": "..."};
        # unwrap it when present, otherwise use the raw text.
        if '{"Rewritten"' in result:
            try:
                # Strip markdown code fences before parsing.
                result = result.replace('```json', '').replace('```', '')
                result_json = json.loads(result)
                polished_prompt = result_json.get('Rewritten', result)
            except (json.JSONDecodeError, TypeError, AttributeError):
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; catch only parse failures.
                polished_prompt = result
        else:
            polished_prompt = result

        # Collapse to a single line for the diffusion pipeline.
        polished_prompt = polished_prompt.strip().replace("\n", " ")
        return polished_prompt

    except Exception as e:
        print(f"Error during API call to Hugging Face: {e}")
        # Fallback to original prompt if enhancement fails
        return original_prompt
74
+
75
+
76
def polish_prompt(prompt, img):
    """
    Polish an edit instruction for Qwen-Image inpainting via HF inference.

    Args:
        prompt: The user's raw edit instruction.
        img: Accepted for interface compatibility with an image-aware rewriter;
            not sent to the text-only HF enhancement model.

    Returns:
        The rewritten instruction, or the original prompt if enhancement fails.
    """
    SYSTEM_PROMPT = '''
# Edit Instruction Rewriter
You are a professional edit instruction rewriter. Your task is to generate a precise, concise, and visually achievable professional-level edit instruction based on the user-provided instruction and the image to be edited.
Please strictly follow the rewriting rules below:
## 1. General Principles
- Keep the rewritten prompt **concise**. Avoid overly long sentences and reduce unnecessary descriptive language.
- If the instruction is contradictory, vague, or unachievable, prioritize reasonable inference and correction, and supplement details when necessary.
- Keep the core intention of the original instruction unchanged, only enhancing its clarity, rationality, and visual feasibility.
- All added objects or modifications must align with the logic and style of the edited input image's overall scene.
## 2. Task Type Handling Rules
### 1. Add, Delete, Replace Tasks
- If the instruction is clear (already includes task type, target entity, position, quantity, attributes), preserve the original intent and only refine the grammar.
- If the description is vague, supplement with minimal but sufficient details (category, color, size, orientation, position, etc.). For example:
> Original: "Add an animal"
> Rewritten: "Add a light-gray cat in the bottom-right corner, sitting and facing the camera"
- Remove meaningless instructions: e.g., "Add 0 objects" should be ignored or flagged as invalid.
- For replacement tasks, specify "Replace Y with X" and briefly describe the key visual features of X.
### 2. Text Editing Tasks
- All text content must be enclosed in English double quotes " ". Do not translate or alter the original language of the text, and do not change the capitalization.
- **For text replacement tasks, always use the fixed template:**
- Replace "xx" to "yy".
- Replace the xx bounding box to "yy".
- If the user does not specify text content, infer and add concise text based on the instruction and the input image's context. For example:
> Original: "Add a line of text" (poster)
> Rewritten: "Add text "LIMITED EDITION" at the top center with slight shadow"
- Specify text position, color, and layout in a concise way.
### 3. Human Editing Tasks
- Maintain the person's core visual consistency (ethnicity, gender, age, hairstyle, expression, outfit, etc.).
- If modifying appearance (e.g., clothes, hairstyle), ensure the new element is consistent with the original style.
- **For expression changes, they must be natural and subtle, never exaggerated.**
- If deletion is not specifically emphasized, the most important subject in the original image (e.g., a person, an animal) should be preserved.
- For background change tasks, emphasize maintaining subject consistency at first.
- Example:
> Original: "Change the person's hat"
> Rewritten: "Replace the man's hat with a dark brown beret; keep smile, short hair, and gray jacket unchanged"
### 4. Style Transformation or Enhancement Tasks
- If a style is specified, describe it concisely with key visual traits. For example:
> Original: "Disco style"
> Rewritten: "1970s disco: flashing lights, disco ball, mirrored walls, colorful tones"
- If the instruction says "use reference style" or "keep current style," analyze the input image, extract main features (color, composition, texture, lighting, art style), and integrate them concisely.
- **For coloring tasks, including restoring old photos, always use the fixed template:** "Restore old photograph, remove scratches, reduce noise, enhance details, high resolution, realistic, natural skin tones, clear facial features, no distortion, vintage photo restoration"
- If there are other changes, place the style description at the end.
## 3. Rationality and Logic Checks
- Resolve contradictory instructions: e.g., "Remove all trees but keep all trees" should be logically corrected.
- Add missing key information: if position is unspecified, choose a reasonable area based on composition (near subject, empty space, center/edges).
# Output Format
Return only the rewritten instruction text directly, without JSON formatting or any other wrapper.
'''

    # Note: the image is not used in the HF version, but the interface is
    # kept consistent with an image-aware rewriter.
    # BUG FIX: previously the system prompt was pasted into the user message
    # ("{SYSTEM_PROMPT}\n\nUser Input: ...") AND sent again as the system role
    # by polish_prompt_hf, duplicating the instructions in every request.
    # Send only the raw prompt as the user message.
    return polish_prompt_hf(prompt, SYSTEM_PROMPT)
134
+
135
+
136
# Upper bound for the seed slider and random seed draw; int32 max keeps the
# value safe for torch.Generator.manual_seed.
MAX_SEED = np.iinfo(np.int32).max
# NOTE(review): not referenced anywhere in this chunk — confirm it is used
# elsewhere in the file before removing.
MAX_IMAGE_SIZE = 2048
138
+
139
+ # --- Helper functions for reuse feature ---
140
def clear_result():
    """Blank out the result slider before a new generation run starts."""
    return gr.update(value=None)
143
+
144
def use_output_as_input(output_image):
    """Feed the generated result back into the editor as the next input image.

    `output_image` is the ImageSlider value — presumably a (before, after)
    pair; the generated "after" image at index 1 is reused. Returns a no-op
    update when there is no result yet.
    """
    if output_image is None:
        return gr.update()
    return gr.update(value=output_image[1])
149
+
150
+
151
+ base_model = "Qwen/Qwen-Image"
152
+ controlnet_model = "InstantX/Qwen-Image-ControlNet-Inpainting"
153
+
154
+ controlnet = QwenImageControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
155
+
156
+ pipe = QwenImageControlNetInpaintPipeline.from_pretrained(
157
+ base_model, controlnet=controlnet, torch_dtype=torch.bfloat16
158
+ )
159
+ pipe.to("cuda")
160
+
161
+
162
@spaces.GPU(duration=120)
def infer(edit_images,
          prompt,
          negative_prompt=" ",
          seed=42,
          randomize_seed=False,
          strength=1.0,
          num_inference_steps=30,
          true_cfg_scale=4.0,
          rewrite_prompt=True,
          progress=gr.Progress(track_tqdm=True)):
    """
    Run ControlNet inpainting on the masked region of the editor image.

    Args:
        edit_images: gr.ImageEditor payload; "background" is the source image
            and "layers"[0] is the user-drawn mask layer.
        prompt: Edit instruction.
        negative_prompt: What to avoid in the result.
        seed: RNG seed; replaced when randomize_seed is True.
        randomize_seed: Draw a fresh seed in [0, MAX_SEED] when True.
        strength: ControlNet conditioning scale in [0, 1].
        num_inference_steps: Number of diffusion steps.
        true_cfg_scale: Classifier-free guidance scale.
        rewrite_prompt: Enhance the prompt via polish_prompt() first.
        progress: Gradio progress tracker (tqdm-linked).

    Returns:
        ([input_image, generated_image], seed) — the pair shown in the
        gr.ImageSlider, plus the seed actually used.
    """
    image = edit_images["background"]
    mask = edit_images["layers"][0]

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    if rewrite_prompt:
        prompt = polish_prompt(prompt, image)
        print(f"Rewritten Prompt: {prompt}")

    # Generate the inpainted image with the Qwen ControlNet pipeline.
    result_image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        control_image=image,
        # BUG FIX: was `control_mask=mask_image` — an undefined name, so every
        # call raised NameError. The mask extracted above is bound to `mask`.
        control_mask=mask,
        controlnet_conditioning_scale=strength,
        num_inference_steps=num_inference_steps,
        true_cfg_scale=true_cfg_scale,
        generator=torch.Generator(device="cuda").manual_seed(seed)
    ).images[0]

    return [image, result_image], seed
197
+
198
# Example edit instructions surfaced in the UI.
examples = [
    "change the hat to red",
    "make the background a beautiful sunset",
    "replace the object with a flower vase",
]

# Page CSS: centered 1024px column, logo sizing, and a negative-margin tweak
# for the #edit_text element.
css = """
#col-container {
    margin: 0 auto;
    max-width: 1024px;
}
#logo-title {
    text-align: center;
}
#logo-title img {
    width: 400px;
}
#edit_text{margin-top: -62px !important}
"""
217
+
218
+
219
+ with gr.Blocks(css=css, theme=gr.themes.Citrus()) as demo:
220
+ gr.HTML("<h1 style='text-align: center'>Qwen-Image with InstantX Inpainting ControlNet</style>")
221
+ gr.Markdown(
222
+ "Generate images with the [InstantX/Qwen-Image-ControlNet-Inpainting](https://huggingface.co/InstantX/Qwen-Image-ControlNet-Inpainting) that takes depth, pose and canny conditionings"
223
+ )
224
  with gr.Row():
225
+ with gr.Column():
226
+ edit_image = gr.ImageEditor(
227
+ label='Upload and draw mask for inpainting',
228
+ type='pil',
229
+ sources=["upload", "webcam"],
230
+ image_mode='RGB',
231
+ layers=False,
232
+ brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"),
233
+ height=600
234
+ )
235
+ prompt = gr.Text(
236
+ label="Prompt",
237
+ show_label=False,
238
+ max_lines=1,
239
+ placeholder="Enter your prompt (e.g., 'change the hat to red')",
240
+ container=False,
241
+ )
242
+ negative_prompt = gr.Text(
243
+ label="Negative Prompt",
244
+ show_label=True,
245
+ max_lines=1,
246
+ placeholder="Enter what you don't want (optional)",
247
+ container=False,
248
+ value="",
249
+ visible=False
250
+ )
251
+ run_button = gr.Button("Run")
252
+
253
+ with gr.Column():
254
+ result = gr.ImageSlider(label="Result", show_label=False, interactive=False)
255
+ use_as_input_button = gr.Button("🔄 Use as Input Image", visible=False, variant="secondary")
256
+
257
+ with gr.Accordion("Advanced Settings", open=False):
258
 
259
+ seed = gr.Slider(
260
+ label="Seed",
261
+ minimum=0,
262
+ maximum=MAX_SEED,
 
 
 
 
 
 
 
 
 
263
  step=1,
264
+ value=42,
265
  )
266
 
267
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
268
+
269
+
270
+ with gr.Row():
271
+ strength = gr.Slider(
272
+ label="Conditioning Scale",
273
+ minimum=0.0,
274
+ maximum=1.0,
275
+ step=0.1,
276
+ value=1.0,
277
+ info="Controls how much the inpainted region should change"
278
+ )
279
+
280
+ true_cfg_scale = gr.Slider(
281
+ label="True CFG Scale",
282
+ minimum=1.0,
283
+ maximum=10.0,
284
+ step=0.5,
285
+ value=4.0,
286
+ info="Classifier-free guidance scale"
287
+ )
288
+
289
+ num_inference_steps = gr.Slider(
290
+ label="Number of inference steps",
291
+ minimum=1,
292
+ maximum=50,
293
+ step=1,
294
+ value=30,
295
+ )
296
+ rewrite_prompt = gr.Checkbox(
297
+ label="Enhance prompt (using HF Inference)",
298
+ value=True
299
+ )
300
 
301
+ # Event handlers for reuse functionality
302
  use_as_input_button.click(
303
  fn=use_output_as_input,
304
  inputs=[result],
 
314
  outputs=result,
315
  show_api=False
316
  ).then(
317
+ fn = infer,
318
+ inputs = [edit_image, prompt, negative_prompt, seed, randomize_seed, strength, num_inference_steps, true_cfg_scale, rewrite_prompt],
319
+ outputs = [result, seed]
320
  ).then(
321
  fn=lambda: gr.update(visible=True),
322
  inputs=None,