import gradio as gr
import numpy as np
import random
import torch
import spaces
from PIL import Image
from diffusers import QwenImageEditPlusPipeline
import os
import base64
import json
from huggingface_hub import login
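# Authenticate with the Hugging Face Hub using a token stored in the 'hf' env var
# (needed when the model weights are gated or private).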
login(token=os.environ.get('hf'))
SYSTEM_PROMPT = '''
# Edit Prompt Enhancer
You are a professional edit prompt enhancer. Your task is to generate a direct and specific edit prompt based on the user-provided instruction and the input image(s).
Please strictly follow the enhancing rules below:
## 1. General Principles
- Keep the enhanced prompt **direct and specific**.
- If the instruction is contradictory, vague, or unachievable, prioritize reasonable inference and correction, and supplement details when necessary.
- Keep the core intention of the original instruction unchanged, only enhancing its clarity, rationality, and visual feasibility.
- All added objects or modifications must align with the logic and style of the edited input image’s overall scene.
## 2. Task-Type Handling Rules
### 1. Add, Delete, Replace Tasks
- If the instruction is clear (already includes task type, target entity, position, quantity, attributes), preserve the original intent and only refine the grammar.
- If the description is vague, supplement with minimal but sufficient details (category, color, size, orientation, position, etc.). For example:
> Original: "Add an animal"
> Rewritten: "Add a light-gray cat in the bottom-right corner, sitting and facing the camera"
- Remove meaningless instructions: e.g., "Add 0 objects" should be ignored or flagged as invalid.
- For replacement tasks, specify "Replace Y with X" and briefly describe the key visual features of X.
### 2. Text Editing Tasks
- All text content must be enclosed in English double quotes `" "`. Keep the original language of the text, and keep the capitalization.
- Both adding new text and replacing existing text are treated as text replacement tasks. For example:
  - Replace "xx" with "yy"
  - Replace the mask / bounding box with "yy"
  - Replace the visual object with "yy"
- Specify text position, color, and layout only if the user has requested them.
- If a font is specified, keep the font name in its original language.
### 3. Human (ID) Editing Tasks
- Emphasize maintaining the person’s core visual consistency (ethnicity, gender, age, hairstyle, expression, outfit, etc.).
- If modifying appearance (e.g., clothes, hairstyle), ensure the new element is consistent with the original style.
- **Expression, beauty, or makeup changes must be natural and subtle, never exaggerated.**
- Example:
> Original: "Change the person’s hat"
> Rewritten: "Replace the man’s hat with a dark brown beret; keep smile, short hair, and gray jacket unchanged"
### 4. Style Conversion or Enhancement Tasks
- If a style is specified, describe it concisely using key visual features. For example:
> Original: "Disco style"
> Rewritten: "1970s disco style: flashing lights, disco ball, mirrored walls, colorful tones"
- For style reference, analyze the original image and extract key characteristics (color, composition, texture, lighting, artistic style, etc.), integrating them into the instruction.
- **Colorization tasks (including old photo restoration) must use the fixed template:**
"Restore and colorize the photo."
- Clearly specify the object to be modified. For example:
> Original: Modify the subject in Picture 1 to match the style of Picture 2.
> Rewritten: Change the girl in Picture 1 to the ink-wash style of Picture 2 — rendered in black-and-white watercolor with soft color transitions.
- If there are other changes, place the style description at the end.
### 5. Content Filling Tasks
- For inpainting tasks, always use the fixed template: "Perform inpainting on this image. The original caption is: ".
- For outpainting tasks, always use the fixed template: "Extend the image beyond its boundaries using outpainting. The original caption is: ".
### 6. Multi-Image Tasks
- Rewritten prompts must clearly point out which image’s element is being modified. For example:
> Original: "Replace the subject of picture 1 with the subject of picture 2"
> Rewritten: "Replace the girl of picture 1 with the boy of picture 2, keeping picture 2’s background unchanged"
- For stylization tasks, describe the reference image’s style in the rewritten prompt, while preserving the visual content of the source image.
## 3. Rationale and Logic Checks
- Resolve contradictory instructions: e.g., "Remove all trees but keep all trees" should be logically corrected.
- Add missing key information: e.g., if position is unspecified, choose a reasonable area based on composition (near subject, empty space, center/edge, etc.).
# Output Format Example
```json
{
  "Rewritten": "..."
}
```
'''
def polish_prompt(prompt, img):
    """Ask the VLM to rewrite the edit instruction per SYSTEM_PROMPT; fall back to the original on failure."""
    full_prompt = f"{SYSTEM_PROMPT}\n\nUser Input: {prompt}\n\nRewritten Prompt:"
    # Retry a few times on transient API errors instead of looping forever.
    for attempt in range(3):
        try:
            result = api(full_prompt, [img])
            if isinstance(result, str):
                # Strip markdown code fences before parsing the JSON payload.
                result = result.replace('```json', '').replace('```', '')
            result = json.loads(result)
            return result['Rewritten'].strip().replace("\n", " ")
        except Exception as e:
            print(f"[Warning] Error during API call (attempt {attempt + 1}): {e}")
    # All retries failed; keep the user's original instruction.
    return prompt
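# Illustrative usage of polish_prompt (hypothetical filename; requires DASH_API_KEY):
#   img = Image.open("input.png")
#   polish_prompt("Add an animal", img)
#   # -> e.g. "Add a light-gray cat in the bottom-right corner, sitting and facing the camera"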
def encode_image(pil_image):
    """Serialize a PIL image to a base64-encoded PNG string."""
    import io
    buffered = io.BytesIO()
    pil_image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode("utf-8")
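# Example: encode_image(Image.new("RGB", (8, 8))) yields a base64 string that
# can be embedded in a "data:image/png;base64,..." URI.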
def api(prompt, img_list, model="qwen-vl-max-latest", kwargs=None):
    """Send a text prompt plus PIL images to the DashScope multimodal chat API and return the reply text."""
    import dashscope
    kwargs = kwargs or {}  # avoid the mutable-default-argument pitfall
    api_key = os.environ.get('DASH_API_KEY')
    if not api_key:
        raise EnvironmentError("DASH_API_KEY is not set")
    assert model in ["qwen-vl-max-latest"], f"Not implemented model {model}"
    sys_prompt = "You are a helpful assistant; you should provide useful answers to users."
    messages = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": []},
    ]
    # Attach each image as a base64 data URI, followed by the text prompt.
    for img in img_list:
        messages[1]["content"].append(
            {"image": f"data:image/png;base64,{encode_image(img)}"})
    messages[1]["content"].append({"text": prompt})
    response_format = kwargs.get('response_format', None)
    response = dashscope.MultiModalConversation.call(
        api_key=api_key,
        model=model,
        messages=messages,
        result_format='message',
        response_format=response_format,
    )
    if response.status_code == 200:
        return response.output.choices[0].message.content[0]['text']
    raise Exception(f'Failed to post: {response}')
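# Illustrative call (requires network access and a valid DASH_API_KEY; filename hypothetical):
#   caption = api("Describe this image in one sentence.", [Image.open("demo.png")])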
# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
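# Note: bfloat16 targets GPU execution; on a CPU-only host, torch.float32 is the safer dtype.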
# Load the model pipeline
pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2511", torch_dtype=dtype).to(device)
# --- UI Constants and Helpers ---
MAX_SEED = np.iinfo(np.int32).max
# --- Main Inference Function (with hardcoded negative prompt) ---
@spaces.GPU(duration=180)
def infer(
images,
prompt,
seed=42,
randomize_seed=False,
true_guidance_scale=1.0,
num_inference_steps=50,
height=None,
width=None,
rewrite_prompt=True,
num_images_per_prompt=1,
progress=gr.Progress(track_tqdm=True),
):
"""
Generates an image using the local Qwen-Image diffusers pipeline.
"""
# Hardcode the negative prompt as requested
negative_prompt = " "
if randomize_seed:
seed = random.randint(0, MAX_SEED)
# Set up the generator for reproducibility
generator = torch.Generator(device=device).manual_seed(seed)
    # Normalize gallery items into RGB PIL images. Gradio's Gallery yields
    # (image, caption) tuples; file paths and file objects are handled as fallbacks.
    pil_images = []
    if images is not None:
        for item in images:
            try:
                if isinstance(item, (list, tuple)):
                    item = item[0]
                if isinstance(item, Image.Image):
                    pil_images.append(item.convert("RGB"))
                elif isinstance(item, str):
                    pil_images.append(Image.open(item).convert("RGB"))
                elif hasattr(item, "name"):
                    pil_images.append(Image.open(item.name).convert("RGB"))
            except Exception:
                # Skip anything that cannot be read as an image.
                continue
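    # The size sliders bottom out at 256; treat 256x256 as "auto" so the pipeline
    # derives the output size from the input image.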
if height==256 and width==256:
height, width = None, None
print(f"Calling pipeline with prompt: '{prompt}'")
print(f"Negative Prompt: '{negative_prompt}'")
print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
if rewrite_prompt and len(pil_images) > 0:
prompt = polish_prompt(prompt, pil_images[0])
print(f"Rewritten Prompt: {prompt}")
# Generate the image
image = pipe(
image=pil_images if len(pil_images) > 0 else None,
prompt=prompt,
height=height,
width=width,
negative_prompt=negative_prompt,
num_inference_steps=num_inference_steps,
generator=generator,
true_cfg_scale=true_guidance_scale,
num_images_per_prompt=num_images_per_prompt,
).images
return image, seed
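# Illustrative direct call outside the UI (hypothetical filename; gallery items
# are (image, caption) tuples):
#   out_images, used_seed = infer([(Image.open("cat.png"), None)], "Add a red hat",
#                                 seed=0, randomize_seed=False)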
# --- Examples and UI Layout ---
examples = []
css = """
#col-container {
margin: 0 auto;
max-width: 1024px;
}
#edit_text{margin-top: -62px !important}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.HTML('<img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" alt="Qwen-Image Logo" width="400" style="display: block; margin: 0 auto;">')
gr.Markdown("[Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series. Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image-Edit) to run locally with ComfyUI or diffusers.")
with gr.Row():
with gr.Column():
input_images = gr.Gallery(label="Input Images", show_label=False, type="pil", interactive=True)
# result = gr.Image(label="Result", show_label=False, type="pil")
result = gr.Gallery(label="Result", show_label=False, type="pil")
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
placeholder="describe the edit instruction",
container=False,
)
run_button = gr.Button("Edit!", variant="primary")
with gr.Accordion("Advanced Settings", open=False):
# Negative prompt UI element is removed here
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
true_guidance_scale = gr.Slider(
label="True guidance scale",
minimum=1.0,
maximum=10.0,
step=0.1,
value=4.0
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=40,
)
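                # Height/width start at the slider minimum (256); infer() treats
                # 256x256 as "auto" and lets the pipeline choose the output size.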
height = gr.Slider(
label="Height",
minimum=256,
maximum=2048,
step=8,
value=None,
)
width = gr.Slider(
label="Width",
minimum=256,
maximum=2048,
step=8,
value=None,
)
rewrite_prompt = gr.Checkbox(label="Rewrite prompt", value=True)
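        # Note: the "Rewrite prompt" option calls the DashScope API and needs DASH_API_KEY set.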
# gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
gr.on(
triggers=[run_button.click, prompt.submit],
fn=infer,
inputs=[
input_images,
prompt,
seed,
randomize_seed,
true_guidance_scale,
num_inference_steps,
height,
width,
rewrite_prompt,
],
outputs=[result, seed],
)
if __name__ == "__main__":
demo.launch()