import gradio as gr
import numpy as np
import random
import torch
import spaces
from PIL import Image
from diffusers import QwenImageEditPipeline
from diffusers.utils import is_xformers_available
import os
import re
import gc
import json
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
#############################
os.environ.setdefault('GRADIO_ANALYTICS_ENABLED', 'False')
os.environ.setdefault('HF_HUB_DISABLE_TELEMETRY', '1')
# Model configuration
REWRITER_MODEL = "Qwen/Qwen1.5-7B-Chat"  # chat-tuned 7B model; follows the JSON output format reliably
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
# Preload enhancement model at startup
print("🔄 Loading prompt enhancement model...")
rewriter_tokenizer = AutoTokenizer.from_pretrained(REWRITER_MODEL)
# Quantization configuration
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.bfloat16,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True
)
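# 4-bit NF4 quantization keeps the 7B rewriter's weights to roughly 4-5 GB of VRAM;
# double quantization compresses the quantization constants further, while compute
# still runs in bfloat16 via bnb_4bit_compute_dtype.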
rewriter_model = AutoModelForCausalLM.from_pretrained(
REWRITER_MODEL,
torch_dtype=dtype,
device_map="auto",
quantization_config=bnb_config,
max_memory={0: "48GiB"}, # Reserve adequate memory
)
print("✅ Enhancement model loaded and ready!")
SYSTEM_PROMPT_EDIT = '''
# Edit Instruction Rewriter
You are a professional edit instruction rewriter. Your task is to generate a precise, concise, and visually achievable instruction based on the user's intent and the input image.
## 1. General Principles
- Keep the rewritten instruction **concise** and clear.
- Avoid contradictions, vagueness, or unachievable instructions.
- Maintain the core logic of the original instruction; only enhance clarity and feasibility.
- Ensure newly added elements or modifications align with the image's original context and art style.
## 2. Task Types
### Add, Delete, Replace:
- When the input is detailed, only refine grammar and clarity.
- For vague instructions, infer minimal but sufficient details.
- For replacement, use the format: `"Replace X with Y"`.
### Text Editing (e.g., text replacement):
- Enclose text content in quotes, e.g., `Replace "abc" with "xyz"`.
- Preserve the original structure and language; **do not translate** or alter the style.
### Human Editing (e.g., change a person’s face/hair):
- Preserve core visual identity (gender, ethnic features).
- Describe expressions in subtle and natural terms.
- Maintain key clothing or styling details unless explicitly replaced.
### Style Transformation:
- If a style is specified, e.g., `Disco style`, rewrite it to encapsulate the essential visual traits.
- Use a fixed template for **coloring/restoration**:
`"Restore old photograph, remove scratches, reduce noise, enhance details, high resolution, realistic, natural skin tones, clear facial features, no distortion, vintage photo restoration"`
if applicable.
## 3. Output Format
Please provide the rewritten instruction in a clean `json` format as:
{
"Rewritten": "..."
}
'''
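# The rewriter is expected to answer with a single JSON object as described above.
# extract_json_response() below tolerates common deviations such as code fences,
# unquoted keys, and alternate key names. Illustrative example (not produced by the app):
#   extract_json_response('```json\n{Rewritten: "Replace the sky with a sunset"}\n```')
#   -> 'Replace the sky with a sunset'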
def extract_json_response(model_output: str) -> str:
"""Extract rewritten instruction from potentially messy JSON output"""
    # Remove Markdown code-fence markers first
model_output = re.sub(r'```(?:json)?\s*', '', model_output)
try:
# Try to find the JSON portion in the output
start_idx = model_output.find('{')
end_idx = model_output.rfind('}')
if start_idx == -1 or end_idx == -1:
return None
# Expand to the full object including outer braces
end_idx += 1 # Include the closing brace
json_str = model_output[start_idx:end_idx]
# Improved quote handling for values
json_str = re.sub(r'(\w+)\s*:', r'"\1":', json_str) # Quote keys
json_str = re.sub(r':\s*([^"\s{[]+)', r': "\1"', json_str) # Quote unquoted string values
# Parse JSON
data = json.loads(json_str)
# Extract rewritten prompt from possible key variations
possible_keys = [
"Rewritten", "rewritten", "Rewrited", "rewrited", "Rewrittent",
"Output", "output", "Enhanced", "enhanced"
]
for key in possible_keys:
if key in data:
return data[key].strip()
# Try nested path
if "Response" in data and "Rewritten" in data["Response"]:
return data["Response"]["Rewritten"].strip()
# Handle nested JSON objects (additional protection)
if isinstance(data, dict):
for value in data.values():
if isinstance(value, dict) and "Rewritten" in value:
return value["Rewritten"].strip()
# Try to find any string value that looks like an instruction
str_values = [v for v in data.values() if isinstance(v, str) and 10 < len(v) < 500]
if str_values:
return str_values[0].strip()
except Exception as e:
print(f"JSON parse error: {str(e)}")
return None
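# polish_prompt() runs the user's edit instruction through the chat-tuned rewriter with
# the system prompt above, pulls the "Rewritten" value out of the model's JSON reply,
# and falls back to heuristic cleanup of the raw text if JSON parsing fails.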
def polish_prompt(original_prompt: str) -> str:
"""Enhanced prompt rewriting using original system prompt with JSON handling"""
    # The rewriter model and tokenizer are preloaded at startup, so no lazy loading is needed here.
# Format as Qwen chat
messages = [
{"role": "system", "content": SYSTEM_PROMPT_EDIT},
{"role": "user", "content": original_prompt}
]
text = rewriter_tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = rewriter_tokenizer(text, return_tensors="pt").to(device)
with torch.no_grad():
generated_ids = rewriter_model.generate(
**model_inputs,
            max_new_tokens=256,  # short output budget; the rewritten instruction should stay concise
            do_sample=True,
            temperature=0.5,  # lower temperature keeps the rewrite close to the original intent
top_p=0.9,
no_repeat_ngram_size=3,
pad_token_id=rewriter_tokenizer.eos_token_id
)
# Extract and clean response
enhanced = rewriter_tokenizer.decode(
generated_ids[0][model_inputs.input_ids.shape[1]:],
skip_special_tokens=True
).strip()
    # Strip a Markdown code fence if the model wrapped its JSON in one
    json_str = enhanced
    if '```' in enhanced:
        parts = enhanced.split('```')
        if len(parts) >= 3:
            json_str = parts[1]  # keep the content between the first pair of fences
    # Try to extract the rewritten instruction from the (possibly messy) JSON
    rewritten_prompt = extract_json_response(json_str)
if rewritten_prompt:
# Clean up remaining artifacts
rewritten_prompt = re.sub(r'(Replace|Change|Add) "(.*?)"', r'\1 \2', rewritten_prompt)
rewritten_prompt = rewritten_prompt.replace('\\"', '"').replace('\\n', ' ')
return rewritten_prompt
# Fallback cleanup if JSON extraction fails
if '```' in enhanced:
# Extract content from code blocks
parts = enhanced.split('```')
if len(parts) >= 3:
rewritten_prompt = parts[1].strip()
else:
rewritten_prompt = enhanced
else:
rewritten_prompt = enhanced
    # Strip leftover JSON fragments and collapse extra whitespace in the fallback output
rewritten_prompt = re.sub(r'.*{.*}.*', '', rewritten_prompt)
rewritten_prompt = re.sub(r'\s\s+', ' ', rewritten_prompt).strip()
if ': ' in rewritten_prompt:
rewritten_prompt = rewritten_prompt.split(': ', 1)[-1].strip()
return rewritten_prompt[:200] # Ensure reasonable length
# Load main image editing pipeline
pipe = QwenImageEditPipeline.from_pretrained(
"Qwen/Qwen-Image-Edit",
torch_dtype=dtype
).to(device)
# Load LoRA weights for acceleration
pipe.load_lora_weights(
"lightx2v/Qwen-Image-Lightning",
weight_name="Qwen-Image-Lightning-8steps-V1.1.safetensors"
)
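# Fusing merges the LoRA deltas into the base weights, so the 8-step schedule runs
# without per-layer LoRA overhead at inference time.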
pipe.fuse_lora()
if is_xformers_available():
pipe.enable_xformers_memory_efficient_attention()
else:
print("xformers not available")
# def unload_rewriter():
# """Clear enhancement model from memory"""
# global rewriter_tokenizer, rewriter_model
# if rewriter_model:
# del rewriter_tokenizer, rewriter_model
# rewriter_tokenizer = None
# rewriter_model = None
# torch.cuda.empty_cache()
# gc.collect()
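# On Hugging Face Spaces with ZeroGPU, the decorator below requests a GPU for each call,
# budgeting up to 60 seconds per invocation.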
@spaces.GPU(duration=60)
def infer(
image,
prompt,
seed=42,
randomize_seed=False,
true_guidance_scale=1.0,
num_inference_steps=8,
rewrite_prompt=False,
num_images_per_prompt=1,
):
"""Image editing endpoint with optimized prompt handling"""
original_prompt = prompt
prompt_info = ""
# Handle prompt rewriting
if rewrite_prompt:
try:
enhanced_instruction = polish_prompt(original_prompt)
prompt_info = (
f"<div style='margin:10px; padding:15px; border-radius:8px; border-left:4px solid #4CAF50; background: #f5f9fe'>"
f"<h4 style='margin-top: 0;'>🚀 Prompt Enhancement</h4>"
f"<p><strong>Original:</strong> {original_prompt}</p>"
f"<p><strong style='color:#2E7D32;'>Enhanced:</strong> {enhanced_instruction}</p>"
f"</div>"
)
prompt = enhanced_instruction
except Exception as e:
gr.Warning(f"Prompt enhancement failed: {str(e)}")
prompt_info = (
f"<div style='margin:10px; padding:15px; border-radius:8px; border-left:4px solid #FF5252; background: #fef5f5'>"
f"<h4 style='margin-top: 0;'>⚠️ Enhancement Not Applied</h4>"
f"<p>Using original prompt. Error: {str(e)[:100]}</p>"
f"</div>"
)
else:
prompt_info = (
f"<div style='margin:10px; padding:10px; border-radius:8px; background: #f8f9fa'>"
f"<h4 style='margin-top: 0;'>📝 Original Prompt</h4>"
f"<p>{original_prompt}</p>"
f"</div>"
)
# Set seed for reproducibility
seed_val = seed if not randomize_seed else random.randint(0, MAX_SEED)
generator = torch.Generator(device=device).manual_seed(seed_val)
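    # Note: true_guidance_scale defaults to 1.0, which effectively disables classifier-free
    # guidance; the few-step Lightning LoRA is generally run without CFG (assumption based
    # on the LoRA's recommended settings, not verified here).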
try:
# Generate images
edited_images = pipe(
image=image,
prompt=prompt,
negative_prompt=" ",
num_inference_steps=num_inference_steps,
generator=generator,
true_cfg_scale=true_guidance_scale,
num_images_per_prompt=num_images_per_prompt
).images
return edited_images, seed_val, prompt_info
except Exception as e:
        gr.Warning(f"Image generation failed: {str(e)}")
return [], seed_val, (
f"<div style='margin:10px; padding:15px; border-radius:8px; border-left:4px solid #dd2c00; background: #fef5f5'>"
f"<h4 style='margin-top: 0;'>⚠️ Processing Error</h4>"
f"<p>{str(e)[:200]}</p>"
f"</div>"
)
MAX_SEED = np.iinfo(np.int32).max
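# Upper bound for user-supplied and randomized seeds (2**31 - 1 for 32-bit integers)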
with gr.Blocks(title="Qwen Image Editor Fast") as demo:
gr.Markdown("""
<div style="text-align: center; background: linear-gradient(to right, #3a7bd5, #00d2ff); color: white; padding: 20px; border-radius: 8px;">
<h1 style="margin-bottom: 5px;">⚡️ Qwen-Image-Edit Lightning</h1>
        <p>✨ 8-step inference with lightx2v's Lightning LoRA</p>
<p>📝 Local Prompt Enhancement</p>
</div>
""")
with gr.Row(equal_height=True):
# Input Column
with gr.Column(scale=1):
input_image = gr.Image(
label="Source Image",
type="pil",
height=300
)
prompt = gr.Textbox(
label="Edit Instructions",
placeholder="e.g. Replace the background with a beach sunset...",
lines=2,
max_lines=4
)
with gr.Row():
rewrite_toggle = gr.Checkbox(
label="Enable Prompt Enhancement",
value=True,
interactive=True
)
run_button = gr.Button(
"Generate Edits",
variant="primary",
min_width=120
)
with gr.Accordion("Advanced Parameters", open=False):
with gr.Row():
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=42
)
randomize_seed = gr.Checkbox(
label="Random Seed",
value=True
)
with gr.Row():
true_guidance_scale = gr.Slider(
label="Guidance Scale",
minimum=1.0,
maximum=5.0,
step=0.1,
value=1.0
)
num_inference_steps = gr.Slider(
label="Inference Steps",
minimum=4,
maximum=16,
step=1,
value=8
)
num_images_per_prompt = gr.Slider(
label="Output Count",
minimum=1,
maximum=4,
step=1,
value=1
)
# Output Column
with gr.Column(scale=1):
result = gr.Gallery(
label="Edited Images",
                columns=2,  # gr.Gallery expects an int (or list of ints), not a callable
height=500,
object_fit="cover",
preview=True
)
prompt_info = gr.HTML(
value="<div style='padding:15px; background:#f8f9fa; border-radius:8px; margin-top:15px'>"
"Prompt details will appear after generation</div>"
)
# # Examples
# gr.Examples(
# examples=[
# "Change the background scene to a rooftop bar at night",
# "Transform to pixel art style with 8-bit graphics",
# "Replace all text with 'Qwen AI' in futuristic font"
# ],
# inputs=[prompt],
# label="Sample Instructions",
# cache_examples=True
# )
# Set up processing
inputs = [
input_image,
prompt,
seed,
randomize_seed,
true_guidance_scale,
num_inference_steps,
rewrite_toggle,
num_images_per_prompt
]
outputs = [result, seed, prompt_info]
run_button.click(
fn=infer,
inputs=inputs,
outputs=outputs
)
prompt.submit(
fn=infer,
inputs=inputs,
outputs=outputs
)
if __name__ == "__main__":
demo.launch()