# app.py — "Qwen Notes to Notebook" Hugging Face Space
# (uploaded by ghost613; commit a6c97db, "Update app.py", verified)
import gradio as gr
import torch
from PIL import Image, ImageDraw, ImageFont
from diffusers import QwenImageEditPlusPipeline
import os
# ---------------------------------------------------------------------------
# Model loading (CPU-only). Runs once at import time; on failure the app still
# starts and generate_notes_image() reports the error through the UI.
# NOTE(review): the repo id points at a GGUF-quantized export — diffusers'
# from_pretrained() generally cannot load GGUF checkpoints directly (they need
# from_single_file() with a GGUFQuantizationConfig). Confirm this repo works,
# or switch to the standard safetensors checkpoint.
# ---------------------------------------------------------------------------
model_id = "unsloth/Qwen-Image-Edit-2511-GGUF"
device = "cpu"  # Force CPU usage

print(f"Loading pipeline on {device}...")
try:
    pipeline = QwenImageEditPlusPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float32,  # float32: CPUs have no fast half-precision path
        low_cpu_mem_usage=True,     # stream weights in to reduce peak RAM
        device_map="cpu",           # explicitly map every module to CPU
    )
    pipeline.to(device)
    # Attention slicing trades a little speed for much lower peak memory,
    # which matters on a RAM-constrained CPU Space.
    pipeline.enable_attention_slicing()
    print("Pipeline loaded successfully on CPU.")
except Exception as e:
    # Keep the module importable even when the model cannot be fetched;
    # downstream code checks `pipeline is None` before generating.
    print(f"Error loading pipeline: {e}")
    pipeline = None
def create_default_background(width: int = 800, height: int = 1000) -> Image.Image:
    """Create a simple ruled-notebook background image.

    Used as a fallback when ``assets/notebook_blank.png`` is not available.
    The page size is parameterized (defaults preserve the original 800x1000).

    Args:
        width: Page width in pixels.
        height: Page height in pixels.

    Returns:
        An RGB PIL image resembling a blank lined notebook page.
    """
    img = Image.new('RGB', (width, height), color='#f5f5dc')  # beige "paper"
    draw = ImageDraw.Draw(img)
    # Light-blue horizontal ruling every 40 px, leaving a 100 px header band
    # and 50 px side gutters.
    for y in range(100, height, 40):
        draw.line([(50, y), (width - 50, y)], fill='#add8e6', width=1)
    # Pink left-margin line, full page height.
    draw.line([(80, 0), (80, height)], fill='#ff69b4', width=2)
    return img
def generate_notes_image(notes, background_image=None):
    """Render *notes* onto a notebook-page image with the Qwen edit pipeline.

    Args:
        notes: The text to write onto the page.
        background_image: Optional PIL image to use as the page. Falls back to
            ``assets/notebook_blank.png`` if present, else a generated default.

    Returns:
        A ``(image, status)`` tuple: the generated PIL image (or ``None`` on
        failure) and a human-readable status string for the UI.
    """
    if pipeline is None:
        return None, "Error: Pipeline not loaded. Please check the model availability."
    # Fix: also reject whitespace-only input, which bare `not notes` let through.
    if not notes or not notes.strip():
        return None, "Please enter some notes."

    # Use default background if none provided.
    if background_image is None:
        if os.path.exists("assets/notebook_blank.png"):
            background_image = Image.open("assets/notebook_blank.png")
        else:
            background_image = create_default_background()

    # The pipeline expects RGB input.
    if background_image.mode != 'RGB':
        background_image = background_image.convert('RGB')

    # Downscale large images to cap memory use and latency on CPU.
    max_size = 768
    if max(background_image.size) > max_size:
        ratio = max_size / max(background_image.size)
        new_size = tuple(int(dim * ratio) for dim in background_image.size)
        background_image = background_image.resize(new_size, Image.Resampling.LANCZOS)

    # Instruction-style edit prompt carrying the user's notes.
    prompt = f"Write the following notes on the notebook page in a neat, realistic handwriting style: {notes}"
    inputs = {
        "image": [background_image],
        "prompt": prompt,
        "true_cfg_scale": 4.0,
        "num_inference_steps": 20,  # Reduced for CPU performance
        "guidance_scale": 1.0,
        "num_images_per_prompt": 1,
    }

    try:
        # inference_mode disables autograd bookkeeping for a small speed/memory win.
        with torch.inference_mode():
            output = pipeline(**inputs)
            output_image = output.images[0]
        return output_image, "Success! (Note: CPU inference may be slow)"
    except Exception as e:
        # Surface the failure to the UI instead of crashing the Gradio worker.
        return None, f"Error during generation: {str(e)}"
# ---------------------------------------------------------------------------
# Gradio interface: notes textbox + optional background on the left,
# generated image + status on the right.
# ---------------------------------------------------------------------------
with gr.Blocks(title="Qwen Notes to Notebook") as demo:
    gr.Markdown("# 📝 Qwen Notes to Notebook")
    gr.Markdown("Transform your digital notes into a realistic notebook page using the **Qwen-Image-Edit-2511** model.")
    gr.Markdown("⚠️ **Note**: Running on CPU. Generation may take several minutes per image.")

    with gr.Row():
        with gr.Column():
            notes_input = gr.Textbox(
                label="Enter your notes",
                placeholder="Type your notes here...",
                lines=10,
            )
            bg_input = gr.Image(
                label="Optional: Upload a custom notebook background",
                type="pil",  # deliver uploads as PIL images, matching the pipeline input
            )
            generate_btn = gr.Button("Generate Notebook Image", variant="primary")
        with gr.Column():
            output_image = gr.Image(label="Generated Notebook Page")
            status_text = gr.Textbox(label="Status", interactive=False)

    generate_btn.click(
        fn=generate_notes_image,
        inputs=[notes_input, bg_input],
        outputs=[output_image, status_text],
    )

    # Clickable sample prompts; None = use the default background.
    gr.Examples(
        examples=[
            ["Meeting Agenda:\n1. Project Kickoff\n2. Budget Review\n3. Timeline Discussion", None],
            ["Shopping List:\n- Milk\n- Eggs\n- Bread\n- Coffee beans", None],
            ["To-do List:\n- Finish the report\n- Call the client\n- Gym at 6 PM", None],
        ],
        inputs=[notes_input, bg_input],
    )

if __name__ == "__main__":
    demo.launch()