# Qwen Notes to Notebook — Gradio app that renders typed notes onto a
# notebook-page image using the Qwen-Image-Edit pipeline (CPU-only).
import gradio as gr
import torch
from PIL import Image, ImageDraw, ImageFont
from diffusers import QwenImageEditPlusPipeline
import os
# ---------------------------------------------------------------------------
# Model loading (CPU-only).
# ---------------------------------------------------------------------------
model_id = "unsloth/Qwen-Image-Edit-2511-GGUF"
device = "cpu"  # Force CPU usage

print(f"Loading pipeline on {device}...")
try:
    # NOTE: diffusers pipelines do not accept device_map="cpu" (only strategy
    # strings such as "balanced" / explicit maps), so the original
    # device_map="cpu" argument would raise at load time. It is removed here;
    # pipeline.to(device) below performs the CPU placement instead.
    pipeline = QwenImageEditPlusPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float32,  # float32: CPUs lack fast half-precision
        low_cpu_mem_usage=True,     # reduce peak RAM while loading weights
    )
    pipeline.to(device)
    # Compute attention in slices to lower peak memory usage on CPU.
    pipeline.enable_attention_slicing()
    print("Pipeline loaded successfully on CPU.")
except Exception as e:
    # Keep the module importable even if the model cannot be loaded; the UI
    # reports the problem when generation is attempted (pipeline is None).
    print(f"Error loading pipeline: {e}")
    pipeline = None
def create_default_background():
    """Create a plain ruled-notebook page when no background asset exists.

    Returns an 800x1000 RGB PIL image: beige paper, light-blue horizontal
    rules every 40 px starting at y=100, and a pink left-margin line at x=80.
    """
    width, height = 800, 1000
    page = Image.new('RGB', (width, height), color='#f5f5dc')  # Beige color
    pen = ImageDraw.Draw(page)

    # Horizontal ruled lines across the page, leaving a header area on top.
    y = 100
    while y < height:
        pen.line([(50, y), (width - 50, y)], fill='#add8e6', width=1)
        y += 40

    # Vertical pink margin line on the left.
    pen.line([(80, 0), (80, height)], fill='#ff69b4', width=2)
    return page
def generate_notes_image(notes, background_image=None):
    """Render *notes* onto a notebook-page image with the Qwen edit pipeline.

    Args:
        notes: Text to write onto the page. Empty or whitespace-only input
            is rejected up front (avoids minutes of pointless CPU inference).
        background_image: Optional PIL image used as the page. When None,
            falls back to ``assets/notebook_blank.png`` if present, else a
            generated default page.

    Returns:
        Tuple ``(image, status_message)``; ``image`` is None on any failure.
    """
    if pipeline is None:
        return None, "Error: Pipeline not loaded. Please check the model availability."
    # Robustness fix: the original `if not notes` let whitespace-only text
    # through to the (very slow) pipeline call.
    if not notes or not notes.strip():
        return None, "Please enter some notes."

    # Use default background if none provided.
    if background_image is None:
        if os.path.exists("assets/notebook_blank.png"):
            background_image = Image.open("assets/notebook_blank.png")
        else:
            background_image = create_default_background()

    # The pipeline input must be RGB.
    if background_image.mode != 'RGB':
        background_image = background_image.convert('RGB')

    # Downscale oversized images to cap memory/compute cost on CPU.
    max_size = 768
    if max(background_image.size) > max_size:
        ratio = max_size / max(background_image.size)
        new_size = tuple(int(dim * ratio) for dim in background_image.size)
        background_image = background_image.resize(new_size, Image.Resampling.LANCZOS)

    # Construct the edit prompt around the user's notes.
    prompt = f"Write the following notes on the notebook page in a neat, realistic handwriting style: {notes}"
    inputs = {
        "image": [background_image],
        "prompt": prompt,
        "true_cfg_scale": 4.0,
        "num_inference_steps": 20,  # Reduced for CPU performance
        "guidance_scale": 1.0,
        "num_images_per_prompt": 1,
    }
    try:
        # inference_mode disables autograd bookkeeping (faster, less memory).
        with torch.inference_mode():
            output = pipeline(**inputs)
        output_image = output.images[0]
        return output_image, "Success! (Note: CPU inference may be slow)"
    except Exception as e:
        # Surface the failure to the UI instead of crashing the app.
        return None, f"Error during generation: {str(e)}"
# ---------------------------------------------------------------------------
# Gradio Interface: two-column layout — inputs on the left, results on the
# right — wired to generate_notes_image.
# ---------------------------------------------------------------------------
with gr.Blocks(title="Qwen Notes to Notebook") as demo:
    gr.Markdown("# 📝 Qwen Notes to Notebook")
    gr.Markdown("Transform your digital notes into a realistic notebook page using the **Qwen-Image-Edit-2511** model.")
    gr.Markdown("⚠️ **Note**: Running on CPU. Generation may take several minutes per image.")

    with gr.Row():
        with gr.Column():
            # Left column: note text plus an optional background upload.
            notes_input = gr.Textbox(
                label="Enter your notes",
                placeholder="Type your notes here...",
                lines=10
            )
            bg_input = gr.Image(
                label="Optional: Upload a custom notebook background",
                type="pil"  # deliver the upload as a PIL image
            )
            generate_btn = gr.Button("Generate Notebook Image", variant="primary")
        with gr.Column():
            # Right column: generated page and a status line.
            output_image = gr.Image(label="Generated Notebook Page")
            status_text = gr.Textbox(label="Status", interactive=False)

    # Button click runs generation; function returns (image, status).
    generate_btn.click(
        fn=generate_notes_image,
        inputs=[notes_input, bg_input],
        outputs=[output_image, status_text]
    )

    # Clickable example prompts (each uses the default background).
    gr.Examples(
        examples=[
            ["Meeting Agenda:\n1. Project Kickoff\n2. Budget Review\n3. Timeline Discussion", None],
            ["Shopping List:\n- Milk\n- Eggs\n- Bread\n- Coffee beans", None],
            ["To-do List:\n- Finish the report\n- Call the client\n- Gym at 6 PM", None]
        ],
        inputs=[notes_input, bg_input]
    )

if __name__ == "__main__":
    demo.launch()