# app.py — Pixel Art Character Generator (FLUX.1-dev + LoRA, CPU inference)
# (Hugging Face Space by milliyin; web-page residue removed so the file parses.)
import torch
import gradio as gr
from diffusers import FluxPipeline
import os
from PIL import Image
import gc
# Configuration
BASE_MODEL = "black-forest-labs/FLUX.1-dev"  # Hugging Face repo of the base FLUX model
LORA_REPO = "milliyin/art_characters_lora_flux_nf4"  # LoRA weights applied on top of the base model

# Global pipeline variable — populated lazily by load_pipeline() on first use
# so the (very slow) model load happens once per process, not per request.
pipe = None
def load_pipeline():
    """Lazily load the FLUX pipeline with LoRA weights for CPU inference.

    The pipeline is cached in the module-level ``pipe`` variable so repeated
    calls (one per generation request) only pay the multi-minute load cost
    once.

    Returns:
        FluxPipeline: the ready-to-use pipeline, placed on the CPU.
    """
    global pipe
    if pipe is None:
        print("Loading base model for CPU inference...")
        # float32: half precision is slow or unsupported on most CPUs.
        # NOTE: device_map="auto" was removed — diffusers pipelines only
        # accept the "balanced" strategy and raise on "auto"; we place the
        # model explicitly with .to("cpu") below instead.
        pipe = FluxPipeline.from_pretrained(
            BASE_MODEL,
            torch_dtype=torch.float32,
            use_safetensors=True,
            low_cpu_mem_usage=True,
        )
        print("Loading LoRA weights...")
        pipe.load_lora_weights(LORA_REPO)
        pipe.to("cpu")
        # enable_sequential_cpu_offload() was removed: it offloads *to* an
        # accelerator device and fails on a CPU-only host.
        print("Pipeline loaded successfully for CPU!")
    return pipe
def generate_pixel_art(
    hair_color="blue",
    clothing="purple apron",
    custom_prompt="",
    negative_prompt="blurry, low quality, distorted, ugly, nsfw",
    width=384,   # reduced for CPU speed
    height=384,  # reduced for CPU speed
    guidance_scale=7.5,
    num_steps=20,  # reduced for CPU speed
    seed=-1
):
    """Generate a pixel-art character image with the FLUX LoRA pipeline (CPU).

    Args:
        hair_color: hair color inserted into the template prompt.
        clothing: clothing description inserted into the template prompt.
        custom_prompt: when non-empty, used verbatim instead of the template.
        negative_prompt: concepts to steer the generation away from.
        width, height: output resolution in pixels.
        guidance_scale: classifier-free guidance strength.
        num_steps: number of denoising steps.
        seed: RNG seed; -1 picks a random seed (reported in the status text).

    Returns:
        tuple: (PIL.Image or None, str status message). On failure the image
        is None and the message describes the error.
    """
    try:
        gc.collect()  # release leftovers from the previous request
        pipeline = load_pipeline()

        # Build the prompt: a custom prompt wins, otherwise use the template
        # matching the LoRA's training captions.
        if custom_prompt.strip():
            prompt = custom_prompt
        else:
            prompt = f"pixel art character, with {hair_color} hair, wearing {clothing}, holding no weapon, facing forward"

        # Resolve the seed. gr.Number may deliver a float, so coerce to int
        # before the -1 sentinel check.
        seed = int(seed)
        if seed == -1:
            seed = torch.randint(0, 2**32 - 1, (1,)).item()
        # A dedicated generator gives reproducible results without clobbering
        # the process-wide RNG state (unlike torch.manual_seed()).
        generator = torch.Generator("cpu").manual_seed(seed)

        print(f"Generating with prompt: {prompt}")
        print(f"Seed: {seed}")
        print("⚠️ CPU generation may take 5-15 minutes...")

        # inference_mode disables autograd bookkeeping for speed and memory.
        with torch.inference_mode():
            image = pipeline(
                prompt=prompt,
                negative_prompt=negative_prompt,
                height=height,
                width=width,
                guidance_scale=guidance_scale,
                num_inference_steps=num_steps,
                max_sequence_length=256,  # shorter text context → less RAM
                generator=generator,
            ).images[0]

        gc.collect()
        return image, f"Generated with seed: {seed}\n✅ Generation completed on CPU"
    except torch.cuda.OutOfMemoryError:
        # Unreachable on a CPU-only host, but kept for GPU deployments.
        return None, "❌ Out of memory. Try reducing image size or steps."
    except Exception as e:
        # Broad catch at the UI boundary: surface the error to the user
        # instead of crashing the Gradio worker.
        print(f"Error generating image: {e}")
        return None, f"❌ Error: {str(e)}\nTry reducing parameters or refresh the page."
# Predefined options offered in the UI dropdowns. These values are spliced
# directly into the template prompt in generate_pixel_art().
HAIR_COLORS = ["blue", "red", "black", "brown", "blonde", "green", "purple", "pink", "white", "gray"]
CLOTHING_OPTIONS = [
    "no clothes",
    "purple apron",
    "gray armor",
    "brown armor",
    "pink scarf",
    "blue clothing",
    "red robe",
    "green tunic",
    "black cloak",
    "white dress",
    "yellow shirt",
    "leather jacket",
]
# Create Gradio interface
def create_interface():
    """Build and return the Gradio Blocks UI for the generator.

    Lays out character controls (left column), the output image and status
    text (right column), and an Examples gallery, and wires the Generate
    button to generate_pixel_art(). Returns the un-launched gr.Blocks app.
    """
    with gr.Blocks(title="Pixel Art Character Generator", theme=gr.themes.Soft()) as app:
        # Header banner with a CPU-speed warning.
        gr.HTML("""
        <div style="text-align: center; margin-bottom: 20px;">
            <h1>🎮 Pixel Art Character Generator</h1>
            <p>Generate pixel art characters using a fine-tuned FLUX model</p>
            <div style="background: #fff3cd; border: 1px solid #ffeaa7; padding: 10px; border-radius: 5px; margin: 10px 0;">
                <strong>⚠️ CPU Mode:</strong> Generation takes 5-15 minutes. Please be patient!
            </div>
        </div>
        """)
        with gr.Row():
            # Left column: all generation controls.
            with gr.Column(scale=1):
                gr.HTML("<h3>Character Settings</h3>")
                hair_color = gr.Dropdown(
                    choices=HAIR_COLORS,
                    value="blue",
                    label="Hair Color"
                )
                clothing = gr.Dropdown(
                    choices=CLOTHING_OPTIONS,
                    value="purple apron",
                    label="Clothing"
                )
                # A non-empty custom prompt overrides the dropdowns above.
                custom_prompt = gr.Textbox(
                    label="Custom Prompt (Optional)",
                    placeholder="Leave empty to use hair color and clothing settings above",
                    lines=3
                )
                # Collapsed by default: knobs most users won't need to touch.
                with gr.Accordion("Advanced Settings", open=False):
                    negative_prompt = gr.Textbox(
                        label="Negative Prompt",
                        value="blurry, low quality, distorted, ugly, nsfw",
                        lines=2
                    )
                    with gr.Row():
                        # step=64: FLUX resolutions must be multiples of 64.
                        width = gr.Slider(256, 512, 384, step=64, label="Width (smaller = faster)")
                        height = gr.Slider(256, 512, 384, step=64, label="Height (smaller = faster)")
                    with gr.Row():
                        guidance_scale = gr.Slider(1, 15, 7.5, step=0.5, label="Guidance Scale")
                        num_steps = gr.Slider(10, 30, 20, step=2, label="Steps (fewer = faster)")
                    # precision=0 forces an integer value (-1 = random seed).
                    seed = gr.Number(
                        label="Seed (-1 for random)",
                        value=-1,
                        precision=0
                    )
                gr.HTML("""
                <div style="background: #e8f5e8; border: 1px solid #c3e6c3; padding: 8px; border-radius: 4px; margin: 10px 0;">
                    <small><strong>💡 CPU Tips:</strong> Lower resolution (256x256) and fewer steps (10-15) will generate faster</small>
                </div>
                """)
                generate_btn = gr.Button("🎨 Generate Character", variant="primary", size="lg")
            # Right column: generation results.
            with gr.Column(scale=1):
                gr.HTML("<h3>Generated Character</h3>")
                output_image = gr.Image(label="Generated Pixel Art", height=400)
                output_info = gr.Textbox(label="Generation Info", interactive=False)
        # Example gallery — rows match the input widget order below.
        with gr.Row():
            gr.HTML("<h3>Example Prompts</h3>")
        examples = [
            ["blue", "purple apron", "", "blurry, low quality, distorted, ugly", 384, 384, 7.5, 20, 42],
            ["red", "gray armor", "", "blurry, low quality, distorted, ugly", 384, 384, 7.5, 20, 123],
            ["black", "pink scarf", "", "blurry, low quality, distorted, ugly", 256, 256, 7.0, 15, 456],
            ["", "", "pixel art character, mage with purple robes, simple style", "blurry, low quality", 256, 256, 8.0, 15, 789]
        ]
        # cache_examples=False: each example would take minutes to pre-render
        # on CPU, so examples only fill the inputs instead of running eagerly.
        gr.Examples(
            examples=examples,
            inputs=[hair_color, clothing, custom_prompt, negative_prompt, width, height, guidance_scale, num_steps, seed],
            outputs=[output_image, output_info],
            fn=generate_pixel_art,
            cache_examples=False
        )
        # Event handlers
        generate_btn.click(
            fn=generate_pixel_art,
            inputs=[hair_color, clothing, custom_prompt, negative_prompt, width, height, guidance_scale, num_steps, seed],
            outputs=[output_image, output_info]
        )
    return app
if __name__ == "__main__":
    app = create_interface()
    # Serialize requests: CPU inference is slow, so allow only one generation
    # at a time and cap the waiting queue at 5 users.
    app.queue(max_size=5, default_concurrency_limit=1)
    app.launch(show_error=True)