import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline
from PIL import Image, ImageDraw, ImageFilter, ImageOps
# Image-to-image pipeline. generate_clothing() calls the pipeline with
# `image=` and `strength=` arguments; the plain text-to-image
# StableDiffusionPipeline does not accept those, so loading it here made
# every generation fail at call time. Img2Img is the matching pipeline.
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # half precision to fit consumer VRAM
    safety_checker=None,
    requires_safety_checker=False,
)
# Trade a little speed for a lower peak-VRAM attention computation.
pipe.enable_attention_slicing()
# We'll simulate inpainting using img2img + compositing (see
# create_clothing_area_mask / generate_clothing below).
# Garment options shown in the UI dropdown (keys) mapped to the diffusion
# prompt used for each (values); generate_clothing appends a quality suffix.
PROMPTS = {
    "Sari": "woman wearing beautiful red and gold traditional indian sari, professional fashion photo",
    "Kimono": "person wearing elegant traditional japanese kimono, professional portrait",
    "Dashiki": "person wearing vibrant african dashiki with patterns, professional photo",
    "Qipao": "woman wearing traditional chinese qipao dress, elegant, professional"
}
def create_clothing_area_mask(image):
    """Return a soft greyscale ('L') mask covering the torso of `image`.

    The mask is a solid ellipse over the central torso region, heavily
    blurred so a later Image.composite() blends smoothly at the edges.
    """
    width, height = image.size
    clothing_mask = Image.new('L', (width, height), 0)
    # Solid ellipse roughly where the torso sits; background stays 0.
    ImageDraw.Draw(clothing_mask).ellipse(
        [width * 0.25, height * 0.35, width * 0.75, height * 0.75],
        fill=255,
    )
    # Large blur radius feathers the hard ellipse edge into a gradient.
    return clothing_mask.filter(ImageFilter.GaussianBlur(radius=40))
@spaces.GPU(duration=60)
def generate_clothing(image, clothing_type):
    """Re-dress the person in `image` with the selected traditional garment.

    Runs img2img over the whole photo, then composites the generated result
    back onto the original through a soft torso mask so the face and hands
    are preserved.

    Args:
        image: Uploaded photo (PIL.Image or numpy array), or None.
        clothing_type: Key into PROMPTS (e.g. "Sari").

    Returns:
        (result, status): the blended PIL image (None on failure) and a
        human-readable status string. Exceptions are reported via the
        status string rather than raised, so the UI never crashes.
    """
    if image is None:
        return None, "Please upload an image"
    try:
        pipe.to("cuda")
        # Normalize to an RGB PIL image whatever the upload type was.
        # (The original only converted the ndarray path, so an RGBA/P
        # PIL upload reached the pipeline unconverted and failed.)
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        image = image.convert("RGB")
        original = image.copy()
        # Bound the working size to keep VRAM and latency reasonable.
        if max(image.size) > 512:
            image.thumbnail((512, 512), Image.Resampling.LANCZOS)
            original.thumbnail((512, 512), Image.Resampling.LANCZOS)
        # SD's UNet needs spatial dimensions divisible by 8.
        w, h = image.size
        w -= w % 8
        h -= h % 8
        image = image.resize((w, h), Image.Resampling.LANCZOS)
        original = original.resize((w, h), Image.Resampling.LANCZOS)
        prompt = PROMPTS[clothing_type] + ", high quality, professional photography"
        negative = "ugly, deformed, bad anatomy, bad hands"
        # Moderate strength: restyle clothing while keeping composition.
        with torch.autocast("cuda"):
            generated = pipe(
                prompt=prompt,
                negative_prompt=negative,
                image=image,
                strength=0.7,
                num_inference_steps=30,
                guidance_scale=7.5,
            ).images[0]
        # Generated pixels inside the soft torso mask, original elsewhere.
        mask = create_clothing_area_mask(original)
        final = Image.composite(generated, original, mask)
        return final, f"✅ {clothing_type} added successfully!"
    except Exception as e:
        return None, f"Error: {str(e)}"
    finally:
        # Always release the GPU — the original skipped this cleanup on
        # the exception path and left the model resident on CUDA.
        pipe.to("cpu")
        torch.cuda.empty_cache()
# UI
# Two-column layout: inputs on the left, result + status on the right.
with gr.Blocks(title="Traditional Clothing AI") as app:
    gr.Markdown("""
# 👘 Traditional Clothing AI - Alternative Method
This version uses regular SD model + smart blending (avoids inpainting issues).
""")
    with gr.Row():
        with gr.Column():
            # Inputs: photo upload, garment choice, and the trigger button.
            input_img = gr.Image(type="pil", label="Upload Photo")
            clothing = gr.Dropdown(list(PROMPTS.keys()), value="Sari", label="Clothing")
            btn = gr.Button("Generate", variant="primary")
        with gr.Column():
            # Outputs: generated image plus a status/error message.
            output = gr.Image(label="Result")
            status = gr.Textbox(label="Status")
    gr.Markdown("""
### Why this works better:
- Uses standard SD model (always downloads correctly)
- Smart blending preserves face/hands
- No special inpainting model needed
""")
    # Wire the button: (image, clothing choice) -> (result image, status text).
    btn.click(generate_clothing, [input_img, clothing], [output, status])
app.launch()