Spaces:
Running
on
Zero
Running
on
Zero
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import torch
|
| 3 |
+
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
|
| 4 |
+
from diffusers import DDIMScheduler
|
| 5 |
+
from PIL import Image
|
| 6 |
+
import cv2
|
| 7 |
+
import numpy as np
|
| 8 |
+
from controlnet_aux import CannyDetector
|
| 9 |
+
import os
|
| 10 |
+
|
| 11 |
+
class SketchToRealisticFace:
    """Sketch-to-realistic-face generator built on Stable Diffusion + ControlNet.

    Loads a Canny-edge ControlNet and a Realistic Vision checkpoint once at
    construction time; `generate()` then turns an uploaded face sketch into a
    photorealistic portrait conditioned on the sketch's Canny edges.
    """

    def __init__(self):
        """Load models, configure the scheduler, and move the pipeline to GPU."""
        # ControlNet trained on Canny edge maps; fp16 halves VRAM usage.
        self.controlnet = ControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny",
            torch_dtype=torch.float16,
        )

        # Realistic Vision (SD 1.5 fine-tune) loaded from a local single-file
        # checkpoint. The safety checker is disabled deliberately for this demo.
        self.pipe = StableDiffusionControlNetPipeline.from_single_file(
            "realisticVisionV60B1_v51HyperVAE.safetensors",
            controlnet=self.controlnet,
            torch_dtype=torch.float16,
            safety_checker=None,
            requires_safety_checker=False,
        )

        # DDIM gives deterministic, good-quality sampling at low step counts.
        self.pipe.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)

        # Move to GPU if available.
        if torch.cuda.is_available():
            self.pipe = self.pipe.to("cuda")

        # Converts sketches into Canny edge control images.
        self.canny_detector = CannyDetector()

        # BUGFIX: diffusers pipelines have no `enable_memory_efficient_attention`
        # method — the original call raised AttributeError at startup. Use the
        # real xformers hook when available, else fall back to attention
        # slicing, which is always available in diffusers.
        try:
            self.pipe.enable_xformers_memory_efficient_attention()
        except Exception:
            self.pipe.enable_attention_slicing()

        # Default prompts tuned for photorealistic portrait output.
        self.default_prompt = "RAW photo, portrait, 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3"
        self.default_negative_prompt = "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck"

    def preprocess_sketch(self, sketch_image, low_threshold=100, high_threshold=200):
        """Convert a sketch to an RGB Canny-edge control image.

        Args:
            sketch_image: PIL image of the sketch.
            low_threshold / high_threshold: Canny hysteresis thresholds.

        Returns:
            RGB PIL image of the detected edges, same size as the input.
        """
        image_array = np.array(sketch_image)

        # Canny expects a single-channel image; collapse RGB uploads to gray.
        if len(image_array.shape) == 3:
            image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2GRAY)

        canny = cv2.Canny(image_array, low_threshold, high_threshold)
        # SD ControlNet expects a 3-channel control image.
        control_image = Image.fromarray(canny).convert("RGB")

        return control_image

    def generate(self, sketch_image, custom_prompt=None, width=512, height=512, seed=None):
        """Generate a realistic face image from a sketch.

        Args:
            sketch_image: PIL image of the sketch, or None.
            custom_prompt: optional prompt text; falls back to the default
                prompt when empty or whitespace-only.
            width / height: output resolution in pixels.
            seed: optional int for reproducible results (0 is a valid seed).

        Returns:
            Tuple of (generated PIL image, Canny control image), or
            (None, None) when no sketch was supplied.
        """
        if sketch_image is None:
            return None, None

        # Use the custom prompt only when it contains non-whitespace text.
        prompt = custom_prompt if custom_prompt and custom_prompt.strip() else self.default_prompt

        # Resize sketch to the target dimensions before edge detection.
        sketch_image = sketch_image.resize((width, height))
        control_image = self.preprocess_sketch(sketch_image)

        # BUGFIX: compare against None so that seed=0 is honored — the original
        # `if seed` treated 0 as "no seed" because 0 is falsy.
        generator = (
            torch.Generator(device=self.pipe.device).manual_seed(seed)
            if seed is not None
            else None
        )

        with torch.autocast("cuda" if torch.cuda.is_available() else "cpu"):
            result = self.pipe(
                prompt=prompt,
                image=control_image,
                num_inference_steps=20,
                guidance_scale=7.0,
                controlnet_conditioning_scale=1.0,
                generator=generator,
                width=width,
                height=height,
                negative_prompt=self.default_negative_prompt,
            )

        return result.images[0], control_image
|
| 93 |
+
|
| 94 |
+
# Initialize the generator globally so the heavy model download/load happens
# exactly once at import time rather than per request. Note this module-level
# `generator` (the app singleton) is distinct from the local torch.Generator
# used inside SketchToRealisticFace.generate.
print("Loading model... This may take a few minutes.")
generator = SketchToRealisticFace()
print("Model loaded successfully!")
|
| 98 |
+
|
| 99 |
+
def generate_face(sketch_image, custom_prompt, seed):
    """Gradio callback: run the global generator on the uploaded sketch.

    Args:
        sketch_image: PIL image from the upload widget, or None.
        custom_prompt: optional prompt text from the textbox.
        seed: optional number from the seed field (0 is a valid seed).

    Returns:
        (realistic_face, control_image) on success; (None, None) on any
        failure so the UI shows empty outputs instead of crashing.
    """
    try:
        # BUGFIX: the original `int(seed) if seed else None` silently dropped
        # seed=0 because 0 is falsy; only None means "no seed".
        seed_int = int(seed) if seed is not None else None

        realistic_face, control_image = generator.generate(
            sketch_image=sketch_image,
            custom_prompt=custom_prompt,
            seed=seed_int,
        )
        return realistic_face, control_image

    except Exception as e:
        # Deliberate best-effort boundary for the UI: log and return empty
        # outputs rather than surfacing a traceback to the user.
        print(f"Error: {str(e)}")
        return None, None
|
| 117 |
+
|
| 118 |
+
# Create Gradio interface.
# BUGFIX: the emoji in the user-facing strings were mojibake from a bad
# encoding round-trip ("π¨", "π", "π‘"); restored to the intended emoji.
with gr.Blocks(title="Sketch to Realistic Face Generator", theme=gr.themes.Soft()) as app:
    gr.Markdown(
        """
        # 🎨 Sketch to Realistic Face Generator

        Transform your sketches into realistic faces using Stable Diffusion with ControlNet!

        **Instructions:**
        1. Upload a sketch or drawing of a face
        2. Optionally customize the prompt
        3. Set a seed for reproducible results (optional)
        4. Click "Generate Realistic Face"
        """
    )

    with gr.Row():
        with gr.Column():
            # Input components.
            sketch_input = gr.Image(
                label="Upload Sketch",
                type="pil",
                height=400
            )

            custom_prompt = gr.Textbox(
                label="Custom Prompt (optional)",
                placeholder="Leave empty to use default prompt, or customize: 'portrait of a young person, professional headshot, studio lighting...'",
                lines=3
            )

            seed_input = gr.Number(
                label="Seed (optional)",
                placeholder="Enter a number for reproducible results",
                precision=0
            )

            generate_btn = gr.Button(
                "🚀 Generate Realistic Face",
                variant="primary",
                size="lg"
            )

        with gr.Column():
            # Output components: generated image plus the intermediate
            # Canny edge map so users can see how the sketch was interpreted.
            with gr.Row():
                realistic_output = gr.Image(
                    label="Generated Realistic Face",
                    height=400
                )
                control_output = gr.Image(
                    label="Control Image (Canny Edges)",
                    height=400
                )

    # Show the default prompt so users know what they are overriding.
    gr.Markdown("## 📝 Default Prompt")
    gr.Markdown(f"```{generator.default_prompt}```")

    gr.Markdown(
        """
        ## 💡 Tips:
        - Upload clear sketches with well-defined facial features
        - The model works best with front-facing portraits
        - Use the same seed number to get consistent results
        - Customize the prompt to specify style, lighting, or other details
        - The control image shows how your sketch is interpreted as edges
        """
    )

    # Connect the callback to the interface.
    generate_btn.click(fn=generate_face, inputs=[sketch_input, custom_prompt, seed_input], outputs=[realistic_output, control_output])
|
| 190 |
+
|
| 191 |
+
# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    # share=True requests a public Gradio tunnel link; on managed hosts
    # (e.g. HF Spaces) this flag is typically ignored — TODO confirm.
    app.launch(share=True)
|