# NOTE(review): removed non-Python export artifacts that preceded the code
# (file-size banner, git-blame commit-hash gutter, line-number gutter) —
# they were residue from a scraped blame view and broke parsing.
import gradio as gr
import numpy as np
from PIL import Image, ImageDraw
import requests
import io
import os
import spaces
import json
import re
import torch
from diffusers import FluxKontextPipeline
# Initialize FLUX model for advanced inpainting
@spaces.GPU
def load_flux_model():
    """Load FLUX.1 Kontext model for high-quality object removal.

    Returns:
        The pipeline moved to CUDA, or None when loading fails
        (best-effort: the failure is printed, not raised).
    """
    model_id = "black-forest-labs/FLUX.1-Kontext-dev"
    try:
        pipeline = FluxKontextPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16,
        )
        return pipeline.to("cuda")
    except Exception as e:
        print(f"Failed to load FLUX model: {e}")
        return None
# Global variable to store the model (loaded once)
# None until flux_inpainting() lazily loads it on the first GPU call.
flux_pipe = None
# Synonym table for fuzzy_match_object: canonical object name -> alternative
# names. Hoisted to module level so it is built once, not rebuilt for every
# detection on every call (it was loop-invariant).
_OBJECT_SYNONYMS = {
    'person': ['human', 'people', 'man', 'woman', 'individual'],
    'car': ['vehicle', 'automobile', 'auto'],
    'bike': ['bicycle', 'cycle'],
    'phone': ['mobile', 'cellphone', 'smartphone'],
    'tv': ['television', 'telly'],
    'couch': ['sofa', 'settee'],
    'bag': ['purse', 'handbag', 'backpack'],
    'glasses': ['spectacles', 'eyeglasses'],
    'plane': ['airplane', 'aircraft'],
    'boat': ['ship', 'vessel'],
    'dog': ['puppy', 'canine'],
    'cat': ['kitten', 'feline'],
}


def fuzzy_match_object(user_input, detected_labels):
    """
    Advanced matching function that handles synonyms, plurals, and fuzzy matching.

    Args:
        user_input: object name typed by the user (case/whitespace insensitive).
        detected_labels: list of detection dicts, each with a 'label' key.

    Returns:
        The detections whose label matches the query exactly, as a
        singular/plural variant, as a substring (either direction), or via
        the synonym table. Order of the input list is preserved; each
        detection appears at most once.
    """
    user_input = user_input.lower().strip()
    matches = []
    # Guard: an empty query would match every label through the substring
    # test below ('' is a substring of any string).
    if not user_input:
        return matches
    for detection in detected_labels:
        label = detection.get('label', '').lower()
        # Guard: an empty label would likewise match every query.
        if not label:
            continue
        # Exact match
        if label == user_input:
            matches.append(detection)
            continue
        # Handle plurals in either direction ("cars" vs "car")
        if user_input.endswith('s') and label == user_input[:-1]:
            matches.append(detection)
            continue
        if label.endswith('s') and user_input == label[:-1]:
            matches.append(detection)
            continue
        # Substring matching in either direction
        if user_input in label or label in user_input:
            matches.append(detection)
            continue
        # Synonym matching (e.g. "couch" <-> "sofa")
        for main_word, synonym_list in _OBJECT_SYNONYMS.items():
            if (user_input == main_word and label in synonym_list) or \
               (user_input in synonym_list and label == main_word):
                matches.append(detection)
                break
    return matches
@spaces.GPU
def flux_inpainting(image, object_name, guidance_scale=2.5, steps=28):
    """
    Use FLUX.1 Kontext for intelligent object removal.

    Lazily loads the global pipeline on first use, then asks the model to
    replace the named object with matching background.

    Args:
        image: PIL image to edit (converted to RGB before the call).
        object_name: plain-text name of the object to remove.
        guidance_scale: prompt-adherence strength passed to the pipeline.
        steps: number of inference steps.

    Returns:
        (result_image, True) on success, (None, False) on any failure.
    """
    global flux_pipe
    try:
        # First call: load the model and cache it in the module global.
        if flux_pipe is None:
            print("Loading FLUX.1 Kontext model...")
            flux_pipe = load_flux_model()
            if flux_pipe is None:
                raise Exception("Failed to load FLUX model")
        width, height = image.size
        removal_prompt = f"Remove the {object_name} from this image, fill with background that matches the surrounding environment, photorealistic, seamless, high quality"
        # Fixed seed keeps the edit reproducible across runs.
        seed_generator = torch.Generator().manual_seed(42)
        output = flux_pipe(
            image=image.convert("RGB"),
            prompt=removal_prompt,
            guidance_scale=guidance_scale,
            width=width,
            height=height,
            num_inference_steps=steps,
            generator=seed_generator,
        )
        return output.images[0], True
    except Exception as e:
        # Best-effort: report and let the caller fall back.
        print(f"FLUX inpainting error: {str(e)}")
        return None, False
@spaces.GPU
def remove_objects(image, object_name, guidance_scale, steps):
    """
    Main function to remove any specified object using FLUX inpainting.

    Args:
        image: uploaded PIL image, or None.
        object_name: name of the object to remove.
        guidance_scale: passed through to flux_inpainting.
        steps: passed through to flux_inpainting.

    Returns:
        (result_image, status_message) — exactly two values, matching the
        two outputs declared on the click handler.

    Raises:
        gr.Error: for input-validation failures (shown by Gradio itself).
    """
    try:
        if image is None:
            raise gr.Error("Please upload an image")
        if not object_name or not object_name.strip():
            raise gr.Error("Please enter the name of the object you want to remove")
        # Try to get token from multiple sources
        token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
        if not token:
            raise gr.Error("Please provide your Hugging Face token or set HF_TOKEN in Space secrets")
        # Use FLUX.1 Kontext for intelligent object removal
        print("Using FLUX.1 Kontext for advanced object removal...")
        result_image, flux_success = flux_inpainting(image, object_name, guidance_scale, steps)
        # Explicit None check: relying on PIL-image truthiness is fragile.
        if flux_success and result_image is not None:
            status_msg = f"✅ Successfully removed '{object_name}' object(s)\n"
            status_msg += f"⚙️ Settings: Guidance={guidance_scale}, Steps={steps}"
            return result_image, status_msg
        else:
            # result_image is None here; the UI shows an empty result with advice.
            status_msg = f"⚠️ Inpainting failed, but detection was successful\n"
            status_msg += f"💡 Try adjusting guidance scale or steps, or check GPU availability"
            return result_image, status_msg
    except gr.Error:
        # BUG FIX: the broad handler below used to swallow these validation
        # errors; re-raise so Gradio displays them to the user.
        raise
    except Exception as e:
        # BUG FIX: the original returned a 3-tuple (image, None, msg) here,
        # but the click handler declares only two outputs.
        return image, f"❌ Error: {str(e)}"
# Create Gradio interface
# Layout: one row with an input column (image upload, object name, quick
# examples, advanced sliders, run button) and a wider output column
# (result image + status textbox).
with gr.Blocks(
    fill_height=True,
    title="Professional Object Removal",
    theme=gr.themes.Soft()
) as demo:
    gr.Markdown("""
    # 🚀 Professional Object Removal using Advanced AI
    Upload an image and specify **ANY object** you want to remove with professional results!
    """)
    with gr.Row():
        with gr.Column(scale=1):
            # Input section
            gr.Markdown("## 📤 Input")
            input_image = gr.Image(
                label="Upload Image",
                type="pil",
                height=300
            )
            object_name = gr.Textbox(
                label="🎯 Object to Remove",
                placeholder="Enter any object name (e.g., person, car, dog, bottle, tree, sign...)",
                value="person",
                info="Type ANY object name - supports synonyms and variations!"
            )
            # Add suggestions
            # One-click example values that populate the object_name textbox.
            with gr.Row():
                gr.Examples(
                    examples=[
                        ["person"], ["car"], ["dog"], ["cat"], ["bottle"],
                        ["chair"], ["tree"], ["sign"], ["bag"], ["phone"]
                    ],
                    inputs=[object_name],
                    label="💡 Quick Examples"
                )
            # Collapsed by default; defaults mirror flux_inpainting's
            # signature (guidance_scale=2.5, steps=28).
            with gr.Accordion("⚙️ Advanced Settings", open=False):
                guidance_scale = gr.Slider(
                    minimum=1.0,
                    maximum=10.0,
                    value=2.5,
                    step=0.1,
                    label="🎯 Guidance Scale",
                    info="Higher = more faithful to prompt, lower = more creative"
                )
                steps = gr.Slider(
                    minimum=10,
                    maximum=50,
                    value=28,
                    step=2,
                    label="🔄 Steps",
                    info="More steps = higher quality but slower processing"
                )
            remove_btn = gr.Button("🚀 Remove Objects", variant="primary", size="lg")
        with gr.Column(scale=2):
            # Output section
            gr.Markdown("## 📋 Results")
            with gr.Row():
                output_image = gr.Image(
                    label="🖼️ Result",
                    type="pil",
                    height=300
                )
            status_text = gr.Textbox(
                label="📊 Status & Detection Info",
                interactive=False,
                max_lines=5
            )
    # Event handlers
    # NOTE(review): remove_objects must return exactly two values to match
    # the two outputs declared here.
    remove_btn.click(
        fn=remove_objects,
        inputs=[
            input_image,
            object_name,
            guidance_scale,
            steps,
        ],
        outputs=[output_image, status_text]
    )
if __name__ == "__main__":
    demo.launch()