# SaltedAI — app.py
# Author: Revrse (Hugging Face Space)
# Last update: commit ef09491 (verified)
import gradio as gr
import numpy as np
from PIL import Image, ImageDraw
import requests
import io
import os
import spaces
import json
import re
import torch
from diffusers import FluxKontextPipeline
# Model loader for the FLUX.1 Kontext editing pipeline
@spaces.GPU
def load_flux_model():
    """Instantiate the FLUX.1 Kontext pipeline and move it to the GPU.

    Returns:
        The ready-to-use pipeline on CUDA, or None when loading fails
        (e.g. gated weights, missing CUDA device) — callers treat None
        as "model unavailable".
    """
    try:
        pipeline = FluxKontextPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-Kontext-dev",
            torch_dtype=torch.bfloat16,
        )
        return pipeline.to("cuda")
    except Exception as e:
        # Best-effort: log and signal failure via None rather than raising.
        print(f"Failed to load FLUX model: {e}")
        return None
# Module-level cache for the FLUX pipeline so the model is loaded at most
# once per process; populated lazily by flux_inpainting().
flux_pipe = None
def fuzzy_match_object(user_input, detected_labels):
    """
    Match a user-supplied object name against detector output.

    Matching is tried in order, per detection: exact label match, simple
    English plural in either direction ("cars" vs "car"), substring
    containment, and finally a small synonym table (e.g. "puppy" -> "dog").

    Args:
        user_input: Object name typed by the user (any case, surrounding
            whitespace ignored).
        detected_labels: Iterable of detection dicts, each expected to
            carry a 'label' key.

    Returns:
        List of detection dicts that match user_input, in input order;
        each detection appears at most once.
    """
    user_input = user_input.lower().strip()
    # BUG FIX: an empty query used to match every detection through the
    # substring test below ('' in s is always True).
    if not user_input:
        return []

    # Hoisted out of the per-detection loop — the original rebuilt this
    # dict on every iteration.
    synonyms = {
        'person': ['human', 'people', 'man', 'woman', 'individual'],
        'car': ['vehicle', 'automobile', 'auto'],
        'bike': ['bicycle', 'cycle'],
        'phone': ['mobile', 'cellphone', 'smartphone'],
        'tv': ['television', 'telly'],
        'couch': ['sofa', 'settee'],
        'bag': ['purse', 'handbag', 'backpack'],
        'glasses': ['spectacles', 'eyeglasses'],
        'plane': ['airplane', 'aircraft'],
        'boat': ['ship', 'vessel'],
        'dog': ['puppy', 'canine'],
        'cat': ['kitten', 'feline']
    }

    matches = []
    for detection in detected_labels:
        label = detection.get('label', '').lower()
        # BUG FIX: a missing/empty label used to match every query via
        # the substring test ('' in user_input is always True).
        if not label:
            continue
        # Exact match
        if label == user_input:
            matches.append(detection)
            continue
        # Handle plurals (either side may carry the trailing 's')
        if user_input.endswith('s') and label == user_input[:-1]:
            matches.append(detection)
            continue
        if label.endswith('s') and user_input == label[:-1]:
            matches.append(detection)
            continue
        # Substring matching (e.g. "light" vs "traffic light")
        if user_input in label or label in user_input:
            matches.append(detection)
            continue
        # Synonym matching in either direction; break so a detection is
        # appended at most once.
        for main_word, synonym_list in synonyms.items():
            if (user_input == main_word and label in synonym_list) or \
               (user_input in synonym_list and label == main_word):
                matches.append(detection)
                break
    return matches
@spaces.GPU
def flux_inpainting(image, object_name, guidance_scale=2.5, steps=28):
    """Remove *object_name* from *image* with the FLUX.1 Kontext pipeline.

    Args:
        image: PIL image to edit (converted to RGB before inference).
        object_name: Plain-language name of the object to erase.
        guidance_scale: Prompt-adherence strength passed to the pipeline.
        steps: Number of denoising steps.

    Returns:
        (result_image, True) on success, (None, False) on any failure.
    """
    global flux_pipe
    try:
        # Lazily populate the module-level pipeline cache on first use.
        if flux_pipe is None:
            print("Loading FLUX.1 Kontext model...")
            flux_pipe = load_flux_model()
            if flux_pipe is None:
                raise Exception("Failed to load FLUX model")

        removal_prompt = f"Remove the {object_name} from this image, fill with background that matches the surrounding environment, photorealistic, seamless, high quality"

        # Keep the output at the input's resolution.
        width, height = image.size
        output = flux_pipe(
            image=image.convert("RGB"),
            prompt=removal_prompt,
            guidance_scale=guidance_scale,
            width=width,
            height=height,
            num_inference_steps=steps,
            # Fixed seed keeps results reproducible across runs.
            generator=torch.Generator().manual_seed(42),
        )
        return output.images[0], True
    except Exception as e:
        print(f"FLUX inpainting error: {str(e)}")
        return None, False
@spaces.GPU
def remove_objects(image, object_name, guidance_scale, steps):
    """
    Entry point for the Gradio click handler: validate inputs, then remove
    the named object from the image via FLUX inpainting.

    Args:
        image: Uploaded PIL image, or None when nothing was uploaded.
        object_name: Name of the object to remove, as typed by the user.
        guidance_scale: Guidance value from the UI slider.
        steps: Inference-step count from the UI slider.

    Returns:
        A (result_image, status_message) pair — always exactly two values,
        matching the click handler's two outputs.
    """
    try:
        if image is None:
            raise gr.Error("Please upload an image")
        if not object_name or not object_name.strip():
            raise gr.Error("Please enter the name of the object you want to remove")

        # Try to get token from multiple sources (only its presence is
        # checked here; HF libraries pick it up from the environment).
        token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
        if not token:
            raise gr.Error("Please provide your Hugging Face token or set HF_TOKEN in Space secrets")

        # Use FLUX.1 Kontext for intelligent object removal
        print("Using FLUX.1 Kontext for advanced object removal...")
        result_image, flux_success = flux_inpainting(image, object_name, guidance_scale, steps)

        if flux_success and result_image:
            status_msg = f"✅ Successfully removed '{object_name}' object(s)\n"
            status_msg += f"⚙️ Settings: Guidance={guidance_scale}, Steps={steps}"
            return result_image, status_msg

        # BUG FIX: the original returned result_image here, but it is None
        # whenever flux_success is False — show the untouched input instead.
        status_msg = f"⚠️ Inpainting failed, but detection was successful\n"
        status_msg += f"💡 Try adjusting guidance scale or steps, or check GPU availability"
        return image, status_msg
    except Exception as e:
        # BUG FIX: the original returned a 3-tuple (image, None, msg) here
        # while the click handler declares only two outputs; return exactly
        # (image, message) so Gradio can render the error.
        return image, f"❌ Error: {str(e)}"
# ---------------------------------------------------------------------------
# Gradio UI: two-column layout — image/prompt/settings on the left, result
# image and status text on the right. One click handler wires the four
# inputs to remove_objects, whose (image, status) return matches the two
# declared outputs.
# ---------------------------------------------------------------------------
with gr.Blocks(
    fill_height=True,
    title="Professional Object Removal",
    theme=gr.themes.Soft()
) as demo:
    gr.Markdown("""
    # 🚀 Professional Object Removal using Advanced AI
    Upload an image and specify **ANY object** you want to remove with professional results!
    """)
    with gr.Row():
        with gr.Column(scale=1):
            # Input section
            gr.Markdown("## 📤 Input")
            input_image = gr.Image(
                label="Upload Image",
                type="pil",  # hand PIL.Image objects straight to remove_objects
                height=300
            )
            object_name = gr.Textbox(
                label="🎯 Object to Remove",
                placeholder="Enter any object name (e.g., person, car, dog, bottle, tree, sign...)",
                value="person",
                info="Type ANY object name - supports synonyms and variations!"
            )
            # Clickable examples that fill the textbox above
            with gr.Row():
                gr.Examples(
                    examples=[
                        ["person"], ["car"], ["dog"], ["cat"], ["bottle"],
                        ["chair"], ["tree"], ["sign"], ["bag"], ["phone"]
                    ],
                    inputs=[object_name],
                    label="💡 Quick Examples"
                )
            # Collapsed by default; defaults mirror flux_inpainting's
            # signature (guidance 2.5, 28 steps).
            with gr.Accordion("⚙️ Advanced Settings", open=False):
                guidance_scale = gr.Slider(
                    minimum=1.0,
                    maximum=10.0,
                    value=2.5,
                    step=0.1,
                    label="🎯 Guidance Scale",
                    info="Higher = more faithful to prompt, lower = more creative"
                )
                steps = gr.Slider(
                    minimum=10,
                    maximum=50,
                    value=28,
                    step=2,
                    label="🔄 Steps",
                    info="More steps = higher quality but slower processing"
                )
            remove_btn = gr.Button("🚀 Remove Objects", variant="primary", size="lg")
        with gr.Column(scale=2):
            # Output section
            gr.Markdown("## 📋 Results")
            with gr.Row():
                output_image = gr.Image(
                    label="🖼️ Result",
                    type="pil",
                    height=300
                )
                status_text = gr.Textbox(
                    label="📊 Status & Detection Info",
                    interactive=False,
                    max_lines=5
                )
    # Event handlers
    remove_btn.click(
        fn=remove_objects,
        inputs=[
            input_image,
            object_name,
            guidance_scale,
            steps,
        ],
        outputs=[output_image, status_text]
    )

if __name__ == "__main__":
    demo.launch()