# feature-editor / app_cloud_gpu.py
# Author: Your Name
# Last commit 241b467: Add additional entries to .gitignore for Python
# environments, cache files, distribution artifacts, IDE settings, and
# local development logs
import os
import gradio as gr
import requests
from PIL import Image
import numpy as np
import io
import json
import base64
# Global variables
# Feature categories shown in the UI's first dropdown.
FEATURE_TYPES = ["Eyes", "Nose", "Lips", "Face Shape", "Hair", "Body"]
# Per-feature modification presets shown in the UI's second dropdown.
# Keys must match FEATURE_TYPES; values populate the dependent dropdown.
MODIFICATION_PRESETS = {
"Eyes": ["Larger", "Smaller", "Change Color", "Change Shape"],
"Nose": ["Refine", "Reshape", "Resize"],
"Lips": ["Fuller", "Thinner", "Change Color"],
"Face Shape": ["Slim", "Round", "Define Jawline", "Soften Features"],
"Hair": ["Change Color", "Change Style", "Add Volume"],
"Body": ["Slim", "Athletic", "Curvy", "Muscular"]
}
# Mapping from our UI controls to InstructPix2Pix instructions
# Outer keys match FEATURE_TYPES; inner keys match MODIFICATION_PRESETS for
# that feature. Values are the literal text prompts sent to the model.
INSTRUCTION_MAPPING = {
"Eyes": {
"Larger": "make the eyes larger",
"Smaller": "make the eyes smaller",
"Change Color": "change the eye color to blue",
"Change Shape": "make the eyes more almond shaped"
},
"Nose": {
"Refine": "refine the nose shape",
"Reshape": "make the nose more straight",
"Resize": "make the nose smaller"
},
"Lips": {
"Fuller": "make the lips fuller",
"Thinner": "make the lips thinner",
"Change Color": "make the lips more red"
},
"Face Shape": {
"Slim": "make the face slimmer",
"Round": "make the face more round",
"Define Jawline": "define the jawline more",
"Soften Features": "soften the facial features"
},
"Hair": {
"Change Color": "change the hair color to blonde",
"Change Style": "make the hair wavy",
"Add Volume": "add more volume to the hair"
},
"Body": {
"Slim": "make the body slimmer",
"Athletic": "make the body more athletic",
"Curvy": "make the body more curvy",
"Muscular": "make the body more muscular"
}
}
# Function to process image using InstructPix2Pix
# Public Space endpoint used for GPU-accelerated editing.
IP2P_API_URL = "https://timbrooks-instruct-pix2pix.hf.space/api/predict"
# InstructPix2Pix works best with images around 512x512.
IP2P_MAX_DIM = 512


def _build_instruction(feature_type, modification_type, intensity, custom_prompt, use_custom_prompt):
    """Return the text instruction to send to InstructPix2Pix.

    Uses the custom prompt when enabled and non-blank (a whitespace-only
    prompt falls back to the preset), otherwise looks up the preset in
    INSTRUCTION_MAPPING. A low intensity prepends "slightly", a high
    intensity prepends "dramatically".
    """
    if use_custom_prompt and custom_prompt and custom_prompt.strip():
        instruction = custom_prompt.strip()
    else:
        instruction = INSTRUCTION_MAPPING[feature_type][modification_type]
    # Adjust instruction based on intensity
    if intensity < 0.3:
        return "slightly " + instruction
    if intensity > 0.7:
        return "dramatically " + instruction
    return instruction


def _image_to_base64_png(image):
    """Encode a numpy array or PIL image as a base64 PNG string.

    Downscales (preserving aspect ratio) so the longest side is at most
    IP2P_MAX_DIM before encoding.
    """
    image_pil = Image.fromarray(image) if isinstance(image, np.ndarray) else image
    width, height = image_pil.size
    if width > IP2P_MAX_DIM or height > IP2P_MAX_DIM:
        if width > height:
            new_width = IP2P_MAX_DIM
            new_height = int(height * (IP2P_MAX_DIM / width))
        else:
            new_height = IP2P_MAX_DIM
            new_width = int(width * (IP2P_MAX_DIM / height))
        image_pil = image_pil.resize((new_width, new_height), Image.LANCZOS)
    buffered = io.BytesIO()
    image_pil.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode()


def process_with_instructpix2pix(image, feature_type, modification_type, intensity, custom_prompt="", use_custom_prompt=False):
    """Edit an image via the public InstructPix2Pix Hugging Face Space.

    Args:
        image: PIL image or numpy array to edit; None yields an error status.
        feature_type: outer key into INSTRUCTION_MAPPING (e.g. "Eyes").
        modification_type: inner key into INSTRUCTION_MAPPING (e.g. "Larger").
        intensity: slider value in [0.1, 1.0]; <0.3 softens, >0.7 strengthens.
        custom_prompt: free-form instruction used when use_custom_prompt is True.
        use_custom_prompt: prefer custom_prompt over the preset mapping.

    Returns:
        (image, status_message) tuple. On success the edited PIL image is
        returned; on any failure the input image is returned unchanged
        together with an error string (the UI shows it in the status box).
    """
    if image is None:
        return None, "Please upload an image first."
    try:
        # Prepare the instruction
        instruction = _build_instruction(
            feature_type, modification_type, intensity, custom_prompt, use_custom_prompt
        )
        # Convert image to base64 for API request
        img_str = _image_to_base64_png(image)
        # Create API request to InstructPix2Pix Space
        payload = {
            "data": [
                f"data:image/png;base64,{img_str}",  # Input image
                instruction,  # Instruction
                50,     # Steps
                7.5,    # Text CFG
                1.5,    # Image CFG
                1371,   # Seed
                False,  # Randomize seed
                True,   # Fix CFG
                False   # Randomize CFG
            ]
        }
        # Bound the request so a busy or unreachable Space cannot hang the UI forever.
        response = requests.post(IP2P_API_URL, json=payload, timeout=120)
        if response.status_code != 200:
            return image, f"Error: InstructPix2Pix returned status code {response.status_code}."
        result = response.json()
        # Extract the output image
        if 'data' in result and len(result['data']) >= 1:
            output_data = result['data'][0]
            if isinstance(output_data, str) and output_data.startswith('data:image'):
                # Handle base64 encoded image: strip the "data:image/...;base64," header.
                image_data = output_data.split(',')[1]
                output_image = Image.open(io.BytesIO(base64.b64decode(image_data)))
                return output_image, f"Edit completed successfully using instruction: '{instruction}'"
        # If we couldn't parse the image from the response
        return image, "Error: Could not parse response from InstructPix2Pix."
    except Exception as e:
        import traceback
        traceback.print_exc()
        return image, f"Error processing with InstructPix2Pix: {str(e)}"
# UI Components
def create_ui():
    """Build and return the Gradio Blocks application.

    Layout: a left column with the input image, feature/modification
    dropdowns, an intensity slider and a custom-prompt section; a right
    column with the edited image and an (unpopulated) edit-history gallery.
    Event handlers wire the dropdowns and buttons to
    process_with_instructpix2pix.
    """
    with gr.Blocks(title="AI-Powered Facial & Body Feature Editor") as app:
        gr.Markdown("# AI-Powered Facial & Body Feature Editor")
        gr.Markdown("Upload an image and use the controls to edit specific facial and body features using cloud GPU processing.")
        with gr.Row():
            with gr.Column(scale=1):
                # Input controls
                input_image = gr.Image(label="Upload Image", type="pil")
                with gr.Group():
                    gr.Markdown("### Feature Selection")
                    feature_type = gr.Dropdown(
                        choices=FEATURE_TYPES,
                        label="Select Feature",
                        value="Eyes"
                    )
                    # Initialize with choices for the default feature (Eyes)
                    modification_type = gr.Dropdown(
                        choices=MODIFICATION_PRESETS["Eyes"],
                        label="Modification Type",
                        value="Larger"
                    )
                    intensity = gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.5,
                        step=0.1,
                        label="Intensity"
                    )
                with gr.Group():
                    gr.Markdown("### Custom Prompt (Advanced)")
                    use_custom_prompt = gr.Checkbox(
                        label="Use Custom Prompt",
                        value=False
                    )
                    custom_prompt = gr.Textbox(
                        label="Custom Prompt",
                        placeholder="e.g., make the eyes blue and add long eyelashes"
                    )
                edit_button = gr.Button("Apply Edit", variant="primary")
                reset_button = gr.Button("Reset")
                status_text = gr.Textbox(label="Status", interactive=False)
            with gr.Column(scale=1):
                # Output display
                output_image = gr.Image(label="Edited Image", type="pil")
                with gr.Accordion("Edit History", open=False):
                    # NOTE(review): edit_history/history_gallery are never
                    # updated by any handler — history display is a stub.
                    edit_history = gr.State([])
                    history_gallery = gr.Gallery(label="Previous Edits")
        # Information about cloud processing
        with gr.Accordion("Cloud GPU Processing Information", open=True):
            gr.Markdown("""
            ### About Cloud GPU Processing
            This application uses InstructPix2Pix, a public GPU-accelerated Space on Hugging Face, to process your images.
            **Benefits:**
            - GPU-accelerated processing without local setup
            - Works on any device with internet access
            - No need to install CUDA or PyTorch
            **How it works:**
            1. Your image is sent to the InstructPix2Pix Space
            2. Your feature selections are converted to text instructions
            3. The Space processes your image using GPU acceleration
            4. The edited image is returned to this interface
            **Note:** Processing may take 10-30 seconds depending on server load.
            """)
        # Event handlers
        def update_modification_choices(feature):
            # Also reset the selected value: the previous selection may not
            # exist in the new feature's presets, and a stale value would
            # raise a KeyError in INSTRUCTION_MAPPING on the next edit.
            options = MODIFICATION_PRESETS[feature]
            return gr.Dropdown(choices=options, value=options[0])
        feature_type.change(
            fn=update_modification_choices,
            inputs=feature_type,
            outputs=modification_type
        )
        edit_button.click(
            fn=process_with_instructpix2pix,
            inputs=[
                input_image,
                feature_type,
                modification_type,
                intensity,
                custom_prompt,
                use_custom_prompt
            ],
            outputs=[output_image, status_text]
        )
        def reset_image():
            # Clears only the output pane; the uploaded input is kept so the
            # user can retry with different settings.
            return None, "Image reset."
        reset_button.click(
            fn=reset_image,
            inputs=[],
            outputs=[output_image, status_text]
        )
        # Add ethical usage notice
        gr.Markdown("""
        ## Ethical Usage Notice
        This tool is designed for creative and personal use. Please ensure:
        - You have appropriate rights to edit the images you upload
        - You use this tool responsibly and respect the dignity of individuals
        - You understand that AI-generated modifications are artificial and may not represent reality
        By using this application, you agree to these terms.
        """)
    return app
# Entry point: build the UI and serve it on all interfaces (no public share link).
if __name__ == "__main__":
    demo = create_ui()
    demo.launch(server_name="0.0.0.0", share=False)