Spaces:
Running
Running
Your Name
Implement remote image processing via a local GPU server: add server-connection checks and performance settings to the editing function, update the UI with local-server setup instructions, and modify the main editing function to handle server requests.
9cf1d05 | import os | |
| import gradio as gr | |
| import torch | |
| from PIL import Image | |
| import numpy as np | |
| from models.ledits_model import LEDITSModel | |
| from utils.image_processing import preprocess_image, postprocess_image | |
| from utils.feature_detection import detect_features, create_mask | |
# Initialize models with GPU support
def initialize_models(device=None):
    """Return a LEDITSModel bound to *device*, constructing it at most once.

    Args:
        device: "cuda" or "cpu". When None, CUDA is auto-selected if
            available.

    Returns:
        The LEDITSModel instance for *device*. Instances are cached per
        device because edit_image() calls this once per request and model
        construction reloads the weights — far too slow to repeat.
    """
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    # Function-attribute cache keyed by device string; populated lazily.
    cache = initialize_models.__dict__.setdefault("_cache", {})
    if device not in cache:
        print(f"Using device: {device}")
        if device == "cuda":
            print(f"GPU: {torch.cuda.get_device_name(0)}")
            print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
        cache[device] = LEDITSModel(device=device)
    return cache[device]
def _resize_for_resolution(image_np, resolution):
    """Downscale *image_np* so its longest side fits the chosen resolution.

    Args:
        image_np: HxW(xC) numpy image array.
        resolution: "Original" (no-op) or a string like "512x512"; only the
            first number is used, as the maximum side length.

    Returns:
        A numpy array with aspect ratio preserved; never upscaled (min()
        caps each new side at its original size).
    """
    if resolution == "Original":
        return image_np
    max_dim = int(resolution.split("x")[0])
    height, width = image_np.shape[:2]
    if height > width:
        new_height = min(max_dim, height)
        new_width = int(width * (new_height / height))
    else:
        new_width = min(max_dim, width)
        new_height = int(height * (new_width / width))
    resized = Image.fromarray(image_np).resize((new_width, new_height), Image.LANCZOS)
    return np.array(resized)


# Main editing function
def edit_image(image, feature_type, modification_type, intensity,
               num_inference_steps, guidance_scale, resolution,
               custom_prompt="", use_custom_prompt=False):
    """Run one feature edit: detect the target region, mask it, and apply
    a LEDITS edit guided by a prompt.

    Args:
        image: PIL.Image or numpy array; None yields an error message.
        feature_type: feature to edit (e.g. "Eyes"); also used in the
            default prompt.
        modification_type: how to change it (e.g. "Larger").
        intensity: edit strength passed through to the model.
        num_inference_steps: diffusion sampling steps.
        guidance_scale: classifier-free guidance strength.
        resolution: "Original" or "<N>x<N>" processing cap.
        custom_prompt: free-text prompt, used only when use_custom_prompt
            is True and the text is non-empty.
        use_custom_prompt: toggle for custom_prompt.

    Returns:
        (result_image, status_message). On failure the ORIGINAL image is
        returned so the UI keeps displaying something sensible.
    """
    if image is None:
        return None, "Please upload an image first."
    try:
        # Normalize to a numpy array regardless of Gradio's input type.
        image_np = np.array(image) if isinstance(image, Image.Image) else image
        image_np = _resize_for_resolution(image_np, resolution)
        processed_image = preprocess_image(image_np)
        # Locate the requested feature and build an edit mask around it.
        features = detect_features(processed_image)
        mask = create_mask(processed_image, feature_type, features)
        ledits_model = initialize_models()
        # The custom prompt wins only when both the checkbox and the text
        # are set; otherwise fall back to "<feature> <modification>".
        if use_custom_prompt and custom_prompt:
            prompt = custom_prompt
        else:
            prompt = f"{feature_type} {modification_type}"
        edited_image = ledits_model.edit_image(
            processed_image,
            mask,
            prompt,
            intensity=intensity,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps
        )
        # Blend the edit back into the untouched regions via the mask.
        final_image = postprocess_image(edited_image, processed_image, mask)
        return final_image, "Edit completed successfully."
    except Exception as e:
        import traceback
        traceback.print_exc()
        return image, f"Error during editing: {str(e)}"
# Create the server interface
def create_server():
    """Build the local-GPU-server Gradio app.

    Returns:
        A gr.Blocks app whose "Apply Edit" button runs edit_image(). The
        click event also registers an HTTP API endpoint (api_name) so the
        remote Hugging Face Space can call this server.
    """

    def get_gpu_info():
        """Human-readable summary of the available accelerator."""
        if torch.cuda.is_available():
            return f"GPU: {torch.cuda.get_device_name(0)}\nVRAM: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB\nCUDA Version: {torch.version.cuda}"
        else:
            return "No GPU detected. Using CPU mode."

    with gr.Blocks(title="AI-Powered Facial & Body Feature Editor - Local Server") as server:
        gr.Markdown("# AI-Powered Facial & Body Feature Editor - Local GPU Server")
        gr.Markdown("This is the local GPU server component. Keep this running to process edits from the Hugging Face Space.")
        # Input components
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input Image", type="pil")
                feature_type = gr.Dropdown(
                    choices=["Eyes", "Nose", "Lips", "Face Shape", "Hair", "Body"],
                    label="Feature Type",
                    value="Eyes"
                )
                modification_type = gr.Dropdown(
                    choices=["Larger", "Smaller", "Change Color", "Change Shape"],
                    label="Modification Type",
                    value="Larger"
                )
                intensity = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.5,
                    step=0.1,
                    label="Intensity"
                )
                custom_prompt = gr.Textbox(
                    label="Custom Prompt",
                    placeholder="e.g., blue eyes with long eyelashes"
                )
                use_custom_prompt = gr.Checkbox(
                    label="Use Custom Prompt",
                    value=False
                )
                # Performance settings
                with gr.Group():
                    gr.Markdown("### Performance Settings")
                    num_inference_steps = gr.Slider(
                        minimum=5,
                        maximum=50,
                        value=20,
                        step=1,
                        label="Inference Steps (lower = faster, higher = better quality)"
                    )
                    guidance_scale = gr.Slider(
                        minimum=1.0,
                        maximum=15.0,
                        value=7.5,
                        step=0.5,
                        label="Guidance Scale (lower = more creative, higher = more accurate)"
                    )
                    resolution = gr.Dropdown(
                        choices=["Original", "512x512", "768x768", "1024x1024"],
                        label="Processing Resolution",
                        value="512x512"
                    )
                edit_button = gr.Button("Apply Edit", variant="primary")
            with gr.Column():
                output_image = gr.Image(label="Output Image", type="pil")
                status_text = gr.Textbox(label="Status", interactive=False)
                # GPU info display. Populate at construction time: assigning
                # .value after the component is created (as the previous
                # version did) is not reflected in the rendered app.
                gpu_info = gr.Textbox(label="GPU Information", interactive=False, value=get_gpu_info())
        # FIX: the previous version nested a gr.Interface inside this Blocks
        # context over the already-created components; inside Blocks that
        # neither renders nor wires anything, so the UI had no way to trigger
        # edit_image. A Button.click event both drives the UI and registers
        # an API-callable endpoint for the remote Space.
        edit_button.click(
            fn=edit_image,
            inputs=[
                input_image,
                feature_type,
                modification_type,
                intensity,
                num_inference_steps,
                guidance_scale,
                resolution,
                custom_prompt,
                use_custom_prompt
            ],
            outputs=[output_image, status_text],
            api_name="edit_image"
        )
    return server
# Launch the server
def _report_device():
    """Print which accelerator (if any) this process will use."""
    if torch.cuda.is_available():
        print(f"GPU detected: {torch.cuda.get_device_name(0)}")
        print(f"VRAM: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
    else:
        print("No GPU detected. Using CPU mode.")


if __name__ == "__main__":
    # Check for GPU
    _report_device()
    # Create and launch server; queue() serializes requests so concurrent
    # edits don't contend for the GPU, and share=True exposes a public URL
    # the Hugging Face Space can reach.
    server = create_server()
    server.queue()
    server.launch(server_name="0.0.0.0", share=True)