import gradio as gr
import numpy as np
from PIL import Image, ImageDraw, ImageFilter
import cv2
import os
from io import BytesIO
import base64


class VirtualTryOnRoom:
    """Stateful backend for a simple image-compositing virtual try-on demo.

    Holds the uploaded person image, clothing image, and the last composite
    result as HxWx3 uint8 RGB numpy arrays, and exposes the operations that
    the Gradio UI callbacks are wired to. The "try-on" is a heuristic alpha
    blend of the clothing over the person's upper body inside a soft oval
    mask — no pose estimation or segmentation model is involved.
    """

    def __init__(self):
        # All three are HxWx3 uint8 RGB numpy arrays once loaded, else None.
        self.person_img = None
        self.cloth_img = None
        self.result_img = None

    def preprocess_image(self, image, target_size=(512, 512)):
        """Normalize an uploaded image to RGB at a consistent size.

        Args:
            image: numpy array as delivered by the Gradio image widget,
                or None when nothing was uploaded.
            target_size: (width, height) to resample to.

        Returns:
            HxWx3 uint8 numpy array, or None if ``image`` is None.
        """
        if image is None:
            return None
        pil_img = Image.fromarray(image)
        # Force 3-channel RGB: RGBA or grayscale uploads would otherwise
        # crash cv2.cvtColor and the per-channel blending downstream.
        pil_img = pil_img.convert("RGB")
        pil_img = pil_img.resize(target_size, Image.Resampling.LANCZOS)
        return np.array(pil_img)

    def load_person_image(self, image):
        """Load and preprocess the person image; returns (image, status)."""
        if image is None:
            return None, "Please upload a person image first!"
        self.person_img = self.preprocess_image(image)
        return self.person_img, f"Person image loaded successfully! Size: {self.person_img.shape[1]}x{self.person_img.shape[0]}"

    def load_cloth_image(self, image):
        """Load and preprocess the clothing image; returns (image, status)."""
        if image is None:
            return None, "Please upload a clothing image first!"
        self.cloth_img = self.preprocess_image(image)
        return self.cloth_img, f"Clothing image loaded successfully! Size: {self.cloth_img.shape[1]}x{self.cloth_img.shape[0]}"

    def extract_person_mask(self, image):
        """Build a soft, centered oval mask approximating the person region.

        This is a heuristic: it assumes the subject is roughly centered in
        the frame (no actual person detection is performed).

        Args:
            image: HxWx3 uint8 RGB numpy array.

        Returns:
            HxW float32 mask in [0, 1], feathered at the edges.
        """
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        mask = np.zeros(gray.shape, dtype=np.uint8)
        center_x, center_y = gray.shape[1] // 2, gray.shape[0] // 2
        axes_x, axes_y = gray.shape[1] // 3, gray.shape[0] // 2
        cv2.ellipse(mask, (center_x, center_y), (axes_x, axes_y), 0, 0, 360, 255, -1)
        # Large Gaussian blur feathers the hard ellipse edge so the blend
        # fades out instead of showing a visible seam.
        mask = cv2.GaussianBlur(mask, (51, 51), 0)
        return mask.astype(np.float32) / 255.0

    def _composite_clothing(self, opacity, vertical_offset):
        """Blend the clothing onto the person image (shared core).

        Args:
            opacity: blend strength in [0, 1] applied inside the person mask.
            vertical_offset: slider value; each unit shifts the garment
                50 pixels from the default chest anchor.

        Returns:
            HxWx3 uint8 composite image.
        """
        person = self.person_img
        person_height, person_width = person.shape[:2]
        cloth_height, cloth_width = self.cloth_img.shape[:2]

        # Scale clothing to the person's width, shrunk by 0.8 so it reads
        # as a garment rather than a full-frame overlay.
        scale_width = person_width / cloth_width
        target_height = int(cloth_height * scale_width * 0.8)
        cloth_resized = cv2.resize(self.cloth_img, (person_width, target_height))

        mask = self.extract_person_mask(person)

        # Default anchor: upper body, one sixth of the way down the frame.
        base_y = person_height // 6
        start_y = max(0, base_y + int(vertical_offset * 50))
        end_y = start_y + target_height

        # If the offset pushed the garment completely out of frame, fall
        # back to the default position.
        if start_y >= person_height or end_y <= 0:
            start_y = base_y
            end_y = start_y + target_height

        # Clip the overlay to the visible rows. (Fix: the original skipped
        # blending entirely when the garment ran past the bottom edge.)
        person_y0 = max(0, start_y)
        person_y1 = min(person_height, end_y)
        cloth_y0 = person_y0 - start_y
        cloth_y1 = cloth_y0 + (person_y1 - person_y0)

        result = person.copy()
        if person_y1 > person_y0:
            # Vectorized alpha blend over all channels at once; broadcast
            # the 2-D mask across the channel axis.
            cloth_f = cloth_resized[cloth_y0:cloth_y1].astype(np.float32)
            person_f = person[person_y0:person_y1].astype(np.float32)
            alpha = (opacity * mask[person_y0:person_y1])[:, :, np.newaxis]
            blended = person_f * (1.0 - alpha) + cloth_f * alpha
            result[person_y0:person_y1] = np.clip(blended, 0, 255).astype(np.uint8)
        return result

    def simple_virtual_tryon(self):
        """Run the try-on at default opacity/position; returns (image, status)."""
        if self.person_img is None or self.cloth_img is None:
            return None, "Please upload both person and clothing images first!"
        try:
            result = self._composite_clothing(opacity=0.7, vertical_offset=0.0)
            self.result_img = result
            return result, "Virtual try-on completed! Use the sliders below for fine-tuning."
        except Exception as e:
            return None, f"Error during virtual try-on: {str(e)}"

    def adjust_opacity(self, opacity):
        """Re-blend with a user-chosen opacity; returns (image, status)."""
        if self.person_img is None or self.result_img is None:
            return None, "Please complete the virtual try-on first!"
        try:
            result = self._composite_clothing(opacity=opacity, vertical_offset=0.0)
            return result, f"Opacity adjusted to {opacity:.2f}"
        except Exception as e:
            return None, f"Error adjusting opacity: {str(e)}"

    def adjust_position(self, vertical_offset):
        """Re-blend with a vertical position offset; returns (image, status)."""
        if self.person_img is None or self.cloth_img is None:
            return None, "Please complete the virtual try-on first!"
        try:
            result = self._composite_clothing(opacity=0.7, vertical_offset=vertical_offset)
            return result, f"Position adjusted (vertical offset: {vertical_offset:.2f})"
        except Exception as e:
            return None, f"Error adjusting position: {str(e)}"

    def reset_app(self):
        """Clear all stored images; returns values for the UI outputs."""
        self.person_img = None
        self.cloth_img = None
        self.result_img = None
        return None, None, None, "Application reset! Please upload new images to start."
tryon_app = VirtualTryOnRoom() # Create the Gradio interface with gr.Blocks(title="Virtual Cloth Trial Room", theme=gr.themes.Soft()) as demo: # Header gr.HTML("""
""") # Instructions gr.HTML("""Note: This is a simplified demo. For best results, use clear photos with good lighting.
This virtual try-on system uses computer vision and image processing techniques to simulate clothing try-on.
Tip: For better results, use high-quality images with clear contrast between person and background.