File size: 14,243 Bytes
19b11aa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
import gradio as gr
import numpy as np
from PIL import Image, ImageDraw, ImageFilter
import cv2
import os
from io import BytesIO
import base64

class VirtualTryOnRoom:
    """Stateful backend for the virtual clothing try-on demo.

    Holds the most recently uploaded person and clothing images (RGB uint8
    numpy arrays) plus the last composited result, and implements the
    operations the Gradio UI callbacks invoke. Each UI-facing method
    returns an ``(image_or_None, status_message)`` pair.
    """

    def __init__(self):
        # All three are RGB uint8 numpy arrays once set; None until then.
        self.person_img = None
        self.cloth_img = None
        self.result_img = None

    def preprocess_image(self, image, target_size=(512, 512)):
        """Resize an uploaded image to a consistent working size.

        Args:
            image: numpy array from a Gradio image component, or None.
            target_size: (width, height) to resize to.

        Returns:
            RGB uint8 numpy array of shape (height, width, 3), or None
            when no image was provided.
        """
        if image is None:
            return None
        pil_image = Image.fromarray(image)
        # Normalize to 3-channel RGB so RGBA or grayscale uploads do not
        # break cv2.cvtColor(..., COLOR_RGB2GRAY) or the 3-channel
        # blending done downstream.
        if pil_image.mode != "RGB":
            pil_image = pil_image.convert("RGB")
        pil_image = pil_image.resize(target_size, Image.Resampling.LANCZOS)
        return np.array(pil_image)

    def load_person_image(self, image):
        """Store the person photo and report its size.

        Returns (preprocessed image, status message); the image slot is
        None on failure so the UI component clears.
        """
        if image is None:
            return None, "Please upload a person image first!"

        self.person_img = self.preprocess_image(image)
        return self.person_img, f"Person image loaded successfully! Size: {self.person_img.shape[1]}x{self.person_img.shape[0]}"

    def load_cloth_image(self, image):
        """Store the clothing photo and report its size.

        Mirrors load_person_image for the clothing slot.
        """
        if image is None:
            return None, "Please upload a clothing image first!"

        self.cloth_img = self.preprocess_image(image)
        return self.cloth_img, f"Clothing image loaded successfully! Size: {self.cloth_img.shape[1]}x{self.cloth_img.shape[0]}"

    def extract_person_mask(self, image):
        """Build a soft elliptical mask approximating the person region.

        No real segmentation is performed: the mask is a filled ellipse
        centered in the frame (the subject is assumed roughly centered),
        Gaussian-blurred so the clothing blend fades out at the edges.

        Returns:
            float32 array in [0, 1] with the same height/width as image.
        """
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        mask = np.zeros(gray.shape, dtype=np.uint8)
        center_x, center_y = gray.shape[1] // 2, gray.shape[0] // 2
        axes_x, axes_y = gray.shape[1] // 3, gray.shape[0] // 2

        cv2.ellipse(mask, (center_x, center_y), (axes_x, axes_y), 0, 0, 360, 255, -1)

        # Large blur kernel softens the hard ellipse boundary.
        mask = cv2.GaussianBlur(mask, (51, 51), 0)
        return mask.astype(np.float32) / 255.0

    def _blend_clothing(self, opacity=0.7, vertical_offset=0.0):
        """Composite the clothing onto the person image.

        Shared by simple_virtual_tryon, adjust_opacity and adjust_position,
        which previously duplicated this pipeline. The clothing is scaled
        to the person's width (at 80% of the proportional height),
        anchored on the upper body shifted by ``vertical_offset``, and
        alpha-blended through the soft person mask. Overlay regions that
        extend past the frame are clipped rather than silently skipped.

        Args:
            opacity: blend strength in [0, 1] for the clothing layer.
            vertical_offset: slider value; each unit shifts 50 pixels.

        Returns:
            A new RGB uint8 numpy array; stored images are not modified.
        """
        result = self.person_img.copy()
        person_height, person_width = self.person_img.shape[:2]
        cloth_height, cloth_width = self.cloth_img.shape[:2]

        # Scale clothing to the person's width, slightly shrunk (x0.8).
        scale_width = person_width / cloth_width
        target_height = int(cloth_height * scale_width * 0.8)
        cloth_resized = cv2.resize(self.cloth_img, (person_width, target_height))

        person_mask = self.extract_person_mask(self.person_img)

        # Anchor on the upper body, then apply the user's offset.
        base_y = person_height // 6
        start_y = max(0, base_y + int(vertical_offset * 50))
        end_y = start_y + target_height
        if start_y >= person_height or end_y <= 0:
            # Offset pushed the clothing fully off-screen; fall back to
            # the default anchor.
            start_y = base_y
            end_y = min(person_height, start_y + target_height)

        # Clip the overlay to the canvas on both the cloth and person side.
        cloth_y_start = max(0, -start_y)
        cloth_y_end = min(target_height, person_height - start_y)
        person_y_start = max(0, start_y)
        person_y_end = min(person_height, end_y)

        if cloth_y_end > cloth_y_start and person_y_end > person_y_start:
            cloth_f = cloth_resized[cloth_y_start:cloth_y_end].astype(np.float32)
            person_f = self.person_img[person_y_start:person_y_end].astype(np.float32)
            # Per-pixel alpha from the soft mask, broadcast over the three
            # color channels (replaces the old per-channel Python loop).
            alpha = (opacity * person_mask[person_y_start:person_y_end])[:, :, np.newaxis]
            blended = person_f * (1.0 - alpha) + cloth_f * alpha
            result[person_y_start:person_y_end] = np.clip(blended, 0, 255).astype(np.uint8)

        return result

    def simple_virtual_tryon(self):
        """Run the try-on at default opacity and cache the result."""
        if self.person_img is None or self.cloth_img is None:
            return None, "Please upload both person and clothing images first!"

        try:
            result = self._blend_clothing(opacity=0.7)
            self.result_img = result
            return result, "Virtual try-on completed! Use the sliders below for fine-tuning."

        except Exception as e:
            return None, f"Error during virtual try-on: {str(e)}"

    def adjust_opacity(self, opacity):
        """Re-composite with a caller-chosen clothing opacity."""
        # Also guard cloth_img: the original only checked person_img and
        # result_img, then dereferenced self.cloth_img unchecked.
        if self.person_img is None or self.cloth_img is None or self.result_img is None:
            return None, "Please complete the virtual try-on first!"

        try:
            result = self._blend_clothing(opacity=opacity)
            return result, f"Opacity adjusted to {opacity:.2f}"

        except Exception as e:
            return None, f"Error adjusting opacity: {str(e)}"

    def adjust_position(self, vertical_offset):
        """Re-composite with the clothing shifted vertically."""
        if self.person_img is None or self.cloth_img is None:
            return None, "Please complete the virtual try-on first!"

        try:
            result = self._blend_clothing(opacity=0.7, vertical_offset=vertical_offset)
            return result, f"Position adjusted (vertical offset: {vertical_offset:.2f})"

        except Exception as e:
            return None, f"Error adjusting position: {str(e)}"

    def reset_app(self):
        """Clear all stored images and return cleared UI values."""
        self.person_img = None
        self.cloth_img = None
        self.result_img = None
        return None, None, None, "Application reset! Please upload new images to start."

# Single shared backend instance; the Gradio callbacks below mutate its
# image state.
# NOTE(review): one instance is shared across all browser sessions, so
# concurrent users would overwrite each other's uploads — confirm this is
# acceptable for a demo deployment.
tryon_app = VirtualTryOnRoom()

# Create the Gradio interface
with gr.Blocks(title="Virtual Cloth Trial Room", theme=gr.themes.Soft()) as demo:
    
    # Header
    gr.HTML("""
    <div style="text-align: center; padding: 20px; background: linear-gradient(45deg, #667eea 0%, #764ba2 100%); border-radius: 10px; margin-bottom: 20px;">
        <h1 style="color: white; margin: 0; font-size: 2.5em;">Virtual Cloth Trial Room</h1>
        <p style="color: white; margin: 10px 0; font-size: 1.2em;">Try on clothes virtually with AI-powered technology</p>
        <div style="color: white; margin-top: 15px;">
            <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #ffeb3b; text-decoration: none; font-weight: bold;">Built with anycoder</a>
        </div>
    </div>
    """)
    
    # Instructions
    gr.HTML("""
    <div style="background: #f8f9fa; padding: 20px; border-radius: 10px; margin-bottom: 20px; border-left: 5px solid #667eea;">
        <h3 style="color: #333; margin-top: 0;">How to use:</h3>
        <ol style="color: #555; line-height: 1.6;">
            <li>Upload a photo of yourself (preferably standing upright)</li>
            <li>Upload an image of the clothing you want to try on</li>
            <li>Click "Try On" to see the virtual try-on result</li>
            <li>Use the sliders below to adjust opacity and position</li>
        </ol>
        <p style="color: #777; font-size: 0.9em; margin-bottom: 0;">
            <strong>Note:</strong> This is a simplified demo. For best results, use clear photos with good lighting.
        </p>
    </div>
    """)
    
    # Side-by-side upload columns; type="numpy" matches the backend, which
    # expects raw numpy arrays.
    with gr.Row():
        with gr.Column(scale=1):
            gr.HTML("<h3 style='text-align: center; color: #333;'>Upload Person Photo</h3>")
            person_input = gr.Image(label="Person Image", type="numpy", height=400)
            person_status = gr.Textbox(label="Status", interactive=False)
        
        with gr.Column(scale=1):
            gr.HTML("<h3 style='text-align: center; color: #333;'>Upload Clothing</h3>")
            cloth_input = gr.Image(label="Clothing Image", type="numpy", height=400)
            cloth_status = gr.Textbox(label="Status", interactive=False)
    
    with gr.Row():
        with gr.Column(scale=1):
            tryon_button = gr.Button("🛍️ Try On Clothing", variant="primary", size="lg")
        
        with gr.Column(scale=1):
            reset_button = gr.Button("🔄 Reset", variant="secondary", size="lg")
    
    with gr.Row():
        with gr.Column():
            gr.HTML("<h3 style='text-align: center; color: #333;'>Result</h3>")
            result_output = gr.Image(label="Virtual Try-On Result", height=500)
            result_status = gr.Textbox(label="Status", interactive=False)
    
    # Adjustment controls
    with gr.Row():
        with gr.Column():
            gr.HTML("<h4 style='text-align: center; color: #333;'>Adjustments</h4>")
            # Default 0.7 mirrors the fixed opacity used by the one-click
            # try-on, so the slider starts in sync with the result.
            opacity_slider = gr.Slider(
                minimum=0.1, 
                maximum=1.0, 
                value=0.7, 
                step=0.05, 
                label="Clothing Opacity",
                info="Adjust how transparent the clothing appears"
            )
        
        with gr.Column():
            position_slider = gr.Slider(
                minimum=-2.0, 
                maximum=2.0, 
                value=0.0, 
                step=0.1, 
                label="Vertical Position",
                info="Move clothing up or down"
            )
    
    # Image loading handlers
    # Each change handler echoes the preprocessed image back into the same
    # component and writes a status message.
    person_input.change(
        fn=tryon_app.load_person_image,
        inputs=person_input,
        outputs=[person_input, person_status]
    )
    
    cloth_input.change(
        fn=tryon_app.load_cloth_image,
        inputs=cloth_input,
        outputs=[cloth_input, cloth_status]
    )
    
    # Button handlers
    # No `inputs`: simple_virtual_tryon reads the images already stored on
    # tryon_app by the change handlers above.
    tryon_button.click(
        fn=tryon_app.simple_virtual_tryon,
        outputs=[result_output, result_status]
    )
    
    opacity_slider.change(
        fn=tryon_app.adjust_opacity,
        inputs=opacity_slider,
        outputs=[result_output, result_status]
    )
    
    position_slider.change(
        fn=tryon_app.adjust_position,
        inputs=position_slider,
        outputs=[result_output, result_status]
    )
    
    reset_button.click(
        fn=tryon_app.reset_app,
        outputs=[person_input, cloth_input, result_output, result_status]
    )
    
    # Footer
    gr.HTML("""
    <div style="text-align: center; padding: 20px; background: #f1f3f4; border-radius: 10px; margin-top: 30px;">
        <p style="color: #666; margin: 0;">
            This virtual try-on system uses computer vision and image processing techniques to simulate clothing try-on.
            <br>
            <strong>Tip:</strong> For better results, use high-quality images with clear contrast between person and background.
        </p>
    </div>
    """)

if __name__ == "__main__":
    # share=True requests a public Gradio tunnel; debug=True blocks and
    # streams errors to the console.
    demo.launch(share=True, debug=True)