Harsha1845 commited on
Commit
370c714
·
verified ·
1 Parent(s): 3a63cb8

Upload 16 files

Browse files
.env.local ADDED
@@ -0,0 +1 @@
 
 
1
+ GEMINI_API_KEY=PLACEHOLDER_API_KEY
README.md CHANGED
@@ -1,10 +1,20 @@
1
- ---
2
- title: StyleGenie Tryon
3
- emoji: 👁
4
- colorFrom: purple
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
1
+ <div align="center">
2
+ <img width="1200" height="475" alt="GHBanner" src="https://github.com/user-attachments/assets/0aa67016-6eaf-458a-adb2-6e31a0763ed6" />
3
+ </div>
4
+
5
+ # Run and deploy your AI Studio app
6
+
7
+ This contains everything you need to run your app locally.
8
+
9
+ View your app in AI Studio: https://ai.studio/apps/drive/1ShSNx9xZ78b_bOyqzEXVeLKn_m1VDIrC
10
+
11
+ ## Run Locally
12
+
13
+ **Prerequisites:** Node.js
14
+
15
+
16
+ 1. Install dependencies:
17
+ `npm install`
18
+ 2. Set the `GEMINI_API_KEY` in [.env.local](.env.local) to your Gemini API key (never commit a real key to version control)
19
+ 3. Run the app:
20
+ `npm run dev`
index.html ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <link rel="icon" type="image/svg+xml" href="/vite.svg" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>StyleGenie - AI Virtual Try-On</title>
  <!-- Tailwind Play CDN: fine for prototyping; swap for a built stylesheet in production. -->
  <script src="https://cdn.tailwindcss.com"></script>
  <script>
    // Inline Tailwind theme extension: defines the "brand" color palette
    // referenced throughout the components (brand-500, brand-600, ...).
    tailwind.config = {
      theme: {
        extend: {
          colors: {
            brand: {
              50: '#f0f9ff',
              100: '#e0f2fe',
              500: '#0ea5e9',
              600: '#0284c7',
              900: '#0c4a6e',
            }
          }
        }
      }
    }
  </script>
  <!--
    Import map for the no-bundler (AI Studio) runtime.
    Versions are kept in sync with package.json so the CDN build and the
    Vite/node_modules build resolve the same major versions. Build-time
    tools (vite, @vitejs/plugin-react) are intentionally NOT mapped: they
    are never imported by browser code.
    The @mediapipe/tasks-vision version matches the wasm CDN URL pinned
    in CameraView.tsx (0.10.17).
  -->
  <script type="importmap">
  {
    "imports": {
      "react": "https://esm.sh/react@^18.3.1",
      "react-dom/": "https://esm.sh/react-dom@^18.3.1/",
      "react/": "https://esm.sh/react@^18.3.1/",
      "@google/genai": "https://esm.sh/@google/genai@^1.33.0",
      "lucide-react": "https://esm.sh/lucide-react@^0.344.0",
      "@mediapipe/tasks-vision": "https://esm.sh/@mediapipe/tasks-vision@^0.10.17"
    }
  }
  </script>
  <link rel="stylesheet" href="/index.css">
</head>
<body class="bg-slate-50 text-slate-900 antialiased">
  <div id="root"></div>
  <script type="module" src="/index.tsx"></script>
</body>
</html>
metadata.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "StyleGenie AI Try-On",
3
+ "description": "Virtual wardrobe try-on experience using Generative AI. Select an item, snap a photo, and see yourself wearing it realistically.",
4
+ "requestFramePermissions": [
5
+ "camera"
6
+ ]
7
+ }
package.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "stylegenie-app",
3
+ "private": true,
4
+ "version": "0.0.0",
5
+ "type": "module",
6
+ "scripts": {
7
+ "dev": "vite",
8
+ "build": "vite build",
9
+ "preview": "vite preview"
10
+ },
11
+ "dependencies": {
12
+ "react": "^18.3.1",
13
+ "react-dom": "^18.3.1",
14
+ "@google/genai": "^0.1.2",
15
+ "lucide-react": "^0.344.0",
16
+ "@mediapipe/tasks-vision": "^0.10.17"
17
+ },
18
+ "devDependencies": {
19
+ "@types/react": "^18.2.66",
20
+ "@types/react-dom": "^18.2.22",
21
+ "@vitejs/plugin-react": "^4.2.1",
22
+ "typescript": "^5.2.2",
23
+ "vite": "^5.2.0"
24
+ }
25
+ }
src/App.tsx ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import React, { useState } from 'react';
import { Product, AppView } from './types';
import { PRODUCTS } from './constants';
import { generateTryOn } from './services/geminiService';
import Navigation from './components/Navigation';
import ProductCard from './components/ProductCard';
import CameraView from './components/CameraView';
import ResultView from './components/ResultView';

/**
 * Root component: a three-screen state machine
 * (CATALOG -> CAMERA -> RESULT) driven by `currentView`.
 *
 * Flow: the user picks a product from the catalog, captures a photo in the
 * camera view, and the capture immediately kicks off an AI try-on generation
 * whose progress/result/error is shown in the result view.
 */
function App() {
  const [currentView, setCurrentView] = useState<AppView>(AppView.CATALOG);
  const [selectedProduct, setSelectedProduct] = useState<Product | null>(null);
  // Data-URL of the user's captured photo (set by CameraView).
  const [userImage, setUserImage] = useState<string | null>(null);
  // Data-URL/URL of the AI-generated try-on image, null until generation succeeds.
  const [generatedResult, setGeneratedResult] = useState<string | null>(null);
  const [isGenerating, setIsGenerating] = useState(false);
  const [generationError, setGenerationError] = useState<string | null>(null);

  /** Catalog -> Camera: remember the chosen product and open the camera. */
  const handleSelectProduct = (product: Product) => {
    setSelectedProduct(product);
    setCurrentView(AppView.CAMERA);
  };

  /** Any screen -> Catalog: reset all per-session state. */
  const handleBackToCatalog = () => {
    setCurrentView(AppView.CATALOG);
    setSelectedProduct(null);
    setUserImage(null);
    setGeneratedResult(null);
    setGenerationError(null);
  };

  /**
   * Camera capture callback. Switches to the result screen first so the
   * loading UI is visible while generation runs.
   */
  const handleCapture = async (imageSrc: string) => {
    setUserImage(imageSrc);
    setCurrentView(AppView.RESULT);

    // Automatically trigger generation
    if (selectedProduct) {
      await processTryOn(imageSrc, selectedProduct);
    }
  };

  /**
   * Runs the AI try-on for a captured photo and product, mapping any
   * failure to a user-friendly error message.
   */
  const processTryOn = async (imageSrc: string, product: Product) => {
    setIsGenerating(true);
    setGenerationError(null);
    try {
      const resultImage = await generateTryOn(imageSrc, product);
      setGeneratedResult(resultImage);
    } catch (error: unknown) {
      // Details are logged for debugging; the user only sees a generic message.
      console.error("Try-on failed", error);
      setGenerationError("Sorry, we couldn't generate the try-on image. Please try a different pose or lighting.");
    } finally {
      setIsGenerating(false);
    }
  };

  /** Result -> Camera: keep the product, discard the previous attempt. */
  const handleRetake = () => {
    setCurrentView(AppView.CAMERA);
    setGeneratedResult(null);
    setGenerationError(null);
  };

  return (
    <div className="min-h-screen bg-slate-50 font-sans text-slate-900">

      {currentView === AppView.CATALOG && (
        <>
          <Navigation showBack={false} />
          <main className="max-w-7xl mx-auto p-4 md:p-6 lg:p-8">
            <div className="mb-8 text-center max-w-2xl mx-auto">
              <h1 className="text-3xl md:text-4xl font-bold text-gray-900 mb-4">Virtual Wardrobe</h1>
              <p className="text-gray-500 text-lg">Select any item to virtually try it on instantly using our advanced AI technology.</p>
            </div>

            <div className="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4 md:gap-6">
              {PRODUCTS.map(product => (
                <ProductCard
                  key={product.id}
                  product={product}
                  onTryOn={handleSelectProduct}
                />
              ))}
            </div>
          </main>
        </>
      )}

      {currentView === AppView.CAMERA && selectedProduct && (
        <CameraView
          product={selectedProduct}
          onCapture={handleCapture}
          onClose={handleBackToCatalog}
        />
      )}

      {currentView === AppView.RESULT && selectedProduct && userImage && (
        <>
          <Navigation showBack={true} onBack={handleBackToCatalog} />
          <ResultView
            product={selectedProduct}
            originalImage={userImage}
            generatedImage={generatedResult}
            loading={isGenerating}
            error={generationError}
            onRetake={handleRetake}
          />
        </>
      )}
    </div>
  );
}

export default App;
src/components/CameraView.tsx ADDED
@@ -0,0 +1,377 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React, { useRef, useEffect, useState, useCallback } from 'react';
2
+ import { X, RefreshCw, Image as ImageIcon, AlertCircle, Layers, ScanFace, BoxSelect } from 'lucide-react';
3
+ import { Product, ProductCategory } from '../types';
4
+ import { FilesetResolver, PoseLandmarker } from '@mediapipe/tasks-vision';
5
+
6
+ interface CameraViewProps {
7
+ product: Product;
8
+ onCapture: (imageSrc: string) => void;
9
+ onClose: () => void;
10
+ }
11
+
12
+ const CameraView: React.FC<CameraViewProps> = ({ product, onCapture, onClose }) => {
13
+ const videoRef = useRef<HTMLVideoElement>(null);
14
+ const canvasRef = useRef<HTMLCanvasElement>(null);
15
+ const lastVideoTimeRef = useRef<number>(-1);
16
+
17
+ // State
18
+ const [stream, setStream] = useState<MediaStream | null>(null);
19
+ const [error, setError] = useState<string | null>(null);
20
+ const [countdown, setCountdown] = useState<number | null>(null);
21
+ const [isCameraReady, setIsCameraReady] = useState(false);
22
+ const [isTracking, setIsTracking] = useState(false);
23
+
24
+ // AR Transform State (Calculated in real-time)
25
+ const [arStyle, setArStyle] = useState<React.CSSProperties>({
26
+ opacity: 0, // Hide until tracked
27
+ transform: 'translate(-50%, -50%) scale(1) rotate(0deg)',
28
+ top: '50%',
29
+ left: '50%',
30
+ width: '50%',
31
+ });
32
+
33
+ // Manual override state (if tracking fails or user wants to adjust)
34
+ const [manualMode, setManualMode] = useState(false);
35
+ const [manualScale, setManualScale] = useState(1);
36
+ const [manualY, setManualY] = useState(0);
37
+
38
+ const landmarkerRef = useRef<any>(null);
39
+ const requestRef = useRef<number | null>(null);
40
+
41
+ // 1. Initialize Camera
42
+ useEffect(() => {
43
+ let mediaStream: MediaStream | null = null;
44
+
45
+ const startCamera = async () => {
46
+ try {
47
+ if (!navigator.mediaDevices?.getUserMedia) throw new Error("No camera access");
48
+
49
+ mediaStream = await navigator.mediaDevices.getUserMedia({
50
+ video: {
51
+ width: { ideal: 1280 },
52
+ height: { ideal: 720 },
53
+ facingMode: 'user'
54
+ },
55
+ audio: false,
56
+ });
57
+ setStream(mediaStream);
58
+ } catch (err) {
59
+ console.error("Camera Error:", err);
60
+ setError("Could not access camera. Please allow permissions.");
61
+ }
62
+ };
63
+
64
+ startCamera();
65
+
66
+ return () => {
67
+ if (mediaStream) mediaStream.getTracks().forEach(t => t.stop());
68
+ if (requestRef.current) cancelAnimationFrame(requestRef.current);
69
+ if (landmarkerRef.current) landmarkerRef.current.close();
70
+ };
71
+ }, []);
72
+
73
+ // 2. Initialize MediaPipe Pose Landmarker
74
+ useEffect(() => {
75
+ const loadLandmarker = async () => {
76
+ try {
77
+ const vision = await FilesetResolver.forVisionTasks(
78
+ "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.17/wasm"
79
+ );
80
+
81
+ landmarkerRef.current = await PoseLandmarker.createFromOptions(vision, {
82
+ baseOptions: {
83
+ modelAssetPath: `https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_lite/float16/1/pose_landmarker_lite.task`,
84
+ delegate: "GPU"
85
+ },
86
+ runningMode: "VIDEO",
87
+ numPoses: 1
88
+ });
89
+ console.log("AR Engine Loaded");
90
+ } catch (err) {
91
+ console.error("Failed to load AR engine:", err);
92
+ // Fallback to manual mode if AR fails
93
+ setManualMode(true);
94
+ }
95
+ };
96
+
97
+ loadLandmarker();
98
+ }, []);
99
+
100
+ // 3. Real-time Tracking Loop
101
+ const predictWebcam = useCallback(() => {
102
+ if (!landmarkerRef.current || !videoRef.current || !isCameraReady) {
103
+ requestRef.current = requestAnimationFrame(predictWebcam);
104
+ return;
105
+ }
106
+
107
+ const video = videoRef.current;
108
+
109
+ // Performance optimization: Only process if frame changed
110
+ if (video.currentTime !== lastVideoTimeRef.current) {
111
+ lastVideoTimeRef.current = video.currentTime;
112
+ try {
113
+ const result = landmarkerRef.current.detectForVideo(video, performance.now());
114
+
115
+ if (result.landmarks && result.landmarks.length > 0) {
116
+ setIsTracking(true);
117
+ const landmarks = result.landmarks[0]; // First person
118
+ updateOverlay(landmarks);
119
+ } else {
120
+ setIsTracking(false);
121
+ }
122
+ } catch (e) {
123
+ console.warn("Tracking glitch", e);
124
+ }
125
+ }
126
+
127
+ if (!manualMode) {
128
+ requestRef.current = requestAnimationFrame(predictWebcam);
129
+ }
130
+ }, [isCameraReady, manualMode, product.category]);
131
+
132
+ useEffect(() => {
133
+ if (isCameraReady && !manualMode) {
134
+ requestRef.current = requestAnimationFrame(predictWebcam);
135
+ }
136
+ return () => {
137
+ if (requestRef.current) cancelAnimationFrame(requestRef.current);
138
+ };
139
+ }, [isCameraReady, manualMode, predictWebcam]);
140
+
141
+ // 4. Calculate Coordinates & Apply Physics
142
+ const updateOverlay = (landmarks: any[]) => {
143
+ // MediaPipe Landmarks:
144
+ // 11: left_shoulder, 12: right_shoulder, 0: nose, 15: left_wrist, 16: right_wrist
145
+
146
+ let top = 50;
147
+ let left = 50;
148
+ let width = 50;
149
+ let rotation = 0;
150
+
151
+ const lShoulder = landmarks[11];
152
+ const rShoulder = landmarks[12];
153
+ const nose = landmarks[0];
154
+ const lEar = landmarks[7];
155
+ const rEar = landmarks[8];
156
+
157
+ // Calculate Shoulder Width (Screen Space)
158
+ const shoulderDx = (rShoulder.x - lShoulder.x);
159
+ const shoulderDy = (rShoulder.y - lShoulder.y);
160
+ const shoulderDist = Math.sqrt(shoulderDx*shoulderDx + shoulderDy*shoulderDy);
161
+
162
+ // Calculate Body Rotation (Tilt)
163
+ const angleRad = Math.atan2(shoulderDy, shoulderDx);
164
+ const angleDeg = angleRad * (180 / Math.PI);
165
+
166
+ // LOGIC PER CATEGORY
167
+ if (product.category === ProductCategory.SHIRT || product.category === ProductCategory.PANTS) {
168
+ // Anchor to Chest (Midpoint of shoulders)
169
+ left = (lShoulder.x + rShoulder.x) / 2 * 100;
170
+ top = ((lShoulder.y + rShoulder.y) / 2) * 100;
171
+
172
+ // Shirt width is roughly 2.5x shoulder width
173
+ width = shoulderDist * 280; // Multiplier heuristic
174
+ rotation = angleDeg;
175
+
176
+ // Offset down slightly for shirts so it covers torso
177
+ top += 15;
178
+ }
179
+ else if (product.category === ProductCategory.EYEWEAR) {
180
+ // Anchor to Eyes
181
+ const lEye = landmarks[2];
182
+ const rEye = landmarks[5];
183
+ const eyeDist = Math.sqrt(Math.pow(rEye.x - lEye.x, 2) + Math.pow(rEye.y - lEye.y, 2));
184
+
185
+ left = (nose.x * 100);
186
+ top = (nose.y * 100) - 2; // Slightly above nose tip
187
+ width = eyeDist * 350; // Glasses are wider than eye-distance
188
+ rotation = Math.atan2(rEye.y - lEye.y, rEye.x - lEye.x) * (180/Math.PI);
189
+ }
190
+ else if (product.category === ProductCategory.HEADWEAR) {
191
+ // Anchor to Forehead
192
+ left = (nose.x * 100);
193
+ const headTopY = Math.min(lEar.y, rEar.y) - (Math.abs(lEar.x - rEar.x) * 0.8);
194
+ top = (headTopY * 100);
195
+ width = Math.abs(lEar.x - rEar.x) * 250;
196
+ rotation = angleDeg; // Follow head tilt (approx shoulder tilt or calc ears)
197
+ }
198
+
199
+ setArStyle({
200
+ position: 'absolute',
201
+ left: `${left}%`,
202
+ top: `${top}%`,
203
+ width: `${width}%`,
204
+ transform: `translate(-50%, -50%) rotate(${rotation}deg)`,
205
+ opacity: 1,
206
+ transition: 'all 0.1s linear', // Smooth interpolation
207
+ mixBlendMode: 'multiply',
208
+ filter: 'brightness(1.1) contrast(1.1)',
209
+ pointerEvents: 'none'
210
+ });
211
+ };
212
+
213
+ // Video Binding
214
+ useEffect(() => {
215
+ if (videoRef.current && stream) {
216
+ videoRef.current.srcObject = stream;
217
+ }
218
+ }, [stream]);
219
+
220
+ const handleCapture = () => {
221
+ if (countdown !== null) return;
222
+ setCountdown(3);
223
+
224
+ let count = 3;
225
+ const timer = setInterval(() => {
226
+ count--;
227
+ if (count > 0) setCountdown(count);
228
+ else {
229
+ clearInterval(timer);
230
+ setCountdown(null);
231
+ takePhoto();
232
+ }
233
+ }, 1000);
234
+ };
235
+
236
+ const takePhoto = () => {
237
+ if (videoRef.current && canvasRef.current) {
238
+ const video = videoRef.current;
239
+ const canvas = canvasRef.current;
240
+ const context = canvas.getContext('2d');
241
+ if (context) {
242
+ canvas.width = video.videoWidth;
243
+ canvas.height = video.videoHeight;
244
+ context.translate(canvas.width, 0);
245
+ context.scale(-1, 1);
246
+ context.drawImage(video, 0, 0, canvas.width, canvas.height);
247
+ onCapture(canvas.toDataURL('image/jpeg', 0.9));
248
+ }
249
+ }
250
+ };
251
+
252
+ const getFinalStyle = () => {
253
+ if (manualMode) {
254
+ return {
255
+ position: 'absolute' as const,
256
+ top: '50%',
257
+ left: '50%',
258
+ transform: `translate(-50%, -50%) scale(${manualScale}) translateY(${manualY}px)`,
259
+ width: product.category === ProductCategory.EYEWEAR ? '40%' : '60%',
260
+ mixBlendMode: 'multiply' as const,
261
+ opacity: 1
262
+ };
263
+ }
264
+ return arStyle;
265
+ };
266
+
267
+ return (
268
+ <div className="fixed inset-0 z-[60] bg-black flex flex-col">
269
+ {/* Header */}
270
+ <div className="absolute top-0 left-0 right-0 p-4 flex justify-between items-center z-10 bg-gradient-to-b from-black/80 to-transparent">
271
+ <button onClick={onClose} className="p-2 bg-white/10 backdrop-blur-md rounded-full text-white hover:bg-white/20 border border-white/10">
272
+ <X className="w-6 h-6" />
273
+ </button>
274
+ <div className="flex flex-col items-center">
275
+ <span className="text-white/80 text-[10px] uppercase tracking-widest mb-0.5">
276
+ {manualMode ? "Manual Adjust" : isTracking ? "AR Active" : "Scanning..."}
277
+ </span>
278
+ <span className="text-white font-bold text-sm shadow-sm">{product.name}</span>
279
+ </div>
280
+ <button
281
+ onClick={() => setManualMode(!manualMode)}
282
+ className={`p-2 rounded-full border ${manualMode ? 'bg-white text-black' : 'bg-white/10 text-white'} transition-colors`}
283
+ >
284
+ <BoxSelect className="w-5 h-5" />
285
+ </button>
286
+ </div>
287
+
288
+ {/* Main Viewport */}
289
+ <div className="flex-1 relative bg-gray-900 overflow-hidden flex items-center justify-center">
290
+ {!isCameraReady && !error && (
291
+ <div className="absolute inset-0 flex flex-col items-center justify-center z-20 bg-gray-900 text-white gap-3">
292
+ <div className="w-10 h-10 border-4 border-brand-500 border-t-transparent rounded-full animate-spin"></div>
293
+ <p className="text-sm font-medium animate-pulse">Initializing Vision Engine...</p>
294
+ </div>
295
+ )}
296
+
297
+ {error ? (
298
+ <div className="text-white text-center p-6 max-w-sm z-20 flex flex-col items-center">
299
+ <AlertCircle className="w-12 h-12 text-red-500 mb-4" />
300
+ <p className="mb-6 font-medium text-lg">{error}</p>
301
+ <button onClick={() => window.location.reload()} className="bg-white text-black px-6 py-3 rounded-xl font-bold">Reload</button>
302
+ </div>
303
+ ) : (
304
+ <div className="relative w-full h-full flex items-center justify-center">
305
+ {/* The wrapper handles the flipping for BOTH video and overlay */}
306
+ <div className="relative w-full h-full transform -scale-x-100 origin-center">
307
+ <video
308
+ ref={videoRef}
309
+ autoPlay
310
+ playsInline
311
+ muted
312
+ onLoadedData={() => setIsCameraReady(true)}
313
+ className="absolute w-full h-full object-cover"
314
+ />
315
+
316
+ {/* AR OVERLAY */}
317
+ {isCameraReady && (
318
+ <div style={getFinalStyle()} className="pointer-events-none z-10">
319
+ <img
320
+ src={product.imageUrl}
321
+ crossOrigin="anonymous"
322
+ alt="AR Overlay"
323
+ className="w-full h-full object-contain"
324
+ />
325
+ </div>
326
+ )}
327
+ </div>
328
+
329
+ {/* Tracking Indicator - Displayed Normally (Not flipped) */}
330
+ {!manualMode && !isTracking && isCameraReady && (
331
+ <div className="absolute top-20 bg-black/50 text-white text-xs px-3 py-1 rounded-full backdrop-blur flex items-center gap-2 animate-pulse z-20">
332
+ <ScanFace className="w-3 h-3" />
333
+ Stand further back to track body
334
+ </div>
335
+ )}
336
+
337
+ {countdown !== null && (
338
+ <div className="absolute inset-0 flex items-center justify-center bg-black/40 backdrop-blur-sm z-30">
339
+ <span className="text-[10rem] font-bold text-white animate-bounce">{countdown}</span>
340
+ </div>
341
+ )}
342
+ </div>
343
+ )}
344
+ <canvas ref={canvasRef} className="hidden" />
345
+ </div>
346
+
347
+ {/* Controls */}
348
+ <div className="bg-black/90 backdrop-blur-md pb-8 pt-4 px-6 border-t border-white/10 z-50">
349
+ {/* Manual Controls (Only visible in Manual Mode) */}
350
+ {manualMode && (
351
+ <div className="flex justify-center gap-4 mb-6 animate-in slide-in-from-bottom duration-300">
352
+ <div className="flex items-center gap-2 bg-gray-800 rounded-lg p-2 border border-gray-700">
353
+ <span className="text-[10px] text-gray-400 uppercase w-8 text-center">Size</span>
354
+ <input type="range" min="0.5" max="2" step="0.1" value={manualScale} onChange={(e) => setManualScale(parseFloat(e.target.value))} className="w-20 h-1 bg-gray-600 rounded-lg appearance-none cursor-pointer accent-brand-500" />
355
+ </div>
356
+ <div className="flex items-center gap-2 bg-gray-800 rounded-lg p-2 border border-gray-700">
357
+ <span className="text-[10px] text-gray-400 uppercase w-8 text-center">Pos</span>
358
+ <input type="range" min="-200" max="200" step="10" value={manualY} onChange={(e) => setManualY(parseFloat(e.target.value))} className="w-20 h-1 bg-gray-600 rounded-lg appearance-none cursor-pointer accent-brand-500" />
359
+ </div>
360
+ </div>
361
+ )}
362
+
363
+ {/* Shutter Button */}
364
+ <div className="flex items-center justify-center">
365
+ <button
366
+ onClick={handleCapture}
367
+ className="w-20 h-20 rounded-full border-4 border-white/90 flex items-center justify-center relative group shadow-[0_0_30px_rgba(255,255,255,0.2)] hover:shadow-[0_0_40px_rgba(255,255,255,0.4)] transition-all"
368
+ >
369
+ <div className="w-16 h-16 bg-white rounded-full group-hover:scale-90 transition-transform duration-200" />
370
+ </button>
371
+ </div>
372
+ </div>
373
+ </div>
374
+ );
375
+ };
376
+
377
+ export default CameraView;
src/components/Navigation.tsx ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React from 'react';
2
+ import { ShoppingBag, ChevronLeft } from 'lucide-react';
3
+ import { APP_NAME } from '../constants';
4
+
5
+ interface NavigationProps {
6
+ onBack?: () => void;
7
+ showBack: boolean;
8
+ }
9
+
10
+ const Navigation: React.FC<NavigationProps> = ({ onBack, showBack }) => {
11
+ return (
12
+ <nav className="sticky top-0 z-50 bg-white border-b border-gray-100 shadow-sm h-16 flex items-center px-4 justify-between">
13
+ <div className="flex items-center gap-2">
14
+ {showBack && (
15
+ <button
16
+ onClick={onBack}
17
+ className="p-2 hover:bg-gray-100 rounded-full transition-colors mr-1"
18
+ >
19
+ <ChevronLeft className="w-6 h-6 text-gray-700" />
20
+ </button>
21
+ )}
22
+ <div className="flex items-center gap-2">
23
+ <div className="bg-brand-600 p-1.5 rounded-lg">
24
+ <ShoppingBag className="w-5 h-5 text-white" />
25
+ </div>
26
+ <span className="font-bold text-xl tracking-tight text-gray-900">{APP_NAME}</span>
27
+ </div>
28
+ </div>
29
+ <div>
30
+ {/* Placeholder for cart or profile */}
31
+ <div className="w-8 h-8 bg-gray-200 rounded-full overflow-hidden border border-gray-300">
32
+ <img src="https://picsum.photos/100/100" alt="Profile" className="w-full h-full object-cover" />
33
+ </div>
34
+ </div>
35
+ </nav>
36
+ );
37
+ };
38
+
39
+ export default Navigation;
src/components/ProductCard.tsx ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import React, { useState } from 'react';
import { Product } from '../types';
// Removed unused `Eye` import.
import { Camera, ImageOff } from 'lucide-react';

interface ProductCardProps {
  /** Product to render. */
  product: Product;
  /** Invoked with the product when the user clicks "Try On". */
  onTryOn: (product: Product) => void;
}

/**
 * Catalog card: product image with a hover "Try On" action, plus
 * category, name, description and price. Falls back to a placeholder
 * panel if the product image fails to load.
 */
const ProductCard: React.FC<ProductCardProps> = ({ product, onTryOn }) => {
  // Tracks image load failure so we can show the fallback panel.
  const [imgError, setImgError] = useState(false);

  return (
    <div className="bg-white rounded-xl shadow-sm border border-gray-100 overflow-hidden flex flex-col group hover:shadow-md transition-shadow">
      <div className="relative aspect-[4/5] overflow-hidden bg-gray-100">
        {!imgError ? (
          <img
            src={product.imageUrl}
            alt={product.name}
            onError={() => setImgError(true)}
            className="w-full h-full object-cover group-hover:scale-105 transition-transform duration-500"
          />
        ) : (
          <div className="w-full h-full flex flex-col items-center justify-center text-gray-400 bg-gray-50">
            <ImageOff className="w-8 h-8 mb-2" />
            <span className="text-xs">Image Unavailable</span>
          </div>
        )}
        <div className="absolute inset-0 bg-black/0 group-hover:bg-black/10 transition-colors" />

        <button
          onClick={() => onTryOn(product)}
          className="absolute bottom-4 right-4 bg-white/90 backdrop-blur-sm text-brand-600 hover:bg-brand-600 hover:text-white px-4 py-2 rounded-full font-medium text-sm shadow-lg flex items-center gap-2 transition-all transform translate-y-2 opacity-0 group-hover:translate-y-0 group-hover:opacity-100"
        >
          <Camera className="w-4 h-4" />
          Try On
        </button>
      </div>

      <div className="p-4 flex flex-col flex-grow">
        <div className="text-xs font-semibold text-brand-600 uppercase tracking-wider mb-1">
          {product.category}
        </div>
        <h3 className="font-medium text-gray-900 mb-1 leading-snug">{product.name}</h3>
        <p className="text-gray-500 text-sm line-clamp-2 mb-4 flex-grow">{product.description}</p>
        <div className="flex items-center justify-between mt-auto pt-3 border-t border-gray-50">
          <span className="font-bold text-lg text-gray-900">${product.price.toFixed(2)}</span>
          <button className="text-sm font-medium text-gray-600 hover:text-brand-600">
            View Details
          </button>
        </div>
      </div>
    </div>
  );
};

export default ProductCard;
src/components/ResultView.tsx ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import React, { useEffect, useState } from 'react';
import { Product } from '../types';
// Removed unused `Download` and `Share2` imports.
import { RefreshCw, AlertCircle, ShoppingCart, Sparkles, ScanFace } from 'lucide-react';

interface ResultViewProps {
  /** Data URL of the user's captured photo. */
  originalImage: string;
  /** Generated try-on image, null while loading or on failure. */
  generatedImage: string | null;
  /** Product being tried on (used for the overlay badge). */
  product: Product;
  /** True while generation is in progress. */
  loading: boolean;
  /** User-facing error message, or null. */
  error: string | null;
  /** Returns the user to the camera for another attempt. */
  onRetake: () => void;
}

/**
 * Result screen: shows the original capture next to the AI-generated look,
 * with a staged loading animation, an error panel with retry, and
 * retake / add-to-cart actions once a result is available.
 */
const ResultView: React.FC<ResultViewProps> = ({
  originalImage,
  generatedImage,
  product,
  loading,
  error,
  onRetake
}) => {
  // Simulate progress steps for better UX during the wait
  const [progressStep, setProgressStep] = useState(0);

  useEffect(() => {
    if (loading) {
      setProgressStep(0);
      const interval = setInterval(() => {
        // Advance through loadingTexts and hold on the last one.
        setProgressStep(prev => (prev < 3 ? prev + 1 : prev));
      }, 1500); // Change text every 1.5s
      return () => clearInterval(interval);
    }
  }, [loading]);

  const loadingTexts = [
    "Analyzing body pose and lighting...",
    "Extracting product details...",
    "Warping fabric to fit your shape...",
    "Finalizing realistic shadows..."
  ];

  return (
    <div className="min-h-screen bg-gray-50 flex flex-col">
      <div className="flex-1 max-w-4xl mx-auto w-full p-4 flex flex-col md:flex-row gap-6 items-start justify-center pt-8">

        {/* Original Image (Small preview) */}
        <div className="w-full md:w-1/3 bg-white p-3 rounded-2xl shadow-sm border border-gray-100 flex flex-col gap-2">
          <h3 className="text-sm font-semibold text-gray-500 uppercase tracking-wide px-1">Original Capture</h3>
          <div className="aspect-[3/4] rounded-xl overflow-hidden bg-gray-100 relative group">
            <img src={originalImage} alt="Original" className="w-full h-full object-cover" />
            {/* Product Overlay Badge */}
            <div className="absolute bottom-2 right-2 bg-white/90 backdrop-blur rounded-lg p-1 shadow-sm w-12 h-12 border border-gray-200">
              <img src={product.imageUrl} className="w-full h-full object-contain mix-blend-multiply" alt="item" />
            </div>
          </div>
        </div>

        {/* Generated Result */}
        <div className="w-full md:w-2/3 bg-white p-3 rounded-2xl shadow-lg border border-gray-100 flex flex-col gap-2 relative min-h-[400px]">
          <h3 className="text-sm font-semibold text-brand-600 uppercase tracking-wide px-1 flex items-center gap-2">
            <Sparkles className="w-4 h-4 text-brand-500" />
            AI Generated Look
          </h3>

          <div className="aspect-[3/4] md:aspect-square rounded-xl overflow-hidden bg-gray-900 relative flex items-center justify-center">
            {loading && (
              <div className="absolute inset-0 flex flex-col items-center justify-center bg-gray-900/80 backdrop-blur-md z-10 text-white p-6 text-center">
                <div className="relative mb-6">
                  <div className="w-16 h-16 border-4 border-brand-500/30 rounded-full animate-pulse"></div>
                  <div className="absolute inset-0 w-16 h-16 border-4 border-brand-500 border-t-transparent rounded-full animate-spin"></div>
                  <ScanFace className="absolute inset-0 w-8 h-8 m-auto text-brand-500 animate-pulse" />
                </div>
                <h4 className="text-xl font-bold mb-2">Creating Your Try-On</h4>
                <p className="text-brand-300 font-medium animate-pulse">{loadingTexts[progressStep]}</p>
                <p className="text-xs text-gray-500 mt-4">Powered by Gemini Nano Banana</p>
              </div>
            )}

            {error && (
              <div className="absolute inset-0 flex flex-col items-center justify-center p-8 text-center bg-gray-800">
                <div className="bg-red-500/10 p-4 rounded-full mb-4">
                  <AlertCircle className="w-10 h-10 text-red-500" />
                </div>
                <h4 className="text-white font-bold text-lg mb-2">Generation Failed</h4>
                <p className="text-gray-400 text-sm mb-6 max-w-xs">{error}</p>
                <button
                  onClick={onRetake}
                  className="px-6 py-2.5 bg-white text-black rounded-full font-bold text-sm hover:bg-gray-100 transition-colors"
                >
                  Try Again
                </button>
              </div>
            )}

            {!loading && !error && generatedImage && (
              <img src={generatedImage} alt="Generated Try-On" className="w-full h-full object-contain animate-in fade-in duration-700" />
            )}
          </div>

          {/* Actions */}
          {!loading && !error && generatedImage && (
            <div className="grid grid-cols-2 gap-3 mt-2">
              <button
                onClick={onRetake}
                className="flex items-center justify-center gap-2 py-3 bg-gray-100 hover:bg-gray-200 text-gray-800 rounded-xl font-bold transition-colors"
              >
                <RefreshCw className="w-4 h-4" />
                Retake
              </button>
              <button className="flex items-center justify-center gap-2 py-3 bg-brand-600 hover:bg-brand-700 text-white rounded-xl font-bold transition-colors shadow-lg shadow-brand-200 hover:shadow-brand-300">
                <ShoppingCart className="w-4 h-4" />
                Add to Cart
              </button>
            </div>
          )}
        </div>
      </div>
    </div>
  );
};

export default ResultView;
src/constants.ts ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Product, ProductCategory } from './types';
2
+
3
// We use high-quality Unsplash images.
// Static demo catalog rendered by the storefront grid. Each entry's
// `category` selects the category-specific prompt instructions used by the
// try-on service (see src/services/geminiService.ts).
export const PRODUCTS: Product[] = [
  {
    id: '1',
    name: 'Classic Aviator Sunglasses',
    category: ProductCategory.EYEWEAR,
    price: 150.00,
    imageUrl: 'https://images.unsplash.com/photo-1511499767150-a48a237f0083?auto=format&fit=crop&q=80&w=500',
    description: 'Timeless aviator sunglasses with gold frames and dark lenses.'
  },
  {
    id: '2',
    name: 'Oxford Blue Shirt',
    category: ProductCategory.SHIRT,
    price: 89.50,
    imageUrl: 'https://images.unsplash.com/photo-1596755094514-f87e34085b2c?auto=format&fit=crop&q=80&w=500',
    description: 'Premium cotton oxford shirt in a versatile blue shade.'
  },
  {
    id: '3',
    name: 'Gold Layered Necklace',
    category: ProductCategory.NECKLACE,
    price: 45.00,
    imageUrl: 'https://images.unsplash.com/photo-1601121141461-9d6647bca1ed?auto=format&fit=crop&q=80&w=500',
    description: 'Delicate layered gold chain necklace.'
  },
  {
    id: '4',
    name: 'Beige Chino Pants',
    category: ProductCategory.PANTS,
    price: 65.00,
    imageUrl: 'https://images.unsplash.com/photo-1624378439575-d8705ad7ae80?auto=format&fit=crop&q=80&w=500',
    description: 'Slim fit beige chinos suitable for casual or formal wear.'
  },
  {
    id: '5',
    name: 'Wide Brim Fedora',
    category: ProductCategory.HEADWEAR,
    price: 55.00,
    imageUrl: 'https://images.unsplash.com/photo-1514327605112-b887c0e61c0a?auto=format&fit=crop&q=80&w=500',
    description: 'Stylish wide-brimmed felt hat in charcoal grey.'
  },
  {
    id: '6',
    name: 'Denim Jacket',
    category: ProductCategory.SHIRT,
    price: 120.00,
    imageUrl: 'https://images.unsplash.com/photo-1576871337622-98d48d1cf531?auto=format&fit=crop&q=80&w=500',
    description: 'Classic vintage-wash denim jacket.'
  }
];

// Brand name displayed across the UI.
export const APP_NAME = "StyleGenie";
src/index.tsx ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React from 'react';
2
+ import ReactDOM from 'react-dom/client';
3
+ import App from './App';
4
+
5
+ const rootElement = document.getElementById('root');
6
+ if (!rootElement) {
7
+ throw new Error("Could not find root element to mount to");
8
+ }
9
+
10
+ const root = ReactDOM.createRoot(rootElement);
11
+ root.render(
12
+ <React.StrictMode>
13
+ <App />
14
+ </React.StrictMode>
15
+ );
src/services/geminiService.ts ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { GoogleGenAI } from "@google/genai";
2
+ import { Product, ProductCategory } from "../types";
3
+
4
+ // Helper to clean base64 string
5
+ const cleanBase64 = (data: string) => {
6
+ return data.replace(/^data:image\/(png|jpeg|jpg|webp);base64,/, '');
7
+ };
8
+
9
+ const getMimeType = (data: string) => {
10
+ const match = data.match(/^data:(image\/\w+);base64,/);
11
+ return match ? match[1] : 'image/jpeg';
12
+ }
13
+
14
+ // Robust image fetcher that handles CORS better than simple fetch in some cases
15
+ const urlToBase64 = async (url: string): Promise<string> => {
16
+ try {
17
+ const response = await fetch(url);
18
+ if (!response.ok) throw new Error(`Failed to fetch image: ${response.statusText}`);
19
+ const blob = await response.blob();
20
+ return await new Promise((resolve, reject) => {
21
+ const reader = new FileReader();
22
+ reader.onloadend = () => resolve(reader.result as string);
23
+ reader.onerror = reject;
24
+ reader.readAsDataURL(blob);
25
+ });
26
+ } catch (error) {
27
+ console.warn("Fetch failed, attempting canvas fallback...", error);
28
+ // Fallback: Load via Image tag (handles some CORS headers differently) and draw to canvas
29
+ return new Promise((resolve, reject) => {
30
+ const img = new Image();
31
+ img.crossOrigin = "Anonymous";
32
+ img.onload = () => {
33
+ const canvas = document.createElement('canvas');
34
+ canvas.width = img.width;
35
+ canvas.height = img.height;
36
+ const ctx = canvas.getContext('2d');
37
+ if (!ctx) {
38
+ reject(new Error("Could not get canvas context"));
39
+ return;
40
+ }
41
+ ctx.drawImage(img, 0, 0);
42
+ try {
43
+ const dataURL = canvas.toDataURL('image/jpeg');
44
+ resolve(dataURL);
45
+ } catch (e) {
46
+ reject(new Error("Canvas tainted by cross-origin data"));
47
+ }
48
+ };
49
+ img.onerror = () => reject(new Error("Image load failed"));
50
+ img.src = url;
51
+ });
52
+ }
53
+ };
54
+
55
/**
 * Generates a virtual try-on image: the captured user photo with the selected
 * product composited onto them, via the Gemini image model.
 *
 * @param userImageBase64 Data-URL (or raw base64) of the captured user frame.
 * @param product Catalog item to try on; its imageUrl is fetched and inlined.
 * @returns A data-URL string containing the generated image.
 * @throws Error when the API key is absent, the product image cannot be
 *         loaded, or the model response contains no inline image part.
 */
export const generateTryOn = async (userImageBase64: string, product: Product): Promise<string> => {
  const apiKey = process.env.API_KEY;
  if (!apiKey) {
    throw new Error("API Key is missing. Please set the API_KEY environment variable.");
  }

  const ai = new GoogleGenAI({ apiKey });

  // Fetch product image to base64 with robust handling.
  // Failures are rewrapped in a user-friendly message for the UI.
  let productBase64 = '';
  try {
    productBase64 = await urlToBase64(product.imageUrl);
  } catch (e) {
    console.error("Failed to load product image", e);
    throw new Error("Could not load product image. Please try another item.");
  }

  // --- PROMPT ENGINEERING FOR VIRTUAL TRY-ON ---
  // We use Gemini 2.5 Flash Image ('nano banana') capabilities.
  // The key is to instruct it to act as a segmentation and warping engine.

  const basePrompt = `Act as an advanced Virtual Try-On (VTON) AI model.

TASK:
Synthesize a photorealistic image of the USER (Image A) wearing the GARMENT (Image B).

STRICT PROCESSING STEPS:
1. SEGMENTATION: Identify the user's body parts relevant to the garment (e.g., torso for shirts, legs for pants, face for glasses).
2. EXTRACTION: Extract the garment from Image B, completely ignoring any white background, hangers, or mannequins.
3. WARPING: Warp the extracted garment to match the user's body pose, rotation, and shape in Image A.
   - If the user is turning, the garment must turn.
   - If the user has arms crossed, the garment must fold realistically.
4. COMPOSITING: Blend the warped garment onto the user.
   - Apply lighting from the user's environment to the garment.
   - Cast shadows from the garment onto the user's skin/body to create depth.

CONSTRAINTS:
- PRESERVE FACE IDENTITY: Do not modify the user's facial features.
- PRESERVE BACKGROUND: Do not change the background behind the user.
- REALISM: The output must look like a photograph, not a photoshop cut-paste.

CATEGORY SPECIFIC INSTRUCTIONS:
`;

  // Append per-category guidance so the model handles fit/occlusion correctly
  // for the kind of item being tried on.
  let categoryInstruction = "";
  switch (product.category) {
    case ProductCategory.EYEWEAR:
      categoryInstruction = `Category: EYEWEAR.
- Fit: Align perfectly with the eyes and nose bridge.
- Scale: Ensure the glasses width matches the face width.
- Reality: If the glasses are sunglasses, add reflections. If clear, show the eyes through the lenses.`;
      break;
    case ProductCategory.NECKLACE:
      categoryInstruction = `Category: JEWELRY.
- Fit: Drape the necklace around the neck curve. Gravity is important.
- Layering: It must sit ON TOP of skin or the shirt the user is already wearing (unless it's a shirt try-on, then on the new shirt).`;
      break;
    case ProductCategory.HEADWEAR:
      categoryInstruction = `Category: HAT.
- Fit: Sit naturally on the crown of the head.
- Occlusion: If the user has hair volume, the hat should compress it slightly or sit behind fringe.`;
      break;
    case ProductCategory.SHIRT:
      categoryInstruction = `Category: UPPER BODY GARMENT.
- Action: Completely replace the user's current top.
- Sleeves: Match the sleeve length of the target garment. If the user's arm pose is complex, warp the sleeves accordingly.
- Tucking: Determine if a tuck is natural based on the user's pose.`;
      break;
    case ProductCategory.PANTS:
      categoryInstruction = `Category: LOWER BODY GARMENT.
- Action: Replace the user's pants/skirt.
- Waist: Align with the natural waistline.`;
      break;
    default:
      categoryInstruction = `Category: FASHION ITEM. Wear it naturally.`;
      break;
  }

  const fullPrompt = `${basePrompt}\n${categoryInstruction}`;

  // Mime types must match the inline data or the API may reject the payload.
  const userMime = getMimeType(userImageBase64);
  const productMime = getMimeType(productBase64);

  try {
    // gemini-2.5-flash-image is the recommended model for high-fidelity image
    // manipulation tasks. Order of parts: prompt, user photo (Image A),
    // product photo (Image B) — the prompt refers to them by that order.
    const response = await ai.models.generateContent({
      model: 'gemini-2.5-flash-image',
      contents: {
        parts: [
          {
            text: fullPrompt
          },
          {
            inlineData: {
              mimeType: userMime,
              data: cleanBase64(userImageBase64)
            }
          },
          {
            inlineData: {
              mimeType: productMime,
              data: cleanBase64(productBase64)
            }
          }
        ]
      }
    });

    // The response may interleave text and image parts; return the first
    // inline image, rewrapped as a data URL for direct use in an <img> src.
    const parts = response.candidates?.[0]?.content?.parts;
    if (parts) {
      for (const part of parts) {
        if (part.inlineData && part.inlineData.data) {
          return `data:${part.inlineData.mimeType || 'image/png'};base64,${part.inlineData.data}`;
        }
      }
    }

    throw new Error("No image generated by the AI.");

  } catch (error) {
    // Log for diagnostics, then propagate so the caller can display the failure.
    console.error("Gemini API Error:", error);
    throw error;
  }
};
src/types.ts ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/**
 * Garment/accessory classes. Values double as display labels, and the
 * try-on service switches on them to pick category-specific prompt text.
 */
export enum ProductCategory {
  SHIRT = 'Shirt',
  PANTS = 'Pants',
  EYEWEAR = 'Eyewear',
  NECKLACE = 'Necklace',
  HEADWEAR = 'Headwear'
}

/** A catalog item shown in the storefront and usable as try-on input. */
export interface Product {
  id: string;
  name: string;
  category: ProductCategory;
  price: number;
  imageUrl: string;
  description: string;
}

/** Identifiers for the app's top-level screens. */
export enum AppView {
  CATALOG = 'CATALOG',
  CAMERA = 'CAMERA',
  RESULT = 'RESULT'
}

/** Input to a try-on generation: the product plus the captured user photo. */
export interface TryOnRequest {
  product: Product;
  userImageBase64: string; // The captured frame
}

/** UI state for an in-flight or completed try-on generation. */
export interface TryOnResult {
  generatedImageBase64: string | null;
  loading: boolean;
  error: string | null;
}
tsconfig.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ES2020",
4
+ "useDefineForClassFields": true,
5
+ "lib": ["ES2020", "DOM", "DOM.Iterable"],
6
+ "module": "ESNext",
7
+ "skipLibCheck": true,
8
+ "moduleResolution": "bundler",
9
+ "allowImportingTsExtensions": true,
10
+ "resolveJsonModule": true,
11
+ "isolatedModules": true,
12
+ "noEmit": true,
13
+ "jsx": "react-jsx",
14
+ "strict": true,
15
+ "noUnusedLocals": true,
16
+ "noUnusedParameters": true,
17
+ "noFallthroughCasesInSwitch": true
18
+ },
19
+ "include": ["**/*.ts", "**/*.tsx"]
20
+ }
vite.config.ts ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { defineConfig, loadEnv } from 'vite'
2
+ import react from '@vitejs/plugin-react'
3
+
4
+ // https://vitejs.dev/config/
5
+ export default defineConfig(({ mode }) => {
6
+ // Load env file based on `mode` in the current working directory.
7
+ // We pass '' as the third argument to load ALL env vars, including API_KEY from Hugging Face Secrets.
8
+ const env = loadEnv(mode, process.cwd(), '')
9
+
10
+ return {
11
+ plugins: [react()],
12
+ define: {
13
+ // Inject the API Key securely from the build environment
14
+ 'process.env.API_KEY': JSON.stringify(env.API_KEY || process.env.API_KEY)
15
+ },
16
+ server: {
17
+ host: '0.0.0.0', // Required for Docker containers
18
+ port: 7860, // Required port for Hugging Face Spaces
19
+ strictPort: true,
20
+ hmr: {
21
+ clientPort: 443 // Fixes HMR behind Hugging Face SSL proxy
22
+ }
23
+ }
24
+ }
25
+ })