// Source: Harsha1845 — "Upload 16 files" (commit 370c714, verified)
import React, { useRef, useEffect, useState, useCallback } from 'react';
import { X, RefreshCw, Image as ImageIcon, AlertCircle, Layers, ScanFace, BoxSelect } from 'lucide-react';
import { Product, ProductCategory } from '../types';
import { FilesetResolver, PoseLandmarker } from '@mediapipe/tasks-vision';
/**
 * Props for the AR try-on camera view.
 */
interface CameraViewProps {
/** Product being previewed; its category selects the overlay anchoring logic. */
product: Product;
/** Called with the captured frame as a JPEG data URL (see takePhoto). */
onCapture: (imageSrc: string) => void;
/** Dismisses the camera view. */
onClose: () => void;
}
const CameraView: React.FC<CameraViewProps> = ({ product, onCapture, onClose }) => {
// DOM refs
const videoRef = useRef<HTMLVideoElement>(null); // live camera preview element
const canvasRef = useRef<HTMLCanvasElement>(null); // hidden canvas used for photo capture
const lastVideoTimeRef = useRef<number>(-1); // timestamp of the last processed video frame (skip duplicates)
// State
const [stream, setStream] = useState<MediaStream | null>(null);
const [error, setError] = useState<string | null>(null);
const [countdown, setCountdown] = useState<number | null>(null); // shutter countdown value; null = idle
const [isCameraReady, setIsCameraReady] = useState(false);
const [isTracking, setIsTracking] = useState(false); // true while a pose is currently detected
// AR Transform State (Calculated in real-time)
const [arStyle, setArStyle] = useState<React.CSSProperties>({
opacity: 0, // Hide until tracked
transform: 'translate(-50%, -50%) scale(1) rotate(0deg)',
top: '50%',
left: '50%',
width: '50%',
});
// Manual override state (if tracking fails or user wants to adjust)
const [manualMode, setManualMode] = useState(false);
const [manualScale, setManualScale] = useState(1);
const [manualY, setManualY] = useState(0);
// Typed ref (was `any`): PoseLandmarker is already imported, so misuse of the
// landmarker API (detectForVideo/close) is now caught at compile time.
const landmarkerRef = useRef<PoseLandmarker | null>(null);
const requestRef = useRef<number | null>(null); // current requestAnimationFrame handle
// 1. Camera bootstrap: request the front camera once on mount; release everything on unmount.
useEffect(() => {
let activeStream: MediaStream | null = null;
const openCamera = async () => {
try {
if (!navigator.mediaDevices?.getUserMedia) throw new Error("No camera access");
activeStream = await navigator.mediaDevices.getUserMedia({
video: { width: { ideal: 1280 }, height: { ideal: 720 }, facingMode: 'user' },
audio: false,
});
setStream(activeStream);
} catch (err) {
console.error("Camera Error:", err);
setError("Could not access camera. Please allow permissions.");
}
};
openCamera();
// Cleanup: stop the camera tracks, halt the tracking loop, release the ML model.
return () => {
if (activeStream) activeStream.getTracks().forEach(track => track.stop());
if (requestRef.current) cancelAnimationFrame(requestRef.current);
if (landmarkerRef.current) landmarkerRef.current.close();
};
}, []);
// 2. Initialize MediaPipe Pose Landmarker.
useEffect(() => {
// Guards against the unmount race: the model download is slow, and the
// mount-effect cleanup (which closes landmarkerRef) may have already run by
// the time createFromOptions resolves. Without this flag the landmarker
// would leak GPU/wasm resources, and the failure path would call setState
// on an unmounted component.
let cancelled = false;
const loadLandmarker = async () => {
try {
const vision = await FilesetResolver.forVisionTasks(
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.17/wasm"
);
const landmarker = await PoseLandmarker.createFromOptions(vision, {
baseOptions: {
modelAssetPath: `https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_lite/float16/1/pose_landmarker_lite.task`,
delegate: "GPU"
},
runningMode: "VIDEO",
numPoses: 1
});
if (cancelled) {
landmarker.close(); // component is gone — dispose instead of leaking
return;
}
landmarkerRef.current = landmarker;
console.log("AR Engine Loaded");
} catch (err) {
console.error("Failed to load AR engine:", err);
// Fallback to manual mode if AR fails (skip if already unmounted)
if (!cancelled) setManualMode(true);
}
};
loadLandmarker();
return () => { cancelled = true; };
}, []);
// 3. Real-time tracking loop: one pose-detection pass per animation frame.
const predictWebcam = useCallback(() => {
const landmarker = landmarkerRef.current;
const video = videoRef.current;
// Not ready yet (model still loading / camera warming up): keep polling.
if (!landmarker || !video || !isCameraReady) {
requestRef.current = requestAnimationFrame(predictWebcam);
return;
}
// Performance: only run detection when the video produced a new frame.
const frameTime = video.currentTime;
if (frameTime !== lastVideoTimeRef.current) {
lastVideoTimeRef.current = frameTime;
try {
const result = landmarker.detectForVideo(video, performance.now());
const pose = result.landmarks?.[0]; // first person (numPoses: 1)
if (pose) {
setIsTracking(true);
updateOverlay(pose);
} else {
setIsTracking(false);
}
} catch (e) {
console.warn("Tracking glitch", e);
}
}
// Keep looping unless the user switched to manual placement.
if (!manualMode) {
requestRef.current = requestAnimationFrame(predictWebcam);
}
}, [isCameraReady, manualMode, product.category]);
// Start/stop the tracking loop whenever camera readiness or manual mode changes.
useEffect(() => {
const trackingEnabled = isCameraReady && !manualMode;
if (trackingEnabled) {
requestRef.current = requestAnimationFrame(predictWebcam);
}
return () => {
if (requestRef.current) cancelAnimationFrame(requestRef.current);
};
}, [isCameraReady, manualMode, predictWebcam]);
// 4. Calculate Coordinates & Apply Physics
// Converts one set of MediaPipe pose landmarks (normalized 0..1 screen
// coordinates) into CSS percentage positioning for the product overlay.
// NOTE(review): the numeric multipliers below look like visual heuristics
// tuned by eye, not derived constants — confirm before changing.
const updateOverlay = (landmarks: any[]) => {
// MediaPipe Landmarks:
// 11: left_shoulder, 12: right_shoulder, 0: nose, 15: left_wrist, 16: right_wrist
// Defaults center the overlay if no category branch below matches.
let top = 50;
let left = 50;
let width = 50;
let rotation = 0;
const lShoulder = landmarks[11];
const rShoulder = landmarks[12];
const nose = landmarks[0];
const lEar = landmarks[7];
const rEar = landmarks[8];
// Calculate Shoulder Width (Screen Space)
const shoulderDx = (rShoulder.x - lShoulder.x);
const shoulderDy = (rShoulder.y - lShoulder.y);
const shoulderDist = Math.sqrt(shoulderDx*shoulderDx + shoulderDy*shoulderDy);
// Calculate Body Rotation (Tilt) — angle of the shoulder line, in degrees
const angleRad = Math.atan2(shoulderDy, shoulderDx);
const angleDeg = angleRad * (180 / Math.PI);
// LOGIC PER CATEGORY
if (product.category === ProductCategory.SHIRT || product.category === ProductCategory.PANTS) {
// Anchor to Chest (Midpoint of shoulders), converted to CSS percentages
left = (lShoulder.x + rShoulder.x) / 2 * 100;
top = ((lShoulder.y + rShoulder.y) / 2) * 100;
// Shirt width is roughly 2.5x shoulder width
width = shoulderDist * 280; // Multiplier heuristic
rotation = angleDeg;
// Offset down slightly for shirts so it covers torso
top += 15;
}
else if (product.category === ProductCategory.EYEWEAR) {
// Anchor to Eyes (pose-model eye landmarks at indices 2 and 5)
const lEye = landmarks[2];
const rEye = landmarks[5];
const eyeDist = Math.sqrt(Math.pow(rEye.x - lEye.x, 2) + Math.pow(rEye.y - lEye.y, 2));
left = (nose.x * 100);
top = (nose.y * 100) - 2; // Slightly above nose tip
width = eyeDist * 350; // Glasses are wider than eye-distance
rotation = Math.atan2(rEye.y - lEye.y, rEye.x - lEye.x) * (180/Math.PI);
}
else if (product.category === ProductCategory.HEADWEAR) {
// Anchor to Forehead: estimate the crown as ear height minus a fraction of
// the ear-to-ear distance (used as a proxy for head size)
left = (nose.x * 100);
const headTopY = Math.min(lEar.y, rEar.y) - (Math.abs(lEar.x - rEar.x) * 0.8);
top = (headTopY * 100);
width = Math.abs(lEar.x - rEar.x) * 250;
rotation = angleDeg; // Follow head tilt (approx shoulder tilt or calc ears)
}
// Push the computed placement to the overlay element; the short linear
// transition smooths frame-to-frame jitter from the detector.
setArStyle({
position: 'absolute',
left: `${left}%`,
top: `${top}%`,
width: `${width}%`,
transform: `translate(-50%, -50%) rotate(${rotation}deg)`,
opacity: 1,
transition: 'all 0.1s linear', // Smooth interpolation
mixBlendMode: 'multiply',
filter: 'brightness(1.1) contrast(1.1)',
pointerEvents: 'none'
});
};
// Video Binding: attach the MediaStream to the <video> element once both exist.
useEffect(() => {
const video = videoRef.current;
if (video && stream) {
video.srcObject = stream;
}
}, [stream]);
// Shutter handler: runs a 3-2-1 countdown, then takes the photo.
const handleCapture = () => {
if (countdown !== null) return; // a countdown is already in progress
setCountdown(3);
let remaining = 3;
const timer = setInterval(() => {
remaining -= 1;
if (remaining > 0) {
setCountdown(remaining);
} else {
clearInterval(timer);
setCountdown(null);
takePhoto();
}
}, 1000);
};
// Captures the current video frame onto the hidden canvas — flipped
// horizontally so the saved photo matches the CSS-mirrored preview —
// then hands the JPEG data URL to the parent via onCapture.
const takePhoto = () => {
const video = videoRef.current;
const canvas = canvasRef.current;
if (!video || !canvas) return;
const context = canvas.getContext('2d');
if (!context) return;
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
// Mirror the draw to match the selfie-style preview orientation.
context.translate(canvas.width, 0);
context.scale(-1, 1);
context.drawImage(video, 0, 0, canvas.width, canvas.height);
onCapture(canvas.toDataURL('image/jpeg', 0.9));
};
// Chooses the overlay style: slider-driven placement in manual mode,
// otherwise the live AR-tracked style computed by updateOverlay.
const getFinalStyle = () => {
if (!manualMode) return arStyle;
const manualWidth = product.category === ProductCategory.EYEWEAR ? '40%' : '60%';
return {
position: 'absolute' as const,
top: '50%',
left: '50%',
transform: `translate(-50%, -50%) scale(${manualScale}) translateY(${manualY}px)`,
width: manualWidth,
mixBlendMode: 'multiply' as const,
opacity: 1
};
};
return (
<div className="fixed inset-0 z-[60] bg-black flex flex-col">
{/* Header */}
<div className="absolute top-0 left-0 right-0 p-4 flex justify-between items-center z-10 bg-gradient-to-b from-black/80 to-transparent">
<button onClick={onClose} className="p-2 bg-white/10 backdrop-blur-md rounded-full text-white hover:bg-white/20 border border-white/10">
<X className="w-6 h-6" />
</button>
{/* Status line: manual mode / live tracking / still searching for a pose */}
<div className="flex flex-col items-center">
<span className="text-white/80 text-[10px] uppercase tracking-widest mb-0.5">
{manualMode ? "Manual Adjust" : isTracking ? "AR Active" : "Scanning..."}
</span>
<span className="text-white font-bold text-sm shadow-sm">{product.name}</span>
</div>
{/* Toggles between AR tracking and manual overlay placement */}
<button
onClick={() => setManualMode(!manualMode)}
className={`p-2 rounded-full border ${manualMode ? 'bg-white text-black' : 'bg-white/10 text-white'} transition-colors`}
>
<BoxSelect className="w-5 h-5" />
</button>
</div>
{/* Main Viewport */}
<div className="flex-1 relative bg-gray-900 overflow-hidden flex items-center justify-center">
{!isCameraReady && !error && (
<div className="absolute inset-0 flex flex-col items-center justify-center z-20 bg-gray-900 text-white gap-3">
<div className="w-10 h-10 border-4 border-brand-500 border-t-transparent rounded-full animate-spin"></div>
<p className="text-sm font-medium animate-pulse">Initializing Vision Engine...</p>
</div>
)}
{error ? (
<div className="text-white text-center p-6 max-w-sm z-20 flex flex-col items-center">
<AlertCircle className="w-12 h-12 text-red-500 mb-4" />
<p className="mb-6 font-medium text-lg">{error}</p>
<button onClick={() => window.location.reload()} className="bg-white text-black px-6 py-3 rounded-xl font-bold">Reload</button>
</div>
) : (
<div className="relative w-full h-full flex items-center justify-center">
{/* The wrapper handles the flipping for BOTH video and overlay */}
<div className="relative w-full h-full transform -scale-x-100 origin-center">
{/* onLoadedData gates the tracking loop and overlay until frames exist */}
<video
ref={videoRef}
autoPlay
playsInline
muted
onLoadedData={() => setIsCameraReady(true)}
className="absolute w-full h-full object-cover"
/>
{/* AR OVERLAY — positioned by updateOverlay (AR) or getFinalStyle (manual) */}
{isCameraReady && (
<div style={getFinalStyle()} className="pointer-events-none z-10">
<img
src={product.imageUrl}
crossOrigin="anonymous"
alt="AR Overlay"
className="w-full h-full object-contain"
/>
</div>
)}
</div>
{/* Tracking Indicator - Displayed Normally (Not flipped) */}
{!manualMode && !isTracking && isCameraReady && (
<div className="absolute top-20 bg-black/50 text-white text-xs px-3 py-1 rounded-full backdrop-blur flex items-center gap-2 animate-pulse z-20">
<ScanFace className="w-3 h-3" />
Stand further back to track body
</div>
)}
{/* Countdown overlay shown while the shutter timer runs */}
{countdown !== null && (
<div className="absolute inset-0 flex items-center justify-center bg-black/40 backdrop-blur-sm z-30">
<span className="text-[10rem] font-bold text-white animate-bounce">{countdown}</span>
</div>
)}
</div>
)}
{/* Hidden canvas used by takePhoto to encode the captured frame */}
<canvas ref={canvasRef} className="hidden" />
</div>
{/* Controls */}
<div className="bg-black/90 backdrop-blur-md pb-8 pt-4 px-6 border-t border-white/10 z-50">
{/* Manual Controls (Only visible in Manual Mode) */}
{manualMode && (
<div className="flex justify-center gap-4 mb-6 animate-in slide-in-from-bottom duration-300">
<div className="flex items-center gap-2 bg-gray-800 rounded-lg p-2 border border-gray-700">
<span className="text-[10px] text-gray-400 uppercase w-8 text-center">Size</span>
<input type="range" min="0.5" max="2" step="0.1" value={manualScale} onChange={(e) => setManualScale(parseFloat(e.target.value))} className="w-20 h-1 bg-gray-600 rounded-lg appearance-none cursor-pointer accent-brand-500" />
</div>
<div className="flex items-center gap-2 bg-gray-800 rounded-lg p-2 border border-gray-700">
<span className="text-[10px] text-gray-400 uppercase w-8 text-center">Pos</span>
<input type="range" min="-200" max="200" step="10" value={manualY} onChange={(e) => setManualY(parseFloat(e.target.value))} className="w-20 h-1 bg-gray-600 rounded-lg appearance-none cursor-pointer accent-brand-500" />
</div>
</div>
)}
{/* Shutter Button */}
<div className="flex items-center justify-center">
<button
onClick={handleCapture}
className="w-20 h-20 rounded-full border-4 border-white/90 flex items-center justify-center relative group shadow-[0_0_30px_rgba(255,255,255,0.2)] hover:shadow-[0_0_40px_rgba(255,255,255,0.4)] transition-all"
>
<div className="w-16 h-16 bg-white rounded-full group-hover:scale-90 transition-transform duration-200" />
</button>
</div>
</div>
</div>
);
};
export default CameraView;