// Quiz page: a 12-question multiple-choice quiz on computer-vision topics
// (object detection, pose estimation, emotion recognition). Access is gated
// on the learner having completed all 3 lessons (see checkLessonsCompleted).
import React, { useState, useEffect } from 'react';
import { Link } from 'react-router-dom';
import { createPageUrl } from '@/utils';
import { motion, AnimatePresence } from 'framer-motion';
import { Brain, CheckCircle2, XCircle, ArrowLeft, Trophy, RotateCcw, Sparkles, Target } from 'lucide-react';
import { Button } from '@/components/ui/button';
import { Progress } from '@/components/ui/progress';
import confetti from 'canvas-confetti';

// Static question bank. Shape of each entry:
//   id       - stable numeric id (NOTE: answers recorded below use the array
//              index `currentQuestion` as `questionId`, not this field)
//   question - prompt text shown to the learner
//   options  - exactly four answer strings
//   correct  - zero-based index into `options` of the right answer
//   topic    - category label used for grouping/display
const questions = [
  {
    id: 1,
    question: "In YOLO (You Only Look Once), what makes it faster than traditional object detection methods?",
    options: [
      "It uses smaller images",
      "It processes the entire image in a single pass instead of multiple regions",
      "It only detects one object at a time",
      "It uses black and white images"
    ],
    correct: 1,
    topic: "Object Detection"
  },
  {
    id: 2,
    question: "What does a confidence score of 95% mean in object detection?",
    options: [
      "The object is 95% visible in the image",
      "The AI is 95% certain about its prediction",
      "95% of the object is inside the bounding box",
      "The detection took 95% of the processing time"
    ],
    correct: 1,
    topic: "Object Detection"
  },
  {
    id: 3,
    question: "How many keypoints does a standard pose estimation model typically track on the human body?",
    options: [
      "7 keypoints",
      "10 keypoints",
      "17 keypoints",
      "25 keypoints"
    ],
    correct: 2,
    topic: "Pose Estimation"
  },
  {
    id: 4,
    question: "What are 'skeleton lines' in pose estimation?",
    options: [
      "Lines that show bones in an X-ray",
      "Connections between keypoints showing body structure",
      "Errors in the detection algorithm",
      "Grid lines dividing the image"
    ],
    correct: 1,
    topic: "Pose Estimation"
  },
  {
    id: 5,
    question: "Which of these is NOT one of the 7 basic emotions in FER (Facial Emotion Recognition)?",
    options: [
      "Fear",
      "Disgust",
      "Confused",
      "Surprise"
    ],
    correct: 2,
    topic: "Emotion Recognition"
  },
  {
    id: 6,
    question: "Why do facial emotion recognition models use 48×48 pixel images?",
    options: [
      "Larger images don't work with AI",
      "Smaller size allows faster processing while retaining key facial features",
      "48×48 is the size of human faces",
      "It's the only size cameras can capture"
    ],
    correct: 1,
    topic: "Emotion Recognition"
  },
  {
    id: 7,
    question: "Which technology would be BEST for analyzing a dancer's movements in real-time?",
    options: [
      "Object Detection",
      "Emotion Recognition",
      "Pose Estimation",
      "Voice Recognition"
    ],
    correct: 2,
    topic: "Pose Estimation"
  },
  {
    id: 8,
    question: "In object detection, what happens during 'non-max suppression'?",
    options: [
      "The AI stops detecting objects",
      "Duplicate or overlapping bounding boxes are removed",
      "The confidence threshold is lowered",
      "The image brightness is reduced"
    ],
    correct: 1,
    topic: "Object Detection"
  },
  {
    id: 9,
    question: "Why is consent important before using emotion recognition technology on someone?",
    options: [
      "It makes the AI more accurate",
      "People have a right to privacy and to know when they're being analyzed",
      "It's required by all cameras",
      "Consent improves the lighting in photos"
    ],
    correct: 1,
    topic: "Emotion Recognition"
  },
  {
    id: 10,
    question: "What is the main advantage of using grayscale images in emotion recognition instead of color?",
    options: [
      "Grayscale images look more professional",
      "Color doesn't exist in emotions",
      "It reduces processing complexity and focuses on facial structure rather than skin tone",
      "Grayscale cameras are cheaper"
    ],
    correct: 2,
    topic: "Emotion Recognition"
  },
  {
    id: 11,
    question: "If pose estimation detects 2 people in a video, how many total keypoints might it track?",
    options: [
      "17 keypoints total",
      "34 keypoints (17 per person)",
      "25 keypoints total",
      "10 keypoints total"
    ],
    correct: 1,
    topic: "Pose Estimation"
  },
  {
    id: 12,
    question: "What is a potential bias concern with emotion recognition AI?",
    options: [
      "It works too slowly",
      "It might perform differently across different cultures, ages, or demographics",
      "It can only detect happy emotions",
      "It requires too much computer power"
    ],
    correct: 1,
    topic: "Emotion Recognition"
  }
];

// 60-minute TTL logic (same as Lessons.js)
// localStorage key holding `{ savedAt: epochMs, value: [chapterIds...] }`.
const COMPLETED_KEY = 'completedChapters_v1';
// Completion record expires after one hour.
const TTL_MS = 60 * 60 * 1000;

/**
 * Reads the lesson-completion record from localStorage and reports whether
 * all lessons are done and the record is still fresh.
 *
 * Returns true only when:
 *  - the stored JSON parses to `{ savedAt: number, value: array }`,
 *  - the record is younger than TTL_MS (otherwise it is deleted), and
 *  - exactly 3 chapters are recorded as completed.
 * Any parse/storage error is treated as "not completed" (returns false).
 */
function checkLessonsCompleted() {
  try {
    const raw = localStorage.getItem(COMPLETED_KEY);
    if (!raw) return false;
    const parsed = JSON.parse(raw);
    const savedAt = parsed?.savedAt;
    const value = parsed?.value;
    if (!Array.isArray(value) || typeof savedAt !== 'number') return false;
    if (Date.now() - savedAt > TTL_MS) {
      // Stale record: drop it so later reads fail fast.
      localStorage.removeItem(COMPLETED_KEY);
      return false;
    }
    return value.length === 3;
  } catch {
    return false;
  }
}

/**
 * Quiz page component. Walks the learner through `questions` one at a time,
 * tracks score and per-question answers, and celebrates a perfect score
 * with a confetti animation.
 */
export default function Quiz() {
  const [started, setStarted] = useState(false);          // quiz begun (past start screen)
  const [currentQuestion, setCurrentQuestion] = useState(0); // index into `questions`
  const [selectedAnswer, setSelectedAnswer] = useState(null); // option index the user picked
  const [showResult, setShowResult] = useState(false);    // reveal correct/incorrect state
  const [score, setScore] = useState(0);                  // running count of correct answers
  const [answers, setAnswers] = useState([]);             // per-question answer log
  const [finished, setFinished] = useState(false);        // all questions answered
  // Lazy initializer: checkLessonsCompleted runs once on mount.
  // NOTE(review): setAllLessonsCompleted is never called in this chunk —
  // presumably the gate never refreshes while mounted; confirm intended.
  const [allLessonsCompleted, setAllLessonsCompleted] = useState(checkLessonsCompleted);

  // Trigger confetti when finished with perfect score
  useEffect(() => {
    if (finished && score === questions.length) {
      triggerConfetti();
    }
  }, [finished, score]);

  // Fires confetti bursts from both sides of the screen for 5 seconds.
  // FIXME(review): the interval is only cleared when the 5s window elapses;
  // if the component unmounts mid-animation the timer keeps firing (leak).
  // Consider returning a cleanup from the triggering useEffect.
  const triggerConfetti = () => {
    const duration = 5 * 1000;
    const animationEnd = Date.now() + duration;
    const defaults = { startVelocity: 30, spread: 360, ticks: 60, zIndex: 0 };
    const random = (min, max) => Math.random() * (max - min) + min;
    const interval = setInterval(function() {
      const timeLeft = animationEnd - Date.now();
      if (timeLeft <= 0) {
        return clearInterval(interval);
      }
      // Taper particle count as the animation winds down.
      const particleCount = 50 * (timeLeft / duration);
      // Since particles fall down, start a bit higher than random
      confetti({ ...defaults, particleCount, origin: { x: random(0.1, 0.3), y: Math.random() - 0.2 } });
      confetti({ ...defaults, particleCount, origin: { x: random(0.7, 0.9), y: Math.random() - 0.2 } });
    }, 250);
  };

  // Resets quiz progress and leaves the start screen.
  // NOTE(review): selectedAnswer/showResult are not reset here; they are
  // cleared per-question in handleNext, but a restart after finishing may
  // briefly carry stale values — verify against the (garbled) render code.
  const handleStart = () => {
    setStarted(true);
    setCurrentQuestion(0);
    setScore(0);
    setAnswers([]);
    setFinished(false);
  };

  // Records the user's pick for the current question and reveals the result.
  const handleAnswer = (index) => {
    if (showResult) return; // guard: ignore clicks after an answer is locked in
    setSelectedAnswer(index);
    setShowResult(true);
    const isCorrect = index === questions[currentQuestion].correct;
    if (isCorrect) {
      // NOTE(review): reads `score` from the render closure; safe here since
      // the showResult guard allows one update per question, but the
      // functional form setScore(s => s + 1) would be more robust.
      setScore(score + 1);
    }
    // questionId stores the array index, not questions[i].id — see note above.
    setAnswers([...answers, { questionId: currentQuestion, selected: index, correct: isCorrect }]);
  };

  // Advances to the next question, or marks the quiz finished on the last one.
  const handleNext = () => {
    if (currentQuestion < questions.length - 1) {
      setCurrentQuestion(currentQuestion + 1);
      setSelectedAnswer(null);
      setShowResult(false);
    } else {
      setFinished(true);
    }
  };

  // Maps the final score percentage to a celebratory emoji + message.
  const getScoreMessage = () => {
    const percentage = (score / questions.length) * 100;
    if (percentage === 100) return { emoji: "🏆", message: "PERFECT SCORE! You're an AI Vision Master!" };
    if (percentage >= 80) return { emoji: "🌟", message: "Amazing! You really know your AI stuff!" };
    if (percentage >= 60) return { emoji: "👍", message: "Good job! Keep learning and you'll be an expert!" };
    if (percentage >= 40) return { emoji: "📚", message: "Nice try! Review the lessons and try again!" };
    return { emoji: "💪", message: "Don't give up! Go through the lessons and come back stronger!" };
  };

  // Start Screen
  // NOTE(review): the JSX below is garbled in this chunk — element tags have
  // been stripped by extraction, leaving only text nodes, and the file is
  // truncated before the component's closing braces. Do not edit from this
  // view; recover the original markup from version control before changing it.
  if (!started) {
    return (
Complete all 3 lessons to unlock the quiz!
{message}
Now that you've mastered the concepts, try building your own AI vision projects!
Build your own detector
Create motion games
Analyze expressions
Great job! You got this one right.
The correct answer was: {question.options[question.correct]}