import React, { useState, useEffect, useRef, useCallback } from 'react'
import { Brain, BookOpen, Users, BarChart3, Settings, Play, Pause, Eye, Sparkles, Trophy, Zap, Target, Hand, Camera, CameraOff, Check, X, Plus, Send, MessageSquare, RefreshCw, Loader2, Sparkle, Cpu, Save, Globe, AlertCircle } from 'lucide-react'
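// Base URL for the ContextFlow research backend; assumes the API dev server listens on localhost:5001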
const API_URL = 'http://localhost:5001/api'
function App() {
const [activeTab, setActiveTab] = useState('learn')
const [sessionActive, setSessionActive] = useState(false)
const [topic, setTopic] = useState('Machine Learning')
const [predictions, setPredictions] = useState([])
const [confusionLevel, setConfusionLevel] = useState(0)
const [cameraEnabled, setCameraEnabled] = useState(false)
const [cameraStream, setCameraStream] = useState(null)
const [faceBlurred, setFaceBlurred] = useState(true)
const [gestures, setGestures] = useState([
{ id: 'thinking', name: 'Thinking', description: 'Hand on chin', trained: false, type: 'cognitive' },
{ id: 'confused', name: 'Confused', description: 'Scratching head', trained: false, type: 'emotional' },
{ id: 'pause', name: 'Pause', description: 'Open palm', trained: false, type: 'action' },
{ id: 'got_it', name: 'Got It!', description: 'Thumbs up', trained: true, type: 'feedback' }
])
const [trainingGesture, setTrainingGesture] = useState(null)
const [trainingProgress, setTrainingProgress] = useState(0)
const [recognizedGesture, setRecognizedGesture] = useState(null)
const [handLandmarks, setHandLandmarks] = useState(null)
const [gamification, setGamification] = useState({
level: 1,
title: 'Curious Learner',
xp: 0,
streak: 0,
fishXP: 0,
fishStage: 0
})
const [peerInsights, setPeerInsights] = useState([])
const [dueReviews, setDueReviews] = useState([])
const [stats, setStats] = useState({
totalDoubts: 0,
mastered: 0,
streak: 0,
xp: 0
})
// LLM Flow State
const [llmFlowActive, setLlmFlowActive] = useState(false)
const [llmResponses, setLlmResponses] = useState([])
const [llmQuery, setLlmQuery] = useState('')
const [llmLoading, setLlmLoading] = useState(false)
const [selectedProviders, setSelectedProviders] = useState(['chatgpt', 'gemini'])
const [rateLimits, setRateLimits] = useState({})
const [gestureActions, setGestureActions] = useState([])
const [rlLoopActive, setRlLoopActive] = useState(false)
const [rlStatus, setRlStatus] = useState(null)
const [promptTemplates, setPromptTemplates] = useState([
{ id: 'learning_explain', name: 'Explain Concept', icon: Brain },
{ id: 'doubt_resolution', name: 'Resolve Doubt', icon: MessageSquare },
{ id: 'summarize_content', name: 'Summarize', icon: BookOpen },
{ id: 'practice_questions', name: 'Practice Quiz', icon: Target },
{ id: 'spaced_repetition', name: 'Spaced Review', icon: RefreshCw }
])
const [selectedTemplate, setSelectedTemplate] = useState('learning_explain')
const [contextForPrompt, setContextForPrompt] = useState({})
// LLM Settings State
const [llmSettings, setLlmSettings] = useState(() => {
const saved = localStorage.getItem('contextflow_llm_settings')
if (saved) {
return JSON.parse(saved)
}
return {
providers: {
chatgpt: { enabled: true, name: 'ChatGPT', icon: '🤖', color: '#10a37f' },
gemini: { enabled: true, name: 'Gemini', icon: '✨', color: '#4285f4' },
claude: { enabled: false, name: 'Claude', icon: '🧠', color: '#d4a574' },
perplexity: { enabled: false, name: 'Perplexity', icon: '🔍', color: '#20b2aa' },
grok: { enabled: false, name: 'Grok', icon: '🚀', color: '#000000' },
deepseek: { enabled: false, name: 'DeepSeek', icon: '🔭', color: '#0066cc' },
poe: { enabled: false, name: 'Poe', icon: '🦊', color: '#6b5ce7' },
ollama: { enabled: false, name: 'Ollama (Local)', icon: '💻', color: '#9333ea' }
},
activeProviders: ['chatgpt', 'gemini'],
defaultProvider: 'chatgpt',
autoOpenBrowser: true
}
})
const [llmLauncher, setLlmLauncher] = useState(null)
const [browserLaunchResults, setBrowserLaunchResults] = useState([])
const videoRef = useRef(null)
const canvasRef = useRef(null)
const canvasOverlayRef = useRef(null)
const animationRef = useRef(null)
const sessionIntervalRef = useRef(null)
const handsRef = useRef(null)
const faceMeshRef = useRef(null)
const mediaPipeLoaded = useRef(false)
const tabs = [
{ id: 'learn', icon: Brain, label: 'AI Learning' },
{ id: 'llmflow', icon: Cpu, label: 'LLM Flow' },
{ id: 'predict', icon: Sparkles, label: 'Doubt Prediction' },
{ id: 'gestures', icon: Hand, label: 'Hand Gestures' },
{ id: 'behavior', icon: Eye, label: 'Behavior' },
{ id: 'peer', icon: Users, label: 'Peer Network' },
{ id: 'stats', icon: BarChart3, label: 'Statistics' },
{ id: 'gamify', icon: Trophy, label: 'Gamification' },
{ id: 'settings', icon: Settings, label: 'Settings' }
]
useEffect(() => {
  loadInitialData()
  const initLauncher = async () => {
    const { BrowserLLMLauncher } = await import('./BrowserLLMLauncher')
    const launcher = new BrowserLLMLauncher()
    launcher.setActiveProviders(getEnabledProviders())
    setLlmLauncher(launcher)
  }
  initLauncher()
  // Cancel any MediaPipe processing loop still running on unmount
  return () => {
    if (animationRef.current) {
      cancelAnimationFrame(animationRef.current)
    }
  }
}, [])
useEffect(() => {
  if (llmFlowActive) {
    loadGestureActions()
  }
}, [llmFlowActive])
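// Ping the backend, then seed the UI with demo predictions, peer insights, and
// spaced-repetition cards so the app still works when the API is unreachable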
const loadInitialData = async () => {
try {
const res = await fetch(`${API_URL}/health`)
if (res.ok) {
console.log('Connected to ContextFlow Research API')
}
} catch (e) {
console.log('API not available, running in demo mode')
}
setPredictions([
{ doubt: 'What is the bias-variance tradeoff?', confidence: 0.92, explanation: 'Key concept before diving into model selection', priority: 95 },
{ doubt: 'How do I choose between L1 and L2 regularization?', confidence: 0.87, explanation: 'Common struggle point for beginners', priority: 88 },
{ doubt: 'What is the difference between supervised and unsupervised?', confidence: 0.85, explanation: 'Foundation concept', priority: 82 },
{ doubt: 'How do I handle imbalanced datasets?', confidence: 0.78, explanation: 'Practical challenge in real projects', priority: 75 },
{ doubt: 'What is cross-validation and why is it important?', confidence: 0.72, explanation: 'Essential for reliable evaluation', priority: 68 }
])
setPeerInsights([
{ type: 'common_struggle', content: '87% of learners struggle with prerequisites before mastering ML fundamentals', peer_count: 1247 },
{ type: 'effective_resource', content: 'Interactive coding exercises show 40% better retention for ML concepts', peer_count: 892 },
{ type: 'learning_pattern', content: '15-minute focused sessions are more effective than long marathons', peer_count: 1563 }
])
setDueReviews([
{ card_id: '1', front: 'What is a perceptron?', back: 'A single-layer neural network that makes predictions based on linear function', topic: 'Deep Learning', interval: 3 },
{ card_id: '2', front: 'Explain backpropagation', back: 'Algorithm that calculates gradients by propagating errors backwards through the network', topic: 'Deep Learning', interval: 1 },
{ card_id: '3', front: 'What is the vanishing gradient problem?', back: 'When gradients become very small during backprop, preventing learning in early layers', topic: 'Deep Learning', interval: 5 }
])
}
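// Lazily load MediaPipe Hands and FaceMesh from the jsDelivr CDN on first camera start.
// Both models run fully in-browser; raw video frames never leave the client.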
const loadMediaPipe = async () => {
if (mediaPipeLoaded.current) return true
try {
const { Hands } = await import('https://cdn.jsdelivr.net/npm/@mediapipe/hands@0.4.1675469240/hands.js')
const { FaceMesh } = await import('https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh@0.4.1633529614/face_mesh.js')
handsRef.current = new Hands({
locateFile: (file) => `https://cdn.jsdelivr.net/npm/@mediapipe/hands@0.4.1675469240/${file}`
})
handsRef.current.setOptions({
maxNumHands: 1,
modelComplexity: 1,
minDetectionConfidence: 0.5,
minTrackingConfidence: 0.5
})
handsRef.current.onResults(onHandResults)
faceMeshRef.current = new FaceMesh({
locateFile: (file) => `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh@0.4.1633529614/${file}`
})
faceMeshRef.current.setOptions({
maxNumFaces: 1,
refineLandmarks: true,
minDetectionConfidence: 0.5,
minTrackingConfidence: 0.5
})
faceMeshRef.current.onResults(onFaceResults)
mediaPipeLoaded.current = true
return true
} catch (e) {
console.error('Failed to load MediaPipe:', e)
return false
}
}
const onHandResults = (results) => {
if (results.multiHandLandmarks && results.multiHandLandmarks.length > 0) {
const landmarks = results.multiHandLandmarks[0]
setHandLandmarks(landmarks)
const landmarksArray = landmarks.map(lm => [lm.x, lm.y, lm.z])
if (cameraEnabled) {
recognizeGesture(landmarksArray)
}
} else {
setHandLandmarks(null)
}
}
const onFaceResults = (results) => {
if (!faceBlurred || !canvasRef.current) return
if (results.multiFaceLandmarks && results.multiFaceLandmarks.length > 0) {
const landmarks = results.multiFaceLandmarks[0]
applyMediaPipeFaceBlur(landmarks)
}
}
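// Privacy blur: compute the face bounding box from the FaceMesh landmarks (normalized
// 0-1 coordinates), pad it by 15%, then pixelate that region in 15px blocks on the canvas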
const applyMediaPipeFaceBlur = (landmarks) => {
const video = videoRef.current
const canvas = canvasRef.current
if (!video || !canvas) return
const ctx = canvas.getContext('2d')
canvas.width = video.videoWidth
canvas.height = video.videoHeight
ctx.drawImage(video, 0, 0)
const w = video.videoWidth
const h = video.videoHeight
let minX = 1, maxX = 0, minY = 1, maxY = 0
for (const lm of landmarks) {
minX = Math.min(minX, lm.x)
maxX = Math.max(maxX, lm.x)
minY = Math.min(minY, lm.y)
maxY = Math.max(maxY, lm.y)
}
const padding = 0.15
const padX = (maxX - minX) * padding
const padY = (maxY - minY) * padding
const x = Math.max(0, Math.floor((minX - padX) * w))
const y = Math.max(0, Math.floor((minY - padY) * h))
const bw = Math.min(w - x, Math.floor((maxX - minX + 2 * padX) * w))
const bh = Math.min(h - y, Math.floor((maxY - minY + 2 * padY) * h))
if (bw > 10 && bh > 10) {
const imageData = ctx.getImageData(x, y, bw, bh)
const data = imageData.data
const pixelSize = 15
for (let py = 0; py < bh; py += pixelSize) {
for (let px = 0; px < bw; px += pixelSize) {
const i = (py * bw + px) * 4
const r = data[i]
const g = data[i + 1]
const b = data[i + 2]
for (let dy = 0; dy < pixelSize && py + dy < bh; dy++) {
for (let dx = 0; dx < pixelSize && px + dx < bw; dx++) {
const ni = ((py + dy) * bw + (px + dx)) * 4
data[ni] = r
data[ni + 1] = g
data[ni + 2] = b
}
}
}
}
ctx.putImageData(imageData, x, y)
}
}
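// Draw the 21 MediaPipe hand landmarks on the overlay canvas; `connections` lists the
// landmark index pairs forming the finger bones and palm edges of the hand skeleton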
const drawHandLandmarks = useCallback(() => {
const canvas = canvasOverlayRef.current
const video = videoRef.current
if (!canvas || !video || !handLandmarks) return
canvas.width = video.videoWidth
canvas.height = video.videoHeight
const ctx = canvas.getContext('2d')
ctx.clearRect(0, 0, canvas.width, canvas.height)
const connections = [
[0, 1], [1, 2], [2, 3], [3, 4],
[0, 5], [5, 6], [6, 7], [7, 8],
[0, 9], [9, 10], [10, 11], [11, 12],
[0, 13], [13, 14], [14, 15], [15, 16],
[0, 17], [17, 18], [18, 19], [19, 20],
[5, 9], [9, 13], [13, 17]
]
ctx.strokeStyle = 'rgba(0, 255, 136, 0.8)'
ctx.lineWidth = 2
for (const connection of connections) {
const start = handLandmarks[connection[0]]
const end = handLandmarks[connection[1]]
ctx.beginPath()
ctx.moveTo(start.x * canvas.width, start.y * canvas.height)
ctx.lineTo(end.x * canvas.width, end.y * canvas.height)
ctx.stroke()
}
for (let i = 0; i < handLandmarks.length; i++) {
const landmark = handLandmarks[i]
const x = landmark.x * canvas.width
const y = landmark.y * canvas.height
ctx.beginPath()
ctx.arc(x, y, i % 4 === 0 ? 6 : 4, 0, 2 * Math.PI)
ctx.fillStyle = i % 4 === 0 ? '#00ff88' : '#ffffff'
ctx.fill()
ctx.strokeStyle = '#000000'
ctx.lineWidth = 1
ctx.stroke()
}
}, [handLandmarks])
useEffect(() => {
if (handLandmarks && cameraEnabled) {
drawHandLandmarks()
}
}, [handLandmarks, cameraEnabled, drawHandLandmarks])
const startCamera = async () => {
try {
const stream = await navigator.mediaDevices.getUserMedia({
video: { facingMode: 'user', width: 640, height: 480 }
})
setCameraStream(stream)
if (videoRef.current) {
videoRef.current.srcObject = stream
}
await loadMediaPipe()
setCameraEnabled(true)
if (handsRef.current && faceMeshRef.current) {
processMediaPipeFrame()
}
} catch (err) {
console.error('Camera access denied:', err)
alert('Camera access denied. Please enable camera permissions.')
}
}
const processMediaPipeFrame = useCallback(() => {
  const video = videoRef.current
  if (!video || !handsRef.current || !faceMeshRef.current) return
  // Only feed frames once the video has data; the loop is stopped via
  // cancelAnimationFrame in stopCamera rather than by reading React state,
  // which would be stale on the first call from startCamera
  if (video.readyState >= 2) {
    handsRef.current.send({ image: video })
    faceMeshRef.current.send({ image: video })
  }
  animationRef.current = requestAnimationFrame(processMediaPipeFrame)
}, [])
const stopCamera = () => {
if (cameraStream) {
cameraStream.getTracks().forEach(track => track.stop())
setCameraStream(null)
}
if (animationRef.current) {
cancelAnimationFrame(animationRef.current)
}
setCameraEnabled(false)
setHandLandmarks(null)
}
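// Send the 21 [x, y, z] landmark triples to the backend for gesture classification;
// on a match, also report the gesture as a behavioral signal to the active session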
const recognizeGesture = async (landmarks) => {
if (!landmarks || landmarks.length < 21) return
try {
const res = await fetch(`${API_URL}/gesture/recognize`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ user_id: 'demo', landmarks })
})
const data = await res.json()
if (data.recognized) {
setRecognizedGesture(data.gesture.name)
await fetch(`${API_URL}/session/update`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
user_id: 'demo',
behavioral_data: {
gesture_signal: data.signal,
gesture_name: data.gesture.name,
gesture_confidence: data.gesture.confidence
}
})
})
setTimeout(() => setRecognizedGesture(null), 2000)
}
} catch (e) {
// Silently handle recognition errors
}
}
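// Collect a training sample every 500ms while the user holds the gesture; the backend
// reports progress until enough samples are gathered, with a local fallback when offline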
const startGestureTraining = (gesture) => {
setTrainingGesture(gesture)
setTrainingProgress(0)
const interval = setInterval(() => {
if (!handLandmarks) return
const landmarksArray = handLandmarks.map(lm => [lm.x, lm.y, lm.z])
fetch(`${API_URL}/gesture/training/sample`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ user_id: 'demo', landmarks: landmarksArray })
}).then(res => res.json()).then(data => {
if (data.status === 'completed') {
clearInterval(interval)
setTrainingGesture(null)
setTrainingProgress(100)
setGestures(prev => prev.map(g =>
g.id === gesture.id ? { ...g, trained: true } : g
))
} else {
const progress = (data.samples_collected / data.samples_needed) * 100
setTrainingProgress(progress)
}
}).catch(() => {
  // Demo-mode fallback: advance progress locally until training completes
  setTrainingProgress(prev => {
    const next = Math.min(prev + 5, 100)
    if (next >= 100) {
      clearInterval(interval)
      setTrainingGesture(null)
      setGestures(gs => gs.map(g =>
        g.id === gesture.id ? { ...g, trained: true } : g
      ))
    }
    return next
  })
})
}, 500)
}
const addCustomGesture = () => {
const name = prompt('Gesture name:')
if (name) {
const description = prompt('Description (what does this gesture mean)?')
const newGesture = {
id: `custom_${Date.now()}`,
name,
description: description || '',
trained: false,
type: 'custom'
}
setGestures(prev => [...prev, newGesture])
}
}
// LLM Flow Functions
const loadRateLimits = async () => {
try {
const res = await fetch(`${API_URL}/llm/rate-limits`)
if (res.ok) {
const data = await res.json()
setRateLimits(data)
}
} catch (e) {
console.log('Rate limits not available')
}
}
const loadGestureActions = async () => {
try {
const res = await fetch(`${API_URL}/llm/gesture-actions`)
if (res.ok) {
const data = await res.json()
setGestureActions(data.actions || [])
}
} catch (e) {
console.log('Gesture actions not available')
}
}
const sendLlmQuery = async () => {
if (!llmQuery.trim()) return
const newResponses = [...llmResponses, { role: 'user', content: llmQuery, timestamp: new Date() }]
setLlmResponses(newResponses)
setLlmLoading(true)
const enabledProviders = getEnabledProviders()
if (llmLauncher) {
llmLauncher.setActiveProviders(enabledProviders)
}
for (const provider of enabledProviders) {
setLlmResponses(prev => [...prev, {
role: provider,
content: `Opening ${provider}... Prompt copied to clipboard!`,
success: true,
launching: true,
timestamp: new Date()
}])
}
await launchInBrowser(llmQuery)
setLlmResponses(prev => prev.map(r => {
if (r.launching) {
return { ...r, launching: false, launched: true }
}
return r
}))
setLlmQuery('')
setLlmLoading(false)
}
const startRlLoop = async () => {
try {
const res = await fetch(`${API_URL}/llm/rl/start`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
context: {
topic: topic,
progress: 50,
confusion_level: confusionLevel * 100
}
})
})
if (res.ok) {
setRlLoopActive(true)
loadRlStatus()
}
} catch (e) {
console.error('Failed to start RL loop')
}
}
const loadRlStatus = async () => {
try {
const res = await fetch(`${API_URL}/llm/rl/status`)
if (res.ok) {
const data = await res.json()
setRlStatus(data)
}
} catch (e) {
console.error('Failed to load RL status')
}
}
const sendRlFeedback = async (quality) => {
try {
await fetch(`${API_URL}/llm/rl/feedback`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ quality })
})
loadRlStatus()
} catch (e) {
console.error('Failed to send feedback')
}
}
const generatePromptFromTemplate = async () => {
try {
const res = await fetch(`${API_URL}/llm/prompt/generate`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
template: selectedTemplate,
context: {
topic: topic,
confusion_point: 'understanding the concept',
learning_goal: 'master this topic'
}
})
})
if (res.ok) {
const data = await res.json()
setLlmQuery(data.prompt)
}
} catch (e) {
console.error('Failed to generate prompt')
}
}
const toggleProvider = (provider) => {
toggleProviderEnabled(provider)
}
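// Derive a UI status from the backend rate-limit snapshot: 'error' when hard-limited,
// 'warning' above 70% of the per-minute quota, otherwise 'ok'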
const getRateLimitStatus = (provider) => {
const status = rateLimits[provider]
if (!status) return 'ok'
if (status.rate_limited) return 'error'
if (status.requests_this_minute / status.requests_per_minute_limit > 0.7) return 'warning'
return 'ok'
}
const getRateLimitIcon = (status) => {
  // Map a rate-limit status to an indicator icon (Check = ok, AlertCircle = warning, X = limited)
  if (status === 'ok') return <Check className="w-4 h-4 text-green-400" />
  if (status === 'warning') return <AlertCircle className="w-4 h-4 text-yellow-400" />
  return <X className="w-4 h-4 text-red-400" />
}
// LLM Settings Functions
const saveLlmSettings = () => {
  // Derive the active list from the toggles, then persist and apply it atomically
  const activeProviders = Object.entries(llmSettings.providers)
    .filter(([_, config]) => config.enabled)
    .map(([name]) => name)
  const updated = { ...llmSettings, activeProviders }
  localStorage.setItem('contextflow_llm_settings', JSON.stringify(updated))
  setLlmSettings(updated)
  if (llmLauncher) {
    llmLauncher.setActiveProviders(activeProviders)
  }
  return activeProviders
}
const toggleProviderEnabled = (providerName) => {
setLlmSettings(prev => {
const newSettings = {
...prev,
providers: {
...prev.providers,
[providerName]: {
...prev.providers[providerName],
enabled: !prev.providers[providerName].enabled
}
}
}
const activeProviders = Object.entries(newSettings.providers)
.filter(([_, config]) => config.enabled)
.map(([name]) => name)
newSettings.activeProviders = activeProviders
if (llmLauncher) {
llmLauncher.setActiveProviders(activeProviders)
}
return newSettings
})
}
const setDefaultProvider = (providerName) => {
  setLlmSettings(prev => ({
    ...prev,
    defaultProvider: providerName,
    // Rebuild every provider entry immutably so React state is never mutated in place
    providers: Object.fromEntries(
      Object.entries(prev.providers).map(([key, config]) => [
        key,
        { ...config, default: key === providerName }
      ])
    )
  }))
}
const getEnabledProviders = () => {
return Object.entries(llmSettings.providers)
.filter(([_, config]) => config.enabled)
.map(([name]) => name)
}
const getDefaultProvider = () => {
  return llmSettings.defaultProvider || 'chatgpt'
}
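// Open every enabled provider's chat page in a browser tab with the prompt on the clipboard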
const launchInBrowser = async (prompt) => {
  // setState is async, so keep the lazily created launcher in a local variable
  let launcher = llmLauncher
  if (!launcher) {
    const { BrowserLLMLauncher } = await import('./BrowserLLMLauncher')
    launcher = new BrowserLLMLauncher()
    launcher.setActiveProviders(getEnabledProviders())
    setLlmLauncher(launcher)
  }
  const results = await launcher.launchAll(prompt, {
    context: { topic }
  })
  setBrowserLaunchResults(results)
  return results
}
const launchProvider = async (provider, prompt) => {
  let launcher = llmLauncher
  if (!launcher) {
    const { BrowserLLMLauncher } = await import('./BrowserLLMLauncher')
    launcher = new BrowserLLMLauncher()
    launcher.setActiveProviders(getEnabledProviders())
    setLlmLauncher(launcher)
  }
  return launcher.launchProvider(provider, prompt)
}
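// Demo session: every 2s, drift the confusion level toward a random value (capped at 1)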
const startSession = () => {
  setSessionActive(true)
  sessionIntervalRef.current = setInterval(() => {
    setConfusionLevel(prev => Math.min(Math.random() * 0.3 + prev * 0.7, 1))
  }, 2000)
}
const endSession = () => {
  setSessionActive(false)
  if (sessionIntervalRef.current) {
    clearInterval(sessionIntervalRef.current)
    sessionIntervalRef.current = null
  }
}
const captureDoubt = async (predictedDoubt = null) => {
const doubt = predictedDoubt || {
doubt: `Self-doubt at ${new Date().toLocaleTimeString()}`,
confidence: confusionLevel,
explanation: 'Automatically captured based on confusion signals'
}
setStats(prev => ({
...prev,
totalDoubts: prev.totalDoubts + 1,
xp: prev.xp + 10
}))
setGamification(prev => ({
...prev,
xp: prev.xp + 10,
fishXP: prev.fishXP + 5
}))
}
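// Review quality is rated 1-5 (again → perfect); higher ratings earn more XP,
// and ratings of 4+ count the card as mastered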
const completeReview = (cardId, quality) => {
const xpMap = { 1: 5, 2: 8, 3: 10, 4: 15, 5: 25 }
const xp = xpMap[quality] || 5
setStats(prev => ({
...prev,
xp: prev.xp + xp,
mastered: quality >= 4 ? prev.mastered + 1 : prev.mastered
}))
setGamification(prev => ({
...prev,
xp: prev.xp + xp,
fishXP: prev.fishXP + Math.floor(xp / 2)
}))
setDueReviews(prev => prev.filter(c => c.card_id !== cardId))
}
const fishEmojis = ['🥚', '🐣', '🐟', '🐠', '🐡', '🐋']
const fishNames = ['Egg', 'Fry', 'Juvenile', 'Adult', 'Elder', 'Legendary']
return (
  <div className="min-h-screen">
    <header className="flex items-center justify-between p-4">
      <div>
        <h1 className="text-xl font-bold">ContextFlow Research</h1>
        <p className="text-sm text-gray-400">AI Learning Intelligence Engine</p>
      </div>
      <div className="flex items-center gap-4 text-sm">
        <span className="flex items-center gap-1"><Zap className="w-4 h-4" /> {gamification.xp} XP</span>
        <span>🔥 {gamification.streak} day streak</span>
      </div>
    </header>
    <nav className="flex flex-wrap gap-2 px-4">
      {tabs.map(tab => (
        <button
          key={tab.id}
          onClick={() => setActiveTab(tab.id)}
          className={`flex items-center gap-2 px-4 py-2 rounded-lg transition-all ${
            activeTab === tab.id
              ? 'bg-primary text-white'
              : 'text-gray-400 hover:text-white hover:bg-dark3'
          }`}
        >
          <tab.icon className="w-4 h-4" />
          {tab.label}
        </button>
      ))}
    </nav>
    <main className="p-4">
{activeTab === 'learn' && (
  <div className="grid gap-4 md:grid-cols-2">
    <div className="card">
      <div className="flex items-center justify-between">
        <h2 className="font-semibold">Learning Session</h2>
        <button
          onClick={sessionActive ? endSession : startSession}
          className={`flex items-center gap-2 px-4 py-2 rounded-lg font-medium transition-all ${
            sessionActive
              ? 'bg-red-500 hover:bg-red-600'
              : 'btn-primary'
          }`}
        >
          {sessionActive ? (
            <>
              <Pause className="w-4 h-4" />
              End Session
            </>
          ) : (
            <>
              <Play className="w-4 h-4" />
              Start Learning
            </>
          )}
        </button>
      </div>
      <label className="block mt-4 text-sm text-gray-400">Topic</label>
      <input
        value={topic}
        onChange={e => setTopic(e.target.value)}
        className="input"
        placeholder="Enter topic to study..."
      />
      <div className="mt-4 flex items-center justify-between">
        <span className="text-sm text-gray-400">Confusion Level</span>
        <span className={`font-bold ${
          confusionLevel > 0.7 ? 'text-red-400' :
          confusionLevel > 0.4 ? 'text-yellow-400' : 'text-green-400'
        }`}>
          {Math.round(confusionLevel * 100)}%
        </span>
      </div>
      {recognizedGesture && (
        <div className="mt-3 p-3 rounded-lg bg-primary/20">
          <p className="font-medium">Gesture Detected: {recognizedGesture}</p>
          <p className="text-sm text-gray-400">Your hand gesture was recognized!</p>
        </div>
      )}
      {confusionLevel > 0.5 && (
        <button
          onClick={() => captureDoubt()}
          className="mt-3 w-full btn-primary text-sm"
        >
          💡 Capture Doubt ({Math.round(confusionLevel * 100)}% confidence)
        </button>
      )}
    </div>
    <div className="card">
      <h2 className="font-semibold">Predicted Doubts</h2>
      {predictions.map((pred, i) => (
        <div
          key={i}
          className={`mt-2 p-3 rounded-lg border-l-4 cursor-pointer ${
            pred.confidence > 0.85 ? 'border-l-primary' :
            pred.confidence > 0.7 ? 'border-l-yellow-500' : 'border-l-gray-500'
          }`}
          onClick={() => captureDoubt(pred)}
        >
          <div className="flex items-center justify-between">
            <span>{pred.doubt}</span>
            <span className="text-sm">{Math.round(pred.confidence * 100)}%</span>
          </div>
          <p className="text-sm text-gray-400">{pred.explanation}</p>
        </div>
      ))}
    </div>
    <div className="card">
      <h2 className="font-semibold">Your AI Companion</h2>
      <div className="mt-2 flex gap-1">
        {[...Array(5)].map((_, i) => (
          <Sparkle key={i} className="w-4 h-4 text-yellow-400" />
        ))}
      </div>
      <div className="mt-2 text-4xl">{fishEmojis[gamification.fishStage]}</div>
      <p className="font-medium">{fishNames[gamification.fishStage]}</p>
      <p className="text-sm text-gray-400">{gamification.fishXP} XP collected</p>
    </div>
    <div className="card">
      <h2 className="font-semibold">Due Reviews ({dueReviews.length})</h2>
      {dueReviews.slice(0, 3).map(review => (
        <div key={review.card_id} className="mt-2 p-3 rounded-lg bg-dark3">
          <p>{review.front}</p>
          <div className="mt-2 flex gap-1">
            {[1, 2, 3, 4, 5].map(q => (
              <button
                key={q}
                onClick={() => completeReview(review.card_id, q)}
                className={`flex-1 py-1 rounded text-xs font-medium transition-all ${
                  q === 1 ? 'bg-red-500 hover:bg-red-600' :
                  q === 2 ? 'bg-orange-500 hover:bg-orange-600' :
                  q === 3 ? 'bg-yellow-500 hover:bg-yellow-600' :
                  q === 4 ? 'bg-green-500 hover:bg-green-600' :
                  'bg-blue-500 hover:bg-blue-600'
                }`}
              >
                {q}
              </button>
            ))}
          </div>
        </div>
      ))}
    </div>
  </div>
)}
{activeTab === 'llmflow' && (
  <div className="grid gap-4 lg:grid-cols-3">
    <div className="lg:col-span-2 space-y-4">
      {/* Header */}
      <div className="card">
        <h2 className="font-semibold">Gesture AI Launcher</h2>
        <p className="text-sm text-gray-400">Use hand gestures to open AI chats in the browser. No API keys needed!</p>
        <div className="mt-3 flex gap-2">
          {getEnabledProviders().length > 0 && (
            <button
              onClick={() => launchInBrowser('Explain ' + topic + ' in simple terms')}
              className="btn-primary flex items-center gap-2"
            >
              <Globe className="w-4 h-4" />
              Test Open All
            </button>
          )}
          <button onClick={startRlLoop} className="flex items-center gap-2 px-4 py-2 rounded-lg bg-dark3">
            <Zap className="w-4 h-4" />
            {rlLoopActive ? 'RL Active' : 'Start RL'}
          </button>
        </div>
        {/* Active Providers */}
        <div className="mt-3 flex flex-wrap gap-2 text-sm">
          {getEnabledProviders().map(p => (
            <span key={p} className="px-2 py-1 rounded bg-dark3">
              {llmSettings.providers[p].icon} {llmSettings.providers[p].name}
            </span>
          ))}
        </div>
      </div>
      {/* How it works */}
      <div className="card text-sm">
        <p><strong>How it works:</strong> When you use gestures, the system:</p>
        <ol className="mt-1 list-decimal list-inside text-gray-400">
          <li>Builds a smart prompt based on your learning context</li>
          <li>Opens the AI chat in a new browser tab</li>
          <li>Copies the prompt to your clipboard</li>
          <li>Just paste (Ctrl+V) and ask!</li>
        </ol>
      </div>
      {/* Prompt Templates */}
      <div className="card">
        <div className="flex items-center justify-between">
          <span className="text-sm text-gray-400">Quick Templates:</span>
          <button onClick={generatePromptFromTemplate} className="btn-primary text-sm">
            Generate
          </button>
        </div>
        <div className="mt-2 flex flex-wrap gap-2">
          {promptTemplates.map(template => (
            <button
              key={template.id}
              onClick={() => setSelectedTemplate(template.id)}
              className={`prompt-template-btn ${
                selectedTemplate === template.id
                  ? 'prompt-template-btn-active'
                  : 'prompt-template-btn-inactive'
              }`}
            >
              <template.icon className="w-4 h-4" />
              {template.name}
            </button>
          ))}
        </div>
      </div>
      {/* Chat Interface */}
      <div className="card">
        <div className="flex items-center justify-between">
          <h3 className="font-semibold">LLM Responses</h3>
          {llmLoading && (
            <Loader2 className="w-4 h-4 animate-spin" />
          )}
        </div>
        {/* Messages */}
        <div className="mt-3 space-y-2 max-h-80 overflow-y-auto">
          {llmResponses.length === 0 && (
            <div className="text-center text-sm text-gray-400">
              <p>Type a message or use gestures to open AI chats</p>
              <p>Your question will be copied and AI windows will open</p>
            </div>
          )}
          {llmResponses.map((msg, i) => (
            <div key={i} className="p-3 rounded-lg bg-dark3">
              {msg.role !== 'user' && msg.role !== 'error' && (
                <div className="flex items-center gap-2 text-sm">
                  <span>{llmSettings.providers[msg.role]?.icon || '🤖'}</span>
                  <span className="font-medium">{llmSettings.providers[msg.role]?.name || msg.role}</span>
                  {msg.launching && (
                    <span className="text-yellow-400">Opening...</span>
                  )}
                  {msg.launched && (
                    <span className="text-green-400">Opened!</span>
                  )}
                </div>
              )}
              <p>{msg.content}</p>
              {msg.role === 'user' && (
                <p className="text-xs text-gray-500">{new Date(msg.timestamp).toLocaleTimeString()}</p>
              )}
            </div>
          ))}
        </div>
        {/* Input */}
        <div className="mt-3 flex gap-2">
          <input
            value={llmQuery}
            onChange={e => setLlmQuery(e.target.value)}
            onKeyDown={e => e.key === 'Enter' && sendLlmQuery()}
            placeholder="Ask anything or use gestures..."
            className="input flex-1"
            disabled={llmLoading}
          />
          <button onClick={sendLlmQuery} className="btn-primary" disabled={llmLoading}>
            {llmLoading ? (
              <Loader2 className="w-4 h-4 animate-spin" />
            ) : (
              <Send className="w-4 h-4" />
            )}
          </button>
        </div>
      </div>
    </div>
    {/* Sidebar */}
    <div className="space-y-4">
      {/* RL Loop Status */}
      <div className="card">
        <h3 className="font-semibold">RL Learning Loop</h3>
        {rlStatus ? (
          <div className="mt-2 text-sm space-y-1">
            <div className="flex justify-between">
              <span className="text-gray-400">Interactions</span>
              <span>{rlStatus.total_interactions}</span>
            </div>
            <div className="flex justify-between">
              <span className="text-gray-400">Feedback</span>
              <span>{rlStatus.total_feedback}</span>
            </div>
            <div className="flex justify-between">
              <span className="text-gray-400">Avg Reward</span>
              <span className={rlStatus.average_reward > 0 ? 'text-green-400' : 'text-red-400'}>
                {rlStatus.average_reward.toFixed(2)}
              </span>
            </div>
            {rlStatus.is_active && (
              <p className="text-green-400">Loop Active</p>
            )}
            {rlStatus.top_preferences && rlStatus.top_preferences.length > 0 && (
              <div>
                <p className="text-gray-400">Learned Preferences:</p>
                {rlStatus.top_preferences.slice(0, 5).map(([word, weight], i) => (
                  <span key={i} className="mr-2">{word} ({weight.toFixed(2)})</span>
                ))}
              </div>
            )}
          </div>
        ) : (
          <p className="mt-2 text-sm text-gray-400">Start RL loop to track learning</p>
        )}
      </div>
      {/* Quick Feedback Buttons */}
      {rlLoopActive && (
        <div className="card">
          <p className="text-sm text-gray-400">Quick Feedback:</p>
          <div className="mt-2 flex gap-1">
            {[1, 2, 3, 4, 5].map(q => (
              <button
                key={q}
                onClick={() => sendRlFeedback(q)}
                className={`flex-1 py-2 rounded text-xs font-medium transition-all ${
                  q === 1 ? 'bg-red-500/50 hover:bg-red-500' :
                  q === 2 ? 'bg-orange-500/50 hover:bg-orange-500' :
                  q === 3 ? 'bg-yellow-500/50 hover:bg-yellow-500' :
                  q === 4 ? 'bg-green-500/50 hover:bg-green-500' :
                  'bg-blue-500/50 hover:bg-blue-500'
                } text-white`}
              >
                {q}
              </button>
            ))}
          </div>
        </div>
      )}
      {/* Gesture Actions Guide */}
      <div className="card text-sm">
        <h3 className="font-semibold">Gesture Actions</h3>
        <div className="mt-2 space-y-1">
          <div className="flex justify-between">
            <span>2 fingers → (Swipe Right)</span>
            <span className="text-gray-400">Open ALL AIs</span>
          </div>
          <div className="flex justify-between">
            <span>2 fingers ← (Swipe Left)</span>
            <span className="text-gray-400">Open Default</span>
          </div>
          <div className="flex justify-between">
            <span>Pinch</span>
            <span className="text-gray-400">Real Examples</span>
          </div>
        </div>
        <p className="mt-2 text-gray-400">Each gesture builds a specific prompt type and opens the configured AI services.</p>
      </div>
      {/* Launch Status */}
      <div className="card text-sm">
        <h3 className="font-semibold">Browser Sessions</h3>
        {browserLaunchResults.length > 0 ? (
          browserLaunchResults.map((result, i) => (
            <div key={i} className="mt-1 flex items-center gap-2">
              {result.success ? (
                <>
                  <Check className="w-4 h-4 text-green-400" />
                  <span>{result.providerName} opened</span>
                </>
              ) : (
                <>
                  <X className="w-4 h-4 text-red-400" />
                  <span>{result.error}</span>
                </>
              )}
            </div>
          ))
        ) : (
          <p className="mt-1 text-gray-400">No launches yet. Try typing a message above!</p>
        )}
        {browserLaunchResults.length > 0 && (
          <p className="mt-2 text-gray-400">Your prompt was copied! Paste it in the AI chat that opened.</p>
        )}
      </div>
    </div>
  </div>
)}
{activeTab === 'gestures' && (
  <div className="space-y-4">
    <div className="card">
      <h2 className="font-semibold">Hand Gesture Training with MediaPipe</h2>
      <p className="text-sm text-gray-400">Train custom hand gestures that the system will recognize during learning sessions.</p>
      <p className="text-sm text-gray-400">Your face is automatically blurred using MediaPipe Face Mesh for privacy protection.</p>
      <div className="relative mt-3">
        <video ref={videoRef} autoPlay playsInline muted className="w-full rounded-lg" />
        <canvas ref={canvasRef} className="absolute inset-0 w-full h-full" />
        <canvas ref={canvasOverlayRef} className="absolute inset-0 w-full h-full" />
        {!cameraEnabled && (
          <div className="absolute inset-0 flex flex-col items-center justify-center">
            <CameraOff className="w-8 h-8 text-gray-500" />
            <p>Camera is off</p>
            <p className="text-sm text-gray-400">Click Start to enable</p>
          </div>
        )}
        {handLandmarks && (
          <span className="absolute top-2 right-2 text-green-400">Hand Detected</span>
        )}
      </div>
      <button
        onClick={cameraEnabled ? stopCamera : startCamera}
        className="mt-3 btn-primary flex items-center gap-2"
      >
        {cameraEnabled ? <CameraOff className="w-4 h-4" /> : <Camera className="w-4 h-4" />}
        {cameraEnabled ? 'Stop Camera' : 'Start Camera'}
      </button>
      <p className="mt-2 text-sm text-gray-400">🔒 Privacy Protected: Your face is automatically blurred. Only hand gestures are analyzed.</p>
    </div>
    <div className="grid gap-4 md:grid-cols-2">
      {gestures.map(gesture => (
        <div key={gesture.id} className="card">
          <div className="flex items-center gap-2">
            {gesture.trained ? <Check className="w-4 h-4 text-green-400" /> : <X className="w-4 h-4 text-gray-500" />}
            <span className="font-medium">{gesture.name}</span>
          </div>
          <p className="text-sm text-gray-400">{gesture.description}</p>
          <p className="text-xs text-gray-500">{gesture.trained ? 'Trained' : 'Not Trained'}</p>
          {trainingGesture?.id === gesture.id ? (
            <div className="mt-2 text-sm">
              <p>Training... (show gesture to camera)</p>
              <p>{Math.round(trainingProgress)}%</p>
            </div>
          ) : (
            <button
              onClick={() => startGestureTraining(gesture)}
              disabled={!cameraEnabled || !handLandmarks}
              className={`mt-2 w-full py-2 rounded-lg text-sm font-medium transition-all ${
                cameraEnabled && handLandmarks
                  ? 'bg-primary hover:bg-primary/80 text-white'
                  : 'bg-dark3 text-gray-500 cursor-not-allowed'
              }`}
            >
              {gesture.trained ? 'Retrain' : 'Train'} Gesture
            </button>
          )}
        </div>
      ))}
      <button onClick={addCustomGesture} className="card flex items-center justify-center gap-2 text-gray-400">
        <Plus className="w-4 h-4" />
        Add Custom Gesture
      </button>
    </div>
  </div>
)}
{activeTab === 'predict' && (
  <div className="space-y-4">
    <div className="card">
      <h2 className="font-semibold">RL Doubt Prediction</h2>
      <p className="text-sm text-gray-400">Our reinforcement learning agent predicts what doubts you'll have before they occur.</p>
      <div className="mt-3 grid gap-4 md:grid-cols-2">
        <div>
          <h3 className="font-medium">State Encoding</h3>
          <p className="text-sm text-gray-400">Topic, progress, confusion signals, hand gestures</p>
        </div>
        <div>
          <h3 className="font-medium">Q-Learning Policy</h3>
          <p className="text-sm text-gray-400">Deep Q-Network with experience replay</p>
        </div>
      </div>
    </div>
    <div className="card">
      <h3 className="font-semibold">Current Predictions</h3>
      {predictions.map((pred, i) => (
        <div key={i} className="mt-2 flex items-center justify-between">
          <span>{pred.doubt}</span>
          <span className={`font-medium ${
            pred.confidence > 0.85 ? 'text-red-400' : pred.confidence > 0.7 ? 'text-yellow-400' : 'text-green-400'
          }`}>
            {Math.round(pred.confidence * 100)}%
          </span>
        </div>
      ))}
    </div>
  </div>
)}
{activeTab === 'behavior' && (
  <div className="card">
    <h2 className="font-semibold">Behavioral Tracking</h2>
    <div className="mt-3 space-y-2 text-sm">
      <div className="flex justify-between">
        <span>Mouse Tracking</span>
        <span className="text-green-400">Active</span>
      </div>
      <div className="flex justify-between">
        <span>Scroll Tracking</span>
        <span className="text-green-400">Active</span>
      </div>
      <div className="flex justify-between">
        <span>Hand Gestures</span>
        <span className={cameraEnabled ? 'text-green-400' : 'text-gray-500'}>{cameraEnabled ? 'Active' : 'Inactive'}</span>
      </div>
    </div>
  </div>
)}
{activeTab === 'peer' && (
  <div className="space-y-4">
    <div className="card">
      <h2 className="font-semibold">Peer Network Insights</h2>
      {peerInsights.map((insight, i) => (
        <div key={i} className="mt-2 p-3 rounded-lg bg-dark3">
          <div className="flex items-center justify-between text-sm">
            <span className="capitalize">{insight.type.replace('_', ' ')}</span>
            <span className="text-gray-400">{insight.peer_count.toLocaleString()} peers</span>
          </div>
          <p>{insight.content}</p>
        </div>
      ))}
    </div>
    <div className="card">
      <h3 className="font-semibold">Anonymized Peer Doubts</h3>
      {[
        { content: 'How do decorators work in Python?', upvotes: 156, resolved: true },
        { content: 'What is the bias-variance tradeoff?', upvotes: 142, resolved: true },
        { content: 'How does batch normalization help?', upvotes: 98, resolved: false }
      ].map((doubt, i) => (
        <div key={i} className="mt-2 flex items-center justify-between text-sm">
          <span>{doubt.content}</span>
          <span className="flex items-center gap-2 text-gray-400">
            {doubt.upvotes} upvotes
            {doubt.resolved && <span className="text-green-400">Resolved</span>}
          </span>
        </div>
      ))}
    </div>
  </div>
)}
{activeTab === 'stats' && (
  <div className="grid gap-4 md:grid-cols-3">
    <div className="card text-center">
      <p className="text-3xl font-bold">{stats.totalDoubts}</p>
      <p className="text-sm text-gray-400">Doubts Captured</p>
    </div>
    <div className="card text-center">
      <p className="text-3xl font-bold">{stats.mastered}</p>
      <p className="text-sm text-gray-400">Concepts Mastered</p>
    </div>
    <div className="card text-center">
      <p className="text-3xl font-bold">{stats.streak}</p>
      <p className="text-sm text-gray-400">Day Streak</p>
    </div>
  </div>
)}
{activeTab === 'gamify' && (
  <div className="grid gap-4 md:grid-cols-2">
    <div className="card text-center">
      <h2 className="font-semibold">Your Progress</h2>
      <p className="text-4xl font-bold">{gamification.level}</p>
      <p className="text-gray-400">{gamification.title}</p>
      <p className="text-sm text-gray-400">{gamification.xp} / {gamification.xp + 100} XP</p>
    </div>
    <div className="card text-center">
      <h2 className="font-semibold">Fish Evolution</h2>
      <div className="mt-2 flex justify-center gap-2 text-2xl">
        {fishEmojis.map((emoji, i) => (
          <span key={i} className={i === gamification.fishStage ? '' : 'opacity-40'}>{emoji}</span>
        ))}
      </div>
    </div>
  </div>
)}
{activeTab === 'settings' && (
  <div className="space-y-4">
    <div className="card">
      <div className="flex items-center justify-between">
        <div>
          <h2 className="font-semibold">Browser AI Settings</h2>
          <p className="text-sm text-gray-400">Toggle which AI services to open in the browser when using gestures</p>
        </div>
        <button
          onClick={() => {
            saveLlmSettings()
          }}
          className="btn-primary flex items-center gap-2"
        >
          <Save className="w-4 h-4" />
          Save
        </button>
      </div>
      <div className="mt-3 p-3 rounded-lg bg-dark3 text-sm">
        <p className="font-medium">No API Keys Needed!</p>
        <p className="text-gray-400">This opens AI chat interfaces directly in your browser using your existing login sessions.</p>
        <p className="text-gray-400">Make sure you're logged into the services you want to use.</p>
      </div>
      {/* Provider Grid */}
      <div className="mt-3 grid gap-2 md:grid-cols-4">
        {Object.entries(llmSettings.providers).map(([providerName, config]) => (
          <button
            key={providerName}
            onClick={() => toggleProviderEnabled(providerName)}
            className={`p-3 rounded-lg text-left transition-all ${config.enabled ? 'bg-primary/20' : 'bg-dark3'}`}
          >
            <span className="text-xl">{config.icon}</span>
            <p className="font-medium">{config.name}</p>
            <p className="text-xs text-gray-400">{providerName === 'ollama' ? 'localhost:11434' : providerName}</p>
            {config.enabled && (
              <Check className="w-4 h-4 text-green-400" />
            )}
          </button>
        ))}
      </div>
    </div>
    {/* Instructions */}
    <div className="card text-sm">
      <h3 className="font-semibold">How It Works</h3>
      <ol className="mt-2 space-y-1 text-gray-400">
        <li><span className="font-medium text-white">1</span> Enable the AI services you want to use above</li>
        <li><span className="font-medium text-white">2</span> Make sure you're logged into those services in your browser</li>
        <li><span className="font-medium text-white">3</span> Use hand gestures (2-finger swipe, pinch, etc.) during learning</li>
        <li><span className="font-medium text-white">4</span> The system opens the AI chat, copies your question, and you just paste!</li>
      </ol>
      <p className="mt-2">Tip: Allow popups for this site so the AI windows open properly.</p>
    </div>
    {/* Active Summary */}
    <div className="card text-sm">
      <h3 className="font-semibold">Active Services</h3>
      {getEnabledProviders().length === 0 ? (
        <p className="mt-1 text-gray-400">No services enabled. Click above to enable.</p>
      ) : (
        getEnabledProviders().map(provider => (
          <div key={provider} className="mt-1 flex items-center gap-2">
            <span>{llmSettings.providers[provider].icon}</span>
            <span>{llmSettings.providers[provider].name}</span>
            {provider === getDefaultProvider() && (
              <span className="text-xs text-primary">Default</span>
            )}
          </div>
        ))
      )}
      {getEnabledProviders().length > 0 && (
        <p className="mt-2 text-gray-400">2-finger swipe opens ALL active services. Single actions open the default provider.</p>
      )}
    </div>
  </div>
)}
    </main>
  </div>
)
}
export default App