<!--
BCI-FPS / generator/webXOS_BCI-FPS_alphav1.html
webxos's picture
Update generator/webXOS_BCI-FPS_alphav1.html
dedf6e6 verified
-->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>BCI-FPS: by webXOS 2025</title>
<style>
/* Previous styles remain the same, adding only new styles */
/* === VISUAL STIMULI === */
/* Circular flicker target used for c-VEP stimulation; JS toggles the
   .vstim-active class and opacity at a per-target frequency. */
.vstim-target {
  position: absolute;
  width: 100px;
  height: 100px;
  border: 3px solid #0ff;
  border-radius: 50%;
  pointer-events: none; /* never intercept aiming clicks */
  z-index: 6;
  box-shadow: 0 0 50px #0ff;
  opacity: 0; /* hidden until a session activates it */
}
.vstim-active {
  animation: vstimPulse 0.5s infinite alternate;
}
@keyframes vstimPulse {
  from { opacity: 0.3; }
  to { opacity: 1; }
}
/* === HANDWRITING TRAINING UI === */
/* Centered tracing canvas; shown/hidden by show/hideHandwritingPrompt(). */
#handwritingCanvas {
  position: fixed;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  background: rgba(0, 10, 0, 0.9);
  border: 2px solid #0f0;
  border-radius: 10px;
  z-index: 100;
  display: none; /* revealed per handwriting trial */
}
/* "Trace the letter: X" banner shown above the canvas. */
.handwriting-prompt {
  position: fixed;
  top: 30%;
  left: 50%;
  transform: translateX(-50%);
  color: #0f0;
  font-size: 24px;
  text-align: center;
  z-index: 101;
  background: rgba(0, 20, 0, 0.9);
  padding: 20px;
  border: 2px solid #0f0;
  border-radius: 10px;
  display: none;
}
</style>
</head>
<body>
<!-- NEURAL NETWORK BACKGROUND -->
<div class="neural-background" id="neuralBackground"></div>
<!-- MAIN MENU -->
<div id="mainMenu">
<div class="menu-container">
<h1 class="bci-title">BCI-FPS</h1>
<p class="bci-subtitle">Neuralink Brain-Computer Interface Training Platform</p>
<div class="research-mission">
<div class="mission-title">RESEARCH MISSION</div>
<p class="mission-text">
*UNDER DEVELOPMENT* by webXOS 2025 // webxos.netlify.app. Record FPS Game Data for Hugging Face.
This platform generates high-bandwidth neural training data for frontier BCI research.
Through FPS gameplay, we capture simultaneous intent decoding, calibration-free interface training,
and task-optimized neural models. All data supports disability research and Neuralink development.
</p>
</div>
<div class="menu-buttons">
<button class="bci-btn" onclick="startBCITraining('motor_imagery')">
<span class="btn-icon">🧠</span> MOTOR IMAGERY TRAINING
</button>
<button class="bci-btn" onclick="startBCITraining('simultaneous_intent')">
<span class="btn-icon">🎯</span> SIMULTANEOUS INTENT DECODING
</button>
<button class="bci-btn" onclick="startBCITraining('visual_evoked')">
<span class="btn-icon">👁️</span> VISUAL EVOKED POTENTIALS
</button>
<button class="bci-btn" onclick="startBCITraining('handwriting_intent')">
<span class="btn-icon">✍️</span> HANDWRITING INTENT
</button>
<button class="bci-btn" onclick="startBCITraining('full_spectrum')">
<span class="btn-icon"></span> FULL SPECTRUM TRAINING
</button>
</div>
<div class="research-mission" style="margin-top: 30px;">
<div class="mission-title">EXPORT OPTIONS</div>
<div style="display: flex; gap: 15px; justify-content: center; margin-top: 15px;">
<button class="bci-btn" onclick="exportDataset()" style="width: 200px; padding: 15px 30px;">
<span class="btn-icon">📊</span> EXPORT DATASET
</button>
<button class="bci-btn" onclick="showDataPreview()" style="width: 200px; padding: 15px 30px;">
<span class="btn-icon">👁️</span> PREVIEW DATA
</button>
</div>
</div>
</div>
</div>
<!-- GAME CONTAINER -->
<div id="gameContainer"></div>
<!-- UI OVERLAY -->
<div id="uiOverlay" style="display: none;">
<!-- NEURAL ACTIVITY PANEL -->
<div id="neuralPanel" class="hud-panel">
<div class="neural-header">NEURAL ACTIVITY</div>
<div class="neural-grid" id="neuralChannels">
<!-- Neural channels will be populated dynamically -->
</div>
</div>
<!-- BCI INTENT PANEL -->
<div id="intentPanel" class="hud-panel">
<div class="neural-header">INTENT DECODING</div>
<div class="intent-grid" id="intentGrid">
<!-- Intent items will be populated dynamically -->
</div>
</div>
<!-- PERFORMANCE PANEL -->
<div id="performancePanel" class="hud-panel">
<div class="neural-header">PERFORMANCE METRICS</div>
<div class="performance-grid">
<div class="metric-item">
<div class="metric-label">Bandwidth</div>
<div class="metric-value" id="bandwidthValue">60 Hz</div>
</div>
<div class="metric-item">
<div class="metric-label">Accuracy</div>
<div class="metric-value" id="accuracyValue">0%</div>
</div>
<div class="metric-item">
<div class="metric-label">Intent Latency</div>
<div class="metric-value" id="latencyValue">0 ms</div>
</div>
<div class="metric-item">
<div class="metric-label">Simultaneous Intents</div>
<div class="metric-value" id="intentsValue">0</div>
</div>
</div>
</div>
<!-- DATA STREAM PANEL -->
<div id="dataStreamPanel" class="hud-panel">
<div class="neural-header">DATA STREAM</div>
<div class="data-stream">
<div class="stream-line" id="dataStream"></div>
</div>
</div>
</div>
<!-- CROSSHAIR -->
<div id="crosshair" style="display: none;">
<div class="crosshair-dot"></div>
<div class="crosshair-line horizontal left"></div>
<div class="crosshair-line horizontal right"></div>
<div class="crosshair-line vertical top"></div>
<div class="crosshair-line vertical bottom"></div>
</div>
<!-- TASK INDICATOR -->
<div id="taskIndicator">
<div class="task-title" id="taskTitle">MOTOR IMAGERY TRAINING</div>
<div class="task-description" id="taskDescription">
Imagine moving your cursor to the target. This trains motor cortex decoding.
</div>
<div class="task-progress">
<div class="task-progress-bar" id="taskProgress"></div>
</div>
<div style="color: #0a0; font-size: 14px;" id="taskStatus">Starting...</div>
</div>
<!-- EXPERIMENT COMPLETE MODAL -->
<div id="experimentComplete">
<div class="experiment-content">
<div class="experiment-title">TRAINING SESSION COMPLETE</div>
<p style="color: #0a0; margin: 20px 0; line-height: 1.6;">
High-bandwidth neural training data has been successfully recorded.<br>
This dataset can be used for Neuralink research and disability support development.
</p>
<div class="experiment-results" id="experimentResults">
<!-- Results populated dynamically -->
</div>
<div style="margin: 30px 0;">
<button class="bci-btn" onclick="exportDataset()" style="width: 250px; margin: 10px;">
<span class="btn-icon">📊</span> EXPORT TO HUGGING FACE
</button>
<button class="bci-btn" onclick="restartTraining()" style="width: 250px; margin: 10px;">
<span class="btn-icon">🔄</span> RESTART TRAINING
</button>
<button class="bci-btn" onclick="returnToMenu()" style="width: 250px; margin: 10px;">
<span class="btn-icon">🏠</span> RETURN TO MENU
</button>
</div>
</div>
</div>
<!-- BCI CONTROL PANEL -->
<div id="bciControlPanel" style="display: none;">
<button class="bci-control-btn" onclick="pauseTraining()">⏸ PAUSE</button>
<button class="bci-control-btn" onclick="skipTask()">⏭ SKIP</button>
<button class="bci-control-btn" onclick="endSession()">⏹ END</button>
<button class="bci-control-btn" onclick="toggleVisualStimuli()">💡 STIMULI</button>
</div>
<!-- HANDWRITING CANVAS -->
<canvas id="handwritingCanvas" width="800" height="600"></canvas>
<div class="handwriting-prompt" id="handwritingPrompt"></div>
<!-- Three.js & JSZip Libraries -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.10.1/jszip.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/FileSaver.js/2.0.5/FileSaver.min.js"></script>
<script>
// ========== GLOBAL CONFIGURATION ==========
// Central tuning constants for the simulated BCI session. Nothing here is
// derived at runtime; every value is read directly by the code below.
const CONFIG = {
  // --- BCI research parameters ---
  SAMPLING_RATE: 1000, // Hz - intended neural sampling rate (see startDataCollection)
  BANDWIDTH: 60, // FPS for visual rendering
  INTENT_DECODING_WINDOW: 100, // ms window for intent analysis
  // --- Game parameters ---
  VISUAL_STIMULI_FREQUENCIES: [5, 10, 15, 20, 25], // Hz flash rates for c-VEP stimuli
  MOTOR_IMAGERY_TRIALS: 50, // trials per motor-imagery session
  SIMULTANEOUS_INTENT_TASKS: 20, // tasks per simultaneous-intent session
  HANDWRITING_SAMPLES: 10, // letters traced per handwriting session
  // --- Data collection ---
  MAX_SAMPLES: 1000000, // hard cap on recorded samples
  COMPRESSION_ENABLED: true,
  EXPORT_FORMAT: 'arrow', // 'arrow', 'jsonl', 'parquet'
  // --- Neural simulation ---
  NEURAL_CHANNELS: 32, // simulated channel count
  NOISE_LEVEL: 0.1, // uniform noise amplitude added per sample
  SIGNAL_STRENGTH: 0.8 // base sine-carrier amplitude per channel
};
// ========== GLOBAL STATE ==========
// three.js scene-graph handles (created in initThreeJS).
let scene, camera, renderer;
// Player entity and control map. NOTE(review): neither appears to be
// written in this portion of the file — confirm usage further down.
let player, controls = {};
// Target meshes created by createEnvironment().
let targets = [];
// Recorded session data: simulated neural samples, intent events,
// visual-stimuli bookkeeping, and completed handwriting traces.
let neuralData = [];
let intentStream = [];
let visualStimuli = [];
let handwritingSamples = [];
// Session/task progress tracking.
let currentMode = null;
let currentTask = 0;
let totalTasks = 0;
let taskStartTime = 0;
let sessionStartTime = 0;
// Raw input state: accumulated look angles (x/y) plus last-event deltas.
let mouse = { x: 0, y: 0, dx: 0, dy: 0 };
let keyboard = {};
// Frame-rate bookkeeping.
let fpsCounter = 0;
let lastFpsTime = 0;
let currentFps = 60;
// Interval handles for the menu background animation and HUD data ticker.
let neuralBackgroundInterval;
let dataStreamInterval;
// ========== INITIALIZATION ==========
// Builds the animated neural-network backdrop for the menu: 50 nodes at
// random positions, sparse line "connections" between node pairs, and a
// 3 s timer that jumps nodes to new random positions.
function initNeuralBackground() {
  const bg = document.getElementById('neuralBackground');
  bg.innerHTML = '';
  // Create neural nodes at random percentage positions.
  for (let i = 0; i < 50; i++) {
    const node = document.createElement('div');
    node.className = 'neural-node';
    node.style.left = `${Math.random() * 100}%`;
    node.style.top = `${Math.random() * 100}%`;
    bg.appendChild(node);
  }
  // Connect ~10% of node pairs (i < j avoids duplicates and self-links).
  const nodes = bg.querySelectorAll('.neural-node');
  nodes.forEach((node1, i) => {
    nodes.forEach((node2, j) => {
      if (i < j && Math.random() < 0.1) {
        const x1 = parseFloat(node1.style.left);
        const y1 = parseFloat(node1.style.top);
        const x2 = parseFloat(node2.style.left);
        const y2 = parseFloat(node2.style.top);
        // Length/angle computed in percent units; mixing %-of-width with
        // %-of-height distorts angles on non-square viewports (visual only).
        const length = Math.sqrt(Math.pow(x2 - x1, 2) + Math.pow(y2 - y1, 2));
        const angle = Math.atan2(y2 - y1, x2 - x1) * 180 / Math.PI;
        const connection = document.createElement('div');
        connection.className = 'neural-connection';
        connection.style.width = `${length}%`;
        connection.style.left = `${x1}%`;
        connection.style.top = `${y1}%`;
        connection.style.transform = `rotate(${angle}deg)`;
        bg.appendChild(connection);
      }
    });
  });
  // Re-randomize node positions every 3 s. NOTE(review): the connection
  // lines are not recomputed here, so they detach from moved nodes.
  neuralBackgroundInterval = setInterval(() => {
    nodes.forEach(node => {
      node.style.left = `${Math.random() * 100}%`;
      node.style.top = `${Math.random() * 100}%`;
    });
  }, 3000);
}
// Creates the three.js scene, camera, and renderer, builds the static
// environment, wires the input handlers, and enters the render loop.
function initThreeJS() {
  scene = new THREE.Scene();
  scene.background = new THREE.Color(0x000000);
  scene.fog = new THREE.Fog(0x000000, 50, 200);
  // 90° FOV FPS camera at standing eye height (1.6 units).
  camera = new THREE.PerspectiveCamera(90, window.innerWidth / window.innerHeight, 0.1, 1000);
  camera.position.y = 1.6;
  renderer = new THREE.WebGLRenderer({ antialias: true });
  renderer.setSize(window.innerWidth, window.innerHeight);
  // Cap pixel ratio at 2 to limit fill-rate cost on high-DPI screens.
  renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2));
  document.getElementById('gameContainer').appendChild(renderer.domElement);
  // Lighting. NOTE(review): the materials below are MeshBasicMaterial,
  // which ignores lights — these only matter if lit materials are added.
  const ambientLight = new THREE.AmbientLight(0x00ff00, 0.1);
  scene.add(ambientLight);
  const directionalLight = new THREE.DirectionalLight(0x00ff00, 0.5);
  directionalLight.position.set(10, 20, 5);
  scene.add(directionalLight);
  // Create environment
  createEnvironment();
  // Setup controls
  setupControls();
  // Start animation loop (animate() is defined elsewhere in the file).
  animate();
}
// Populates the scene: a wireframe ground plane, 20 sphere "neural targets"
// scattered around the origin, and 5 DOM flicker-stimulus elements.
function createEnvironment() {
  // Ground: 100x100 plane rendered as a translucent wireframe grid.
  const groundGeometry = new THREE.PlaneGeometry(100, 100, 50, 50);
  const groundMaterial = new THREE.MeshBasicMaterial({
    color: 0x003300,
    wireframe: true,
    transparent: true,
    opacity: 0.3
  });
  const ground = new THREE.Mesh(groundGeometry, groundMaterial);
  ground.rotation.x = -Math.PI / 2; // lay the plane flat
  scene.add(ground);
  // Neural training targets: random XZ within ±40, 1-6 units high.
  for (let i = 0; i < 20; i++) {
    const geometry = new THREE.SphereGeometry(0.5, 8, 8);
    const material = new THREE.MeshBasicMaterial({
      color: 0x00ff00,
      wireframe: true,
      transparent: true,
      opacity: 0.6
    });
    const target = new THREE.Mesh(geometry, material);
    target.position.set(
      (Math.random() - 0.5) * 80,
      1 + Math.random() * 5,
      (Math.random() - 0.5) * 80
    );
    // Per-target state: active flag plus a random c-VEP flash frequency.
    target.userData = {
      type: 'neural_target',
      active: false,
      frequency: CONFIG.VISUAL_STIMULI_FREQUENCIES[Math.floor(Math.random() * CONFIG.VISUAL_STIMULI_FREQUENCIES.length)],
      lastFlash: 0
    };
    scene.add(target);
    targets.push(target);
  }
  // DOM overlay elements for the on-screen visual stimuli (vstim-0..vstim-4),
  // positioned and activated later by startVisualStimuli().
  for (let i = 0; i < 5; i++) {
    const stim = document.createElement('div');
    stim.className = 'vstim-target';
    stim.id = `vstim-${i}`;
    document.body.appendChild(stim);
  }
}
// Wires up all input: pointer-locked mouse look, keyboard state tracking,
// mouse buttons, pointer-lock acquisition, and window resize. Movement keys
// and clicks are additionally logged to the intent stream.
function setupControls() {
  // Mouse look: only active while pointer lock is held on <body>.
  document.addEventListener('mousemove', (e) => {
    if (document.pointerLockElement === document.body) {
      mouse.dx = e.movementX;
      mouse.dy = e.movementY;
      mouse.x += mouse.dx * 0.002; // sensitivity scale
      mouse.y += mouse.dy * 0.002;
      // Clamp pitch to straight up/down.
      mouse.y = Math.max(-Math.PI/2, Math.min(Math.PI/2, mouse.y));
      camera.rotation.order = 'YXZ'; // apply yaw, then pitch
      camera.rotation.y = -mouse.x;
      camera.rotation.x = -mouse.y;
    }
  });
  // Keyboard: track held state and log movement/jump keys as intents.
  document.addEventListener('keydown', (e) => {
    const key = e.key.toLowerCase();
    keyboard[key] = true;
    // Record key press as intent
    if (['w', 'a', 's', 'd', ' '].includes(key)) {
      recordIntent({
        type: 'key_press',
        key: key,
        timestamp: Date.now(),
        position: camera.position.toArray(),
        rotation: [camera.rotation.x, camera.rotation.y, camera.rotation.z]
      });
    }
  });
  document.addEventListener('keyup', (e) => {
    const key = e.key.toLowerCase();
    keyboard[key] = false;
  });
  // Mouse button doubles as the "fire" intent. getAimedTarget() is defined
  // elsewhere in the file.
  document.addEventListener('mousedown', () => {
    keyboard['mouse'] = true;
    recordIntent({
      type: 'mouse_click',
      button: 'left',
      timestamp: Date.now(),
      target: getAimedTarget()
    });
  });
  document.addEventListener('mouseup', () => {
    keyboard['mouse'] = false;
  });
  // First click anywhere grabs pointer lock for FPS-style mouse look.
  document.body.addEventListener('click', () => {
    if (!document.pointerLockElement) {
      document.body.requestPointerLock();
    }
  });
  // Keep camera aspect and canvas size in sync with the window.
  window.addEventListener('resize', () => {
    camera.aspect = window.innerWidth / window.innerHeight;
    camera.updateProjectionMatrix();
    renderer.setSize(window.innerWidth, window.innerHeight);
  });
}
// ========== BCI TRAINING MODES ==========
// Session entry point: swaps the menu for the in-game HUD, initializes the
// UI panels, launches the requested training mode, and starts data capture.
function startBCITraining(mode) {
  currentMode = mode;
  sessionStartTime = Date.now();
  // Hide the menu and reveal the gameplay chrome.
  const displayById = {
    mainMenu: 'none',
    gameContainer: 'block',
    uiOverlay: 'grid',
    crosshair: 'block',
    bciControlPanel: 'flex'
  };
  Object.keys(displayById).forEach((id) => {
    document.getElementById(id).style.display = displayById[id];
  });
  // Initialize HUD panels.
  initNeuralUI();
  initDataStream();
  // Mode dispatch table (unknown modes are silently ignored, as before).
  const launchers = {
    motor_imagery: startMotorImageryTraining,
    simultaneous_intent: startSimultaneousIntentTraining,
    visual_evoked: startVisualEvokedTraining,
    handwriting_intent: startHandwritingIntentTraining,
    full_spectrum: startFullSpectrumTraining
  };
  if (launchers[mode]) {
    launchers[mode]();
  }
  // Begin recording neural/intent samples.
  startDataCollection();
}
// Kicks off the motor-imagery block: CONFIG.MOTOR_IMAGERY_TRIALS trials,
// with the first trial beginning after a 2 s lead-in.
function startMotorImageryTraining() {
  currentTask = 0;
  totalTasks = CONFIG.MOTOR_IMAGERY_TRIALS;
  showTaskIndicator(
    "MOTOR IMAGERY TRAINING",
    "Imagine moving your cursor to the target. This trains motor cortex decoding for prosthetic control.",
    "Starting trial..."
  );
  setTimeout(nextMotorImageryTask, 2000);
}
// Runs one motor-imagery trial: highlight a random target in yellow for 3 s,
// record the trial result, then chain the next trial after a 1 s rest.
// Recurses via timers until all trials are done, then shows the results modal.
function nextMotorImageryTask() {
  if (currentTask >= totalTasks) {
    completeTraining();
    return;
  }
  currentTask++;
  taskStartTime = Date.now();
  // Highlight a random target for this trial.
  const target = targets[Math.floor(Math.random() * targets.length)];
  target.userData.active = true;
  target.material.color.setHex(0xffff00);
  updateTaskIndicator(
    `Trial ${currentTask}/${totalTasks}`,
    `Imagine moving to the glowing target. Focus on the intent to move.`
  );
  // Trial window: 3 s of imagery, then score and reset the target.
  setTimeout(() => {
    target.userData.active = false;
    target.material.color.setHex(0x00ff00);
    // calculateAccuracy() is defined elsewhere in the file.
    recordNeuralData({
      type: 'motor_imagery_trial',
      trial: currentTask,
      duration: Date.now() - taskStartTime,
      target_position: target.position.toArray(),
      accuracy: calculateAccuracy(target)
    });
    // 1 s inter-trial rest before the next trial.
    setTimeout(() => {
      nextMotorImageryTask();
    }, 1000);
  }, 3000);
}
// Kicks off the simultaneous-intent block: CONFIG.SIMULTANEOUS_INTENT_TASKS
// tasks, with the first task beginning after a 2 s lead-in.
function startSimultaneousIntentTraining() {
  currentTask = 0;
  totalTasks = CONFIG.SIMULTANEOUS_INTENT_TASKS;
  showTaskIndicator(
    "SIMULTANEOUS INTENT DECODING",
    "Move (WASD) while aiming at targets. This trains decoding multiple simultaneous intents.",
    "Starting task..."
  );
  setTimeout(nextSimultaneousIntentTask, 2000);
}
// Runs one simultaneous-intent task: activate three targets (red) for 5 s
// while the player moves and aims, record the result, then chain the next
// task after a 1 s rest.
function nextSimultaneousIntentTask() {
  if (currentTask >= totalTasks) {
    completeTraining();
    return;
  }
  currentTask++;
  taskStartTime = Date.now();
  // Activate three DISTINCT targets. The original picked with replacement,
  // so the same target could be selected more than once and fewer than
  // three targets would actually glow (and the log contained duplicates).
  const activeTargets = [];
  const pool = targets.slice();
  while (activeTargets.length < 3 && pool.length > 0) {
    const idx = Math.floor(Math.random() * pool.length);
    const target = pool.splice(idx, 1)[0];
    target.userData.active = true;
    target.material.color.setHex(0xff0000);
    activeTargets.push(target);
  }
  updateTaskIndicator(
    `Task ${currentTask}/${totalTasks}`,
    `Move while aiming at all red targets. Focus on simultaneous movement and aiming.`
  );
  // Task runs for 5 seconds, then targets reset and the result is logged.
  setTimeout(() => {
    activeTargets.forEach(target => {
      target.userData.active = false;
      target.material.color.setHex(0x00ff00);
    });
    // countSimultaneousActions() is defined elsewhere in the file.
    recordNeuralData({
      type: 'simultaneous_intent_task',
      task: currentTask,
      duration: Date.now() - taskStartTime,
      active_targets: activeTargets.map(t => t.position.toArray()),
      simultaneous_actions: countSimultaneousActions()
    });
    // 1 s rest before the next task.
    setTimeout(() => {
      nextSimultaneousIntentTask();
    }, 1000);
  }, 5000);
}
// Runs the c-VEP block: turns the on-screen flicker stimuli on for a fixed
// 60 s window, then stops them and ends the session.
function startVisualEvokedTraining() {
  showTaskIndicator(
    "VISUAL EVOKED POTENTIALS",
    "Focus on the flashing targets. This trains c-VEP decoding for non-verbal communication.",
    "Starting visual stimulation..."
  );
  startVisualStimuli();
  const RUN_MS = 60000; // fixed stimulation window
  setTimeout(() => {
    stopVisualStimuli();
    completeTraining();
  }, RUN_MS);
}
// Kicks off the handwriting block: CONFIG.HANDWRITING_SAMPLES letters,
// with the first letter appearing after a 2 s lead-in.
function startHandwritingIntentTraining() {
  currentTask = 0;
  totalTasks = CONFIG.HANDWRITING_SAMPLES;
  showTaskIndicator(
    "HANDWRITING INTENT TRAINING",
    "Trace the letters with precision aiming. This trains fine motor control decoding.",
    "Starting letter tracing..."
  );
  setTimeout(nextHandwritingTask, 2000);
}
// Runs one handwriting trial: show a random letter, sample mouse motion at
// ~60 Hz for 3 s while the user traces, store the trace, then chain the
// next trial after a 1 s rest.
function nextHandwritingTask() {
  if (currentTask >= totalTasks) {
    completeTraining();
    return;
  }
  currentTask++;
  const letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
  const letter = letters[Math.floor(Math.random() * letters.length)];
  showHandwritingPrompt(letter);
  // Per-trial trace record.
  const startTime = Date.now();
  const handwritingSession = {
    letter: letter,
    start_time: startTime,
    samples: []
  };
  // Sample accumulated mouse position/velocity every 16 ms (~60 Hz).
  // "pressure" is random filler — there is no real pressure source here.
  const sampleInterval = setInterval(() => {
    handwritingSession.samples.push({
      timestamp: Date.now(),
      position: [mouse.x, mouse.y],
      velocity: [mouse.dx, mouse.dy],
      pressure: Math.random() // Simulated pressure data
    });
  }, 16); // ~60Hz sampling
  // End of the 3 s tracing window: stop sampling, store, log, move on.
  setTimeout(() => {
    clearInterval(sampleInterval);
    handwritingSession.end_time = Date.now();
    handwritingSession.duration = Date.now() - startTime;
    handwritingSamples.push(handwritingSession);
    hideHandwritingPrompt();
    // Summary event for the trial.
    recordNeuralData({
      type: 'handwriting_sample',
      sample: currentTask,
      letter: letter,
      duration: handwritingSession.duration,
      samples_count: handwritingSession.samples.length
    });
    // 1 s rest before the next letter.
    setTimeout(() => {
      nextHandwritingTask();
    }, 1000);
  }, 3000);
}
// Runs all four training modes back-to-back, 30 s each, using lightweight
// stand-ins for each mode (it does not reuse the dedicated per-mode task
// loops above). Ends with the results modal.
function startFullSpectrumTraining() {
  const modes = [
    { mode: 'motor_imagery', duration: 30000 },
    { mode: 'simultaneous_intent', duration: 30000 },
    { mode: 'visual_evoked', duration: 30000 },
    { mode: 'handwriting_intent', duration: 30000 }
  ];
  let currentModeIndex = 0;
  // Advances to the next segment; each branch schedules its own cleanup and
  // calls runNextMode() again when it finishes.
  function runNextMode() {
    if (currentModeIndex >= modes.length) {
      completeTraining();
      return;
    }
    const mode = modes[currentModeIndex];
    currentModeIndex++;
    showTaskIndicator(
      `FULL SPECTRUM TRAINING - ${mode.mode.toUpperCase()}`,
      `Complete the ${mode.mode.replace('_', ' ')} task.`,
      "Starting in 3 seconds..."
    );
    // 3 s countdown before the segment starts.
    setTimeout(() => {
      const startTime = Date.now(); // NOTE(review): unused in this function
      switch(mode.mode) {
        case 'motor_imagery':
          // Flash a random target yellow for 0.5 s, once per second.
          const interval = setInterval(() => {
            const target = targets[Math.floor(Math.random() * targets.length)];
            target.material.color.setHex(0xffff00);
            setTimeout(() => {
              target.material.color.setHex(0x00ff00);
            }, 500);
          }, 1000);
          setTimeout(() => {
            clearInterval(interval);
            runNextMode();
          }, mode.duration);
          break;
        case 'simultaneous_intent':
          // All targets stay red/active for the whole segment.
          targets.forEach(t => {
            t.material.color.setHex(0xff0000);
            t.userData.active = true;
          });
          setTimeout(() => {
            targets.forEach(t => {
              t.material.color.setHex(0x00ff00);
              t.userData.active = false;
            });
            runNextMode();
          }, mode.duration);
          break;
        case 'visual_evoked':
          startVisualStimuli();
          setTimeout(() => {
            stopVisualStimuli();
            runNextMode();
          }, mode.duration);
          break;
        case 'handwriting_intent':
          // Show letters A-D for 2 s each, one every 3 s. NOTE(review):
          // this segment advances after the 4 letters (~15 s), not after
          // mode.duration like the other segments — confirm intended.
          const letters = 'ABCD';
          let letterIndex = 0;
          const letterInterval = setInterval(() => {
            if (letterIndex >= letters.length) {
              clearInterval(letterInterval);
              runNextMode();
              return;
            }
            showHandwritingPrompt(letters[letterIndex]);
            letterIndex++;
            setTimeout(() => {
              hideHandwritingPrompt();
            }, 2000);
          }, 3000);
          break;
      }
    }, 3000);
  }
  runNextMode();
}
// ========== DATA COLLECTION ==========
// Starts the three recurring collectors. Interval handles are kept on the
// function object so a repeat call (e.g. starting a second session after
// returning to the menu) REPLACES the old timers instead of stacking
// duplicates — the original discarded the IDs and leaked an extra set of
// intervals per session.
function startDataCollection() {
  // Cancel any collectors left over from a previous session.
  (startDataCollection._timers || []).forEach(clearInterval);
  startDataCollection._timers = [
    // "Neural" samples. Note: browsers clamp setInterval delays (>= ~4 ms),
    // so the effective rate is well below the intended 1000 Hz.
    setInterval(collectNeuralData, 1),
    // Intent stream at ~60 Hz.
    setInterval(collectIntentStream, 16.67),
    // HUD performance metrics once per second.
    setInterval(updatePerformanceMetrics, 1000)
  ];
}
// Produces one simulated neural sample: a per-channel sine "carrier" plus
// uniform noise plus an intent-driven term, tagged with the current
// input/scene context. Appended to neuralData and mirrored into the HUD.
function collectNeuralData() {
  const neuralSample = {
    timestamp: Date.now(),
    session_time: Date.now() - sessionStartTime,
    channels: {}
  };
  for (let i = 0; i < CONFIG.NEURAL_CHANNELS; i++) {
    // Base carrier: per-channel sine of wall-clock seconds, scaled by SIGNAL_STRENGTH.
    const baseSignal = Math.sin(Date.now() / 1000 * (i + 1)) * CONFIG.SIGNAL_STRENGTH;
    // Uniform noise in [-NOISE_LEVEL, +NOISE_LEVEL].
    const noise = (Math.random() - 0.5) * 2 * CONFIG.NOISE_LEVEL;
    // calculateIntentModulation() is defined elsewhere in the file.
    const intentModulation = calculateIntentModulation(i);
    neuralSample.channels[`channel_${i}`] = baseSignal + noise + intentModulation;
  }
  // Snapshot of the inputs/scene at sample time.
  neuralSample.intent_context = {
    mouse_movement: [mouse.dx, mouse.dy],
    keyboard_state: { ...keyboard },
    camera_rotation: [camera.rotation.x, camera.rotation.y, camera.rotation.z],
    active_targets: targets.filter(t => t.userData.active).length
  };
  neuralData.push(neuralSample);
  // Update UI
  updateNeuralChannels(neuralSample.channels);
}
// Records one ~60 Hz snapshot of every input channel (mouse, keyboard,
// camera) plus the active-target environment, appended to intentStream and
// echoed to the on-screen data-stream panel.
function collectIntentStream() {
  const intentSample = {
    timestamp: Date.now(),
    session_time: Date.now() - sessionStartTime,
    mouse: {
      position: [mouse.x, mouse.y],
      delta: [mouse.dx, mouse.dy],
      buttons: keyboard['mouse'] ? 1 : 0
    },
    keyboard: { ...keyboard },
    camera: {
      position: camera.position.toArray(),
      rotation: [camera.rotation.x, camera.rotation.y, camera.rotation.z]
    },
    environment: {
      // Position and player distance for each currently-active target.
      active_targets: targets.filter(t => t.userData.active).map(t => ({
        position: t.position.toArray(),
        distance: t.position.distanceTo(camera.position)
      })),
      fps: currentFps
    }
  };
  intentStream.push(intentSample);
  // Update data stream display
  updateDataStream(intentSample);
}
// Appends one explicit intent event (key press / mouse click) to the intent
// stream, stamped with session-relative time and the current neural context.
function recordIntent(intent) {
  const enriched = Object.assign({}, intent, {
    session_time: Date.now() - sessionStartTime,
    neural_context: getCurrentNeuralContext()
  });
  intentStream.push(enriched);
}
// Appends a labeled event record (trial/task/stimulus result) to the neural
// data log, stamped with wall-clock and session-relative time plus the
// current intent and neural context snapshots.
function recordNeuralData(data) {
  const stamped = Object.assign({}, data, {
    timestamp: Date.now(),
    session_time: Date.now() - sessionStartTime,
    intent_context: getCurrentIntentContext(),
    neural_context: getCurrentNeuralContext()
  });
  neuralData.push(stamped);
}
// ========== UI FUNCTIONS ==========
// Builds the HUD: one readout tile per neural channel (first 8 only) and
// one indicator tile per decodable intent.
function initNeuralUI() {
  const channelsDiv = document.getElementById('neuralChannels');
  channelsDiv.innerHTML = '';
  for (let ch = 0; ch < 8; ch++) { // readouts for the first 8 channels only
    const tile = document.createElement('div');
    tile.className = 'neural-channel';
    tile.innerHTML = `
<div class="channel-label">CH ${ch}</div>
<div class="channel-value" id="neuralChannel${ch}">0.00</div>
`;
    channelsDiv.appendChild(tile);
  }
  const intentGrid = document.getElementById('intentGrid');
  intentGrid.innerHTML = '';
  ['MOVE', 'AIM', 'FIRE', 'JUMP', 'RELOAD', 'CROUCH'].forEach((name) => {
    const item = document.createElement('div');
    item.className = 'intent-item';
    item.id = `intent-${name.toLowerCase()}`;
    item.innerHTML = `
<div class="intent-label">${name}</div>
<div class="intent-value">0%</div>
`;
    intentGrid.appendChild(item);
  });
}
// Ticks the data-stream panel every 100 ms with the latest intent sample.
// Fixes two defects in the original trim logic: (1) it compared
// children.length (elements only) but removed lastChild (which can be a
// text node), so old entries lingered; (2) recordIntent() pushes entries
// without a .mouse field, which crashed the ticker with a TypeError.
function initDataStream() {
  dataStreamInterval = setInterval(() => {
    if (intentStream.length === 0) return;
    const sample = intentStream[intentStream.length - 1];
    // Skip explicit intent events (key press/click) that carry no mouse data.
    if (!sample.mouse || !sample.mouse.delta) return;
    const line = `[${sample.timestamp}] INTENT: ${JSON.stringify(sample.mouse.delta)}<br>`;
    const stream = document.getElementById('dataStream');
    stream.innerHTML = line + stream.innerHTML;
    // Trim to the newest 20 <br>-separated entries (same scheme as
    // updateDataStream, which operates on the same element).
    const lines = stream.innerHTML.split('<br>');
    if (lines.length > 20) {
      stream.innerHTML = lines.slice(0, 20).join('<br>');
    }
  }, 100);
}
// Pushes the latest per-channel values into the first 8 HUD readouts and
// colors each by activity level (yellow > 0.5, green > 0.2, dim otherwise).
function updateNeuralChannels(channels) {
  for (let ch = 0; ch < 8; ch++) {
    const el = document.getElementById(`neuralChannel${ch}`);
    const reading = channels[`channel_${ch}`];
    if (!el || reading === undefined) continue;
    el.textContent = reading.toFixed(2);
    const magnitude = Math.abs(reading);
    el.style.color = magnitude > 0.5 ? '#ff0' : magnitude > 0.2 ? '#0f0' : '#0a0';
  }
}
// Refreshes the HUD performance panel once per second: sample bandwidth,
// motor-imagery accuracy, a simulated latency figure, and the count of
// simultaneously-held inputs.
function updatePerformanceMetrics() {
  // Bandwidth: neural samples recorded within the last second.
  const bandwidth = neuralData.filter(d =>
    Date.now() - d.timestamp < 1000
  ).length;
  document.getElementById('bandwidthValue').textContent = `${bandwidth} Hz`;
  // Accuracy: fraction of motor-imagery trials above the 0.7 threshold.
  const trials = neuralData.filter(d => d.type === 'motor_imagery_trial');
  const hits = trials.filter(d => d.accuracy > 0.7).length;
  const accuracy = trials.length > 0 ? Math.round((hits / trials.length) * 100) : 0;
  document.getElementById('accuracyValue').textContent = `${accuracy}%`;
  // Latency is simulated in the 50-100 ms band.
  const latency = Math.random() * 50 + 50;
  document.getElementById('latencyValue').textContent = `${latency.toFixed(1)} ms`;
  // Count currently-held inputs. The original summed raw keyboard flags,
  // which are undefined for keys never pressed yet — undefined + boolean
  // is NaN, so the HUD showed "NaN" until every tracked key had been used.
  const simultaneous = ['w', 'a', 's', 'd', 'mouse'].filter(k => keyboard[k]).length;
  document.getElementById('intentsValue').textContent = simultaneous;
  // Update intent indicators
  updateIntentIndicators();
}
// Updates the six intent tiles with a heuristic confidence derived from the
// raw input state; tiles above 50% get the "intent-active" highlight.
function updateIntentIndicators() {
  // Per-intent estimators (reload/crouch have no input source yet → 0).
  const estimators = {
    move: () => (keyboard['w'] || keyboard['a'] || keyboard['s'] || keyboard['d']) ? 100 : 0,
    aim: () => (Math.abs(mouse.dx) > 1 || Math.abs(mouse.dy) > 1) ? 80 : 20,
    fire: () => keyboard['mouse'] ? 100 : 0,
    jump: () => keyboard[' '] ? 100 : 0,
    reload: () => 0,
    crouch: () => 0
  };
  Object.keys(estimators).forEach((intent) => {
    const element = document.getElementById(`intent-${intent}`);
    if (!element) return;
    const value = estimators[intent]();
    element.querySelector('.intent-value').textContent = `${value}%`;
    element.classList.toggle('intent-active', value > 50);
  });
}
// Prepends one formatted intent line (HH:MM:SS.mmm + mouse delta) to the
// data-stream panel, keeping only the newest 20 <br>-separated entries.
function updateDataStream(sample) {
  const streamEl = document.getElementById('dataStream');
  const clock = new Date(sample.timestamp).toISOString().substr(11, 12);
  const dx = sample.mouse.delta[0].toFixed(2);
  const dy = sample.mouse.delta[1].toFixed(2);
  streamEl.innerHTML = `[${clock}] INTENT: Δ(${dx}, ${dy})<br>` + streamEl.innerHTML;
  // Keep only last 20 lines
  const entries = streamEl.innerHTML.split('<br>');
  if (entries.length > 20) {
    streamEl.innerHTML = entries.slice(0, 20).join('<br>');
  }
}
// ========== VISUAL STIMULI ==========
// Positions the five flicker stimuli, starts each flashing at its assigned
// frequency, and logs one stimulus event per second. All timer IDs are kept
// on the function object so stopVisualStimuli()/repeat calls can cancel
// them — the original discarded the IDs, so flashing and event recording
// kept running after "stop" and toggling stacked extra timers each time.
function startVisualStimuli() {
  // Fixed on-screen anchor points for the five stimuli.
  const positions = [
    { x: '20%', y: '20%' },
    { x: '80%', y: '20%' },
    { x: '50%', y: '50%' },
    { x: '20%', y: '80%' },
    { x: '80%', y: '80%' }
  ];
  // Cancel any timers from a previous activation before starting fresh.
  (startVisualStimuli._timers || []).forEach(clearInterval);
  startVisualStimuli._timers = [];
  CONFIG.VISUAL_STIMULI_FREQUENCIES.forEach((freq, index) => {
    const stim = document.getElementById(`vstim-${index}`);
    if (!stim) return;
    stim.style.left = positions[index].x;
    stim.style.top = positions[index].y;
    stim.classList.add('vstim-active');
    // Flash at the stimulus frequency (c-VEP frequency tagging).
    startVisualStimuli._timers.push(setInterval(() => {
      stim.style.opacity = stim.style.opacity === '1' ? '0.3' : '1';
    }, 1000 / freq));
    // Log one stimulus event per second while active.
    startVisualStimuli._timers.push(setInterval(() => {
      recordNeuralData({
        type: 'visual_stimulus',
        stimulus_id: index,
        frequency: freq,
        position: positions[index],
        timestamp: Date.now()
      });
    }, 1000));
  });
}
// Hides all five flicker stimuli and cancels any flash/recording timers
// that startVisualStimuli registered on its function object (guarded, so
// this is a safe no-op if no timers were registered). The original only
// hid the elements and left the intervals running forever.
function stopVisualStimuli() {
  if (Array.isArray(startVisualStimuli._timers)) {
    startVisualStimuli._timers.forEach(clearInterval);
    startVisualStimuli._timers = [];
  }
  for (let i = 0; i < 5; i++) {
    const stim = document.getElementById(`vstim-${i}`);
    if (stim) {
      stim.classList.remove('vstim-active');
      stim.style.opacity = '0';
    }
  }
}
// Toggles the flicker stimuli: presence of any .vstim-active element means
// stimulation is currently on.
function toggleVisualStimuli() {
  const running = document.querySelector('.vstim-active') !== null;
  if (running) {
    stopVisualStimuli();
  } else {
    startVisualStimuli();
  }
}
// ========== HANDWRITING TRAINING ==========
// Shows the tracing UI for one letter: a faint 200px template letter drawn
// on the canvas plus a text prompt, and (re)binds mouse handlers for
// freehand drawing over the template.
function showHandwritingPrompt(letter) {
  const prompt = document.getElementById('handwritingPrompt');
  const canvas = document.getElementById('handwritingCanvas');
  prompt.textContent = `Trace the letter: ${letter}`;
  prompt.style.display = 'block';
  canvas.style.display = 'block';
  // Clear canvas and draw the translucent template letter, centered.
  const ctx = canvas.getContext('2d');
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  ctx.strokeStyle = '#0f0';
  ctx.lineWidth = 3;
  ctx.font = '200px Courier New';
  ctx.fillStyle = 'rgba(0, 255, 0, 0.1)';
  ctx.textAlign = 'center';
  ctx.textBaseline = 'middle';
  ctx.fillText(letter, canvas.width/2, canvas.height/2);
  // Freehand drawing; assigning on* handlers replaces any previous trial's
  // handlers. NOTE(review): while pointer lock is held (FPS mode), the
  // cursor cannot reach the canvas — confirm tracing is reachable in-game.
  let drawing = false;
  canvas.onmousedown = () => {
    drawing = true;
    ctx.beginPath();
  };
  canvas.onmousemove = (e) => {
    if (!drawing) return;
    // Translate viewport coordinates into canvas-local coordinates.
    const rect = canvas.getBoundingClientRect();
    const x = e.clientX - rect.left;
    const y = e.clientY - rect.top;
    ctx.lineTo(x, y);
    ctx.stroke();
  };
  canvas.onmouseup = () => {
    drawing = false;
  };
}
// Hides both the letter prompt banner and the tracing canvas.
function hideHandwritingPrompt() {
  ['handwritingPrompt', 'handwritingCanvas'].forEach((id) => {
    document.getElementById(id).style.display = 'none';
  });
}
// ========== TASK MANAGEMENT ==========
// Populates and reveals the task HUD card; the progress bar resets to 0%.
function showTaskIndicator(title, description, status) {
  const setText = (id, text) => { document.getElementById(id).textContent = text; };
  setText('taskTitle', title);
  setText('taskDescription', description);
  setText('taskStatus', status);
  document.getElementById('taskProgress').style.width = '0%';
  document.getElementById('taskIndicator').style.display = 'block';
}
// Updates the task HUD status line (and optionally the description), then
// advances the progress bar: trial-counted modes use the task counters,
// timed modes fall back to elapsed time against a 60 s window.
function updateTaskIndicator(status, description) {
  document.getElementById('taskStatus').textContent = status;
  if (description) {
    document.getElementById('taskDescription').textContent = description;
  }
  let progress;
  if (totalTasks > 0) {
    progress = (currentTask / totalTasks) * 100;
  } else {
    progress = (Date.now() - taskStartTime) / 60000 * 100; // For time-based tasks
  }
  document.getElementById('taskProgress').style.width = `${Math.min(100, progress)}%`;
}
// Removes the task HUD card from view.
function hideTaskIndicator() {
  const indicator = document.getElementById('taskIndicator');
  indicator.style.display = 'none';
}
// Ends the session: hides the task UI, stops any stimuli, computes summary
// statistics, fills the results grid, and shows the "session complete" modal.
function completeTraining() {
  hideTaskIndicator();
  stopVisualStimuli();
  // Session statistics: "bandwidth" here is total samples (neural + intent)
  // per second of session. NOTE(review): the File Size figure
  // (samples * 0.1 / 1024) looks inconsistent with its KB label — verify
  // the intended per-sample size and units.
  const sessionDuration = Date.now() - sessionStartTime;
  const totalSamples = neuralData.length + intentStream.length;
  const bandwidth = Math.round(totalSamples / (sessionDuration / 1000));
  // Show results
  document.getElementById('experimentResults').innerHTML = `
<div class="result-item">
<div class="result-label">Training Mode</div>
<div class="result-value">${currentMode.replace('_', ' ').toUpperCase()}</div>
</div>
<div class="result-item">
<div class="result-label">Duration</div>
<div class="result-value">${Math.round(sessionDuration / 1000)}s</div>
</div>
<div class="result-item">
<div class="result-label">Samples Collected</div>
<div class="result-value">${totalSamples.toLocaleString()}</div>
</div>
<div class="result-item">
<div class="result-label">Data Bandwidth</div>
<div class="result-value">${bandwidth} Hz</div>
</div>
<div class="result-item">
<div class="result-label">Neural Channels</div>
<div class="result-value">${CONFIG.NEURAL_CHANNELS}</div>
</div>
<div class="result-item">
<div class="result-label">File Size</div>
<div class="result-value">${Math.round((totalSamples * 0.1) / 1024)} KB</div>
</div>
`;
  document.getElementById('experimentComplete').style.display = 'flex';
}
// ========== EXPORT FUNCTIONALITY ==========
async function exportDataset() {
// Package the entire recorded session into a downloadable ZIP laid out as a
// Hugging Face dataset: JSONL data files, metadata.json, a README dataset
// card, and a companion Python loader script.
// Reads globals: currentMode, sessionStartTime, neuralData, intentStream,
// handwritingSamples, CONFIG.
// NOTE(review): assumes JSZip and saveAs (FileSaver.js) are loaded as
// globals by script tags elsewhere in this file — confirm.
const sessionId = `bci_fps_${currentMode}_${Date.now()}`;
// Create dataset metadata following Hugging Face format
const metadata = {
dataset_info: {
name: `BCI-FPS_${currentMode.toUpperCase()}_Dataset`,
description: `High-bandwidth neural training data for BCI research. Mode: ${currentMode}`,
version: "1.0.0",
license: "MIT",
// BibTeX entry embedded as a single string (newlines escaped as \n).
citation: `@misc{bci_fps_${currentMode}_2024,\n title={BCI-FPS ${currentMode} Training Dataset},\n author={Neuralink Research},\n year={2024},\n note={High-frequency intent decoding data for brain-computer interface development}\n}`
},
session_info: {
session_id: sessionId,
mode: currentMode,
start_time: new Date(sessionStartTime).toISOString(),
duration_ms: Date.now() - sessionStartTime,
sampling_rate_hz: CONFIG.SAMPLING_RATE,
neural_channels: CONFIG.NEURAL_CHANNELS
},
// Human-readable schema descriptions mirrored by the data files below.
data_schema: {
neural_data: {
timestamp: "UNIX timestamp in milliseconds",
session_time: "Time since session start in milliseconds",
channels: "Object mapping channel names to neural signal values",
intent_context: "Contextual information about user intent"
},
intent_stream: {
timestamp: "UNIX timestamp in milliseconds",
mouse: "Mouse position and movement data",
keyboard: "Keyboard state",
camera: "Camera position and rotation",
environment: "Game environment state"
},
handwriting_samples: {
letter: "Letter being traced",
samples: "Array of handwriting samples with position and pressure data"
}
},
research_applications: [
"Motor imagery decoding for prosthetic control",
"Simultaneous intent decoding for fluid BCI interfaces",
"Visual evoked potential (c-VEP) calibration",
"Handwriting intent recognition for text entry",
"Neural network training for brain-computer interfaces"
],
huggingface: {
compatible: true,
task_categories: ["brain-computer-interface", "neural-decoding", "human-computer-interaction"],
task_ids: ["motor-imagery", "intent-decoding", "visual-evoked-potentials", "handwriting-recognition"],
language: ["en"],
size_categories: ["10K<n<100K"]
}
};
// Create ZIP archive
const zip = new JSZip();
// Add data files in Apache Arrow compatible format
// For now using JSONL, but could be converted to Parquet
// One JSON object per line (JSONL), one line per recorded sample.
zip.file("neural_data.jsonl",
neuralData.map(d => JSON.stringify(d)).join('\n'));
zip.file("intent_stream.jsonl",
intentStream.map(d => JSON.stringify(d)).join('\n'));
// Handwriting traces are optional — only present for handwriting modes.
if (handwritingSamples.length > 0) {
zip.file("handwriting_samples.json",
JSON.stringify(handwritingSamples, null, 2));
}
zip.file("metadata.json", JSON.stringify(metadata, null, 2));
// Create Hugging Face dataset card
// README.md with YAML front matter + markdown body, as expected by the Hub.
const datasetCard = `---
language:
- en
tags:
- brain-computer-interface
- neural-decoding
- motor-imagery
- human-computer-interaction
- neuralink
task_categories:
- brain-computer-interface
task_ids:
- motor-imagery
- intent-decoding
- visual-evoked-potentials
- handwriting-recognition
size_categories:
- 10K<n<100K
---
# Dataset Card for BCI-FPS ${currentMode.toUpperCase()} Dataset
## Dataset Description
This dataset contains high-bandwidth neural training data collected from BCI-FPS, a specialized training platform for brain-computer interface research.
### Dataset Summary
- **Training Mode**: ${currentMode.replace('_', ' ').toUpperCase()}
- **Session ID**: ${sessionId}
- **Duration**: ${Math.round((Date.now() - sessionStartTime) / 1000)} seconds
- **Sampling Rate**: ${CONFIG.SAMPLING_RATE} Hz
- **Neural Channels**: ${CONFIG.NEURAL_CHANNELS}
- **Data Points**: ${(neuralData.length + intentStream.length).toLocaleString()}
### Supported Tasks
- **${getTaskDescription(currentMode)}**
- **Neural Decoding**: Training models to decode user intent from neural signals
- **BCI Calibration**: Providing ground truth data for BCI system calibration
- **Disability Research**: Supporting development of assistive technologies
### Languages
English (interface and documentation)
## Dataset Structure
### Data Instances
\`\`\`json
${JSON.stringify(neuralData[0] || {}, null, 2)}
\`\`\`
### Data Fields
See \`metadata.json\` for complete schema documentation.
## Dataset Creation
### Source Data
- **Platform**: Web-based BCI-FPS Training Environment
- **Sampling Rate**: ${CONFIG.SAMPLING_RATE} Hz
- **Collection Method**: Real-time telemetry during BCI training tasks
- **Neural Simulation**: Synthetic neural data representing ideal BCI signals
### Annotations
- **Annotation process**: Automatic intent labeling during gameplay
- **Annotation types**: Motor imagery, visual stimuli, handwriting intent
- **Who annotated**: System automatically labels based on game state
### Personal and Sensitive Information
No personal information is collected. All data is synthetic/anonymous.
## Considerations for Using the Data
### Social Impact
This dataset enables research in:
- Neuralink-style brain-computer interfaces
- Assistive technologies for disabled individuals
- Human-AI interaction systems
- Neural decoding algorithms
### Discussion of Biases
Synthetic neural data may not perfectly represent biological signals. Results should be validated with real neural recordings.
### Other Known Limitations
- Simulated neural signals
- Idealized game environment
- Limited to specific training tasks
## Additional Information
### Dataset Curators
BCI-FPS Research Team
### Licensing Information
MIT License
### Citation Information
\`\`\`bibtex
@misc{bci_fps_${currentMode}_2024,
title={BCI-FPS ${currentMode} Training Dataset},
author={Neuralink Research},
year={2024},
note={High-frequency intent decoding data for brain-computer interface development}
}
\`\`\`
`;
zip.file("README.md", datasetCard);
// Generate Python loading script
// Shipped inside the ZIP so consumers can load the data with the Hugging
// Face `datasets` library; contains no JS interpolation (pure literal).
const loadScript = `import json
import pandas as pd
from datasets import Dataset, DatasetDict
def load_bci_fps_dataset(data_dir):
"""
Load BCI-FPS dataset for Hugging Face.
Args:
data_dir (str): Path to dataset directory
Returns:
DatasetDict: Hugging Face dataset
"""
# Load neural data
neural_data = []
with open(f"{data_dir}/neural_data.jsonl", 'r') as f:
for line in f:
if line.strip():
neural_data.append(json.loads(line))
# Load intent stream
intent_stream = []
with open(f"{data_dir}/intent_stream.jsonl", 'r') as f:
for line in f:
if line.strip():
intent_stream.append(json.loads(line))
# Create datasets
datasets = {
"neural_data": Dataset.from_list(neural_data),
"intent_stream": Dataset.from_list(intent_stream)
}
# Load handwriting samples if exists
try:
with open(f"{data_dir}/handwriting_samples.json", 'r') as f:
handwriting = json.load(f)
datasets["handwriting"] = Dataset.from_list(handwriting)
except:
pass
# Load metadata
with open(f"{data_dir}/metadata.json", 'r') as f:
metadata = json.load(f)
dataset_dict = DatasetDict(datasets)
dataset_dict.info.metadata = metadata
return dataset_dict
# Example usage for Neuralink research
if __name__ == "__main__":
dataset = load_bci_fps_dataset("./bci_data")
print(f"Dataset keys: {list(dataset.keys())}")
print(f"Neural data samples: {len(dataset['neural_data'])}")
print(f"Intent stream samples: {len(dataset['intent_stream'])}")
# Example: Extract motor imagery trials
motor_trials = [d for d in dataset['neural_data'] if d.get('type') == 'motor_imagery_trial']
print(f"Motor imagery trials: {len(motor_trials)}")
`;
zip.file("load_dataset.py", loadScript);
// Generate and download ZIP
// DEFLATE level 6 = standard speed/size trade-off (JSZip option).
const content = await zip.generateAsync({
type: "blob",
compression: "DEFLATE",
compressionOptions: { level: 6 }
});
// saveAs is provided by FileSaver.js; triggers the browser download.
saveAs(content, `${sessionId}.zip`);
// Show success message
alert(`Dataset exported successfully!\n\nFile: ${sessionId}.zip\nSize: ${(content.size / (1024 * 1024)).toFixed(2)} MB\n\nReady for upload to Hugging Face.`);
}
function showDataPreview() {
// Pop up a quick plain-text summary (via alert) of everything captured so
// far in this session, including the most recent neural and intent samples.
// Read-only: no data is exported or mutated.
const preview = `Dataset Preview:
Training Mode: ${currentMode}
Session Duration: ${Math.round((Date.now() - sessionStartTime) / 1000)}s
Neural Samples: ${neuralData.length}
Intent Samples: ${intentStream.length}
Handwriting Samples: ${handwritingSamples.length}
Total Data Points: ${neuralData.length + intentStream.length}
Latest Neural Sample:
${JSON.stringify(neuralData[neuralData.length - 1] || {}, null, 2)}
Latest Intent Sample:
${JSON.stringify(intentStream[intentStream.length - 1] || {}, null, 2)}`;
alert(preview);
}
// ========== HELPER FUNCTIONS ==========
function getTaskDescription(mode) {
  // Human-readable summary for each supported training mode; any
  // unrecognised mode falls back to the generic label.
  const MODE_DESCRIPTIONS = {
    motor_imagery: 'Motor Imagery Training for prosthetic control',
    simultaneous_intent: 'Simultaneous Intent Decoding for fluid BCI interfaces',
    visual_evoked: 'Visual Evoked Potentials for non-verbal communication',
    handwriting_intent: 'Handwriting Intent Recognition for text entry',
    full_spectrum: 'Full Spectrum BCI Training'
  };
  // hasOwnProperty guard avoids accidental prototype hits (e.g. 'constructor').
  return Object.prototype.hasOwnProperty.call(MODE_DESCRIPTIONS, mode)
    ? MODE_DESCRIPTIONS[mode]
    : 'BCI Training';
}
function calculateAccuracy(target) {
  // Score how closely the camera's forward ray points at `target`:
  // the cosine of the angle between the two directions, remapped from
  // [-1, 1] into [0, 1] (1 = dead-on aim).
  const toTarget = new THREE.Vector3();
  toTarget.subVectors(target.position, camera.position);
  toTarget.normalize();
  const forward = new THREE.Vector3(0, 0, -1);
  forward.applyQuaternion(camera.quaternion);
  const alignment = toTarget.dot(forward);
  return Math.max(0, (alignment + 1) / 2);
}
function countSimultaneousActions() {
  // Tally how many distinct input channels are active right now:
  // WASD movement, mouse look, mouse button, and spacebar.
  const activeChannels = [
    keyboard['w'] || keyboard['a'] || keyboard['s'] || keyboard['d'],
    Math.abs(mouse.dx) > 1 || Math.abs(mouse.dy) > 1,
    keyboard['mouse'],
    keyboard[' ']
  ];
  return activeChannels.reduce((n, active) => (active ? n + 1 : n), 0);
}
function calculateIntentModulation(channel) {
  // Simulated intent-driven boost added to one neural channel's signal.
  // Channel bands: 0-7 movement intent, 8-15 visual attention, 24+ motor
  // (mouse-button) intent; bands are cumulative only in the sense that a
  // channel can match at most one band here.
  let boost = 0;
  const movementHeld =
    keyboard['w'] || keyboard['a'] || keyboard['s'] || keyboard['d'];
  if (channel < 8 && movementHeld) {
    boost += 0.3;
  }
  const anyTargetActive = targets.some(t => t.userData.active);
  if (channel >= 8 && channel < 16 && anyTargetActive) {
    boost += 0.2;
  }
  if (channel >= 24 && keyboard['mouse']) {
    boost += 0.4;
  }
  return boost;
}
function getCurrentNeuralContext() {
  // Channel readings from the most recent neural sample, or null when
  // nothing has been recorded yet.
  if (neuralData.length === 0) {
    return null;
  }
  const latest = neuralData[neuralData.length - 1];
  return latest.channels;
}
function getCurrentIntentContext() {
  // Trimmed snapshot of the most recent intent-stream entry (mouse,
  // keyboard, camera only), or null before any telemetry exists.
  if (intentStream.length === 0) {
    return null;
  }
  const { mouse, keyboard, camera } = intentStream[intentStream.length - 1];
  return { mouse, keyboard, camera };
}
function getAimedTarget() {
  // Ray-cast from screen centre (the crosshair) through the camera and
  // report the first target hit: its index in `targets`, its world
  // position as an array, and the hit distance. Null when nothing is aimed at.
  const ray = new THREE.Raycaster();
  ray.setFromCamera(new THREE.Vector2(0, 0), camera);
  const hits = ray.intersectObjects(targets);
  if (hits.length === 0) {
    return null;
  }
  const nearest = hits[0];
  return {
    id: targets.indexOf(nearest.object),
    position: nearest.object.position.toArray(),
    distance: nearest.distance
  };
}
// ========== GAME LOOP ==========
function animate(time) {
  // Per-frame render loop: FPS bookkeeping, WASD/jump movement,
  // active-target pulsing, then one render pass.
  requestAnimationFrame(animate);
  // Roll the frame tally into currentFps once per second.
  fpsCounter++;
  if (time - lastFpsTime > 1000) {
    currentFps = fpsCounter;
    fpsCounter = 0;
    lastFpsTime = time;
  }
  // Player movement — several keys may be held at once, so each check
  // is independent (not else-if).
  const STEP = 0.1;
  if (keyboard['w']) camera.translateZ(-STEP);
  if (keyboard['s']) camera.translateZ(STEP);
  if (keyboard['a']) camera.translateX(-STEP);
  if (keyboard['d']) camera.translateX(STEP);
  if (keyboard[' ']) camera.position.y += STEP;
  // Pulse the emissive glow on any active targets.
  for (const target of targets) {
    if (target.userData.active) {
      target.material.emissiveIntensity = 0.5 + 0.5 * Math.sin(time * 0.005);
    }
  }
  renderer.render(scene, camera);
}
// ========== CONTROL FUNCTIONS ==========
function pauseTraining() {
// Stub: intended to toggle a paused state for the active session.
// Currently a no-op — wired into the UI but not yet implemented.
// Toggle pause state
// Implementation depends on specific requirements
}
function skipTask() {
// Stub: intended to abandon the current task and advance to the next one.
// Currently a no-op — not yet implemented.
// Skip current task
// Implementation depends on current mode
}
function endSession() {
// End the run immediately: delegates to completeTraining(), which stops
// stimuli, computes statistics, and shows the results overlay.
completeTraining();
}
function restartTraining() {
// Full page reload — resets all module-level session state (data buffers,
// intervals, Three.js scene) back to a clean slate.
location.reload();
}
function returnToMenu() {
  // Leave the results screen and restore the main menu: hide every
  // in-game overlay and stop the background animation timers.
  document.getElementById('experimentComplete').style.display = 'none';
  document.getElementById('mainMenu').style.display = 'flex';
  const gameOverlayIds = ['gameContainer', 'uiOverlay', 'crosshair', 'bciControlPanel'];
  gameOverlayIds.forEach(id => {
    document.getElementById(id).style.display = 'none';
  });
  // Clean up intervals: decorative neural background + telemetry stream.
  clearInterval(neuralBackgroundInterval);
  clearInterval(dataStreamInterval);
}
// ========== INITIALIZATION ==========
window.onload = () => {
  // Boot sequence: animated menu background first, then the Three.js scene.
  initNeuralBackground();
  initThreeJS();
};
</script>
</body>
</html>