webxos commited on
Commit
cd72078
·
verified ·
1 Parent(s): 94f02fd

Upload webXOS_BCI-FPS_alphav1.html

Browse files
Files changed (1) hide show
  1. webXOS_BCI-FPS_alphav1.html +1686 -0
webXOS_BCI-FPS_alphav1.html ADDED
@@ -0,0 +1,1686 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!-- CONTINUATION OF THE PREVIOUS HTML WITH COMPLETE GAME CODE -->
2
+ <!DOCTYPE html>
3
+ <html lang="en">
4
+ <head>
5
+ <meta charset="UTF-8">
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
7
+ <title>BCI-FPS: Neuralink Brain-Computer Interface Training</title>
8
+ <style>
9
+ /* Previous styles remain the same, adding only new styles */
10
+
11
+ /* === VISUAL STIMULI === */
12
+ .vstim-target {
13
+ position: absolute;
14
+ width: 100px;
15
+ height: 100px;
16
+ border: 3px solid #0ff;
17
+ border-radius: 50%;
18
+ pointer-events: none;
19
+ z-index: 6;
20
+ box-shadow: 0 0 50px #0ff;
21
+ opacity: 0;
22
+ }
23
+
24
+ .vstim-active {
25
+ animation: vstimPulse 0.5s infinite alternate;
26
+ }
27
+
28
+ @keyframes vstimPulse {
29
+ from { opacity: 0.3; }
30
+ to { opacity: 1; }
31
+ }
32
+
33
+ /* === HANDWRITING TRAINING UI === */
34
+ #handwritingCanvas {
35
+ position: fixed;
36
+ top: 50%;
37
+ left: 50%;
38
+ transform: translate(-50%, -50%);
39
+ background: rgba(0, 10, 0, 0.9);
40
+ border: 2px solid #0f0;
41
+ border-radius: 10px;
42
+ z-index: 100;
43
+ display: none;
44
+ }
45
+
46
+ .handwriting-prompt {
47
+ position: fixed;
48
+ top: 30%;
49
+ left: 50%;
50
+ transform: translateX(-50%);
51
+ color: #0f0;
52
+ font-size: 24px;
53
+ text-align: center;
54
+ z-index: 101;
55
+ background: rgba(0, 20, 0, 0.9);
56
+ padding: 20px;
57
+ border: 2px solid #0f0;
58
+ border-radius: 10px;
59
+ display: none;
60
+ }
61
+ </style>
62
+ </head>
63
+ <body>
64
+ <!-- NEURAL NETWORK BACKGROUND -->
65
+ <div class="neural-background" id="neuralBackground"></div>
66
+
67
+ <!-- MAIN MENU -->
68
+ <div id="mainMenu">
69
+ <div class="menu-container">
70
+ <h1 class="bci-title">BCI-FPS</h1>
71
+ <p class="bci-subtitle">Neuralink Brain-Computer Interface Training Platform</p>
72
+
73
+ <div class="research-mission">
74
+ <div class="mission-title">RESEARCH MISSION</div>
75
+ <p class="mission-text">
76
+ *UNDER DEVELOPMENT* by webXOS 2025 // webxos.netlify.app. Record FPS Game Data for Hugging Face.
77
+ This platform generates high-bandwidth neural training data for frontier BCI research.
78
+ Through FPS gameplay, we capture simultaneous intent decoding, calibration-free interface training,
79
+ and task-optimized neural models. All data supports disability research and Neuralink development.
80
+ </p>
81
+ </div>
82
+
83
+ <div class="menu-buttons">
84
+ <button class="bci-btn" onclick="startBCITraining('motor_imagery')">
85
+ <span class="btn-icon">🧠</span> MOTOR IMAGERY TRAINING
86
+ </button>
87
+ <button class="bci-btn" onclick="startBCITraining('simultaneous_intent')">
88
+ <span class="btn-icon">🎯</span> SIMULTANEOUS INTENT DECODING
89
+ </button>
90
+ <button class="bci-btn" onclick="startBCITraining('visual_evoked')">
91
+ <span class="btn-icon">👁️</span> VISUAL EVOKED POTENTIALS
92
+ </button>
93
+ <button class="bci-btn" onclick="startBCITraining('handwriting_intent')">
94
+ <span class="btn-icon">✍️</span> HANDWRITING INTENT
95
+ </button>
96
+ <button class="bci-btn" onclick="startBCITraining('full_spectrum')">
97
+ <span class="btn-icon">⚡</span> FULL SPECTRUM TRAINING
98
+ </button>
99
+ </div>
100
+
101
+ <div class="research-mission" style="margin-top: 30px;">
102
+ <div class="mission-title">EXPORT OPTIONS</div>
103
+ <div style="display: flex; gap: 15px; justify-content: center; margin-top: 15px;">
104
+ <button class="bci-btn" onclick="exportDataset()" style="width: 200px; padding: 15px 30px;">
105
+ <span class="btn-icon">📊</span> EXPORT DATASET
106
+ </button>
107
+ <button class="bci-btn" onclick="showDataPreview()" style="width: 200px; padding: 15px 30px;">
108
+ <span class="btn-icon">👁️</span> PREVIEW DATA
109
+ </button>
110
+ </div>
111
+ </div>
112
+ </div>
113
+ </div>
114
+
115
+ <!-- GAME CONTAINER -->
116
+ <div id="gameContainer"></div>
117
+
118
+ <!-- UI OVERLAY -->
119
+ <div id="uiOverlay" style="display: none;">
120
+ <!-- NEURAL ACTIVITY PANEL -->
121
+ <div id="neuralPanel" class="hud-panel">
122
+ <div class="neural-header">NEURAL ACTIVITY</div>
123
+ <div class="neural-grid" id="neuralChannels">
124
+ <!-- Neural channels will be populated dynamically -->
125
+ </div>
126
+ </div>
127
+
128
+ <!-- BCI INTENT PANEL -->
129
+ <div id="intentPanel" class="hud-panel">
130
+ <div class="neural-header">INTENT DECODING</div>
131
+ <div class="intent-grid" id="intentGrid">
132
+ <!-- Intent items will be populated dynamically -->
133
+ </div>
134
+ </div>
135
+
136
+ <!-- PERFORMANCE PANEL -->
137
+ <div id="performancePanel" class="hud-panel">
138
+ <div class="neural-header">PERFORMANCE METRICS</div>
139
+ <div class="performance-grid">
140
+ <div class="metric-item">
141
+ <div class="metric-label">Bandwidth</div>
142
+ <div class="metric-value" id="bandwidthValue">60 Hz</div>
143
+ </div>
144
+ <div class="metric-item">
145
+ <div class="metric-label">Accuracy</div>
146
+ <div class="metric-value" id="accuracyValue">0%</div>
147
+ </div>
148
+ <div class="metric-item">
149
+ <div class="metric-label">Intent Latency</div>
150
+ <div class="metric-value" id="latencyValue">0 ms</div>
151
+ </div>
152
+ <div class="metric-item">
153
+ <div class="metric-label">Simultaneous Intents</div>
154
+ <div class="metric-value" id="intentsValue">0</div>
155
+ </div>
156
+ </div>
157
+ </div>
158
+
159
+ <!-- DATA STREAM PANEL -->
160
+ <div id="dataStreamPanel" class="hud-panel">
161
+ <div class="neural-header">DATA STREAM</div>
162
+ <div class="data-stream">
163
+ <div class="stream-line" id="dataStream"></div>
164
+ </div>
165
+ </div>
166
+ </div>
167
+
168
+ <!-- CROSSHAIR -->
169
+ <div id="crosshair" style="display: none;">
170
+ <div class="crosshair-dot"></div>
171
+ <div class="crosshair-line horizontal left"></div>
172
+ <div class="crosshair-line horizontal right"></div>
173
+ <div class="crosshair-line vertical top"></div>
174
+ <div class="crosshair-line vertical bottom"></div>
175
+ </div>
176
+
177
+ <!-- TASK INDICATOR -->
178
+ <div id="taskIndicator">
179
+ <div class="task-title" id="taskTitle">MOTOR IMAGERY TRAINING</div>
180
+ <div class="task-description" id="taskDescription">
181
+ Imagine moving your cursor to the target. This trains motor cortex decoding.
182
+ </div>
183
+ <div class="task-progress">
184
+ <div class="task-progress-bar" id="taskProgress"></div>
185
+ </div>
186
+ <div style="color: #0a0; font-size: 14px;" id="taskStatus">Starting...</div>
187
+ </div>
188
+
189
+ <!-- EXPERIMENT COMPLETE MODAL -->
190
+ <div id="experimentComplete">
191
+ <div class="experiment-content">
192
+ <div class="experiment-title">TRAINING SESSION COMPLETE</div>
193
+ <p style="color: #0a0; margin: 20px 0; line-height: 1.6;">
194
+ High-bandwidth neural training data has been successfully recorded.<br>
195
+ This dataset can be used for Neuralink research and disability support development.
196
+ </p>
197
+
198
+ <div class="experiment-results" id="experimentResults">
199
+ <!-- Results populated dynamically -->
200
+ </div>
201
+
202
+ <div style="margin: 30px 0;">
203
+ <button class="bci-btn" onclick="exportDataset()" style="width: 250px; margin: 10px;">
204
+ <span class="btn-icon">📊</span> EXPORT TO HUGGING FACE
205
+ </button>
206
+ <button class="bci-btn" onclick="restartTraining()" style="width: 250px; margin: 10px;">
207
+ <span class="btn-icon">🔄</span> RESTART TRAINING
208
+ </button>
209
+ <button class="bci-btn" onclick="returnToMenu()" style="width: 250px; margin: 10px;">
210
+ <span class="btn-icon">🏠</span> RETURN TO MENU
211
+ </button>
212
+ </div>
213
+ </div>
214
+ </div>
215
+
216
+ <!-- BCI CONTROL PANEL -->
217
+ <div id="bciControlPanel" style="display: none;">
218
+ <button class="bci-control-btn" onclick="pauseTraining()">⏸ PAUSE</button>
219
+ <button class="bci-control-btn" onclick="skipTask()">⏭ SKIP</button>
220
+ <button class="bci-control-btn" onclick="endSession()">⏹ END</button>
221
+ <button class="bci-control-btn" onclick="toggleVisualStimuli()">💡 STIMULI</button>
222
+ </div>
223
+
224
+ <!-- HANDWRITING CANVAS -->
225
+ <canvas id="handwritingCanvas" width="800" height="600"></canvas>
226
+ <div class="handwriting-prompt" id="handwritingPrompt"></div>
227
+
228
+ <!-- Three.js & JSZip Libraries -->
229
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
230
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.10.1/jszip.min.js"></script>
231
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/FileSaver.js/2.0.5/FileSaver.min.js"></script>
232
+
233
+ <script>
234
// ========== GLOBAL CONFIGURATION ==========
// Central tuning knobs for the simulated-BCI session, the gameplay
// tasks, and the dataset export.
const CONFIG = {
    // --- BCI research parameters ---
    SAMPLING_RATE: 1000,          // Hz - simulated Neuralink-level sampling
    BANDWIDTH: 60,                // FPS for visual rendering
    INTENT_DECODING_WINDOW: 100,  // ms window for intent analysis

    // --- Game parameters ---
    VISUAL_STIMULI_FREQUENCIES: [5, 10, 15, 20, 25], // Hz flash rates for c-VEP
    MOTOR_IMAGERY_TRIALS: 50,
    SIMULTANEOUS_INTENT_TASKS: 20,
    HANDWRITING_SAMPLES: 10,

    // --- Data collection ---
    MAX_SAMPLES: 1000000,
    COMPRESSION_ENABLED: true,
    EXPORT_FORMAT: 'arrow',       // 'arrow' | 'jsonl' | 'parquet'

    // --- Neural simulation ---
    NEURAL_CHANNELS: 32,
    NOISE_LEVEL: 0.1,
    SIGNAL_STRENGTH: 0.8
};
257
+
258
// ========== GLOBAL STATE ==========
// Three.js handles, created in initThreeJS().
let scene, camera, renderer;
// Player object and control-state bag.
let player, controls = {};
// In-world target meshes shared by every training mode.
let targets = [];
// Recorded streams: simulated neural samples, decoded intents,
// DOM stimulus elements, and handwriting traces.
let neuralData = [];
let intentStream = [];
let visualStimuli = [];
let handwritingSamples = [];

// Session / task bookkeeping.
let currentMode = null;
let currentTask = 0;
let totalTasks = 0;
let taskStartTime = 0;
let sessionStartTime = 0;

// Raw input state (mouse look accumulators + per-frame deltas).
let mouse = { x: 0, y: 0, dx: 0, dy: 0 };
let keyboard = {};

// FPS measurement.
let fpsCounter = 0;
let lastFpsTime = 0;
let currentFps = 60;

// Interval handles so background animations can be stopped later.
let neuralBackgroundInterval;
let dataStreamInterval;
282
+
283
+ // ========== INITIALIZATION ==========
284
// Builds the animated "neural network" backdrop: 50 random nodes,
// sparse connection lines between ~10% of pairs, and a 3 s timer that
// teleports the nodes. Safe to call repeatedly.
function initNeuralBackground() {
    // FIX: clear any previous timer before rebuilding — the original
    // overwrote the handle, leaking an interval that kept animating
    // nodes already removed by the innerHTML reset below.
    if (neuralBackgroundInterval) {
        clearInterval(neuralBackgroundInterval);
    }

    const bg = document.getElementById('neuralBackground');
    bg.innerHTML = '';

    // Create neural nodes at random viewport positions (percent units).
    for (let i = 0; i < 50; i++) {
        const node = document.createElement('div');
        node.className = 'neural-node';
        node.style.left = `${Math.random() * 100}%`;
        node.style.top = `${Math.random() * 100}%`;
        bg.appendChild(node);
    }

    // Connect ~10% of node pairs with a rotated line element.
    const nodes = bg.querySelectorAll('.neural-node');
    nodes.forEach((node1, i) => {
        nodes.forEach((node2, j) => {
            if (i < j && Math.random() < 0.1) {
                const x1 = parseFloat(node1.style.left);
                const y1 = parseFloat(node1.style.top);
                const x2 = parseFloat(node2.style.left);
                const y2 = parseFloat(node2.style.top);

                const length = Math.sqrt(Math.pow(x2 - x1, 2) + Math.pow(y2 - y1, 2));
                const angle = Math.atan2(y2 - y1, x2 - x1) * 180 / Math.PI;

                const connection = document.createElement('div');
                connection.className = 'neural-connection';
                connection.style.width = `${length}%`;
                connection.style.left = `${x1}%`;
                connection.style.top = `${y1}%`;
                connection.style.transform = `rotate(${angle}deg)`;
                bg.appendChild(connection);
            }
        });
    });

    // Teleport nodes every 3 s. NOTE(review): connection lines are not
    // recomputed here, so they detach from their endpoints after the
    // first move — confirm whether that is intended visual noise.
    neuralBackgroundInterval = setInterval(() => {
        nodes.forEach(node => {
            node.style.left = `${Math.random() * 100}%`;
            node.style.top = `${Math.random() * 100}%`;
        });
    }, 3000);
}
329
+
330
/**
 * Boots the Three.js scene: camera, renderer, lighting, environment,
 * input handlers, and the render loop. Relies on the global THREE
 * library and the #gameContainer element.
 */
function initThreeJS() {
    scene = new THREE.Scene();
    scene.background = new THREE.Color(0x000000);
    scene.fog = new THREE.Fog(0x000000, 50, 200);

    // Wide-FOV first-person camera at roughly eye height.
    camera = new THREE.PerspectiveCamera(90, window.innerWidth / window.innerHeight, 0.1, 1000);
    camera.position.y = 1.6;

    renderer = new THREE.WebGLRenderer({ antialias: true });
    renderer.setSize(window.innerWidth, window.innerHeight);
    renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2)); // cap DPR for perf
    document.getElementById('gameContainer').appendChild(renderer.domElement);

    // Dim green ambient + directional lighting to match the HUD theme.
    const ambientLight = new THREE.AmbientLight(0x00ff00, 0.1);
    scene.add(ambientLight);
    const directionalLight = new THREE.DirectionalLight(0x00ff00, 0.5);
    directionalLight.position.set(10, 20, 5);
    scene.add(directionalLight);

    createEnvironment(); // ground plane + training targets
    setupControls();     // mouse/keyboard/pointer-lock handlers
    animate();           // start the render loop (defined elsewhere)
}
360
+
361
/**
 * Populates the scene with a wireframe ground plane and 20 spherical
 * "neural targets", and appends 5 hidden visual-stimulus divs to the
 * DOM for the c-VEP mode.
 */
function createEnvironment() {
    // Wireframe ground plane, rotated flat.
    const ground = new THREE.Mesh(
        new THREE.PlaneGeometry(100, 100, 50, 50),
        new THREE.MeshBasicMaterial({
            color: 0x003300,
            wireframe: true,
            transparent: true,
            opacity: 0.3
        })
    );
    ground.rotation.x = -Math.PI / 2;
    scene.add(ground);

    // Twenty wireframe spheres scattered around the arena.
    const freqs = CONFIG.VISUAL_STIMULI_FREQUENCIES;
    for (let i = 0; i < 20; i++) {
        const target = new THREE.Mesh(
            new THREE.SphereGeometry(0.5, 8, 8),
            new THREE.MeshBasicMaterial({
                color: 0x00ff00,
                wireframe: true,
                transparent: true,
                opacity: 0.6
            })
        );

        target.position.set(
            (Math.random() - 0.5) * 80,
            1 + Math.random() * 5,
            (Math.random() - 0.5) * 80
        );

        // Per-target metadata consumed by the training modes.
        target.userData = {
            type: 'neural_target',
            active: false,
            frequency: freqs[Math.floor(Math.random() * freqs.length)],
            lastFlash: 0
        };

        scene.add(target);
        targets.push(target);
    }

    // Hidden DOM overlays used by the visual-evoked (c-VEP) mode.
    for (let i = 0; i < 5; i++) {
        const stim = document.createElement('div');
        stim.className = 'vstim-target';
        stim.id = `vstim-${i}`;
        document.body.appendChild(stim);
    }
}
410
+
411
/**
 * Wires up all input: pointer-lock mouse look, keyboard state tracking,
 * mouse buttons, pointer-lock acquisition, and window resize. Movement
 * keys and clicks are additionally logged via recordIntent().
 */
function setupControls() {
    // Mouse look — only while pointer lock is held on <body>.
    document.addEventListener('mousemove', (e) => {
        if (document.pointerLockElement !== document.body) return;

        mouse.dx = e.movementX;
        mouse.dy = e.movementY;
        mouse.x += mouse.dx * 0.002;
        mouse.y += mouse.dy * 0.002;
        // Clamp pitch to straight up / straight down.
        mouse.y = Math.max(-Math.PI / 2, Math.min(Math.PI / 2, mouse.y));

        camera.rotation.order = 'YXZ';
        camera.rotation.y = -mouse.x;
        camera.rotation.x = -mouse.y;
    });

    // Keyboard: track held keys; movement keys are recorded as intents.
    document.addEventListener('keydown', (e) => {
        const key = e.key.toLowerCase();
        keyboard[key] = true;
        if (['w', 'a', 's', 'd', ' '].includes(key)) {
            recordIntent({
                type: 'key_press',
                key: key,
                timestamp: Date.now(),
                position: camera.position.toArray(),
                rotation: [camera.rotation.x, camera.rotation.y, camera.rotation.z]
            });
        }
    });

    document.addEventListener('keyup', (e) => {
        keyboard[e.key.toLowerCase()] = false;
    });

    // Mouse buttons: the synthetic 'mouse' key mirrors the held state.
    document.addEventListener('mousedown', () => {
        keyboard['mouse'] = true;
        recordIntent({
            type: 'mouse_click',
            button: 'left',
            timestamp: Date.now(),
            target: getAimedTarget()
        });
    });

    document.addEventListener('mouseup', () => {
        keyboard['mouse'] = false;
    });

    // Clicking the page (re)acquires pointer lock.
    document.body.addEventListener('click', () => {
        if (!document.pointerLockElement) {
            document.body.requestPointerLock();
        }
    });

    // Keep camera/renderer in sync with the viewport.
    window.addEventListener('resize', () => {
        camera.aspect = window.innerWidth / window.innerHeight;
        camera.updateProjectionMatrix();
        renderer.setSize(window.innerWidth, window.innerHeight);
    });
}
480
+
481
+ // ========== BCI TRAINING MODES ==========
482
/**
 * Entry point for a training session. Hides the menu, shows the in-game
 * HUD, initializes the panels, starts the selected mode, and begins
 * continuous data collection.
 * @param {string} mode 'motor_imagery' | 'simultaneous_intent' |
 *        'visual_evoked' | 'handwriting_intent' | 'full_spectrum'
 */
function startBCITraining(mode) {
    currentMode = mode;
    sessionStartTime = Date.now();

    // Swap the menu UI for the in-game HUD.
    document.getElementById('mainMenu').style.display = 'none';
    document.getElementById('gameContainer').style.display = 'block';
    document.getElementById('uiOverlay').style.display = 'grid';
    document.getElementById('crosshair').style.display = 'block';
    document.getElementById('bciControlPanel').style.display = 'flex';

    initNeuralUI();
    initDataStream();

    // Dispatch table replaces the original switch; same five modes.
    const starters = {
        motor_imagery: startMotorImageryTraining,
        simultaneous_intent: startSimultaneousIntentTraining,
        visual_evoked: startVisualEvokedTraining,
        handwriting_intent: startHandwritingIntentTraining,
        full_spectrum: startFullSpectrumTraining
    };
    const starter = starters[mode];
    if (starter) starter();

    startDataCollection();
}
519
+
520
/**
 * Begins the motor-imagery mode: CONFIG.MOTOR_IMAGERY_TRIALS trials,
 * each highlighting one target. The first trial starts after a 2 s
 * instruction screen.
 */
function startMotorImageryTraining() {
    totalTasks = CONFIG.MOTOR_IMAGERY_TRIALS;
    currentTask = 0;

    showTaskIndicator(
        "MOTOR IMAGERY TRAINING",
        "Imagine moving your cursor to the target. This trains motor cortex decoding for prosthetic control.",
        "Starting trial..."
    );

    // Give the player a moment to read the instructions.
    setTimeout(nextMotorImageryTask, 2000);
}
534
+
535
/**
 * Runs one motor-imagery trial: lights a random target yellow for 3 s,
 * records the trial result, then chains into the next trial after a
 * 1 s rest. Completes the session once all trials have run.
 */
function nextMotorImageryTask() {
    if (currentTask >= totalTasks) {
        completeTraining();
        return;
    }

    currentTask++;
    taskStartTime = Date.now();

    // Pick and highlight one random target.
    const target = targets[Math.floor(Math.random() * targets.length)];
    target.userData.active = true;
    target.material.color.setHex(0xffff00);

    updateTaskIndicator(
        `Trial ${currentTask}/${totalTasks}`,
        `Imagine moving to the glowing target. Focus on the intent to move.`
    );

    // Trial window: 3 s active, then score and rest 1 s before the next.
    setTimeout(() => {
        target.userData.active = false;
        target.material.color.setHex(0x00ff00);

        recordNeuralData({
            type: 'motor_imagery_trial',
            trial: currentTask,
            duration: Date.now() - taskStartTime,
            target_position: target.position.toArray(),
            accuracy: calculateAccuracy(target)
        });

        setTimeout(nextMotorImageryTask, 1000);
    }, 3000);
}
574
+
575
/**
 * Begins the simultaneous-intent mode: CONFIG.SIMULTANEOUS_INTENT_TASKS
 * tasks combining movement (WASD) with aiming, after a 2 s intro.
 */
function startSimultaneousIntentTraining() {
    totalTasks = CONFIG.SIMULTANEOUS_INTENT_TASKS;
    currentTask = 0;

    showTaskIndicator(
        "SIMULTANEOUS INTENT DECODING",
        "Move (WASD) while aiming at targets. This trains decoding multiple simultaneous intents.",
        "Starting task..."
    );

    // Short intro delay before the first task.
    setTimeout(nextSimultaneousIntentTask, 2000);
}
589
+
590
/**
 * Runs one simultaneous-intent task: activates three DISTINCT random
 * targets (red) for 5 s, records the task, then schedules the next one
 * after a 1 s rest. Completes the session once all tasks have run.
 */
function nextSimultaneousIntentTask() {
    if (currentTask >= totalTasks) {
        completeTraining();
        return;
    }

    currentTask++;
    taskStartTime = Date.now();

    // FIX: the original sampled with replacement, so the same target
    // could be chosen twice and fewer than 3 objectives would light up
    // (and be deactivated twice). Sample without replacement instead.
    const pool = targets.slice();
    const activeTargets = [];
    for (let i = 0; i < 3 && pool.length > 0; i++) {
        const idx = Math.floor(Math.random() * pool.length);
        const target = pool.splice(idx, 1)[0];
        target.userData.active = true;
        target.material.color.setHex(0xff0000);
        activeTargets.push(target);
    }

    updateTaskIndicator(
        `Task ${currentTask}/${totalTasks}`,
        `Move while aiming at all red targets. Focus on simultaneous movement and aiming.`
    );

    // Task runs for 5 seconds, then deactivate, record, and chain.
    setTimeout(() => {
        activeTargets.forEach(target => {
            target.userData.active = false;
            target.material.color.setHex(0x00ff00);
        });

        recordNeuralData({
            type: 'simultaneous_intent_task',
            task: currentTask,
            duration: Date.now() - taskStartTime,
            active_targets: activeTargets.map(t => t.position.toArray()),
            simultaneous_actions: countSimultaneousActions()
        });

        setTimeout(nextSimultaneousIntentTask, 1000);
    }, 5000);
}
635
+
636
/**
 * Begins the c-VEP mode: flashes the DOM stimulus overlays for a fixed
 * 60 s window, then stops them and completes the session.
 */
function startVisualEvokedTraining() {
    showTaskIndicator(
        "VISUAL EVOKED POTENTIALS",
        "Focus on the flashing targets. This trains c-VEP decoding for non-verbal communication.",
        "Starting visual stimulation..."
    );

    startVisualStimuli();

    // Fixed-length stimulation block.
    setTimeout(() => {
        stopVisualStimuli();
        completeTraining();
    }, 60000);
}
652
+
653
/**
 * Begins the handwriting mode: CONFIG.HANDWRITING_SAMPLES letter-tracing
 * tasks, with the first starting after a 2 s intro.
 */
function startHandwritingIntentTraining() {
    totalTasks = CONFIG.HANDWRITING_SAMPLES;
    currentTask = 0;

    showTaskIndicator(
        "HANDWRITING INTENT TRAINING",
        "Trace the letters with precision aiming. This trains fine motor control decoding.",
        "Starting letter tracing..."
    );

    setTimeout(nextHandwritingTask, 2000);
}
667
+
668
/**
 * Runs one handwriting task: shows a random letter prompt, samples the
 * aim trace (position/velocity) at ~60 Hz for 3 s, stores the trace in
 * handwritingSamples, records a summary, then chains the next task
 * after a 1 s rest.
 */
function nextHandwritingTask() {
    if (currentTask >= totalTasks) {
        completeTraining();
        return;
    }

    currentTask++;

    // Pick a random uppercase letter to trace.
    const letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
    const letter = letters[Math.floor(Math.random() * letters.length)];
    showHandwritingPrompt(letter);

    const startTime = Date.now();
    const handwritingSession = {
        letter: letter,
        start_time: startTime,
        samples: []
    };

    // ~60 Hz sampling of the aim trace while the prompt is visible.
    const sampleInterval = setInterval(() => {
        handwritingSession.samples.push({
            timestamp: Date.now(),
            position: [mouse.x, mouse.y],
            velocity: [mouse.dx, mouse.dy],
            pressure: Math.random() // simulated pressure data
        });
    }, 16);

    // Close out after 3 s, then rest 1 s before the next letter.
    setTimeout(() => {
        clearInterval(sampleInterval);
        handwritingSession.end_time = Date.now();
        handwritingSession.duration = Date.now() - startTime;
        handwritingSamples.push(handwritingSession);

        hideHandwritingPrompt();

        recordNeuralData({
            type: 'handwriting_sample',
            sample: currentTask,
            letter: letter,
            duration: handwritingSession.duration,
            samples_count: handwritingSession.samples.length
        });

        setTimeout(nextHandwritingTask, 1000);
    }, 3000);
}
722
+
723
/**
 * Runs all four training modes back-to-back (30 s each), each preceded
 * by a 3 s countdown, then completes the session. Each sub-mode is a
 * lightweight inline variant of its standalone counterpart.
 */
function startFullSpectrumTraining() {
    const modes = [
        { mode: 'motor_imagery', duration: 30000 },
        { mode: 'simultaneous_intent', duration: 30000 },
        { mode: 'visual_evoked', duration: 30000 },
        { mode: 'handwriting_intent', duration: 30000 }
    ];

    let currentModeIndex = 0;

    // Advances to the next mode in the list; recursion ends the session.
    function runNextMode() {
        if (currentModeIndex >= modes.length) {
            completeTraining();
            return;
        }

        const mode = modes[currentModeIndex];
        currentModeIndex++;

        showTaskIndicator(
            `FULL SPECTRUM TRAINING - ${mode.mode.toUpperCase()}`,
            `Complete the ${mode.mode.replace('_', ' ')} task.`,
            "Starting in 3 seconds..."
        );

        // 3 s countdown, then run the mode for its configured duration.
        setTimeout(() => {
            const startTime = Date.now();

            switch (mode.mode) {
                case 'motor_imagery': {
                    // Flash a random target yellow once per second.
                    const interval = setInterval(() => {
                        const target = targets[Math.floor(Math.random() * targets.length)];
                        target.material.color.setHex(0xffff00);
                        setTimeout(() => {
                            target.material.color.setHex(0x00ff00);
                        }, 500);
                    }, 1000);

                    setTimeout(() => {
                        clearInterval(interval);
                        runNextMode();
                    }, mode.duration);
                    break;
                }

                case 'simultaneous_intent': {
                    // Turn every target red/active for the whole window.
                    targets.forEach(t => {
                        t.material.color.setHex(0xff0000);
                        t.userData.active = true;
                    });

                    setTimeout(() => {
                        targets.forEach(t => {
                            t.material.color.setHex(0x00ff00);
                            t.userData.active = false;
                        });
                        runNextMode();
                    }, mode.duration);
                    break;
                }

                case 'visual_evoked': {
                    startVisualStimuli();
                    setTimeout(() => {
                        stopVisualStimuli();
                        runNextMode();
                    }, mode.duration);
                    break;
                }

                case 'handwriting_intent': {
                    // Show letters A-D for 2 s each, spaced 3 s apart.
                    const letters = 'ABCD';
                    let letterIndex = 0;

                    const letterInterval = setInterval(() => {
                        if (letterIndex >= letters.length) {
                            clearInterval(letterInterval);
                            runNextMode();
                            return;
                        }

                        showHandwritingPrompt(letters[letterIndex]);
                        letterIndex++;

                        setTimeout(() => {
                            hideHandwritingPrompt();
                        }, 2000);
                    }, 3000);
                    break;
                }
            }
        }, 3000);
    }

    runNextMode();
}
821
+
822
+ // ========== DATA COLLECTION ==========
823
/**
 * Starts the three continuous collection loops (neural samples, intent
 * stream, performance metrics).
 *
 * FIX: the original discarded all three setInterval handles, so the
 * loops could never be stopped and stacked up on every new session.
 * The handles are now stored on the function object and any leftover
 * loops are cleared before new ones start.
 *
 * NOTE(review): browsers clamp setInterval to ~4 ms minimum, so the
 * "1000 Hz" neural loop actually runs at roughly 250 Hz — confirm
 * whether the stated sampling rate matters for the exported dataset.
 */
function startDataCollection() {
    // Stop any loops left over from a previous session.
    (startDataCollection.intervals || []).forEach(clearInterval);

    startDataCollection.intervals = [
        // Simulated high-frequency neural sampling (1 ms requested).
        setInterval(collectNeuralData, 1),
        // Intent stream at ~60 Hz.
        setInterval(collectIntentStream, 16.67),
        // Performance metrics once per second.
        setInterval(updatePerformanceMetrics, 1000)
    ];
}
839
+
840
/**
 * Generates one simulated multi-channel neural sample (per-channel sine
 * carrier + white noise + intent modulation), tags it with the current
 * input/target context, appends it to neuralData, and refreshes the
 * channel HUD.
 *
 * FIX: neuralData now honors CONFIG.MAX_SAMPLES — the original grew the
 * array without bound at ~1 kHz, which exhausts memory in long sessions
 * even though a cap was configured.
 */
function collectNeuralData() {
    const neuralSample = {
        timestamp: Date.now(),
        session_time: Date.now() - sessionStartTime,
        channels: {}
    };

    for (let i = 0; i < CONFIG.NEURAL_CHANNELS; i++) {
        // Per-channel sine carrier, noise, plus intent coupling.
        const baseSignal = Math.sin(Date.now() / 1000 * (i + 1)) * CONFIG.SIGNAL_STRENGTH;
        const noise = (Math.random() - 0.5) * 2 * CONFIG.NOISE_LEVEL;
        const intentModulation = calculateIntentModulation(i);

        neuralSample.channels[`channel_${i}`] = baseSignal + noise + intentModulation;
    }

    // Snapshot of what the player was doing when the sample was taken.
    neuralSample.intent_context = {
        mouse_movement: [mouse.dx, mouse.dy],
        keyboard_state: { ...keyboard },
        camera_rotation: [camera.rotation.x, camera.rotation.y, camera.rotation.z],
        active_targets: targets.filter(t => t.userData.active).length
    };

    // Drop the oldest sample once the configured cap is reached.
    if (neuralData.length >= CONFIG.MAX_SAMPLES) {
        neuralData.shift();
    }
    neuralData.push(neuralSample);

    updateNeuralChannels(neuralSample.channels);
}
870
+
871
/**
 * Captures one ~60 Hz snapshot of the full input/camera/environment
 * state, appends it to intentStream, and mirrors it to the HUD data
 * stream panel.
 */
function collectIntentStream() {
    const intentSample = {
        timestamp: Date.now(),
        session_time: Date.now() - sessionStartTime,
        mouse: {
            position: [mouse.x, mouse.y],
            delta: [mouse.dx, mouse.dy],
            buttons: keyboard['mouse'] ? 1 : 0
        },
        keyboard: { ...keyboard },
        camera: {
            position: camera.position.toArray(),
            rotation: [camera.rotation.x, camera.rotation.y, camera.rotation.z]
        },
        environment: {
            // Position + range of every currently-active target.
            active_targets: targets.filter(t => t.userData.active).map(t => ({
                position: t.position.toArray(),
                distance: t.position.distanceTo(camera.position)
            })),
            fps: currentFps
        }
    };

    intentStream.push(intentSample);
    updateDataStream(intentSample);
}
899
+
900
/**
 * Appends a discrete intent event (key press, click, ...) to the intent
 * stream, stamped with session-relative time and the current neural
 * context.
 * @param {Object} intent event payload from the input handlers
 */
function recordIntent(intent) {
    const entry = {
        ...intent,
        session_time: Date.now() - sessionStartTime,
        neural_context: getCurrentNeuralContext()
    };
    intentStream.push(entry);
}
907
+
908
/**
 * Appends a labeled training event (trial/task summary) to neuralData,
 * stamped with wall-clock + session time and both context snapshots.
 * @param {Object} data mode-specific event payload
 */
function recordNeuralData(data) {
    const entry = {
        ...data,
        timestamp: Date.now(),
        session_time: Date.now() - sessionStartTime,
        intent_context: getCurrentIntentContext(),
        neural_context: getCurrentNeuralContext()
    };
    neuralData.push(entry);
}
917
+
918
+ // ========== UI FUNCTIONS ==========
919
/**
 * Builds the HUD panels: one readout per neural channel (first 8 of the
 * simulated channels only) and one cell per decodable intent.
 */
function initNeuralUI() {
    // Channel readouts CH 0..CH 7.
    const channelsDiv = document.getElementById('neuralChannels');
    channelsDiv.innerHTML = '';
    for (let i = 0; i < 8; i++) {
        const channel = document.createElement('div');
        channel.className = 'neural-channel';
        channel.innerHTML = `
            <div class="channel-label">CH ${i}</div>
            <div class="channel-value" id="neuralChannel${i}">0.00</div>
        `;
        channelsDiv.appendChild(channel);
    }

    // Intent cells, one per named action.
    const intentGrid = document.getElementById('intentGrid');
    intentGrid.innerHTML = '';
    ['MOVE', 'AIM', 'FIRE', 'JUMP', 'RELOAD', 'CROUCH'].forEach(intent => {
        const item = document.createElement('div');
        item.className = 'intent-item';
        item.id = `intent-${intent.toLowerCase()}`;
        item.innerHTML = `
            <div class="intent-label">${intent}</div>
            <div class="intent-value">0%</div>
        `;
        intentGrid.appendChild(item);
    });
}
950
+
951
function initDataStream() {
    // Every 100 ms, prepend the newest intent sample to the data stream
    // panel and trim the panel to its 20 most recent lines.
    dataStreamInterval = setInterval(() => {
        if (intentStream.length === 0) return;

        const sample = intentStream[intentStream.length - 1];
        const line = `[${sample.timestamp}] INTENT: ${JSON.stringify(sample.mouse.delta)}<br>`;
        const stream = document.getElementById('dataStream');
        stream.innerHTML = line + stream.innerHTML;

        // The panel's content is text nodes separated by <br> elements, so
        // the old `stream.children.length` check counted only the <br>s and
        // `removeChild(lastChild)` removed one node at a time, letting text
        // accumulate without bound. Trim on the markup instead (same scheme
        // as updateDataStream).
        const lines = stream.innerHTML.split('<br>');
        if (lines.length > 20) {
            stream.innerHTML = lines.slice(0, 20).join('<br>');
        }
    }, 100);
}
965
+
966
function updateNeuralChannels(channels) {
    // Refresh the 8 on-screen channel readouts; brighter colour means
    // stronger signal magnitude.
    for (let i = 0; i < 8; i++) {
        const element = document.getElementById(`neuralChannel${i}`);
        const value = channels[`channel_${i}`];
        if (!element || value === undefined) continue;

        element.textContent = value.toFixed(2);

        // Thresholds: > 0.5 strong (yellow), > 0.2 moderate (bright green),
        // otherwise idle (dim green).
        const magnitude = Math.abs(value);
        let color = '#0a0';
        if (magnitude > 0.5) {
            color = '#ff0';
        } else if (magnitude > 0.2) {
            color = '#0f0';
        }
        element.style.color = color;
    }
}
985
+
986
function updatePerformanceMetrics() {
    // Refresh the performance HUD: bandwidth, accuracy, (simulated)
    // latency, and the simultaneous-intent count.

    // Bandwidth: neural samples recorded within the last second.
    const bandwidth = neuralData.filter(d =>
        Date.now() - d.timestamp < 1000
    ).length;

    document.getElementById('bandwidthValue').textContent = `${bandwidth} Hz`;

    // Accuracy: fraction of motor-imagery trials scoring above 0.7.
    const hits = neuralData.filter(d =>
        d.type === 'motor_imagery_trial' && d.accuracy > 0.7
    ).length;
    const totalTrials = neuralData.filter(d =>
        d.type === 'motor_imagery_trial'
    ).length;

    const accuracy = totalTrials > 0 ? Math.round((hits / totalTrials) * 100) : 0;
    document.getElementById('accuracyValue').textContent = `${accuracy}%`;

    // Latency is simulated (50-100 ms); there is no real decode pipeline.
    const latency = Math.random() * 50 + 50;
    document.getElementById('latencyValue').textContent = `${latency.toFixed(1)} ms`;

    // Simultaneous intents. Keys never pressed are `undefined`, and
    // `undefined + true` is NaN, so coerce each flag to 0/1 before summing
    // instead of adding the raw values.
    const simultaneous =
        ['w', 'a', 's', 'd', 'mouse'].reduce((n, k) => n + (keyboard[k] ? 1 : 0), 0);
    document.getElementById('intentsValue').textContent = simultaneous;

    // Keep the per-intent indicator grid in sync.
    updateIntentIndicators();
}
1016
+
1017
function updateIntentIndicators() {
    // Estimated activation per intent (0-100%), derived from raw input
    // state. Indicators above 50% get the 'intent-active' highlight.
    const estimators = {
        move: () => (keyboard['w'] || keyboard['a'] || keyboard['s'] || keyboard['d']) ? 100 : 0,
        aim: () => (Math.abs(mouse.dx) > 1 || Math.abs(mouse.dy) > 1) ? 80 : 20,
        fire: () => keyboard['mouse'] ? 100 : 0,
        jump: () => keyboard[' '] ? 100 : 0
    };

    for (const intent of ['move', 'aim', 'fire', 'jump', 'reload', 'crouch']) {
        const element = document.getElementById(`intent-${intent}`);
        if (!element) continue;

        // Intents without an estimator (reload, crouch) stay at 0.
        const value = estimators[intent] ? estimators[intent]() : 0;

        element.querySelector('.intent-value').textContent = `${value}%`;
        element.classList.toggle('intent-active', value > 50);
    }
}
1049
+
1050
function updateDataStream(sample) {
    // Prepend a formatted line for the newest intent sample and trim the
    // panel to its 20 most recent lines.
    const stream = document.getElementById('dataStream');
    // HH:MM:SS.mmm extracted from the ISO timestamp. slice() replaces the
    // deprecated substr(11, 12) — same 12-character window.
    const time = new Date(sample.timestamp).toISOString().slice(11, 23);
    const line = `[${time}] INTENT: Δ(${sample.mouse.delta[0].toFixed(2)}, ${sample.mouse.delta[1].toFixed(2)})<br>`;
    stream.innerHTML = line + stream.innerHTML;

    // Keep only last 20 lines
    const lines = stream.innerHTML.split('<br>');
    if (lines.length > 20) {
        stream.innerHTML = lines.slice(0, 20).join('<br>');
    }
}
1062
+
1063
+ // ========== VISUAL STIMULI ==========
1064
function startVisualStimuli() {
    // Activate the 5 SSVEP-style flicker stimuli: position them, flash each
    // at its configured frequency, and log one stimulus event per second.
    const positions = [
        { x: '20%', y: '20%' },
        { x: '80%', y: '20%' },
        { x: '50%', y: '50%' },
        { x: '20%', y: '80%' },
        { x: '80%', y: '80%' }
    ];

    CONFIG.VISUAL_STIMULI_FREQUENCIES.forEach((freq, index) => {
        const stim = document.getElementById(`vstim-${index}`);
        if (!stim) return;

        // Cancel timers left over from a previous activation; the original
        // code discarded the interval IDs, so every toggle stacked extra
        // intervals that could never be cleared.
        (stim._stimIntervals || []).forEach(clearInterval);

        stim.style.left = positions[index].x;
        stim.style.top = positions[index].y;
        stim.classList.add('vstim-active');

        // Flash at the stimulus frequency (one opacity toggle per period).
        const flashId = setInterval(() => {
            stim.style.opacity = stim.style.opacity === '1' ? '0.3' : '1';
        }, 1000 / freq);

        // Record a stimulus event once per second for the dataset.
        const recordId = setInterval(() => {
            recordNeuralData({
                type: 'visual_stimulus',
                stimulus_id: index,
                frequency: freq,
                position: positions[index],
                timestamp: Date.now()
            });
        }, 1000);

        // Stash the IDs on the element so stopVisualStimuli / a restart can
        // cancel them.
        stim._stimIntervals = [flashId, recordId];
    });
}
1099
+
1100
function stopVisualStimuli() {
    // Hide all 5 stimuli and cancel their timers. Previously only the
    // elements were hidden — the flash/recording intervals kept firing
    // forever. The guard makes this safe even when no IDs were stored.
    for (let i = 0; i < 5; i++) {
        const stim = document.getElementById(`vstim-${i}`);
        if (!stim) continue;

        stim.classList.remove('vstim-active');
        stim.style.opacity = '0';

        (stim._stimIntervals || []).forEach(clearInterval);
        stim._stimIntervals = [];
    }
}
1109
+
1110
function toggleVisualStimuli() {
    // Flip between running and stopped, based on whether any stimulus
    // element currently carries the active class.
    if (document.querySelector('.vstim-active')) {
        stopVisualStimuli();
    } else {
        startVisualStimuli();
    }
}
1118
+
1119
+ // ========== HANDWRITING TRAINING ==========
1120
function showHandwritingPrompt(letter) {
    // Show the tracing prompt plus a canvas pre-filled with a faint
    // template of the requested letter, then wire up freehand drawing.
    const prompt = document.getElementById('handwritingPrompt');
    const canvas = document.getElementById('handwritingCanvas');

    prompt.textContent = `Trace the letter: ${letter}`;
    prompt.style.display = 'block';
    canvas.style.display = 'block';

    // Reset the canvas and paint the translucent template glyph.
    const ctx = canvas.getContext('2d');
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    ctx.strokeStyle = '#0f0';
    ctx.lineWidth = 3;
    ctx.font = '200px Courier New';
    ctx.fillStyle = 'rgba(0, 255, 0, 0.1)';
    ctx.textAlign = 'center';
    ctx.textBaseline = 'middle';
    ctx.fillText(letter, canvas.width / 2, canvas.height / 2);

    // Freehand stroke capture while the mouse button is held.
    let drawing = false;

    canvas.onmousedown = () => {
        drawing = true;
        ctx.beginPath();
    };

    canvas.onmouseup = () => {
        drawing = false;
    };

    canvas.onmousemove = (event) => {
        if (!drawing) return;

        const rect = canvas.getBoundingClientRect();
        // The first lineTo after beginPath acts as a moveTo, so the stroke
        // starts where the pointer first moves.
        ctx.lineTo(event.clientX - rect.left, event.clientY - rect.top);
        ctx.stroke();
    };
}
1162
+
1163
function hideHandwritingPrompt() {
    // Hide both the text prompt and the tracing canvas.
    for (const id of ['handwritingPrompt', 'handwritingCanvas']) {
        document.getElementById(id).style.display = 'none';
    }
}
1167
+
1168
+ // ========== TASK MANAGEMENT ==========
1169
function showTaskIndicator(title, description, status) {
    // Reset the task HUD for a new task (progress back to 0%) and show it.
    const setText = (id, text) => {
        document.getElementById(id).textContent = text;
    };

    setText('taskTitle', title);
    setText('taskDescription', description);
    setText('taskStatus', status);

    document.getElementById('taskProgress').style.width = '0%';
    document.getElementById('taskIndicator').style.display = 'block';
}
1176
+
1177
function updateTaskIndicator(status, description) {
    // Refresh the HUD status text; the description argument is optional.
    document.getElementById('taskStatus').textContent = status;
    if (description) {
        document.getElementById('taskDescription').textContent = description;
    }

    // Progress: task-count based when totalTasks is set, otherwise
    // time-based assuming a 60-second task window. Capped at 100%.
    let progress;
    if (totalTasks > 0) {
        progress = (currentTask / totalTasks) * 100;
    } else {
        progress = (Date.now() - taskStartTime) / 60000 * 100;
    }
    document.getElementById('taskProgress').style.width = `${Math.min(100, progress)}%`;
}
1188
+
1189
function hideTaskIndicator() {
    // Remove the task HUD from view.
    const indicator = document.getElementById('taskIndicator');
    indicator.style.display = 'none';
}
1192
+
1193
// Finalize the session: hide the task HUD and stimuli, compute summary
// statistics, populate the results overlay, and show it.
function completeTraining() {
    hideTaskIndicator();
    stopVisualStimuli();

    // Calculate session statistics
    const sessionDuration = Date.now() - sessionStartTime;
    const totalSamples = neuralData.length + intentStream.length;
    // Average samples/second over the whole session.
    const bandwidth = Math.round(totalSamples / (sessionDuration / 1000));

    // Show results. "File Size" is a rough estimate using a hard-coded
    // 0.1 factor per sample — NOTE(review): units look off (bytes vs KB);
    // confirm against actual export sizes.
    document.getElementById('experimentResults').innerHTML = `
        <div class="result-item">
            <div class="result-label">Training Mode</div>
            <div class="result-value">${currentMode.replace('_', ' ').toUpperCase()}</div>
        </div>
        <div class="result-item">
            <div class="result-label">Duration</div>
            <div class="result-value">${Math.round(sessionDuration / 1000)}s</div>
        </div>
        <div class="result-item">
            <div class="result-label">Samples Collected</div>
            <div class="result-value">${totalSamples.toLocaleString()}</div>
        </div>
        <div class="result-item">
            <div class="result-label">Data Bandwidth</div>
            <div class="result-value">${bandwidth} Hz</div>
        </div>
        <div class="result-item">
            <div class="result-label">Neural Channels</div>
            <div class="result-value">${CONFIG.NEURAL_CHANNELS}</div>
        </div>
        <div class="result-item">
            <div class="result-label">File Size</div>
            <div class="result-value">${Math.round((totalSamples * 0.1) / 1024)} KB</div>
        </div>
    `;

    document.getElementById('experimentComplete').style.display = 'flex';
}
1232
+
1233
+ // ========== EXPORT FUNCTIONALITY ==========
1234
// Export the collected session as a Hugging Face-ready ZIP archive:
// JSONL data files, metadata.json, a dataset card (README.md), and a
// Python loading script. Depends on the JSZip and FileSaver (saveAs)
// globals being loaded by the page.
async function exportDataset() {
    const sessionId = `bci_fps_${currentMode}_${Date.now()}`;

    // Create dataset metadata following Hugging Face format
    const metadata = {
        dataset_info: {
            name: `BCI-FPS_${currentMode.toUpperCase()}_Dataset`,
            description: `High-bandwidth neural training data for BCI research. Mode: ${currentMode}`,
            version: "1.0.0",
            license: "MIT",
            citation: `@misc{bci_fps_${currentMode}_2024,\n title={BCI-FPS ${currentMode} Training Dataset},\n author={Neuralink Research},\n year={2024},\n note={High-frequency intent decoding data for brain-computer interface development}\n}`
        },

        session_info: {
            session_id: sessionId,
            mode: currentMode,
            start_time: new Date(sessionStartTime).toISOString(),
            duration_ms: Date.now() - sessionStartTime,
            sampling_rate_hz: CONFIG.SAMPLING_RATE,
            neural_channels: CONFIG.NEURAL_CHANNELS
        },

        // Human-readable schema documentation shipped with the data.
        data_schema: {
            neural_data: {
                timestamp: "UNIX timestamp in milliseconds",
                session_time: "Time since session start in milliseconds",
                channels: "Object mapping channel names to neural signal values",
                intent_context: "Contextual information about user intent"
            },
            intent_stream: {
                timestamp: "UNIX timestamp in milliseconds",
                mouse: "Mouse position and movement data",
                keyboard: "Keyboard state",
                camera: "Camera position and rotation",
                environment: "Game environment state"
            },
            handwriting_samples: {
                letter: "Letter being traced",
                samples: "Array of handwriting samples with position and pressure data"
            }
        },

        research_applications: [
            "Motor imagery decoding for prosthetic control",
            "Simultaneous intent decoding for fluid BCI interfaces",
            "Visual evoked potential (c-VEP) calibration",
            "Handwriting intent recognition for text entry",
            "Neural network training for brain-computer interfaces"
        ],

        huggingface: {
            compatible: true,
            task_categories: ["brain-computer-interface", "neural-decoding", "human-computer-interaction"],
            task_ids: ["motor-imagery", "intent-decoding", "visual-evoked-potentials", "handwriting-recognition"],
            language: ["en"],
            size_categories: ["10K<n<100K"]
        }
    };

    // Create ZIP archive
    const zip = new JSZip();

    // Add data files in Apache Arrow compatible format
    // For now using JSONL, but could be converted to Parquet
    zip.file("neural_data.jsonl",
        neuralData.map(d => JSON.stringify(d)).join('\n'));

    zip.file("intent_stream.jsonl",
        intentStream.map(d => JSON.stringify(d)).join('\n'));

    // Handwriting data is optional — only written when samples exist.
    if (handwritingSamples.length > 0) {
        zip.file("handwriting_samples.json",
            JSON.stringify(handwritingSamples, null, 2));
    }

    zip.file("metadata.json", JSON.stringify(metadata, null, 2));

    // Create Hugging Face dataset card (YAML front matter + Markdown body).
    const datasetCard = `---
language:
- en
tags:
- brain-computer-interface
- neural-decoding
- motor-imagery
- human-computer-interaction
- neuralink
task_categories:
- brain-computer-interface
task_ids:
- motor-imagery
- intent-decoding
- visual-evoked-potentials
- handwriting-recognition
size_categories:
- 10K<n<100K
---

# Dataset Card for BCI-FPS ${currentMode.toUpperCase()} Dataset

## Dataset Description

This dataset contains high-bandwidth neural training data collected from BCI-FPS, a specialized training platform for brain-computer interface research.

### Dataset Summary

- **Training Mode**: ${currentMode.replace('_', ' ').toUpperCase()}
- **Session ID**: ${sessionId}
- **Duration**: ${Math.round((Date.now() - sessionStartTime) / 1000)} seconds
- **Sampling Rate**: ${CONFIG.SAMPLING_RATE} Hz
- **Neural Channels**: ${CONFIG.NEURAL_CHANNELS}
- **Data Points**: ${(neuralData.length + intentStream.length).toLocaleString()}

### Supported Tasks

- **${getTaskDescription(currentMode)}**
- **Neural Decoding**: Training models to decode user intent from neural signals
- **BCI Calibration**: Providing ground truth data for BCI system calibration
- **Disability Research**: Supporting development of assistive technologies

### Languages

English (interface and documentation)

## Dataset Structure

### Data Instances

\`\`\`json
${JSON.stringify(neuralData[0] || {}, null, 2)}
\`\`\`

### Data Fields

See \`metadata.json\` for complete schema documentation.

## Dataset Creation

### Source Data

- **Platform**: Web-based BCI-FPS Training Environment
- **Sampling Rate**: ${CONFIG.SAMPLING_RATE} Hz
- **Collection Method**: Real-time telemetry during BCI training tasks
- **Neural Simulation**: Synthetic neural data representing ideal BCI signals

### Annotations

- **Annotation process**: Automatic intent labeling during gameplay
- **Annotation types**: Motor imagery, visual stimuli, handwriting intent
- **Who annotated**: System automatically labels based on game state

### Personal and Sensitive Information

No personal information is collected. All data is synthetic/anonymous.

## Considerations for Using the Data

### Social Impact

This dataset enables research in:
- Neuralink-style brain-computer interfaces
- Assistive technologies for disabled individuals
- Human-AI interaction systems
- Neural decoding algorithms

### Discussion of Biases

Synthetic neural data may not perfectly represent biological signals. Results should be validated with real neural recordings.

### Other Known Limitations

- Simulated neural signals
- Idealized game environment
- Limited to specific training tasks

## Additional Information

### Dataset Curators

BCI-FPS Research Team

### Licensing Information

MIT License

### Citation Information

\`\`\`bibtex
@misc{bci_fps_${currentMode}_2024,
 title={BCI-FPS ${currentMode} Training Dataset},
 author={Neuralink Research},
 year={2024},
 note={High-frequency intent decoding data for brain-computer interface development}
}
\`\`\`
`;

    zip.file("README.md", datasetCard);

    // Generate Python loading script shipped alongside the data. Note the
    // Python f-string braces contain no '$', so they survive this JS
    // template literal unchanged.
    const loadScript = `import json
import pandas as pd
from datasets import Dataset, DatasetDict

def load_bci_fps_dataset(data_dir):
    """
    Load BCI-FPS dataset for Hugging Face.

    Args:
        data_dir (str): Path to dataset directory

    Returns:
        DatasetDict: Hugging Face dataset
    """
    # Load neural data
    neural_data = []
    with open(f"{data_dir}/neural_data.jsonl", 'r') as f:
        for line in f:
            if line.strip():
                neural_data.append(json.loads(line))

    # Load intent stream
    intent_stream = []
    with open(f"{data_dir}/intent_stream.jsonl", 'r') as f:
        for line in f:
            if line.strip():
                intent_stream.append(json.loads(line))

    # Create datasets
    datasets = {
        "neural_data": Dataset.from_list(neural_data),
        "intent_stream": Dataset.from_list(intent_stream)
    }

    # Load handwriting samples if exists
    try:
        with open(f"{data_dir}/handwriting_samples.json", 'r') as f:
            handwriting = json.load(f)
        datasets["handwriting"] = Dataset.from_list(handwriting)
    except:
        pass

    # Load metadata
    with open(f"{data_dir}/metadata.json", 'r') as f:
        metadata = json.load(f)

    dataset_dict = DatasetDict(datasets)
    dataset_dict.info.metadata = metadata

    return dataset_dict

# Example usage for Neuralink research
if __name__ == "__main__":
    dataset = load_bci_fps_dataset("./bci_data")

    print(f"Dataset keys: {list(dataset.keys())}")
    print(f"Neural data samples: {len(dataset['neural_data'])}")
    print(f"Intent stream samples: {len(dataset['intent_stream'])}")

    # Example: Extract motor imagery trials
    motor_trials = [d for d in dataset['neural_data'] if d.get('type') == 'motor_imagery_trial']
    print(f"Motor imagery trials: {len(motor_trials)}")
`;

    zip.file("load_dataset.py", loadScript);

    // Generate and download ZIP
    const content = await zip.generateAsync({
        type: "blob",
        compression: "DEFLATE",
        compressionOptions: { level: 6 }
    });

    saveAs(content, `${sessionId}.zip`);

    // Show success message
    alert(`Dataset exported successfully!\n\nFile: ${sessionId}.zip\nSize: ${(content.size / (1024 * 1024)).toFixed(2)} MB\n\nReady for upload to Hugging Face.`);
}
1512
+
1513
// Pop an alert() summarising what has been collected so far, including
// pretty-printed JSON of the most recent neural and intent samples.
function showDataPreview() {
    const preview = `Dataset Preview:

Training Mode: ${currentMode}
Session Duration: ${Math.round((Date.now() - sessionStartTime) / 1000)}s
Neural Samples: ${neuralData.length}
Intent Samples: ${intentStream.length}
Handwriting Samples: ${handwritingSamples.length}
Total Data Points: ${neuralData.length + intentStream.length}

Latest Neural Sample:
${JSON.stringify(neuralData[neuralData.length - 1] || {}, null, 2)}

Latest Intent Sample:
${JSON.stringify(intentStream[intentStream.length - 1] || {}, null, 2)}`;

    alert(preview);
}
1531
+
1532
+ // ========== HELPER FUNCTIONS ==========
1533
function getTaskDescription(mode) {
    // Human-readable description for each training mode; generic fallback
    // for unknown modes.
    const descriptions = {
        motor_imagery: 'Motor Imagery Training for prosthetic control',
        simultaneous_intent: 'Simultaneous Intent Decoding for fluid BCI interfaces',
        visual_evoked: 'Visual Evoked Potentials for non-verbal communication',
        handwriting_intent: 'Handwriting Intent Recognition for text entry',
        full_spectrum: 'Full Spectrum BCI Training'
    };

    // hasOwnProperty guard keeps inherited keys (e.g. 'toString') from
    // matching, mirroring the original switch's default branch exactly.
    return Object.prototype.hasOwnProperty.call(descriptions, mode)
        ? descriptions[mode]
        : 'BCI Training';
}
1543
+
1544
function calculateAccuracy(target) {
    // Aim quality in [0, 1]: 1 when the camera looks straight at the target.
    const toTarget = new THREE.Vector3()
        .subVectors(target.position, camera.position)
        .normalize();

    // Camera forward vector (-Z in camera space, rotated into world space).
    const forward = new THREE.Vector3(0, 0, -1)
        .applyQuaternion(camera.quaternion);

    // Map cosine similarity [-1, 1] onto [0, 1], clamped at 0.
    const dot = toTarget.dot(forward);
    return Math.max(0, (dot + 1) / 2);
}
1556
+
1557
function countSimultaneousActions() {
    // Number of distinct concurrent action groups: movement, aiming,
    // firing, jumping (each group counts at most once).
    const activeGroups = [
        keyboard['w'] || keyboard['a'] || keyboard['s'] || keyboard['d'],
        Math.abs(mouse.dx) > 1 || Math.abs(mouse.dy) > 1,
        keyboard['mouse'],
        keyboard[' ']
    ];
    return activeGroups.filter(Boolean).length;
}
1565
+
1566
function calculateIntentModulation(channel) {
    // Synthetic intent-dependent offset added to a simulated neural channel.
    let modulation = 0;

    const moving = keyboard['w'] || keyboard['a'] || keyboard['s'] || keyboard['d'];

    // Channels 0-7: movement intent.
    if (channel < 8 && moving) {
        modulation += 0.3;
    }

    // Channels 8-15: visual attention (any active target present).
    if (channel >= 8 && channel < 16 && targets.some(t => t.userData.active)) {
        modulation += 0.2;
    }

    // Channels 24+: motor intent (fire button held).
    if (channel >= 24 && keyboard['mouse']) {
        modulation += 0.4;
    }

    return modulation;
}
1587
+
1588
function getCurrentNeuralContext() {
    // Latest recorded channel snapshot, or null before any sample exists.
    const count = neuralData.length;
    return count === 0 ? null : neuralData[count - 1].channels;
}
1592
+
1593
function getCurrentIntentContext() {
    // Trimmed view (mouse/keyboard/camera) of the newest intent sample,
    // or null before any sample exists.
    const count = intentStream.length;
    if (count === 0) return null;

    const { mouse, keyboard, camera } = intentStream[count - 1];
    return { mouse, keyboard, camera };
}
1602
+
1603
function getAimedTarget() {
    // Raycast from the screen centre (crosshair) and report the first
    // target hit, or null when nothing is under the crosshair.
    const raycaster = new THREE.Raycaster();
    raycaster.setFromCamera(new THREE.Vector2(0, 0), camera);

    const hits = raycaster.intersectObjects(targets);
    if (hits.length === 0) return null;

    const nearest = hits[0];
    return {
        id: targets.indexOf(nearest.object),
        position: nearest.object.position.toArray(),
        distance: nearest.distance
    };
}
1617
+
1618
+ // ========== GAME LOOP ==========
1619
function animate(time) {
    // Main render loop: self-reschedules every frame.
    requestAnimationFrame(animate);

    // FPS counter: count frames, latch the total once per second.
    fpsCounter++;
    if (time - lastFpsTime > 1000) {
        currentFps = fpsCounter;
        fpsCounter = 0;
        lastFpsTime = time;
    }

    // WASD moves in camera space; space ascends in world space.
    const movement = [
        ['w', () => camera.translateZ(-0.1)],
        ['s', () => camera.translateZ(0.1)],
        ['a', () => camera.translateX(-0.1)],
        ['d', () => camera.translateX(0.1)],
        [' ', () => { camera.position.y += 0.1; }]
    ];
    for (const [key, step] of movement) {
        if (keyboard[key]) step();
    }

    // Pulse the emissive glow of active targets.
    for (const target of targets) {
        if (target.userData.active) {
            target.material.emissiveIntensity = 0.5 + 0.5 * Math.sin(time * 0.005);
        }
    }

    renderer.render(scene, camera);
}
1646
+
1647
+ // ========== CONTROL FUNCTIONS ==========
1648
function pauseTraining() {
    // TODO: not implemented — wired to the UI but currently a no-op.
    // A real implementation would need to suspend the sampling intervals
    // and any task timers, then resume them without losing session time.
}
1652
+
1653
function skipTask() {
    // TODO: not implemented — wired to the UI but currently a no-op.
    // Behaviour would depend on the active training mode (advance to the
    // next trial/letter/stimulus without recording a completion).
}
1657
+
1658
// Ending a session early is the same as completing training normally:
// tear down the HUD/stimuli and show the results overlay.
function endSession() {
    completeTraining();
}
1661
+
1662
// Restart via a full page reload — resets all in-memory state (scene,
// data buffers, intervals) without needing explicit cleanup.
function restartTraining() {
    location.reload();
}
1665
+
1666
function returnToMenu() {
    // Hide all in-game UI layers and bring the main menu back.
    const toHide = [
        'experimentComplete',
        'gameContainer',
        'uiOverlay',
        'crosshair',
        'bciControlPanel'
    ];
    for (const id of toHide) {
        document.getElementById(id).style.display = 'none';
    }
    document.getElementById('mainMenu').style.display = 'flex';

    // Stop the background sampling/streaming loops.
    clearInterval(neuralBackgroundInterval);
    clearInterval(dataStreamInterval);
}
1678
+
1679
+ // ========== INITIALIZATION ==========
1680
// Boot sequence on page load: start the menu's neural background
// animation, then build the Three.js scene.
window.onload = function() {
    initNeuralBackground();
    initThreeJS();
};
1684
+ </script>
1685
+ </body>
1686
+ </html>