LimmeDev commited on
Commit
454ecdd
·
verified ·
1 Parent(s): 2ee37bb

Initial MANIFOLD upload - CS2 cheat detection training

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. README.md +14 -5
  2. app.py +296 -0
  3. configs/data/generator.yaml +20 -0
  4. configs/default.yaml +42 -0
  5. requirements.txt +9 -0
  6. src/manifold.egg-info/PKG-INFO +24 -0
  7. src/manifold.egg-info/SOURCES.txt +15 -0
  8. src/manifold.egg-info/dependency_links.txt +1 -0
  9. src/manifold.egg-info/requires.txt +20 -0
  10. src/manifold.egg-info/top_level.txt +1 -0
  11. src/manifold/__init__.py +13 -0
  12. src/manifold/__pycache__/__init__.cpython-312.pyc +0 -0
  13. src/manifold/__pycache__/config.cpython-312.pyc +0 -0
  14. src/manifold/config.py +94 -0
  15. src/manifold/data/__init__.py +77 -0
  16. src/manifold/data/__pycache__/__init__.cpython-312.pyc +0 -0
  17. src/manifold/data/__pycache__/cheats.cpython-312.pyc +0 -0
  18. src/manifold/data/__pycache__/dataset.cpython-312.pyc +0 -0
  19. src/manifold/data/__pycache__/generator.cpython-312.pyc +0 -0
  20. src/manifold/data/__pycache__/profiles.cpython-312.pyc +0 -0
  21. src/manifold/data/__pycache__/temporal.cpython-312.pyc +0 -0
  22. src/manifold/data/__pycache__/trajectories.cpython-312.pyc +0 -0
  23. src/manifold/data/cheats.py +219 -0
  24. src/manifold/data/dataset.py +110 -0
  25. src/manifold/data/generator.py +749 -0
  26. src/manifold/data/profiles.py +202 -0
  27. src/manifold/data/temporal.py +270 -0
  28. src/manifold/data/trajectories.py +308 -0
  29. src/manifold/evaluation/__init__.py +16 -0
  30. src/manifold/evaluation/__pycache__/__init__.cpython-312.pyc +0 -0
  31. src/manifold/evaluation/__pycache__/analysis.cpython-312.pyc +0 -0
  32. src/manifold/evaluation/__pycache__/metrics.cpython-312.pyc +0 -0
  33. src/manifold/evaluation/analysis.py +140 -0
  34. src/manifold/evaluation/metrics.py +141 -0
  35. src/manifold/models/__init__.py +5 -0
  36. src/manifold/models/__pycache__/__init__.cpython-312.pyc +0 -0
  37. src/manifold/models/__pycache__/manifold_lite.cpython-312.pyc +0 -0
  38. src/manifold/models/components/__init__.py +57 -0
  39. src/manifold/models/components/__pycache__/__init__.cpython-312.pyc +0 -0
  40. src/manifold/models/components/__pycache__/cca.cpython-312.pyc +0 -0
  41. src/manifold/models/components/__pycache__/hse.cpython-312.pyc +0 -0
  42. src/manifold/models/components/__pycache__/ihe.cpython-312.pyc +0 -0
  43. src/manifold/models/components/__pycache__/mdm.cpython-312.pyc +0 -0
  44. src/manifold/models/components/__pycache__/mpl.cpython-312.pyc +0 -0
  45. src/manifold/models/components/__pycache__/tiv.cpython-312.pyc +0 -0
  46. src/manifold/models/components/__pycache__/verdict.cpython-312.pyc +0 -0
  47. src/manifold/models/components/cca.py +161 -0
  48. src/manifold/models/components/hse.py +137 -0
  49. src/manifold/models/components/ihe.py +164 -0
  50. src/manifold/models/components/mdm.py +176 -0
README.md CHANGED
@@ -1,12 +1,21 @@
1
  ---
2
- title: Manifold Cs2 Training
3
- emoji: 📚
4
  colorFrom: purple
5
- colorTo: pink
6
  sdk: gradio
7
- sdk_version: 6.5.1
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: MANIFOLD CS2 Cheat Detection
3
+ emoji: 🎯
4
  colorFrom: purple
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 4.44.0
8
  app_file: app.py
9
  pinned: false
10
+ hardware: h200
11
  ---
12
 
13
+ # MANIFOLD - CS2 Cheat Detection Training
14
+
15
+ Train the MANIFOLD (Motor-Aware Neural Inference for Faithfulness Of Latent Dynamics) model for CS2 cheat detection.
16
+
17
+ ## Features
18
+ - Synthetic data generation with realistic player behavior
19
+ - 4-stage curriculum learning
20
+ - Evidential deep learning with uncertainty quantification
21
+ - Physics-constrained motor dynamics modeling
app.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """MANIFOLD Training Interface for Hugging Face Spaces."""
3
+
4
+ import gradio as gr
5
+ import torch
6
+ import numpy as np
7
+ import json
8
+ import time
9
+ from pathlib import Path
10
+ from threading import Thread
11
+ import queue
12
+
13
+ # Add src to path for manifold imports
14
+ import sys
15
+ sys.path.insert(0, str(Path(__file__).parent / "src"))
16
+
17
+ from manifold import MANIFOLDLite
18
+ from manifold.config import ModelConfig, TrainingConfig
19
+ from manifold.data.generator import SyntheticDataGenerator
20
+ from manifold.data.dataset import MANIFOLDDataset, create_dataloader
21
+ from manifold.training.trainer import MANIFOLDTrainer
22
+ from manifold.training.callbacks import Callback, ProgressCallback
23
+ from manifold.training.curriculum import CurriculumScheduler
24
+
25
# Global state shared between the Gradio handlers below.
training_log = queue.Queue()  # epoch summaries pushed by GradioCallback, drained by get_training_logs
is_training = False  # guard so only one training run is active at a time
current_model = None  # most recently trained (or checkpoint-loaded) model, used by test_inference
29
+
30
+
31
class GradioCallback(Callback):
    """Streams per-epoch training metrics into the shared log queue.

    The UI drains ``training_log`` on demand (via ``get_training_logs``),
    so this callback only formats one summary dict per epoch and enqueues it.
    """

    def on_epoch_end(self, trainer, epoch_info):
        """Format the epoch summary and push it onto the log queue."""
        stage_info = epoch_info.get("stage", {})
        train_metrics = epoch_info.get("train", {})
        val_metrics = epoch_info.get("val", {})

        summary = {
            "epoch": epoch_info["epoch"] + 1,  # display 1-based epochs
            "stage": stage_info.get("stage_name", ""),
            "train_loss": f"{train_metrics.get('loss', 0):.4f}",
            "val_loss": f"{val_metrics.get('loss', 0):.4f}",
            "val_acc": f"{val_metrics.get('accuracy', 0):.4f}",
            "lr": f"{epoch_info.get('lr', 0):.2e}",
        }
        training_log.put(summary)
51
+
52
+
53
def get_device_info():
    """Return a one-line description of the available compute device.

    Reports the first CUDA device's name and total memory in GB when a GPU
    is visible to torch, otherwise a CPU-only notice.
    """
    if not torch.cuda.is_available():
        return "CPU only (no GPU detected)"
    name = torch.cuda.get_device_name(0)
    total_gb = torch.cuda.get_device_properties(0).total_memory / 1e9
    return f"GPU: {name} ({total_gb:.1f} GB)"
60
+
61
+
62
def generate_data(num_legit, num_cheaters, seed, progress=gr.Progress()):
    """Generate a synthetic train/val dataset and save it under /tmp.

    Args:
        num_legit: Number of legitimate-player sessions to synthesize (label 0).
        num_cheaters: Number of cheater sessions to synthesize (label 2).
        seed: RNG seed used for both generation and shuffling.
        progress: Gradio progress tracker (injected by the UI).

    Returns:
        A human-readable status string describing the saved splits.
    """
    progress(0, desc="Initializing generator...")

    generator = SyntheticDataGenerator(seed=seed, engagements_per_session=200)

    all_features = []
    all_labels = []
    total = num_legit + num_cheaters

    # Label scheme follows the 3-class verdict head: 0=Clean, 2=Cheating.
    # Class 1 ("Suspicious") is never generated here.
    for _ in progress.tqdm(range(num_legit), desc="Generating legit players"):
        session = generator.generate_player(is_cheater=False)
        all_features.append(session.to_tensor())
        all_labels.append(0)

    for _ in progress.tqdm(range(num_cheaters), desc="Generating cheaters"):
        session = generator.generate_player(is_cheater=True)
        all_features.append(session.to_tensor())
        all_labels.append(2)

    features = np.array(all_features)
    labels = np.array(all_labels)

    # Shuffle so the sequential train/val split below is not class-ordered.
    rng = np.random.default_rng(seed)
    indices = rng.permutation(total)
    features = features[indices]
    labels = labels[indices]

    # 90/10 train/val split.
    split_idx = int(total * 0.9)

    data_dir = Path("/tmp/manifold_data")
    # parents=True: robust if any parent of the data dir is missing.
    data_dir.mkdir(parents=True, exist_ok=True)

    np.save(data_dir / "train_features.npy", features[:split_idx])
    np.save(data_dir / "train_labels.npy", labels[:split_idx])
    np.save(data_dir / "val_features.npy", features[split_idx:])
    np.save(data_dir / "val_labels.npy", labels[split_idx:])

    return f"Generated {total} samples:\n- Train: {split_idx}\n- Val: {total - split_idx}\n- Features shape: {features.shape}"
106
+
107
+
108
def train_model(batch_size, learning_rate, max_epochs, progress=gr.Progress()):
    """Train MANIFOLD on the dataset previously written to /tmp/manifold_data.

    Args:
        batch_size: Mini-batch size for both dataloaders.
        learning_rate: Optimizer learning rate.
        max_epochs: Maximum number of training epochs.
        progress: Gradio progress tracker (injected by the UI).

    Returns:
        A status string: success summary, "no data" notice, or error text.
    """
    global is_training, current_model

    if is_training:
        return "Training already in progress!"

    is_training = True
    device = "cuda" if torch.cuda.is_available() else "cpu"

    try:
        data_dir = Path("/tmp/manifold_data")
        if not (data_dir / "train_features.npy").exists():
            return "No data found! Generate data first."

        progress(0.1, desc="Loading data...")
        train_features = np.load(data_dir / "train_features.npy")
        train_labels = np.load(data_dir / "train_labels.npy")
        val_features = np.load(data_dir / "val_features.npy")
        val_labels = np.load(data_dir / "val_labels.npy")

        train_dataset = MANIFOLDDataset(data=train_features, labels=train_labels)
        val_dataset = MANIFOLDDataset(data=val_features, labels=val_labels)

        # num_workers=0: avoid worker processes inside the Spaces container.
        train_loader = create_dataloader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
        val_loader = create_dataloader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=0)

        progress(0.2, desc="Creating model...")
        model_config = ModelConfig()
        model = MANIFOLDLite.from_config(model_config)

        train_config = TrainingConfig(
            batch_size=batch_size,
            learning_rate=learning_rate,
            max_epochs=max_epochs,
        )

        callbacks = [GradioCallback()]

        trainer = MANIFOLDTrainer(
            model=model,
            config=train_config,
            train_dataloader=train_loader,
            val_dataloader=val_loader,
            callbacks=callbacks,
        )

        progress(0.3, desc="Training...")
        history = trainer.train()

        save_path = Path("/tmp/manifold_model.pt")
        trainer.save_checkpoint(save_path)
        current_model = model

        return f"Training complete!\nFinal val accuracy: {history['val'][-1].get('accuracy', 'N/A')}\nModel saved to {save_path}"

    except Exception as e:
        # Boundary handler: surface the error in the UI instead of crashing the app.
        return f"Training failed: {str(e)}"
    finally:
        # Fix: always release the in-progress flag, whatever exit path was
        # taken, so a stray exception can't leave training permanently locked.
        is_training = False
173
+
174
+
175
def get_training_logs():
    """Drain the training log queue and render it as a markdown table.

    Returns:
        A markdown table of epoch summaries, or a placeholder string when
        no log entries have been produced yet.
    """
    logs = []
    while not training_log.empty():
        try:
            logs.append(training_log.get_nowait())
        except queue.Empty:
            # Fix: catch the specific exception instead of a bare `except:`
            # (which also swallowed KeyboardInterrupt/SystemExit). Empty can
            # still occur if the queue is drained between empty() and get.
            break

    if not logs:
        return "No logs yet. Start training to see progress."

    # Format entries as a markdown table for the gr.Markdown component.
    header = "| Epoch | Stage | Train Loss | Val Loss | Val Acc | LR |\n|-------|-------|------------|----------|---------|----|\n"
    rows = "\n".join([
        f"| {l['epoch']} | {l['stage'][:20]} | {l['train_loss']} | {l['val_loss']} | {l['val_acc']} | {l['lr']} |"
        for l in logs
    ])
    return header + rows
194
+
195
+
196
def test_inference(num_samples):
    """Run the trained model on fresh synthetic samples and report accuracy.

    Falls back to loading the /tmp checkpoint when no in-memory model
    exists. Samples alternate legit/cheater so both classes are exercised.
    Returns a markdown table plus an accuracy summary line.
    """
    global current_model

    if current_model is None:
        # No in-memory model: try the checkpoint saved by train_model.
        model_path = Path("/tmp/manifold_model.pt")
        if not model_path.exists():
            return "No model available! Train a model first."
        current_model = MANIFOLDLite.from_config(ModelConfig())
        ckpt = torch.load(model_path, map_location="cpu")
        current_model.load_state_dict(ckpt["model_state_dict"])

    device = "cuda" if torch.cuda.is_available() else "cpu"
    current_model.to(device)
    current_model.eval()

    generator = SyntheticDataGenerator(seed=12345)

    results = []
    for sample_idx in range(num_samples):
        is_cheater = sample_idx % 2 == 1  # alternate legit / cheater
        session = generator.generate_player(is_cheater=is_cheater)
        features = torch.tensor(session.to_tensor(), dtype=torch.float32).unsqueeze(0).to(device)

        with torch.no_grad():
            outputs = current_model(features)

        pred_class = outputs["predicted_class"].item()
        uncertainty = outputs["uncertainty"].item()
        probs = outputs["verdict_probs"][0].cpu().numpy()

        class_names = ["Clean", "Suspicious", "Cheating"]
        # Any non-Clean prediction counts as having flagged a cheater.
        hit = (pred_class > 0) == is_cheater
        results.append({
            "Sample": sample_idx + 1,
            "Actual": "Cheater" if is_cheater else "Legit",
            "Predicted": class_names[pred_class],
            "Confidence": f"{probs.max():.2%}",
            "Uncertainty": f"{uncertainty:.4f}",
            "Correct": "✓" if hit else "✗",
        })

    header = "| # | Actual | Predicted | Confidence | Uncertainty | Correct |\n|---|--------|-----------|------------|-------------|---------|"
    rows = "\n".join(
        f"| {r['Sample']} | {r['Actual']} | {r['Predicted']} | {r['Confidence']} | {r['Uncertainty']} | {r['Correct']} |"
        for r in results
    )

    correct = sum(1 for r in results if r["Correct"] == "✓")
    summary = f"\n\n**Accuracy: {correct}/{num_samples} ({100*correct/num_samples:.1f}%)**"

    return header + "\n" + rows + summary
251
+
252
+
253
# Build Gradio interface: three tabs mirroring the workflow
# (generate data -> train -> test), wired to the handlers defined above.
with gr.Blocks(title="MANIFOLD Training") as demo:
    gr.Markdown("# 🎯 MANIFOLD - CS2 Cheat Detection Training")
    gr.Markdown(f"**Device:** {get_device_info()}")

    with gr.Tabs():
        # Tab 1: writes synthetic .npy splits under /tmp/manifold_data.
        with gr.TabItem("1. Generate Data"):
            gr.Markdown("Generate synthetic CS2 player behavior data for training.")
            with gr.Row():
                num_legit = gr.Slider(100, 10000, value=1000, step=100, label="Legit Players")
                num_cheaters = gr.Slider(100, 5000, value=500, step=100, label="Cheaters")
                seed = gr.Number(value=42, label="Random Seed")
            gen_btn = gr.Button("Generate Data", variant="primary")
            gen_output = gr.Textbox(label="Generation Status", lines=5)
            gen_btn.click(generate_data, [num_legit, num_cheaters, seed], gen_output)

        # Tab 2: runs training; epoch logs arrive via GradioCallback and are
        # pulled on demand with the "Refresh Logs" button.
        with gr.TabItem("2. Train Model"):
            gr.Markdown("Train the MANIFOLD model with curriculum learning.")
            with gr.Row():
                batch_size = gr.Slider(8, 128, value=32, step=8, label="Batch Size")
                learning_rate = gr.Number(value=3e-4, label="Learning Rate")
                max_epochs = gr.Slider(5, 100, value=20, step=5, label="Max Epochs")
            train_btn = gr.Button("Start Training", variant="primary")
            train_output = gr.Textbox(label="Training Status", lines=5)
            train_btn.click(train_model, [batch_size, learning_rate, max_epochs], train_output)

            gr.Markdown("### Training Logs")
            logs_output = gr.Markdown("No logs yet.")
            refresh_btn = gr.Button("Refresh Logs")
            refresh_btn.click(get_training_logs, [], logs_output)

        # Tab 3: sanity-check the trained model on fresh synthetic samples.
        with gr.TabItem("3. Test Model"):
            gr.Markdown("Test the trained model on synthetic samples.")
            num_test = gr.Slider(5, 50, value=10, step=5, label="Number of Test Samples")
            test_btn = gr.Button("Run Inference", variant="primary")
            test_output = gr.Markdown("Click 'Run Inference' to test the model.")
            test_btn.click(test_inference, [num_test], test_output)

    gr.Markdown("---")
    gr.Markdown("**MANIFOLD** - Motor-Aware Neural Inference for Faithfulness Of Latent Dynamics")


if __name__ == "__main__":
    demo.launch()
configs/data/generator.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Synthetic dataset generation settings (consumed by manifold/data/generator.py).
generator:
  num_legit_players: 70000
  num_cheaters: 30000
  engagements_per_session: 200
  seed: null  # null -> non-reproducible runs

  # Mix of cheater archetypes; keys match CHEAT_PROFILES in
  # manifold/data/cheats.py and the weights sum to 1.0.
  cheater_distribution:
    blatant_rage: 0.10
    obvious: 0.15
    closet_moderate: 0.30
    closet_subtle: 0.30
    wallhack_only: 0.15

  # Rank mix for generated players; weights sum to 1.0.
  rank_distribution:
    silver: 0.20
    gold_nova: 0.25
    master_guardian: 0.25
    legendary_eagle: 0.15
    supreme_global: 0.10
    pro: 0.05
configs/default.yaml ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Default MANIFOLD-Lite configuration. Values mirror the defaults in
# src/manifold/config.py (ModelConfig / TrainingConfig).
model:
  input_dim: 64
  embed_dim: 256
  sequence_length: 128

  # IHE component (models/components/ihe.py)
  ihe:
    num_layers: 4
    num_heads: 8
    ff_dim: 1024
    dropout: 0.1

  # MDM component (models/components/mdm.py)
  mdm:
    hidden_dim: 512
    num_steps: 4

  # MPL component (models/components/mpl.py)
  mpl:
    latent_dim: 64
    hidden_dim: 256
    kl_weight: 0.001

  # CCA component (models/components/cca.py)
  cca:
    num_cf_probes: 16
    num_heads: 8

  # HSE component (models/components/hse.py)
  hse:
    manifold_dim: 32
    num_skill_levels: 7

  # TIV component (models/components/tiv.py)
  tiv:
    num_domains: 4
    adversarial_lambda: 0.1

  # Verdict head — 3 classes: Clean / Suspicious / Cheating
  verdict:
    num_classes: 3
    evidence_scale: 10.0

training:
  batch_size: 32
  effective_batch_size: 128  # 128/32 -> presumably gradient accumulation; confirm in trainer
  learning_rate: 3e-4
  max_epochs: 50
  use_amp: true  # automatic mixed precision
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ torch>=2.1.0
2
+ numpy>=1.24.0
3
+ scipy>=1.11.0
4
+ pandas>=2.0.0
5
+ pydantic>=2.0.0
6
+ tqdm>=4.66.0
7
+ einops>=0.7.0
8
+ scikit-learn>=1.3.0
9
+ gradio>=4.0.0
src/manifold.egg-info/PKG-INFO ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.4
2
+ Name: manifold
3
+ Version: 0.1.0
4
+ Summary: MANIFOLD: Novel Deep Learning Architecture for CS2 Cheat Detection
5
+ Requires-Python: >=3.11
6
+ Requires-Dist: torch>=2.1.0
7
+ Requires-Dist: numpy>=1.24.0
8
+ Requires-Dist: scipy>=1.11.0
9
+ Requires-Dist: pandas>=2.0.0
10
+ Requires-Dist: pyarrow>=14.0.0
11
+ Requires-Dist: pydantic>=2.0.0
12
+ Requires-Dist: hydra-core>=1.3.0
13
+ Requires-Dist: omegaconf>=2.3.0
14
+ Requires-Dist: wandb>=0.16.0
15
+ Requires-Dist: tqdm>=4.66.0
16
+ Requires-Dist: rich>=13.0.0
17
+ Requires-Dist: einops>=0.7.0
18
+ Requires-Dist: scikit-learn>=1.3.0
19
+ Requires-Dist: matplotlib>=3.8.0
20
+ Provides-Extra: dev
21
+ Requires-Dist: pytest>=7.4.0; extra == "dev"
22
+ Requires-Dist: hypothesis>=6.90.0; extra == "dev"
23
+ Requires-Dist: ruff>=0.1.0; extra == "dev"
24
+ Requires-Dist: mypy>=1.7.0; extra == "dev"
src/manifold.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ README.md
2
+ pyproject.toml
3
+ src/manifold/__init__.py
4
+ src/manifold/config.py
5
+ src/manifold.egg-info/PKG-INFO
6
+ src/manifold.egg-info/SOURCES.txt
7
+ src/manifold.egg-info/dependency_links.txt
8
+ src/manifold.egg-info/requires.txt
9
+ src/manifold.egg-info/top_level.txt
10
+ src/manifold/data/__init__.py
11
+ src/manifold/evaluation/__init__.py
12
+ src/manifold/models/__init__.py
13
+ src/manifold/models/components/__init__.py
14
+ src/manifold/models/layers/__init__.py
15
+ src/manifold/training/__init__.py
src/manifold.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
src/manifold.egg-info/requires.txt ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ torch>=2.1.0
2
+ numpy>=1.24.0
3
+ scipy>=1.11.0
4
+ pandas>=2.0.0
5
+ pyarrow>=14.0.0
6
+ pydantic>=2.0.0
7
+ hydra-core>=1.3.0
8
+ omegaconf>=2.3.0
9
+ wandb>=0.16.0
10
+ tqdm>=4.66.0
11
+ rich>=13.0.0
12
+ einops>=0.7.0
13
+ scikit-learn>=1.3.0
14
+ matplotlib>=3.8.0
15
+
16
+ [dev]
17
+ pytest>=7.4.0
18
+ hypothesis>=6.90.0
19
+ ruff>=0.1.0
20
+ mypy>=1.7.0
src/manifold.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ manifold
src/manifold/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """MANIFOLD: Novel Deep Learning Architecture for CS2 Cheat Detection"""
2
+
3
+ __version__ = "0.1.0"
4
+
5
+ from manifold.config import ModelConfig, TrainingConfig, DataConfig
6
+ from manifold.models.manifold_lite import MANIFOLDLite
7
+
8
+ __all__ = [
9
+ "ModelConfig",
10
+ "TrainingConfig",
11
+ "DataConfig",
12
+ "MANIFOLDLite",
13
+ ]
src/manifold/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (474 Bytes). View file
 
src/manifold/__pycache__/config.cpython-312.pyc ADDED
Binary file (3.49 kB). View file
 
src/manifold/config.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """MANIFOLD configuration models using Pydantic v2."""
2
+
3
+ from pydantic import BaseModel
4
+ from typing import Literal, Optional
5
+
6
+
7
class ModelConfig(BaseModel):
    """MANIFOLD-Lite model configuration.

    Field prefixes map onto the model components under
    src/manifold/models/components/ (ihe, mdm, mpl, cca, hse, tiv, verdict).
    """
    # Shared encoder dimensions
    input_dim: int = 64
    embed_dim: int = 256
    sequence_length: int = 128

    # IHE (components/ihe.py) — transformer-style sizing knobs
    ihe_layers: int = 4
    ihe_heads: int = 8
    ihe_ff_dim: int = 1024
    ihe_dropout: float = 0.1

    # MDM (components/mdm.py)
    mdm_hidden: int = 512
    mdm_steps: int = 4

    # MPL (components/mpl.py)
    latent_dim: int = 64
    mpl_hidden: int = 256
    kl_weight: float = 0.001

    # CCA (components/cca.py)
    num_cf_probes: int = 16
    cca_heads: int = 8

    # HSE (components/hse.py)
    manifold_dim: int = 32
    num_skill_levels: int = 7

    # TIV (components/tiv.py)
    num_domains: int = 4
    adversarial_lambda: float = 0.1

    # Verdict head (components/verdict.py) — 3 classes: Clean/Suspicious/Cheating
    num_classes: int = 3
    evidence_scale: float = 10.0
    dropout: float = 0.1
44
+
45
+
46
class TrainingConfig(BaseModel):
    """Training configuration.

    NOTE(review): effective_batch_size / batch_size suggests gradient
    accumulation — confirm against MANIFOLDTrainer.
    """
    batch_size: int = 32
    effective_batch_size: int = 128
    learning_rate: float = 3e-4
    min_learning_rate: float = 1e-6
    weight_decay: float = 0.01
    warmup_ratio: float = 0.1  # presumably the fraction of steps used for LR warmup — TODO confirm
    max_epochs: int = 50
    gradient_clip: float = 1.0
    use_amp: bool = True  # automatic mixed precision
    amp_dtype: Literal["float16", "bfloat16"] = "float16"
    gradient_checkpointing: bool = True
    save_every_n_epochs: int = 5

    # Relative weights for the multi-task loss terms.
    # Pydantic v2 deep-copies mutable field defaults per instance, so this
    # shared dict literal is safe here.
    loss_weights: dict = {
        "classification": 1.0,
        "reconstruction": 0.1,
        "kl_divergence": 0.001,
        "physics_violation": 0.5,
        "invariance": 0.1,
    }
68
+
69
+
70
class DataConfig(BaseModel):
    """Data generation configuration for the synthetic CS2 dataset."""
    num_legit_players: int = 70000
    num_cheaters: int = 30000
    engagements_per_session: int = 200
    num_features: int = 64  # matches ModelConfig.input_dim
    trajectory_length: int = 128  # matches ModelConfig.sequence_length
    seed: Optional[int] = None  # None -> non-reproducible runs

    # Mix of cheater archetypes; weights sum to 1.0. Keys must match
    # CHEAT_PROFILES in manifold/data/cheats.py.
    cheater_distribution: dict = {
        "blatant_rage": 0.10,
        "obvious": 0.15,
        "closet_moderate": 0.30,
        "closet_subtle": 0.30,
        "wallhack_only": 0.15,
    }

    # Rank mix for generated players; weights sum to 1.0.
    rank_distribution: dict = {
        "silver": 0.20,
        "gold_nova": 0.25,
        "master_guardian": 0.25,
        "legendary_eagle": 0.15,
        "supreme_global": 0.10,
        "pro": 0.05,
    }
src/manifold/data/__init__.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from manifold.data.profiles import (
2
+ SkillVector,
3
+ PlayerProfile,
4
+ Rank,
5
+ SKILL_CORRELATION_MATRIX,
6
+ RANK_STATISTICS,
7
+ generate_correlated_skills,
8
+ )
9
+ from manifold.data.cheats import (
10
+ CheatType,
11
+ TogglePattern,
12
+ CheatConfig,
13
+ CheatBehavior,
14
+ HumanizationConfig,
15
+ CHEAT_PROFILES,
16
+ )
17
+ from manifold.data.trajectories import (
18
+ minimum_jerk,
19
+ signal_dependent_noise,
20
+ generate_micro_corrections,
21
+ generate_human_trajectory,
22
+ generate_aimbot_trajectory,
23
+ fitts_law_time,
24
+ extract_trajectory_features,
25
+ )
26
+ from manifold.data.temporal import (
27
+ SessionState,
28
+ SessionSimulator,
29
+ simulate_round_sequence,
30
+ generate_session_trace,
31
+ )
32
+ from manifold.data.generator import (
33
+ SyntheticDataGenerator,
34
+ PlayerSession,
35
+ EngagementData,
36
+ TRAJECTORY_FEATURE_NAMES,
37
+ extract_extended_trajectory_features,
38
+ )
39
+ from manifold.data.dataset import (
40
+ MANIFOLDDataset,
41
+ create_dataloader,
42
+ collate_fn,
43
+ )
44
+
45
+ __all__ = [
46
+ "SkillVector",
47
+ "PlayerProfile",
48
+ "Rank",
49
+ "SKILL_CORRELATION_MATRIX",
50
+ "RANK_STATISTICS",
51
+ "generate_correlated_skills",
52
+ "CheatType",
53
+ "TogglePattern",
54
+ "CheatConfig",
55
+ "CheatBehavior",
56
+ "HumanizationConfig",
57
+ "CHEAT_PROFILES",
58
+ "minimum_jerk",
59
+ "signal_dependent_noise",
60
+ "generate_micro_corrections",
61
+ "generate_human_trajectory",
62
+ "generate_aimbot_trajectory",
63
+ "fitts_law_time",
64
+ "extract_trajectory_features",
65
+ "SessionState",
66
+ "SessionSimulator",
67
+ "simulate_round_sequence",
68
+ "generate_session_trace",
69
+ "SyntheticDataGenerator",
70
+ "PlayerSession",
71
+ "EngagementData",
72
+ "TRAJECTORY_FEATURE_NAMES",
73
+ "extract_extended_trajectory_features",
74
+ "MANIFOLDDataset",
75
+ "create_dataloader",
76
+ "collate_fn",
77
+ ]
src/manifold/data/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (1.49 kB). View file
 
src/manifold/data/__pycache__/cheats.cpython-312.pyc ADDED
Binary file (9.18 kB). View file
 
src/manifold/data/__pycache__/dataset.cpython-312.pyc ADDED
Binary file (5.73 kB). View file
 
src/manifold/data/__pycache__/generator.cpython-312.pyc ADDED
Binary file (30.7 kB). View file
 
src/manifold/data/__pycache__/profiles.cpython-312.pyc ADDED
Binary file (7.17 kB). View file
 
src/manifold/data/__pycache__/temporal.cpython-312.pyc ADDED
Binary file (11.4 kB). View file
 
src/manifold/data/__pycache__/trajectories.cpython-312.pyc ADDED
Binary file (11.3 kB). View file
 
src/manifold/data/cheats.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import numpy as np
3
+ from dataclasses import dataclass, field
4
+ from typing import Optional, Dict, Any, Set, Tuple
5
+ from enum import Enum
6
+
7
+
8
class CheatType(Enum):
    """Categories of cheat software a synthetic cheater may run.

    AIMBOT drives get_aim_modification in CheatBehavior; the other two are
    consumed elsewhere in the data generator.
    """
    AIMBOT = "aimbot"
    WALLHACK = "wallhack"
    TRIGGERBOT = "triggerbot"
12
+
13
+
14
class TogglePattern(Enum):
    """When a cheater switches cheats on (see CheatBehavior.should_activate)."""
    ALWAYS = "always"  # active every round
    CLUTCH_ONLY = "clutch_only"  # only in clutch situations
    LOSING_ONLY = "losing_only"  # only while losing
    RANDOM = "random"  # 50/50 per round
19
+
20
+
21
@dataclass
class HumanizationConfig:
    """Ranges for a cheater's "look legit" settings — which still leave
    detectable artifacts. Each (lo, hi) tuple is sampled in ``sample``."""
    reaction_delay_ms: Tuple[float, float] = (0.0, 20.0)
    aim_smoothing: Tuple[float, float] = (0.0, 0.2)
    random_miss_rate: Tuple[float, float] = (0.0, 0.05)
    fov_degrees: Tuple[float, float] = (90.0, 180.0)
    noise_amplitude: Tuple[float, float] = (0.0, 0.0)
    prefire_suppression: float = 0.0
    check_delay_ms: Tuple[float, float] = (0.0, 0.0)

    def sample(self, rng: np.random.Generator) -> Dict[str, float]:
        """Draw one concrete setting from each configured range."""
        # Draw order is fixed so seeded generators stay reproducible.
        drawn: Dict[str, float] = {}
        drawn["reaction_delay_ms"] = rng.uniform(*self.reaction_delay_ms)
        drawn["aim_smoothing"] = rng.uniform(*self.aim_smoothing)
        drawn["random_miss_rate"] = rng.uniform(*self.random_miss_rate)
        drawn["fov_degrees"] = rng.uniform(*self.fov_degrees)
        drawn["noise_amplitude"] = rng.uniform(*self.noise_amplitude)
        # Fixed value, not a range.
        drawn["prefire_suppression"] = self.prefire_suppression
        # Skip the RNG draw entirely when the range is degenerate at zero.
        if self.check_delay_ms[1] > 0:
            drawn["check_delay_ms"] = rng.uniform(*self.check_delay_ms)
        else:
            drawn["check_delay_ms"] = 0.0
        return drawn
43
+
44
+
45
# Archetype library keyed by profile name; consumed by CheatConfig.from_profile.
# Per profile:
#   intensity             — (lo, hi) sampling range for cheat strength
#   toggle_pattern        — when the cheat is switched on (TogglePattern)
#   cheat_types           — set of CheatType features used
#   humanization          — sampling ranges for the disguise knobs
#   base_skill_multiplier — (lo, hi) scaling of the player's underlying skill
CHEAT_PROFILES: Dict[str, Dict[str, Any]] = {
    # No disguise: near-instant reactions, minimal smoothing, huge FOV.
    "blatant_rage": {
        "intensity": (0.8, 1.0),
        "toggle_pattern": TogglePattern.ALWAYS,
        "cheat_types": {CheatType.AIMBOT, CheatType.WALLHACK, CheatType.TRIGGERBOT},
        "humanization": HumanizationConfig(
            reaction_delay_ms=(0.0, 20.0),
            aim_smoothing=(0.0, 0.2),
            random_miss_rate=(0.0, 0.05),
            fov_degrees=(90.0, 180.0),
        ),
        "base_skill_multiplier": (0.3, 0.5),
    },
    # Always-on, but with mild smoothing and a narrower FOV.
    "obvious": {
        "intensity": (0.5, 0.8),
        "toggle_pattern": TogglePattern.ALWAYS,
        "cheat_types": {CheatType.AIMBOT, CheatType.TRIGGERBOT},
        "humanization": HumanizationConfig(
            reaction_delay_ms=(30.0, 80.0),
            aim_smoothing=(0.2, 0.5),
            random_miss_rate=(0.05, 0.10),
            fov_degrees=(30.0, 60.0),
        ),
        "base_skill_multiplier": (0.4, 0.6),
    },
    # Aimbot only in clutch rounds, human-like delays and misses.
    "closet_moderate": {
        "intensity": (0.3, 0.5),
        "toggle_pattern": TogglePattern.CLUTCH_ONLY,
        "cheat_types": {CheatType.AIMBOT},
        "humanization": HumanizationConfig(
            reaction_delay_ms=(80.0, 150.0),
            aim_smoothing=(0.5, 0.8),
            random_miss_rate=(0.10, 0.18),
            fov_degrees=(10.0, 25.0),
        ),
        "base_skill_multiplier": (0.5, 0.7),
    },
    # Hardest to spot: low intensity, heavy smoothing, added aim noise,
    # only toggled on while losing.
    "closet_subtle": {
        "intensity": (0.15, 0.35),
        "toggle_pattern": TogglePattern.LOSING_ONLY,
        "cheat_types": {CheatType.AIMBOT, CheatType.TRIGGERBOT},
        "humanization": HumanizationConfig(
            reaction_delay_ms=(150.0, 250.0),
            aim_smoothing=(0.8, 0.95),
            random_miss_rate=(0.15, 0.25),
            fov_degrees=(3.0, 10.0),
            noise_amplitude=(0.5, 1.5),
        ),
        "base_skill_multiplier": (0.6, 0.8),
    },
    # Information cheat only: no aim assistance, disguises via suppressed
    # prefires and delayed "checks" of known enemy positions.
    "wallhack_only": {
        "intensity": (0.4, 0.7),
        "toggle_pattern": TogglePattern.ALWAYS,
        "cheat_types": {CheatType.WALLHACK},
        "humanization": HumanizationConfig(
            prefire_suppression=0.7,
            check_delay_ms=(500.0, 1500.0),
        ),
        "base_skill_multiplier": (0.7, 0.9),
    },
}
106
+
107
+
108
@dataclass
class CheatConfig:
    """Concrete, fully-sampled configuration for one cheater."""
    profile_name: str
    cheat_types: Set[CheatType]
    intensity: float
    toggle_pattern: TogglePattern
    humanization: Dict[str, float]
    base_skill_multiplier: float

    @classmethod
    def from_profile(
        cls,
        profile_name: str,
        seed: Optional[int] = None,
    ) -> CheatConfig:
        """Build a concrete cheater configuration by sampling a named profile.

        Args:
            profile_name: Key into CHEAT_PROFILES.
            seed: Optional seed for reproducible sampling.

        Raises:
            ValueError: If the profile name is not in CHEAT_PROFILES.
        """
        if profile_name not in CHEAT_PROFILES:
            raise ValueError(f"Unknown profile: {profile_name}. Valid: {list(CHEAT_PROFILES.keys())}")

        rng = np.random.default_rng(seed)
        spec = CHEAT_PROFILES[profile_name]

        # Sampling order is fixed for seeded reproducibility:
        # intensity, then skill multiplier, then humanization knobs.
        sampled_intensity = rng.uniform(*spec["intensity"])
        skill_scale = rng.uniform(*spec["base_skill_multiplier"])
        human_knobs = spec["humanization"].sample(rng)

        return cls(
            profile_name=profile_name,
            cheat_types=spec["cheat_types"].copy(),  # copy: profiles are shared
            intensity=sampled_intensity,
            toggle_pattern=spec["toggle_pattern"],
            humanization=human_knobs,
            base_skill_multiplier=skill_scale,
        )
143
+
144
+
145
@dataclass
class CheatBehavior:
    """Runtime cheat behavior with toggle logic."""
    config: CheatConfig
    is_active: bool = True  # whether the cheat is currently switched on
    rounds_since_toggle: int = 0

    @classmethod
    def from_profile(cls, profile_name: str, seed: Optional[int] = None) -> CheatBehavior:
        """Convenience constructor: sample a CheatConfig from a named profile."""
        config = CheatConfig.from_profile(profile_name, seed)
        return cls(config=config)

    @property
    def toggle_pattern(self) -> TogglePattern:
        return self.config.toggle_pattern

    def should_activate(
        self,
        is_clutch: bool = False,
        is_losing: bool = False,
        round_number: int = 0,
        rng: Optional[np.random.Generator] = None,
    ) -> bool:
        """Determine if cheat should be active this round.

        Defaults to True for any unrecognized pattern.
        """
        pattern = self.config.toggle_pattern

        if pattern == TogglePattern.ALWAYS:
            return True
        elif pattern == TogglePattern.CLUTCH_ONLY:
            return is_clutch
        elif pattern == TogglePattern.LOSING_ONLY:
            return is_losing
        elif pattern == TogglePattern.RANDOM:
            if rng is None:
                rng = np.random.default_rng()
            return rng.random() < 0.5

        return True

    def get_aim_modification(
        self,
        target_angle: float,
        current_angle: float,
        rng: Optional[np.random.Generator] = None,
    ) -> float:
        """Calculate aim correction from cheat.

        Args:
            target_angle: Angle to the target.
            current_angle: Player's current aim angle.
            rng: Optional generator for the humanization noise. Fix: the
                other random methods here already accept an rng; threading
                one through makes noise reproducible. Falls back to the
                global NumPy RNG when omitted (previous behavior).

        Returns:
            The angle correction to apply; 0.0 when no aimbot is equipped,
            the cheat is toggled off, or the target is outside the FOV.
        """
        if CheatType.AIMBOT not in self.config.cheat_types:
            return 0.0
        if not self.is_active:
            return 0.0

        angle_diff = target_angle - current_angle
        fov_limit = self.config.humanization["fov_degrees"]

        # Aimbot only engages targets inside its configured FOV cone.
        if abs(angle_diff) > fov_limit:
            return 0.0

        smoothing = self.config.humanization["aim_smoothing"]
        intensity = self.config.intensity

        # Smoothed correction: higher smoothing / lower intensity => subtler pull.
        correction = angle_diff * (1.0 - smoothing) * intensity

        # Humanization noise (only closet_subtle configures a non-zero amplitude).
        noise_amp = self.config.humanization.get("noise_amplitude", 0.0)
        if noise_amp > 0:
            if rng is not None:
                correction += rng.normal(0, noise_amp)
            else:
                correction += np.random.normal(0, noise_amp)

        return correction

    def get_reaction_delay(self) -> float:
        """Get reaction delay in ms."""
        return self.config.humanization["reaction_delay_ms"]

    def should_miss_intentionally(self, rng: Optional[np.random.Generator] = None) -> bool:
        """Check if should intentionally miss (humanization)."""
        if rng is None:
            rng = np.random.default_rng()
        miss_rate = self.config.humanization["random_miss_rate"]
        return rng.random() < miss_rate
src/manifold/data/dataset.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import torch
3
+ from torch.utils.data import Dataset, DataLoader
4
+ import numpy as np
5
+ from pathlib import Path
6
+ from typing import Optional, Dict, Any, List, Union
7
+ import pyarrow.parquet as pq
8
+
9
+ from manifold.data.generator import SyntheticDataGenerator, PlayerSession
10
+
11
+
12
class MANIFOLDDataset(Dataset):
    """
    PyTorch Dataset for MANIFOLD training data.

    Supports loading from Parquet files and on-the-fly generation.
    Handles sequence padding/truncation for consistent tensor shapes.
    """

    def __init__(
        self,
        data: Optional[np.ndarray] = None,
        labels: Optional[np.ndarray] = None,
        sequence_length: int = 128,
        pad_value: float = 0.0,
    ):
        # data: [num_sessions, seq_len, num_features]; labels: [num_sessions]
        self.data = data
        self.labels = labels
        self.sequence_length = sequence_length
        self.pad_value = pad_value

    def __len__(self) -> int:
        if self.data is None:
            return 0
        return len(self.data)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        sample = self.data[idx]
        target = self.labels[idx]
        max_len = self.sequence_length
        n_real = sample.shape[0]

        if n_real >= max_len:
            # Truncate to the fixed window; every retained tick is valid.
            sample = sample[:max_len]
            valid = np.ones(max_len)
        else:
            # Right-pad with the fill value and zero out the mask there.
            n_pad = max_len - n_real
            fill = np.full((n_pad, sample.shape[1]), self.pad_value)
            sample = np.concatenate([sample, fill], axis=0)
            valid = np.concatenate([np.ones(n_real), np.zeros(n_pad)])

        return {
            "features": torch.tensor(sample, dtype=torch.float32),
            "labels": torch.tensor(target, dtype=torch.long),
            "mask": torch.tensor(valid, dtype=torch.float32),
        }

    @classmethod
    def from_parquet(cls, path: Union[str, Path], **kwargs) -> "MANIFOLDDataset":
        """Load a dataset from a Parquet file with `features`/`label` columns."""
        frame = pq.read_table(path).to_pandas()
        feature_array = np.array(frame["features"].tolist())
        label_array = np.array(frame["label"].tolist())
        return cls(data=feature_array, labels=label_array, **kwargs)

    @classmethod
    def from_generator(
        cls,
        num_samples: int,
        cheater_ratio: float = 0.3,
        seed: Optional[int] = None,
        **kwargs
    ) -> "MANIFOLDDataset":
        """Synthesize a dataset on the fly with the given cheater fraction."""
        generator = SyntheticDataGenerator(seed=seed)
        num_cheaters = int(num_samples * cheater_ratio)

        sessions = generator.generate_batch(num_samples - num_cheaters, num_cheaters)

        # Label 2 = cheater, 0 = legit (matches the project's class scheme).
        feature_array = np.array([session.to_tensor() for session in sessions])
        label_array = np.array([2 if session.is_cheater else 0 for session in sessions])

        return cls(data=feature_array, labels=label_array, **kwargs)
86
+
87
+
88
def create_dataloader(
    dataset: MANIFOLDDataset,
    batch_size: int = 32,
    shuffle: bool = True,
    num_workers: int = 4,
    pin_memory: bool = True,
) -> DataLoader:
    """Wrap *dataset* in a DataLoader with training-friendly defaults.

    Incomplete trailing batches are always dropped so every batch has a
    uniform shape.
    """
    loader_options = dict(
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        drop_last=True,
    )
    return DataLoader(dataset, **loader_options)
103
+
104
+
105
def collate_fn(batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    """Stack per-sample dicts into batched tensors, key by key."""
    return {
        key: torch.stack([sample[key] for sample in batch])
        for key in ("features", "labels", "mask")
    }
src/manifold/data/generator.py ADDED
@@ -0,0 +1,749 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import numpy as np
3
+ from dataclasses import dataclass, field
4
+ from typing import Optional, Dict, Any, List, Iterator
5
+ import random
6
+
7
+ from manifold.data.profiles import PlayerProfile, SkillVector, RANK_STATISTICS, generate_correlated_skills, Rank
8
+ from manifold.data.cheats import CheatBehavior, CHEAT_PROFILES, CheatType
9
+ from manifold.data.trajectories import (
10
+ generate_human_trajectory,
11
+ generate_aimbot_trajectory,
12
+ fitts_law_time,
13
+ extract_trajectory_features,
14
+ )
15
+ from manifold.data.temporal import SessionSimulator, SessionState
16
+
17
+
18
# Extended trajectory feature names for 25 features.
# NOTE: the order of this list defines the trajectory slice of
# EngagementData.to_tensor() and the keys returned by
# extract_extended_trajectory_features() — keep all three in sync.
TRAJECTORY_FEATURE_NAMES = [
    "max_jerk", "mean_jerk", "jerk_variance", "jerk_skewness", "jerk_kurtosis",
    "path_efficiency", "velocity_peak_timing", "max_velocity", "mean_velocity", "velocity_variance",
    "max_acceleration", "mean_acceleration", "acceleration_variance",
    "total_distance", "direct_distance", "x_displacement", "y_displacement",
    "direction_changes", "smoothness_index", "curvature_mean", "curvature_variance",
    "overshoot_magnitude", "correction_count", "final_error", "movement_duration_ratio",
]
27
+
28
+
29
def extract_extended_trajectory_features(trajectory: np.ndarray) -> Dict[str, float]:
    """
    Extract 25 features from trajectory for cheat detection.

    Extends the base extract_trajectory_features with additional metrics.
    The returned keys match TRAJECTORY_FEATURE_NAMES exactly; the first
    few values (max_jerk, mean_jerk, jerk_variance, path_efficiency,
    velocity_peak_timing) are copied through from the base extractor.

    Args:
        trajectory: Delta trajectory [n_ticks, 2] — per-tick (dx, dy)
            crosshair deltas, not absolute positions.

    Returns:
        Dict of 25 extracted features (all zeros for trajectories with
        fewer than 3 ticks).
    """
    # Start with base features
    # NOTE(review): the base extractor is invoked even for trajectories
    # shorter than 3 ticks — presumably it handles that case itself.
    base_features = extract_trajectory_features(trajectory)

    if len(trajectory) < 3:
        # Return zeros for all features
        return {name: 0.0 for name in TRAJECTORY_FEATURE_NAMES}

    # Compute velocity, acceleration, jerk
    # (per-tick speed and its first/second discrete differences)
    velocity = np.linalg.norm(trajectory, axis=1)
    acceleration = np.diff(velocity) if len(velocity) > 1 else np.array([0.0])
    jerk = np.diff(acceleration) if len(acceleration) > 1 else np.array([0.0])

    # Jerk statistics (extended)
    jerk_skewness = float(_safe_skewness(jerk)) if len(jerk) > 2 else 0.0
    jerk_kurtosis = float(_safe_kurtosis(jerk)) if len(jerk) > 3 else 0.0

    # Velocity statistics
    max_velocity = float(np.max(velocity)) if len(velocity) > 0 else 0.0
    mean_velocity = float(np.mean(velocity)) if len(velocity) > 0 else 0.0
    velocity_variance = float(np.var(velocity)) if len(velocity) > 0 else 0.0

    # Acceleration statistics
    max_acceleration = float(np.max(np.abs(acceleration))) if len(acceleration) > 0 else 0.0
    mean_acceleration = float(np.mean(np.abs(acceleration))) if len(acceleration) > 0 else 0.0
    acceleration_variance = float(np.var(acceleration)) if len(acceleration) > 0 else 0.0

    # Distance metrics: path length vs. straight-line displacement
    total_distance = float(np.sum(velocity))
    cumulative_displacement = np.cumsum(trajectory, axis=0)
    direct_distance = float(np.linalg.norm(cumulative_displacement[-1])) if len(cumulative_displacement) > 0 else 0.0
    x_displacement = float(cumulative_displacement[-1, 0]) if len(cumulative_displacement) > 0 else 0.0
    y_displacement = float(cumulative_displacement[-1, 1]) if len(cumulative_displacement) > 0 else 0.0

    # Direction changes
    if len(trajectory) > 1:
        angles = np.arctan2(trajectory[:, 1], trajectory[:, 0])
        angle_diffs = np.abs(np.diff(angles))
        # Threshold of 45 degrees; NOTE(review): no wrap-around handling,
        # so a small turn crossing +/-pi also counts as a change.
        direction_changes = float(np.sum(angle_diffs > np.pi / 4))  # Count significant direction changes
    else:
        direction_changes = 0.0

    # Smoothness index (inverse of total squared jerk); 1.0 = perfectly smooth
    smoothness_index = 1.0 / (1.0 + np.sum(jerk**2)) if len(jerk) > 0 else 1.0

    # Curvature statistics
    if len(trajectory) > 2:
        # Approximate curvature as angle change per unit distance
        # (normalized cross product of consecutive delta vectors).
        curvatures = []
        for i in range(1, len(trajectory) - 1):
            v1 = trajectory[i]
            v2 = trajectory[i + 1]
            cross = v1[0] * v2[1] - v1[1] * v2[0]
            norm_product = (np.linalg.norm(v1) * np.linalg.norm(v2)) + 1e-8
            curvatures.append(abs(cross / norm_product))
        curvature_mean = float(np.mean(curvatures)) if curvatures else 0.0
        curvature_variance = float(np.var(curvatures)) if curvatures else 0.0
    else:
        curvature_mean = 0.0
        curvature_variance = 0.0

    # Overshoot detection (when trajectory goes past target then comes back)
    positions = np.cumsum(trajectory, axis=0)
    if len(positions) > 1:
        final_pos = positions[-1]
        distances_to_final = np.linalg.norm(positions - final_pos, axis=1)
        # Overshoot = minimum distance was achieved before the end
        min_dist_idx = np.argmin(distances_to_final[:-1]) if len(distances_to_final) > 1 else 0
        overshoot_magnitude = float(np.max(distances_to_final[min_dist_idx:])) if min_dist_idx < len(distances_to_final) - 1 else 0.0
    else:
        overshoot_magnitude = 0.0

    # Correction count (velocity sign changes)
    # i.e. how often the speed profile switches between speeding up and slowing down
    if len(velocity) > 1:
        vel_diff = np.diff(velocity)
        correction_count = float(np.sum(np.abs(np.diff(np.sign(vel_diff))) > 0)) if len(vel_diff) > 1 else 0.0
    else:
        correction_count = 0.0

    # Final error (assuming target is at cumulative endpoint - would need target info)
    # For now, use variance of final positions as proxy
    final_error = float(np.linalg.norm(trajectory[-1])) if len(trajectory) > 0 else 0.0

    # Movement duration ratio (proportion of time with significant movement)
    movement_threshold = 0.01 * max_velocity if max_velocity > 0 else 0.01
    movement_duration_ratio = float(np.mean(velocity > movement_threshold)) if len(velocity) > 0 else 0.0

    return {
        "max_jerk": base_features["max_jerk"],
        "mean_jerk": base_features["mean_jerk"],
        "jerk_variance": base_features["jerk_variance"],
        "jerk_skewness": jerk_skewness,
        "jerk_kurtosis": jerk_kurtosis,
        "path_efficiency": base_features["path_efficiency"],
        "velocity_peak_timing": base_features["velocity_peak_timing"],
        "max_velocity": max_velocity,
        "mean_velocity": mean_velocity,
        "velocity_variance": velocity_variance,
        "max_acceleration": max_acceleration,
        "mean_acceleration": mean_acceleration,
        "acceleration_variance": acceleration_variance,
        "total_distance": total_distance,
        "direct_distance": direct_distance,
        "x_displacement": x_displacement,
        "y_displacement": y_displacement,
        "direction_changes": direction_changes,
        "smoothness_index": smoothness_index,
        "curvature_mean": curvature_mean,
        "curvature_variance": curvature_variance,
        "overshoot_magnitude": overshoot_magnitude,
        "correction_count": correction_count,
        "final_error": final_error,
        "movement_duration_ratio": movement_duration_ratio,
    }
154
+
155
+
156
+ def _safe_skewness(arr: np.ndarray) -> float:
157
+ """Compute skewness safely."""
158
+ if len(arr) < 3:
159
+ return 0.0
160
+ mean = np.mean(arr)
161
+ std = np.std(arr)
162
+ if std < 1e-8:
163
+ return 0.0
164
+ return float(np.mean(((arr - mean) / std) ** 3))
165
+
166
+
167
+ def _safe_kurtosis(arr: np.ndarray) -> float:
168
+ """Compute kurtosis safely."""
169
+ if len(arr) < 4:
170
+ return 0.0
171
+ mean = np.mean(arr)
172
+ std = np.std(arr)
173
+ if std < 1e-8:
174
+ return 0.0
175
+ return float(np.mean(((arr - mean) / std) ** 4) - 3.0)
176
+
177
+
178
@dataclass
class EngagementData:
    """Single engagement - the atomic training unit with 64 features.

    Field groups mirror the layout of to_tensor():
    context [12] + pre-engagement [8] + trajectory [25] + timing [10]
    + accuracy [9] = 64 features. The label fields at the bottom are
    NOT part of the feature vector.
    """
    # Context features [12]
    enemy_distance: float
    enemy_velocity: float
    player_velocity: float
    player_health: float
    enemy_health: float
    weapon_type: int
    is_scoped: bool
    is_crouched: bool
    round_time_remaining: float
    score_differential: float
    is_clutch: bool
    enemies_alive: int

    # Pre-engagement features [8]
    crosshair_angle_to_hidden_enemy: float
    time_tracking_hidden_ms: float
    prefire_indicator: bool
    check_pattern_efficiency: float
    rotation_timing_vs_enemy: float
    flank_awareness_score: float
    info_advantage_score: float
    position_optimality: float

    # Trajectory features [25] - from extract_trajectory_features
    # (keys expected to match TRAJECTORY_FEATURE_NAMES; missing keys
    # are filled with 0.0 in to_tensor)
    trajectory_features: Dict[str, float] = field(default_factory=dict)

    # Timing features [10]
    reaction_time_ms: float = 0.0
    time_to_first_shot_ms: float = 0.0
    time_to_damage_ms: float = 0.0
    time_to_kill_ms: float = 0.0
    shot_timing_variance: float = 0.0
    inter_shot_interval_mean: float = 0.0
    inter_shot_interval_cv: float = 0.0
    crosshair_on_enemy_to_shot_ms: float = 0.0
    anticipatory_shot_rate: float = 0.0
    perfect_timing_rate: float = 0.0

    # Accuracy features [9]
    shots_fired: int = 0
    shots_hit: int = 0
    headshots: int = 0
    damage_dealt: float = 0.0
    spray_accuracy: float = 0.0
    first_bullet_accuracy: float = 0.0
    headshot_rate: float = 0.0
    damage_efficiency: float = 0.0
    kill_secured: bool = False

    # Labels (ground truth for supervision; excluded from the tensor)
    is_cheater: bool = False
    cheat_type: str = "none"
    cheat_intensity: float = 0.0
    cheat_active_this_engagement: bool = False

    def to_tensor(self) -> np.ndarray:
        """Convert to 64-dim feature vector.

        Returns:
            float32 array of shape [64]; booleans are cast to 0.0/1.0
            and trajectory features are ordered by TRAJECTORY_FEATURE_NAMES.
        """
        # Context features [12]
        context = [
            self.enemy_distance,
            self.enemy_velocity,
            self.player_velocity,
            self.player_health,
            self.enemy_health,
            float(self.weapon_type),
            float(self.is_scoped),
            float(self.is_crouched),
            self.round_time_remaining,
            self.score_differential,
            float(self.is_clutch),
            float(self.enemies_alive),
        ]

        # Pre-engagement features [8]
        pre_engagement = [
            self.crosshair_angle_to_hidden_enemy,
            self.time_tracking_hidden_ms,
            float(self.prefire_indicator),
            self.check_pattern_efficiency,
            self.rotation_timing_vs_enemy,
            self.flank_awareness_score,
            self.info_advantage_score,
            self.position_optimality,
        ]

        # Trajectory features [25]
        trajectory = [
            self.trajectory_features.get(name, 0.0)
            for name in TRAJECTORY_FEATURE_NAMES
        ]

        # Timing features [10]
        timing = [
            self.reaction_time_ms,
            self.time_to_first_shot_ms,
            self.time_to_damage_ms,
            self.time_to_kill_ms,
            self.shot_timing_variance,
            self.inter_shot_interval_mean,
            self.inter_shot_interval_cv,
            self.crosshair_on_enemy_to_shot_ms,
            self.anticipatory_shot_rate,
            self.perfect_timing_rate,
        ]

        # Accuracy features [9]
        accuracy = [
            float(self.shots_fired),
            float(self.shots_hit),
            float(self.headshots),
            self.damage_dealt,
            self.spray_accuracy,
            self.first_bullet_accuracy,
            self.headshot_rate,
            self.damage_efficiency,
            float(self.kill_secured),
        ]

        # Concatenate all: 12 + 8 + 25 + 10 + 9 = 64
        features = context + pre_engagement + trajectory + timing + accuracy
        return np.array(features, dtype=np.float32)
303
+
304
+
305
@dataclass
class PlayerSession:
    """Complete player session with multiple engagements.

    Aggregates one player's per-engagement feature records together with
    the session-level ground-truth label used for training.
    """
    player_id: str                              # unique id (taken from profile.profile_id by the generator)
    profile: PlayerProfile                      # skill profile used to synthesize this session
    engagements: List[EngagementData]           # ordered engagement records
    is_cheater: bool                            # session-level ground-truth label
    cheat_profile: Optional[str] = None         # CHEAT_PROFILES key, or None for legit players
    rank: str = "gold_nova"                     # rank tier string

    def to_tensor(self) -> np.ndarray:
        """Convert to tensor [num_engagements, 64].

        Raises (via np.stack) if the session has no engagements.
        """
        return np.stack([e.to_tensor() for e in self.engagements])
318
+
319
+
320
# Weapon type mapping: weapon class name -> integer id used as the
# `weapon_type` context feature in EngagementData (cast to float there).
WEAPON_TYPES = {
    "rifle": 0,
    "smg": 1,
    "pistol": 2,
    "awp": 3,
    "shotgun": 4,
    "machine_gun": 5,
}
329
+
330
+
331
class SyntheticDataGenerator:
    """
    Generate synthetic CS2 player behavior data.

    Orchestrates all data modules to create realistic player sessions
    with proper skill modeling, cheat injection, and temporal dynamics.

    All randomness flows through a single np.random.Generator (self.rng)
    so a fixed seed reproduces an identical batch.
    """

    def __init__(
        self,
        seed: Optional[int] = None,
        engagements_per_session: int = 200,
    ):
        # Single generator shared across all sampling for reproducibility.
        self.rng = np.random.default_rng(seed)
        self.engagements_per_session = engagements_per_session
        self._seed = seed

    def generate_player(
        self,
        is_cheater: bool = False,
        rank: Optional[str] = None,
        cheat_profile: Optional[str] = None,
    ) -> PlayerSession:
        """Generate a single player session.

        Args:
            is_cheater: Whether to inject cheat behavior into the session.
            rank: Rank tier string; sampled uniformly from five tiers
                when omitted.
            cheat_profile: CHEAT_PROFILES key; sampled at random for
                cheaters when omitted. Ignored for legit players.

        Returns:
            A PlayerSession whose engagements span a simulated 24-round
            match, with per-engagement cheat-activity labels set.
        """
        # 1. Create player profile with correlated skills
        if rank is None:
            rank = self.rng.choice(["silver", "gold_nova", "master_guardian", "legendary_eagle", "supreme_global"])

        profile = PlayerProfile.generate(
            rank=rank,
            seed=int(self.rng.integers(0, 2**31)),
        )
        profile.is_cheater = is_cheater

        # 2. Initialize session simulator for temporal dynamics
        session = SessionSimulator.from_skill(
            mental_resilience=profile.skill_vector.mental_resilience,
            seed=int(self.rng.integers(0, 2**31)),
        )

        # 3. Setup cheat behavior if cheater
        cheat_behavior: Optional[CheatBehavior] = None
        if is_cheater:
            if cheat_profile is None:
                cheat_profile = self.rng.choice(list(CHEAT_PROFILES.keys()))
            cheat_behavior = CheatBehavior.from_profile(
                cheat_profile,
                seed=int(self.rng.integers(0, 2**31)),
            )

        # 4. Generate engagements
        engagements: List[EngagementData] = []

        # Spread engagements_per_session evenly over the match; the first
        # `extra_engagements` rounds absorb the remainder.
        rounds_per_match = 24
        base_engagements_per_round = self.engagements_per_session // rounds_per_match
        extra_engagements = self.engagements_per_session % rounds_per_match

        score_differential = 0
        for round_num in range(rounds_per_match):
            engagements_this_round = base_engagements_per_round + (1 if round_num < extra_engagements else 0)
            # Determine round context
            is_losing = score_differential < -3
            round_won = self.rng.random() < 0.5

            for eng_num in range(engagements_this_round):
                # Only the last engagement of a round can be a clutch (20% chance).
                is_clutch = (eng_num == engagements_this_round - 1) and self.rng.random() < 0.2

                # Update session state (fatigue/tilt dynamics in SessionSimulator)
                event = "idle"
                if eng_num == 0:
                    event = "round_win" if round_won else "round_loss"
                elif self.rng.random() < 0.3:
                    event = "kill" if self.rng.random() < 0.5 else "death"

                if is_clutch:
                    session.update("clutch_situation", 1000.0)
                else:
                    session.update(event, 3000.0)

                # Generate game context
                game_context = {
                    "round_time_remaining": 115.0 - (eng_num * 10.0) + self.rng.uniform(-5, 5),
                    "score_differential": score_differential,
                    "is_clutch": is_clutch,
                    "enemies_alive": max(1, 5 - eng_num // 2),
                    "is_losing": is_losing,
                }

                # Determine if cheat is active this engagement
                cheat_active = False
                if cheat_behavior is not None:
                    cheat_active = cheat_behavior.should_activate(
                        is_clutch=is_clutch,
                        is_losing=is_losing,
                        round_number=round_num,
                        rng=self.rng,
                    )
                    cheat_behavior.is_active = cheat_active

                engagement = self.generate_engagement(
                    profile=profile,
                    session=session,
                    cheat_behavior=cheat_behavior if cheat_active else None,
                    game_context=game_context,
                )

                # Set labels (session-level flag plus per-engagement activity)
                engagement.is_cheater = is_cheater
                engagement.cheat_type = cheat_profile if is_cheater else "none"
                engagement.cheat_intensity = cheat_behavior.config.intensity if cheat_behavior else 0.0
                engagement.cheat_active_this_engagement = cheat_active

                engagements.append(engagement)

            # Update score
            if round_won:
                score_differential += 1
            else:
                score_differential -= 1

        return PlayerSession(
            player_id=profile.profile_id,
            profile=profile,
            engagements=engagements,
            is_cheater=is_cheater,
            cheat_profile=cheat_profile,
            rank=rank,
        )

    def generate_engagement(
        self,
        profile: PlayerProfile,
        session: SessionSimulator,
        cheat_behavior: Optional[CheatBehavior] = None,
        game_context: Optional[Dict[str, Any]] = None,
    ) -> EngagementData:
        """Generate a single engagement.

        Args:
            profile: Player skill profile driving baseline performance.
            session: Session simulator supplying fatigue/tilt modifiers.
            cheat_behavior: Active cheat behavior, or None if no cheat is
                active for this engagement (the caller passes None when
                the cheat is toggled off).
            game_context: Optional round context (round_time_remaining,
                score_differential, is_clutch, enemies_alive, is_losing).

        Returns:
            Fully populated EngagementData (label fields are left at
            their defaults; the caller sets them).
        """
        if game_context is None:
            game_context = {}

        skills = profile.skill_vector
        rank_stats = RANK_STATISTICS[profile.rank]

        # Get session modifiers
        modifiers = session.get_modifiers()

        # 1. Generate context features
        enemy_distance = self.rng.uniform(5.0, 50.0)  # meters
        enemy_velocity = self.rng.uniform(0.0, 250.0)  # units/s
        player_velocity = self.rng.uniform(0.0, 250.0)
        player_health = self.rng.uniform(20.0, 100.0)
        enemy_health = self.rng.uniform(20.0, 100.0)
        weapon_type = int(self.rng.integers(0, len(WEAPON_TYPES)))
        is_scoped = weapon_type == WEAPON_TYPES["awp"] and self.rng.random() < 0.7
        is_crouched = self.rng.random() < 0.3
        round_time_remaining = game_context.get("round_time_remaining", 90.0)
        score_differential = game_context.get("score_differential", 0)
        is_clutch = game_context.get("is_clutch", False)
        enemies_alive = game_context.get("enemies_alive", 3)

        # 2. Generate pre-engagement features
        # These are affected by game sense and wallhack cheats
        has_wallhack = (
            cheat_behavior is not None and
            CheatType.WALLHACK in cheat_behavior.config.cheat_types
        )

        game_sense_effective = skills.game_sense * modifiers.get("game_sense_mult", 1.0)

        # Crosshair angle to hidden enemy (wallhackers track better)
        if has_wallhack:
            crosshair_angle_to_hidden = self.rng.uniform(0.0, 15.0)  # Suspiciously good
            time_tracking_hidden = self.rng.uniform(500.0, 2000.0)  # Long tracking time
            prefire_indicator = self.rng.random() < (0.3 * (1.0 - cheat_behavior.config.humanization.get("prefire_suppression", 0.0)))
        else:
            crosshair_angle_to_hidden = self.rng.uniform(20.0, 90.0) * (1.0 - game_sense_effective / 150.0)
            time_tracking_hidden = self.rng.exponential(200.0)
            prefire_indicator = self.rng.random() < (0.05 * game_sense_effective / 100.0)

        check_pattern_efficiency = (game_sense_effective / 100.0) * self.rng.uniform(0.7, 1.0)
        rotation_timing_vs_enemy = self.rng.uniform(0.3, 1.0) * (0.5 + game_sense_effective / 200.0)
        flank_awareness_score = self.rng.uniform(0.2, 1.0) * (0.5 + game_sense_effective / 200.0)
        info_advantage_score = self.rng.uniform(0.0, 1.0)
        position_optimality = self.rng.uniform(0.3, 1.0) * (0.5 + game_sense_effective / 200.0)

        if has_wallhack:
            # Wallhack inflates positioning/awareness scores (capped at 1.0).
            check_pattern_efficiency = min(1.0, check_pattern_efficiency * 1.3)
            rotation_timing_vs_enemy = min(1.0, rotation_timing_vs_enemy * 1.2)
            flank_awareness_score = min(1.0, flank_awareness_score * 1.4)

        # 3. Generate trajectory
        # Start and target angles (degrees; yaw, pitch)
        start_angle = np.array([
            self.rng.uniform(-30.0, 30.0),
            self.rng.uniform(-20.0, 20.0),
        ])
        target_angle = np.array([
            self.rng.uniform(-5.0, 5.0),
            self.rng.uniform(-3.0, 3.0),
        ])

        # Movement time based on Fitts' law and skill
        target_width = 3.0 if weapon_type == WEAPON_TYPES["awp"] else 8.0  # Hitbox size in degrees
        distance = np.linalg.norm(target_angle - start_angle)

        base_movement_time = fitts_law_time(distance, target_width, a=0.1, b=0.15)
        skill_factor = (100.0 - skills.raw_aim) / 100.0  # Lower skill = longer time
        movement_time_s = base_movement_time * (1.0 + skill_factor * 0.5) * modifiers.get("reaction_time_mult", 1.0)
        movement_time_ms = movement_time_s * 1000.0

        # Generate human trajectory
        trajectory = generate_human_trajectory(
            start_angle=start_angle,
            target_angle=target_angle,
            skill_aim=skills.raw_aim,
            skill_consistency=skills.consistency,
            duration_ms=movement_time_ms,
            tick_rate=128,
            rng=self.rng,
        )

        # 4. Apply aimbot modification if active
        has_aimbot = (
            cheat_behavior is not None and
            CheatType.AIMBOT in cheat_behavior.config.cheat_types
        )

        if has_aimbot:
            n_ticks = len(trajectory)
            # Target positions (enemy moves slightly)
            target_positions = np.tile(target_angle, (n_ticks, 1))
            target_positions += self.rng.normal(0, 0.5, (n_ticks, 2))  # Small enemy movement

            trajectory = generate_aimbot_trajectory(
                natural_trajectory=trajectory,
                target_positions=target_positions,
                intensity=cheat_behavior.config.intensity,
                humanization=cheat_behavior.config.humanization,
                rng=self.rng,
            )

        # 5. Extract trajectory features
        trajectory_features = extract_extended_trajectory_features(trajectory)

        # 6. Compute timing features
        rt_low, rt_high = rank_stats["reaction_time_ms"]
        base_reaction_time = self.rng.uniform(rt_low, rt_high)

        # Apply session modifiers
        reaction_time_ms = base_reaction_time * modifiers.get("reaction_time_mult", 1.0) * modifiers.get("focus_reaction_mult", 1.0)

        # Aimbot affects reaction time (but adds artificial delay for humanization)
        if has_aimbot:
            cheat_delay = cheat_behavior.config.humanization.get("reaction_delay_ms", 0.0)
            reaction_time_ms = max(50.0, reaction_time_ms * 0.7 + cheat_delay)

        # Triggerbot affects time to first shot
        has_triggerbot = (
            cheat_behavior is not None and
            CheatType.TRIGGERBOT in cheat_behavior.config.cheat_types
        )

        time_to_first_shot_ms = reaction_time_ms + movement_time_ms * 0.5
        if has_triggerbot:
            # Triggerbot fires instantly when crosshair is on target
            time_to_first_shot_ms *= 0.6  # Suspiciously fast

        time_to_damage_ms = time_to_first_shot_ms + self.rng.uniform(0, 100)
        time_to_kill_ms = time_to_damage_ms + self.rng.uniform(100, 500)

        shot_timing_variance = self.rng.uniform(10.0, 50.0) * (1.0 - skills.consistency / 150.0)
        if has_triggerbot:
            shot_timing_variance *= 0.3  # Too consistent

        inter_shot_interval_mean = 100.0 + self.rng.uniform(-20, 50)  # ms
        inter_shot_interval_cv = self.rng.uniform(0.1, 0.4) * (1.0 - skills.consistency / 150.0)

        crosshair_on_enemy_to_shot_ms = self.rng.uniform(50, 200) * (1.0 - skills.reaction_speed / 150.0)
        if has_triggerbot:
            crosshair_on_enemy_to_shot_ms = self.rng.uniform(5, 30)  # Inhuman speed

        anticipatory_shot_rate = 0.02 + skills.game_sense / 1000.0
        if prefire_indicator:
            anticipatory_shot_rate += 0.1

        perfect_timing_rate = skills.consistency / 200.0
        if has_triggerbot:
            perfect_timing_rate = min(1.0, perfect_timing_rate * 2.0)

        # 7. Compute accuracy features
        base_accuracy = self.rng.uniform(*rank_stats["accuracy"])
        accuracy = base_accuracy * modifiers.get("accuracy_mult", 1.0)

        if has_aimbot:
            # Aimbot improves accuracy but may intentionally miss
            if cheat_behavior.should_miss_intentionally(self.rng):
                accuracy *= 0.5
            else:
                accuracy = min(1.0, accuracy + 0.3 * cheat_behavior.config.intensity)

        shots_fired = int(self.rng.integers(3, 15))
        shots_hit = int(np.clip(shots_fired * accuracy * self.rng.uniform(0.8, 1.2), 0, shots_fired))

        hs_low, hs_high = rank_stats["hs_percent"]
        hs_rate = self.rng.uniform(hs_low, hs_high)
        if has_aimbot:
            hs_rate = min(1.0, hs_rate + 0.2 * cheat_behavior.config.intensity)

        headshots = int(np.clip(shots_hit * hs_rate, 0, shots_hit))

        damage_per_hit = 25.0 + headshots * 75.0 / max(1, shots_hit)  # Headshots do more damage
        damage_dealt = shots_hit * damage_per_hit

        spray_accuracy = accuracy * self.rng.uniform(0.6, 1.0) * (0.5 + skills.spray_control / 200.0)
        first_bullet_accuracy = accuracy * self.rng.uniform(0.8, 1.2) * (0.5 + skills.crosshair_placement / 200.0)

        headshot_rate = headshots / max(1, shots_hit)
        damage_efficiency = damage_dealt / max(1, shots_fired * 100.0)

        kill_secured = damage_dealt >= enemy_health

        return EngagementData(
            # Context features
            enemy_distance=enemy_distance,
            enemy_velocity=enemy_velocity,
            player_velocity=player_velocity,
            player_health=player_health,
            enemy_health=enemy_health,
            weapon_type=weapon_type,
            is_scoped=is_scoped,
            is_crouched=is_crouched,
            round_time_remaining=round_time_remaining,
            score_differential=score_differential,
            is_clutch=is_clutch,
            enemies_alive=enemies_alive,
            # Pre-engagement features
            crosshair_angle_to_hidden_enemy=crosshair_angle_to_hidden,
            time_tracking_hidden_ms=time_tracking_hidden,
            prefire_indicator=prefire_indicator,
            check_pattern_efficiency=check_pattern_efficiency,
            rotation_timing_vs_enemy=rotation_timing_vs_enemy,
            flank_awareness_score=flank_awareness_score,
            info_advantage_score=info_advantage_score,
            position_optimality=position_optimality,
            # Trajectory features
            trajectory_features=trajectory_features,
            # Timing features
            reaction_time_ms=reaction_time_ms,
            time_to_first_shot_ms=time_to_first_shot_ms,
            time_to_damage_ms=time_to_damage_ms,
            time_to_kill_ms=time_to_kill_ms,
            shot_timing_variance=shot_timing_variance,
            inter_shot_interval_mean=inter_shot_interval_mean,
            inter_shot_interval_cv=inter_shot_interval_cv,
            crosshair_on_enemy_to_shot_ms=crosshair_on_enemy_to_shot_ms,
            anticipatory_shot_rate=anticipatory_shot_rate,
            perfect_timing_rate=perfect_timing_rate,
            # Accuracy features
            shots_fired=shots_fired,
            shots_hit=shots_hit,
            headshots=headshots,
            damage_dealt=damage_dealt,
            spray_accuracy=spray_accuracy,
            first_bullet_accuracy=first_bullet_accuracy,
            headshot_rate=headshot_rate,
            damage_efficiency=damage_efficiency,
            kill_secured=kill_secured,
        )

    def generate_batch(
        self,
        num_legit: int,
        num_cheaters: int,
        cheater_distribution: Optional[Dict[str, float]] = None,
    ) -> List[PlayerSession]:
        """Generate batch of player sessions.

        Args:
            num_legit: Number of legit player sessions.
            num_cheaters: Number of cheating player sessions.
            cheater_distribution: Mapping of cheat profile name ->
                sampling probability (must sum to 1.0 for rng.choice);
                a default five-profile distribution is used when omitted.

        Returns:
            Shuffled list of num_legit + num_cheaters sessions.
        """
        if cheater_distribution is None:
            # Default distribution across cheat profiles
            cheater_distribution = {
                "blatant_rage": 0.1,
                "obvious": 0.2,
                "closet_moderate": 0.3,
                "closet_subtle": 0.3,
                "wallhack_only": 0.1,
            }

        sessions: List[PlayerSession] = []

        # Generate legit players
        for _ in range(num_legit):
            sessions.append(self.generate_player(is_cheater=False))

        # Generate cheaters with distribution
        cheat_profiles = list(cheater_distribution.keys())
        cheat_probs = list(cheater_distribution.values())

        for _ in range(num_cheaters):
            profile = self.rng.choice(cheat_profiles, p=cheat_probs)
            sessions.append(self.generate_player(is_cheater=True, cheat_profile=profile))

        # Shuffle so legit and cheater sessions are interleaved
        self.rng.shuffle(sessions)

        return sessions

    def generate_stream(
        self,
        num_legit: int,
        num_cheaters: int,
    ) -> Iterator[PlayerSession]:
        """Memory-efficient streaming generator.

        Yields one session at a time in a pre-shuffled legit/cheater
        order instead of materializing the whole batch. Cheat profiles
        are sampled per player (no custom distribution here).
        """
        total = num_legit + num_cheaters

        # Create shuffled indices for legit vs cheater
        is_cheater_flags = [False] * num_legit + [True] * num_cheaters
        self.rng.shuffle(is_cheater_flags)

        for is_cheater in is_cheater_flags:
            yield self.generate_player(is_cheater=is_cheater)
src/manifold/data/profiles.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import numpy as np
3
+ from dataclasses import dataclass, field
4
+ from typing import Optional, Dict, Any, Tuple
5
+ from enum import Enum
6
+
7
+
8
# Skill correlation matrix - represents realistic correlations between player skills
# Rows/cols: raw_aim, spray_control, crosshair_placement, reaction_speed, game_sense, movement, consistency, mental_resilience
# Symmetric with unit diagonal. Scaled by SKILL_STD it becomes the covariance
# for correlated skill sampling, so it must stay positive-definite
# (np.linalg.cholesky is applied to the scaled matrix downstream).
SKILL_CORRELATION_MATRIX = np.array([
    # aim spray cross react sense move cons mental
    [1.00, 0.70, 0.50, 0.30, 0.20, 0.40, 0.30, 0.10],  # raw_aim
    [0.70, 1.00, 0.40, 0.20, 0.30, 0.30, 0.40, 0.10],  # spray_control
    [0.50, 0.40, 1.00, 0.20, 0.60, 0.50, 0.50, 0.20],  # crosshair_placement
    [0.30, 0.20, 0.20, 1.00, 0.30, 0.40, 0.20, 0.30],  # reaction_speed
    [0.20, 0.30, 0.60, 0.30, 1.00, 0.50, 0.40, 0.40],  # game_sense
    [0.40, 0.30, 0.50, 0.40, 0.50, 1.00, 0.30, 0.20],  # movement
    [0.30, 0.40, 0.50, 0.20, 0.40, 0.30, 1.00, 0.50],  # consistency
    [0.10, 0.10, 0.20, 0.30, 0.40, 0.20, 0.50, 1.00],  # mental_resilience
])

# Per-dimension standard deviations, index-aligned with SKILL_NAMES.
SKILL_STD = np.array([15.0, 15.0, 15.0, 12.0, 15.0, 12.0, 18.0, 20.0])
# Canonical ordering of the 8 skill dimensions (matrix rows/cols, SkillVector fields).
SKILL_NAMES = ["raw_aim", "spray_control", "crosshair_placement", "reaction_speed",
               "game_sense", "movement", "consistency", "mental_resilience"]
25
+
26
+
27
class Rank(Enum):
    """CS2 skill-group tiers, ordered from lowest (SILVER) to highest (PRO).

    Each member keys into RANK_STATISTICS, which defines the stat ranges
    used when generating a player of that rank.
    """
    SILVER = "silver"
    GOLD_NOVA = "gold_nova"
    MASTER_GUARDIAN = "master_guardian"
    LEGENDARY_EAGLE = "legendary_eagle"
    SUPREME_GLOBAL = "supreme_global"
    PRO = "pro"
34
+
35
+
36
# Per-rank stat parameters used to drive player generation.
# "skill_mean" centers the correlated skill distribution; 2-tuples are
# (low, high) sampling ranges. "adr" = average damage per round;
# "edpi" presumably mouse eDPI (DPI x in-game sensitivity) — confirm with
# generator usage; "hs_percent" = headshot fraction of kills.
RANK_STATISTICS = {
    Rank.SILVER: {
        "skill_mean": 25.0,
        "hs_percent": (0.25, 0.35),
        "accuracy": (0.06, 0.08),
        "reaction_time_ms": (280.0, 350.0),
        "kd_ratio": (0.5, 0.85),
        "adr": (35.0, 55.0),
        "edpi": (1200, 2400),
        "hours_played": (0, 300),
    },
    Rank.GOLD_NOVA: {
        "skill_mean": 40.0,
        "hs_percent": (0.35, 0.42),
        "accuracy": (0.08, 0.10),
        "reaction_time_ms": (250.0, 290.0),
        "kd_ratio": (0.85, 1.1),
        "adr": (50.0, 70.0),
        "edpi": (1000, 1800),
        "hours_played": (200, 600),
    },
    Rank.MASTER_GUARDIAN: {
        "skill_mean": 55.0,
        "hs_percent": (0.40, 0.48),
        "accuracy": (0.10, 0.12),
        "reaction_time_ms": (220.0, 260.0),
        "kd_ratio": (1.0, 1.2),
        "adr": (60.0, 80.0),
        "edpi": (800, 1400),
        "hours_played": (400, 1200),
    },
    Rank.LEGENDARY_EAGLE: {
        "skill_mean": 70.0,
        "hs_percent": (0.45, 0.52),
        "accuracy": (0.12, 0.14),
        "reaction_time_ms": (200.0, 240.0),
        "kd_ratio": (1.15, 1.35),
        "adr": (70.0, 90.0),
        "edpi": (700, 1200),
        "hours_played": (800, 2000),
    },
    Rank.SUPREME_GLOBAL: {
        "skill_mean": 82.0,
        "hs_percent": (0.48, 0.55),
        "accuracy": (0.14, 0.16),
        "reaction_time_ms": (180.0, 220.0),
        "kd_ratio": (1.25, 1.5),
        "adr": (75.0, 95.0),
        "edpi": (600, 1100),
        "hours_played": (1200, 3500),
    },
    Rank.PRO: {
        "skill_mean": 92.0,
        "hs_percent": (0.55, 0.66),
        "accuracy": (0.17, 0.20),
        "reaction_time_ms": (140.0, 180.0),
        "kd_ratio": (1.3, 2.0),
        "adr": (85.0, 110.0),
        "edpi": (550, 1000),
        "hours_played": (3000, 10000),
    },
}
98
+
99
+
100
@dataclass
class SkillVector:
    """Eight correlated skill dimensions describing a single player.

    Field order matches SKILL_NAMES / SKILL_CORRELATION_MATRIX, so
    to_array()/from_array() round-trip against those structures.
    """
    raw_aim: float
    spray_control: float
    crosshair_placement: float
    reaction_speed: float
    game_sense: float
    movement: float
    consistency: float
    mental_resilience: float

    def to_array(self) -> np.ndarray:
        """Pack the skills into a length-8 float array (canonical order)."""
        values = (
            self.raw_aim,
            self.spray_control,
            self.crosshair_placement,
            self.reaction_speed,
            self.game_sense,
            self.movement,
            self.consistency,
            self.mental_resilience,
        )
        return np.array(values)

    @classmethod
    def from_array(cls, arr: np.ndarray) -> SkillVector:
        """Build a SkillVector from a length-8 array (inverse of to_array)."""
        return cls(*arr.tolist())

    @property
    def mean_skill(self) -> float:
        """Average over all eight skill values."""
        return float(self.to_array().mean())
126
+
127
+
128
def generate_correlated_skills(
    rank: Rank,
    rng: Optional[np.random.Generator] = None
) -> np.ndarray:
    """Sample a length-8 skill vector whose components co-vary realistically.

    The correlation structure comes from SKILL_CORRELATION_MATRIX, the
    per-skill spread from SKILL_STD, and the mean from the rank's
    ``skill_mean``. Sampling uses the Cholesky factor of the covariance
    to correlate an i.i.d. standard-normal draw.

    Args:
        rank: Rank whose skill_mean centers the distribution.
        rng: Optional generator; a fresh unseeded one is created if omitted.

    Returns:
        Array of 8 skill values, clipped to [0, 100].
    """
    if rng is None:
        rng = np.random.default_rng()

    mean = RANK_STATISTICS[rank]["skill_mean"]

    # Covariance = D * R * D with D = diag(SKILL_STD).
    covariance = np.outer(SKILL_STD, SKILL_STD) * SKILL_CORRELATION_MATRIX
    chol = np.linalg.cholesky(covariance)

    # Correlate an independent standard-normal draw via the Cholesky factor.
    z = rng.standard_normal(8)
    sample = mean + chol @ z

    # Skills live on a 0-100 scale.
    return np.clip(sample, 0.0, 100.0)
155
+
156
+
157
@dataclass
class PlayerProfile:
    """A generated player's identity: rank, correlated skills, playtime, label."""
    profile_id: str
    rank: Rank
    skill_vector: SkillVector
    hours_played: int
    is_cheater: bool = False

    @classmethod
    def generate(
        cls,
        rank: str | Rank,
        seed: Optional[int] = None,
        profile_id: Optional[str] = None,
    ) -> PlayerProfile:
        """Randomly create a (non-cheating) profile for the given rank.

        Args:
            rank: Target rank as a Rank member or its string value.
            seed: Optional RNG seed for reproducible profiles.
            profile_id: Explicit id; a random hex id is minted when omitted.

        Returns:
            A fresh PlayerProfile with is_cheater=False.
        """
        if isinstance(rank, str):
            rank = Rank(rank)

        rng = np.random.default_rng(seed)

        # Correlated skills for this rank.
        skill_vector = SkillVector.from_array(generate_correlated_skills(rank, rng))

        # Playtime drawn uniformly from the rank's typical range.
        hours_low, hours_high = RANK_STATISTICS[rank]["hours_played"]
        hours_played = int(rng.uniform(hours_low, hours_high))

        if profile_id is None:
            profile_id = f"player_{rng.integers(0, 2**32):08x}"

        return cls(
            profile_id=profile_id,
            rank=rank,
            skill_vector=skill_vector,
            hours_played=hours_played,
            is_cheater=False,
        )

    def get_expected_stats(self) -> Dict[str, Tuple[float, float]]:
        """Return a shallow copy of this rank's stat ranges."""
        return RANK_STATISTICS[self.rank].copy()
src/manifold/data/temporal.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import numpy as np
3
+ from dataclasses import dataclass, field
4
+ from typing import Optional, Dict, Any
5
+
6
+
7
@dataclass
class SessionState:
    """Snapshot of a player's in-session condition.

    Plain value object; the temporal dynamics that evolve these fields
    live in SessionSimulator.
    """
    fatigue: float = 0.0        # 0 = fresh, 1 = exhausted
    tilt: float = 0.0           # -1 = tilted, 0 = neutral, 1 = confident
    focus: float = 1.0          # 0 = distracted, 1 = locked in
    time_elapsed_ms: float = 0.0

    def to_dict(self) -> Dict[str, float]:
        """Serialize the state to a plain dict (inverse of from_dict)."""
        return {
            "fatigue": self.fatigue,
            "tilt": self.tilt,
            "focus": self.focus,
            "time_elapsed_ms": self.time_elapsed_ms,
        }

    @classmethod
    def from_dict(cls, d: Dict[str, float]) -> SessionState:
        """Rebuild a state from to_dict() output; missing keys get defaults."""
        defaults = {"fatigue": 0.0, "tilt": 0.0, "focus": 1.0, "time_elapsed_ms": 0.0}
        return cls(**{key: d.get(key, fallback) for key, fallback in defaults.items()})
+ )
34
+
35
+
36
@dataclass
class SessionSimulator:
    """
    Simulates player state evolution over a gaming session.

    Uses Ornstein-Uhlenbeck processes for realistic temporal dynamics:
    - Fatigue: Slowly increases, slowly recovers
    - Tilt: Affected by wins/losses, mean-reverts
    - Focus: Affected by round importance

    Call update() once per game event; read get_modifiers() or
    apply_to_stats() to translate the current state into stat multipliers.
    """
    # OU process parameters.
    # "reversion" plays the role of the mean-reversion rate theta and
    # "volatility" the noise scale sigma; dt is received in milliseconds,
    # so these constants are per-ms rates.
    fatigue_reversion: float = 0.001  # Slow recovery
    fatigue_drift: float = 0.0001  # Gradual increase
    fatigue_volatility: float = 0.001

    tilt_reversion: float = 0.01  # Faster emotional recovery
    tilt_volatility: float = 0.01

    focus_reversion: float = 0.005
    focus_volatility: float = 0.005

    # Mental resilience affects tilt response (0-100); higher = smaller
    # tilt impact from losses/deaths, bigger clutch focus boost.
    mental_resilience: float = 50.0

    # Current state (mutated in place by update()).
    state: SessionState = field(default_factory=SessionState)

    # Random generator; None means "create an unseeded one in __post_init__".
    _rng: Optional[np.random.Generator] = field(default=None, repr=False)

    def __post_init__(self) -> None:
        # Lazily create an RNG when none was injected (e.g. via from_skill).
        if self._rng is None:
            self._rng = np.random.default_rng()

    @classmethod
    def from_skill(
        cls,
        mental_resilience: float = 50.0,
        seed: Optional[int] = None,
    ) -> SessionSimulator:
        """Create simulator with skill-based parameters and a seeded RNG."""
        return cls(
            mental_resilience=mental_resilience,
            _rng=np.random.default_rng(seed),
        )

    def update(self, event: str, dt_ms: float) -> None:
        """
        Update session state based on game event.

        Args:
            event: One of "round_win", "round_loss", "kill", "death",
                "clutch_situation", "idle"
            dt_ms: Time elapsed in milliseconds

        Note:
            Unrecognized event strings are treated like "idle": only the
            passive OU dynamics apply. All three state variables are
            clipped back into their documented ranges after each step.
        """
        self.state.time_elapsed_ms += dt_ms

        # Fatigue: OU process with drift (slowly increases)
        # dF = θ(0 - F)dt + drift*dt + σ*dW
        # Noise std scales with sqrt(dt) per the Euler-Maruyama discretization.
        fatigue_noise = self._rng.normal(0, self.fatigue_volatility * np.sqrt(dt_ms))
        self.state.fatigue += (
            self.fatigue_reversion * (0 - self.state.fatigue) * dt_ms +
            self.fatigue_drift * dt_ms +
            fatigue_noise
        )
        self.state.fatigue = np.clip(self.state.fatigue, 0.0, 1.0)

        # Tilt: discrete impact from the event outcome, applied before the
        # continuous mean reversion below.
        tilt_impact = 0.0
        if event == "round_loss":
            # More impact if low mental resilience
            tilt_impact = -0.1 * (1.0 - self.mental_resilience / 100.0)
        elif event == "round_win":
            tilt_impact = 0.05
        elif event == "death":
            tilt_impact = -0.02 * (1.0 - self.mental_resilience / 100.0)
        elif event == "kill":
            tilt_impact = 0.01

        self.state.tilt += tilt_impact

        # Tilt mean reversion (OU process)
        tilt_noise = self._rng.normal(0, self.tilt_volatility * np.sqrt(dt_ms))
        self.state.tilt += (
            self.tilt_reversion * (0 - self.state.tilt) * dt_ms +
            tilt_noise
        )
        self.state.tilt = np.clip(self.state.tilt, -1.0, 1.0)

        # Focus: affected by round importance
        if event == "clutch_situation":
            # Focus increases in clutch (boost grows with mental resilience)
            focus_boost = 0.2 * (0.5 + self.mental_resilience / 200.0)
            self.state.focus = min(1.0, self.state.focus + focus_boost)

        # Focus mean reversion — note focus reverts toward 1.0 (locked in),
        # unlike fatigue/tilt which revert toward 0.
        focus_noise = self._rng.normal(0, self.focus_volatility * np.sqrt(dt_ms))
        self.state.focus += (
            self.focus_reversion * (1.0 - self.state.focus) * dt_ms +
            focus_noise
        )
        self.state.focus = np.clip(self.state.focus, 0.0, 1.0)

    def get_modifiers(self) -> Dict[str, float]:
        """
        Get performance modifiers based on current state.

        Returns:
            Dict with multipliers for different stats (1.0 = unchanged;
            >1.0 means slower for reaction-time keys, better for the rest).
        """
        return {
            # Fatigue slows reactions and reduces accuracy
            "reaction_time_mult": 1.0 + self.state.fatigue * 0.2,  # Up to 20% slower
            "accuracy_mult": 1.0 - self.state.fatigue * 0.15,  # Up to 15% less accurate

            # Tilt affects consistency: negative tilt hurts (up to -30%),
            # positive tilt (confidence) helps slightly (up to +10%).
            "consistency_mult": (
                1.0 - abs(self.state.tilt) * 0.3 if self.state.tilt < 0
                else 1.0 + self.state.tilt * 0.1
            ),

            # Tilt affects game sense when negative
            "game_sense_mult": 1.0 - max(0, -self.state.tilt) * 0.2,

            # Focus improves reaction time (mult < 1 when focus > 0.5)
            "focus_reaction_mult": 1.0 - (self.state.focus - 0.5) * 0.1,
        }

    def apply_to_stats(self, base_stats: Dict[str, float]) -> Dict[str, float]:
        """
        Apply session modifiers to base stats.

        Args:
            base_stats: Dict with keys like "reaction_time", "accuracy", etc.
                Only recognized keys are modified; others pass through.

        Returns:
            Modified stats dict (shallow copy; base_stats is not mutated).
        """
        mods = self.get_modifiers()
        modified = base_stats.copy()

        if "reaction_time" in modified:
            # Fatigue and focus both scale reaction time multiplicatively.
            modified["reaction_time"] *= mods["reaction_time_mult"] * mods["focus_reaction_mult"]

        if "accuracy" in modified:
            modified["accuracy"] *= mods["accuracy_mult"]

        if "consistency" in modified:
            modified["consistency"] *= mods["consistency_mult"]

        if "game_sense" in modified:
            modified["game_sense"] *= mods["game_sense_mult"]

        return modified

    def reset(self) -> None:
        """Reset session state to fresh (fatigue 0, tilt 0, focus 1)."""
        self.state = SessionState()
194
+
195
+
196
def simulate_round_sequence(
    n_rounds: int,
    win_probability: float = 0.5,
    seed: Optional[int] = None,
) -> list[str]:
    """
    Draw a sequence of independent round outcomes.

    Args:
        n_rounds: Number of rounds to simulate.
        win_probability: Chance that any given round is won.
        seed: Optional random seed for reproducibility.

    Returns:
        List whose entries are "round_win" or "round_loss".
    """
    rng = np.random.default_rng(seed)
    # One uniform draw per round, compared against the win probability.
    return [
        "round_win" if rng.random() < win_probability else "round_loss"
        for _ in range(n_rounds)
    ]
222
+
223
+
224
def generate_session_trace(
    n_rounds: int = 24,
    mental_resilience: float = 50.0,
    win_probability: float = 0.5,
    round_duration_ms: float = 90000.0,  # 1.5 minutes per round
    seed: Optional[int] = None,
) -> list[Dict[str, Any]]:
    """
    Simulate a full session and record state/modifiers after every round.

    Args:
        n_rounds: Number of rounds.
        mental_resilience: Player's mental resilience (0-100).
        win_probability: Base win probability per round.
        round_duration_ms: Average round duration in milliseconds.
        seed: Random seed (reused for outcome, kill/death, and simulator RNGs).

    Returns:
        One dict per round with round number, outcome event, session state
        snapshot, and the resulting performance modifiers.
    """
    rng = np.random.default_rng(seed)
    simulator = SessionSimulator.from_skill(mental_resilience, seed)

    trace: list[Dict[str, Any]] = []
    for round_num, outcome in enumerate(
        simulate_round_sequence(n_rounds, win_probability, seed)
    ):
        # In-round kill/death events, spread evenly across the round.
        n_kills = rng.poisson(1.0)
        n_deaths = rng.poisson(0.5)
        slot_ms = round_duration_ms / (n_kills + n_deaths + 1)

        for _ in range(n_kills):
            simulator.update("kill", slot_ms)
        for _ in range(n_deaths):
            simulator.update("death", slot_ms)

        # Round outcome event, with the remaining chunk of round time.
        simulator.update(outcome, round_duration_ms / 3)

        trace.append({
            "round": round_num,
            "event": outcome,
            "state": simulator.state.to_dict(),
            "modifiers": simulator.get_modifiers(),
        })

    return trace
src/manifold/data/trajectories.py ADDED
@@ -0,0 +1,308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import numpy as np
3
+ from dataclasses import dataclass
4
+ from typing import Optional, Tuple
5
+
6
+
7
def minimum_jerk(t: np.ndarray) -> np.ndarray:
    """
    Evaluate the minimum-jerk position profile at normalized times *t*.

    The jerk-minimizing point-to-point trajectory is the quintic
    10t^3 - 15t^4 + 6t^5; human reaching and mouse movements follow this
    profile closely (bell-shaped velocity, zero endpoint velocity and
    acceleration).

    Args:
        t: Normalized time samples in [0, 1].

    Returns:
        Normalized positions in [0, 1], same shape as *t*.
    """
    # Quintic satisfying s(0)=0, s(1)=1, s'(0)=s'(1)=s''(0)=s''(1)=0.
    position = 10.0 * t**3 - 15.0 * t**4 + 6.0 * t**5
    return position
22
+
23
+
24
def signal_dependent_noise(
    velocity: np.ndarray,
    k: float = 0.1,
    rng: Optional[np.random.Generator] = None,
) -> np.ndarray:
    """
    Draw motor noise whose standard deviation grows with |velocity|.

    Implements the Harris-Wolpert signal-dependent noise model: faster
    movements are noisier, a fundamental property of human motor control.

    Args:
        velocity: Velocity samples.
        k: Noise coefficient (higher = noisier).
        rng: Random generator; a fresh unseeded one is created if omitted.

    Returns:
        Noise array, same shape as *velocity*.
    """
    rng = rng if rng is not None else np.random.default_rng()

    # Std proportional to speed; the epsilon keeps std strictly positive.
    sigma = k * np.abs(velocity)
    return rng.normal(0, sigma + 1e-8)
48
+
49
+
50
def generate_micro_corrections(
    n_ticks: int,
    frequency: float = 5.0,
    amplitude: float = 0.02,
    rng: Optional[np.random.Generator] = None,
) -> np.ndarray:
    """
    Simulate the small corrective adjustments humans make while aiming.

    Corrections arrive at quasi-random (exponentially distributed)
    intervals, each a small impulse in a uniformly random direction, then
    get box-filtered so they span a few ticks instead of one.

    Args:
        n_ticks: Number of time ticks.
        frequency: Average number of corrections per trajectory.
        amplitude: Mean correction amplitude (degrees).
        rng: Random generator; a fresh unseeded one is created if omitted.

    Returns:
        Array of shape [n_ticks, 2] with x,y corrections (all zeros when
        frequency <= 0 or the trajectory is shorter than 2 ticks).
    """
    if rng is None:
        rng = np.random.default_rng()

    output = np.zeros((n_ticks, 2))
    if frequency <= 0 or n_ticks < 2:
        return output

    # Walk forward with exponential gaps, dropping an impulse at each stop.
    mean_gap = n_ticks / frequency
    tick = 0
    while tick < n_ticks:
        tick += max(1, int(rng.exponential(mean_gap)))
        if tick < n_ticks:
            magnitude = rng.exponential(amplitude)
            angle = rng.uniform(0, 2 * np.pi)
            output[tick, 0] = magnitude * np.cos(angle)
            output[tick, 1] = magnitude * np.sin(angle)

    # Spread each impulse over neighbouring ticks — corrections aren't instant.
    window = min(5, n_ticks // 2)
    if window > 1:
        box = np.ones(window) / window
        for axis in range(2):
            output[:, axis] = np.convolve(output[:, axis], box, mode='same')

    return output
101
+
102
+
103
def generate_human_trajectory(
    start_angle: np.ndarray,
    target_angle: np.ndarray,
    skill_aim: float,
    skill_consistency: float,
    duration_ms: float,
    tick_rate: int = 128,
    rng: Optional[np.random.Generator] = None,
) -> np.ndarray:
    """
    Generate realistic human mouse trajectory using minimum jerk principle
    with signal-dependent noise and micro-corrections.

    The planned path follows the minimum-jerk position profile; noise
    (Harris-Wolpert) and quasi-periodic micro-corrections are layered on
    top, then the result is converted to per-tick deltas.

    Args:
        start_angle: Starting crosshair angle [x, y] in degrees
        target_angle: Target angle [x, y] in degrees
        skill_aim: Aim skill (0-100); higher skill means less noise and
            smaller corrective impulses
        skill_consistency: Consistency skill (0-100); scales how often
            micro-corrections occur
        duration_ms: Movement duration in milliseconds
        tick_rate: Ticks per second (CS2 uses 128)
        rng: Random generator

    Returns:
        Array of shape [n_ticks-1, 2] with delta (dx, dy) per tick
    """
    if rng is None:
        rng = np.random.default_rng()

    start_angle = np.asarray(start_angle, dtype=np.float64)
    target_angle = np.asarray(target_angle, dtype=np.float64)

    # At least 2 ticks so diff() below yields a non-empty delta array.
    n_ticks = max(2, int(duration_ms * tick_rate / 1000))
    t = np.linspace(0, 1, n_ticks)

    # Minimum jerk position profile on normalized time [0, 1]
    s = minimum_jerk(t)

    # Planned (ideal) trajectory interpolated between start and target
    displacement = target_angle - start_angle
    planned = start_angle[:, None] + displacement[:, None] * s  # [2, n_ticks]

    # Speed profile for signal-dependent noise (derivative of position)
    velocity = np.gradient(s) * np.linalg.norm(displacement)

    # Signal-dependent noise (Harris-Wolpert model)
    # Noise scale inversely proportional to skill: 0.1 deg at skill 0,
    # about 0.033 deg at skill 100.
    noise_scale = (1.0 - skill_aim / 150.0) * 0.1
    noise_x = signal_dependent_noise(velocity, noise_scale, rng)
    noise_y = signal_dependent_noise(velocity, noise_scale, rng)
    noise = np.stack([noise_x, noise_y], axis=0)  # [2, n_ticks]

    # Micro-corrections (human feedback control):
    # 3-8 corrections depending on consistency; smaller impulses for
    # higher aim skill.
    correction_frequency = 3.0 + skill_consistency / 20.0
    correction_amplitude = 0.02 * (1.0 - skill_aim / 100.0)
    corrections = generate_micro_corrections(n_ticks, correction_frequency, correction_amplitude, rng)

    # Combine: planned + noise + corrections (corrections are [n_ticks, 2])
    actual = planned + noise + corrections.T

    # Convert absolute positions to per-tick deltas
    deltas = np.diff(actual, axis=1).T  # [n_ticks-1, 2]

    return deltas
166
+
167
+
168
def generate_aimbot_trajectory(
    natural_trajectory: np.ndarray,
    target_positions: np.ndarray,
    intensity: float,
    humanization: dict,
    rng: Optional[np.random.Generator] = None,
) -> np.ndarray:
    """
    Modify natural trajectory with aimbot assistance.

    Even with humanization, leaves detectable artifacts:
    - Too smooth (lacks human micro-corrections)
    - Noise is too uniform
    - Delay is too consistent

    Args:
        natural_trajectory: Human delta trajectory [n_ticks, 2]
        target_positions: Target angle at each tick [n_ticks, 2]
        intensity: Cheat intensity (0-1); scales every correction
        humanization: Dict with keys "aim_smoothing" (0 = blatant snap),
            "noise_amplitude", "fov_degrees" (corrections only apply
            inside this angular radius)
        rng: Random generator (only used for humanization noise)

    Returns:
        Modified trajectory [n_ticks, 2] (input array is not mutated)
    """
    if rng is None:
        rng = np.random.default_rng()

    modified = natural_trajectory.copy()
    n_ticks = len(modified)

    # Absolute crosshair positions reconstructed from the deltas;
    # positions[0] is the origin, positions[i] is after i deltas.
    positions = np.zeros((n_ticks + 1, 2))
    positions[1:] = np.cumsum(modified, axis=0)

    fov_limit = humanization.get("fov_degrees", 90.0)
    smoothing = humanization.get("aim_smoothing", 0.5)
    noise_amp = humanization.get("noise_amplitude", 0.0)

    for i in range(n_ticks):
        current_pos = positions[i]
        target_pos = target_positions[i]
        correction_needed = target_pos - current_pos
        distance = np.linalg.norm(correction_needed)

        # Aimbot engages only when the target is inside the configured FOV.
        if distance < fov_limit:
            if smoothing > 0:
                # Humanized: exponential approach (ARTIFACT: too smooth)
                correction = correction_needed * (1.0 - smoothing) * intensity
            else:
                # Blatant: instant snap (ARTIFACT: infinite jerk)
                correction = correction_needed * intensity

            # Add humanization noise (ARTIFACT: noise is too uniform —
            # unlike human noise it doesn't scale with speed)
            if noise_amp > 0:
                correction += rng.normal(0, noise_amp, 2)

            modified[i] += correction

            # Propagate this tick's correction into all later positions so
            # subsequent corrections are computed from the adjusted path.
            positions[i+1:] += correction

    return modified
232
+
233
+
234
def fitts_law_time(
    distance: float,
    target_width: float,
    a: float = 0.0,
    b: float = 0.1,
) -> float:
    """
    Calculate movement time using Fitts' Law (Shannon formulation).

    MT = a + b * log2(2D/W + 1)

    The "+ 1" keeps the index of difficulty non-negative and defined at
    D = 0. (The previous docstring stated log2(2D/W), which did not match
    the implementation below.)

    Args:
        distance: Distance to target (same units as target_width).
        target_width: Width of target; non-positive values are clamped to
            a tiny epsilon to avoid division by zero.
        a: Intercept (reaction time offset), seconds.
        b: Slope (movement time per bit of difficulty), seconds per bit.

    Returns:
        Movement time in seconds as a builtin float.
    """
    if target_width <= 0:
        # Degenerate target width would divide by zero below.
        target_width = 1e-6

    index_of_difficulty = np.log2(2.0 * distance / target_width + 1.0)
    # Cast so callers get a builtin float, not np.float64, matching the
    # annotated return type.
    return float(a + b * index_of_difficulty)
259
+
260
+
261
def extract_trajectory_features(trajectory: np.ndarray) -> dict:
    """
    Summarize a delta trajectory with smoothness/efficiency features
    useful for cheat detection.

    Args:
        trajectory: Per-tick deltas, shape [n_ticks, 2]

    Returns:
        Dict with jerk statistics, path efficiency, and the normalized
        position of the velocity peak.
    """
    # Too short to differentiate twice -> neutral defaults.
    if len(trajectory) < 3:
        return {
            "max_jerk": 0.0,
            "mean_jerk": 0.0,
            "jerk_variance": 0.0,
            "path_efficiency": 1.0,
            "velocity_peak_timing": 0.5,
        }

    # Speed per tick, then its first and second differences.
    speed = np.linalg.norm(trajectory, axis=1)
    accel = np.diff(speed)
    jerk = np.diff(accel) if len(accel) > 1 else np.array([0.0])

    abs_jerk = np.abs(jerk)
    max_jerk = float(np.max(abs_jerk)) if jerk.size > 0 else 0.0
    mean_jerk = float(np.mean(abs_jerk)) if jerk.size > 0 else 0.0
    jerk_variance = float(np.var(jerk)) if jerk.size > 0 else 0.0

    # Path efficiency: straight-line displacement over total path length
    # (epsilon avoids division by zero for an all-zero trajectory).
    path_length = np.sum(speed)
    net_displacement = np.linalg.norm(np.sum(trajectory, axis=0))
    path_efficiency = net_displacement / (path_length + 1e-8)

    # Human minimum-jerk movement peaks near the middle (~0.5).
    if len(speed) > 0:
        velocity_peak_timing = np.argmax(speed) / len(speed)
    else:
        velocity_peak_timing = 0.5

    return {
        "max_jerk": max_jerk,
        "mean_jerk": mean_jerk,
        "jerk_variance": jerk_variance,
        "path_efficiency": path_efficiency,
        "velocity_peak_timing": velocity_peak_timing,
    }
src/manifold/evaluation/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from manifold.evaluation.metrics import CheatDetectionMetrics, compute_ece
2
+ from manifold.evaluation.analysis import (
3
+ PredictionAnalysis,
4
+ analyze_predictions,
5
+ compute_feature_importance,
6
+ format_analysis_report,
7
+ )
8
+
9
+ __all__ = [
10
+ "CheatDetectionMetrics",
11
+ "compute_ece",
12
+ "PredictionAnalysis",
13
+ "analyze_predictions",
14
+ "compute_feature_importance",
15
+ "format_analysis_report",
16
+ ]
src/manifold/evaluation/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (489 Bytes). View file
 
src/manifold/evaluation/__pycache__/analysis.cpython-312.pyc ADDED
Binary file (5.93 kB). View file
 
src/manifold/evaluation/__pycache__/metrics.cpython-312.pyc ADDED
Binary file (7.04 kB). View file
 
src/manifold/evaluation/analysis.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import numpy as np
3
+ from typing import Optional, Dict, Any, List
4
+ from dataclasses import dataclass
5
+
6
+
7
@dataclass
class PredictionAnalysis:
    """Analysis results for a set of predictions.

    Produced by analyze_predictions(); consumed by format_analysis_report().
    """
    # Aggregate counts over all evaluated samples.
    total_samples: int
    correct: int
    incorrect: int
    accuracy: float

    by_class: Dict[str, Dict[str, int]]  # {class: {correct, incorrect, total}}
    by_uncertainty: Dict[str, Dict[str, float]]  # {low/medium/high: {accuracy, count}}

    high_confidence_errors: List[Dict[str, Any]]  # Samples with high confidence but wrong
    low_confidence_correct: List[Dict[str, Any]]  # Samples with low confidence but correct
20
+
21
+
22
def analyze_predictions(
    predictions: np.ndarray,
    labels: np.ndarray,
    probabilities: Optional[np.ndarray] = None,
    uncertainties: Optional[np.ndarray] = None,
) -> PredictionAnalysis:
    """Detailed analysis of model predictions.

    Args:
        predictions: Predicted class ids, shape [n].
        labels: Ground-truth class ids, shape [n].
        probabilities: Optional per-class probabilities, shape [n, n_classes].
        uncertainties: Optional per-sample uncertainty scores in [0, 1].

    Returns:
        PredictionAnalysis with aggregate accuracy, per-class counts,
        per-uncertainty-bucket accuracy, and example mismatches.
    """
    correct_mask = predictions == labels

    # Basic stats — cast numpy scalars to builtin types so the dataclass
    # fields hold plain int/float as declared.
    total = len(predictions)
    correct = int(correct_mask.sum())
    incorrect = total - correct
    accuracy = float(correct / total) if total > 0 else 0.0

    # Per-class breakdown; class ids are fixed: 0=clean, 1=suspicious, 2=cheating.
    by_class = {}
    for class_id, class_name in enumerate(["clean", "suspicious", "cheating"]):
        class_mask = labels == class_id
        by_class[class_name] = {
            "correct": int((correct_mask & class_mask).sum()),
            "incorrect": int((~correct_mask & class_mask).sum()),
            "total": int(class_mask.sum()),
        }

    # Accuracy bucketed by uncertainty level (if available); empty buckets
    # are omitted so downstream formatting can skip them.
    by_uncertainty = {}
    if uncertainties is not None:
        buckets = [
            ("low", uncertainties < 0.3),
            ("medium", (uncertainties >= 0.3) & (uncertainties < 0.7)),
            ("high", uncertainties >= 0.7),
        ]
        for name, mask in buckets:
            if mask.sum() > 0:
                by_uncertainty[name] = {
                    "accuracy": float(correct_mask[mask].mean()),
                    "count": int(mask.sum()),
                }

    # Example-level error analysis (if probabilities available).
    high_conf_errors = []
    low_conf_correct = []

    if probabilities is not None:
        confidences = probabilities.max(axis=1)

        # First 10 (by index, not sorted by confidence) wrong predictions
        # made with > 0.9 confidence.
        high_conf_wrong = (~correct_mask) & (confidences > 0.9)
        for idx in np.where(high_conf_wrong)[0][:10]:
            high_conf_errors.append({
                "idx": int(idx),
                "predicted": int(predictions[idx]),
                "actual": int(labels[idx]),
                "confidence": float(confidences[idx]),
            })

        # First 10 correct predictions made with < 0.5 confidence.
        low_conf_right = correct_mask & (confidences < 0.5)
        for idx in np.where(low_conf_right)[0][:10]:
            low_conf_correct.append({
                "idx": int(idx),
                "predicted": int(predictions[idx]),
                "actual": int(labels[idx]),
                "confidence": float(confidences[idx]),
            })

    return PredictionAnalysis(
        total_samples=total,
        correct=correct,
        incorrect=incorrect,
        accuracy=accuracy,
        by_class=by_class,
        by_uncertainty=by_uncertainty,
        high_confidence_errors=high_conf_errors,
        low_confidence_correct=low_conf_correct,
    )
99
+
100
+
101
def compute_feature_importance(
    model_outputs: Dict[str, np.ndarray],
    method: str = "gradient",
) -> Dict[str, float]:
    """Compute feature importance from model outputs.

    Currently a stub: real attribution (e.g. gradient-based) is not
    implemented yet, so a fixed placeholder mapping is returned regardless
    of the inputs.
    """
    # TODO: implement gradient-based attribution; stub keeps the API stable.
    placeholder_scores = {"placeholder": 1.0}
    return placeholder_scores
108
+
109
+
110
+ def format_analysis_report(analysis: PredictionAnalysis) -> str:
111
+ """Format analysis as readable report string."""
112
+ lines = [
113
+ "=" * 50,
114
+ "PREDICTION ANALYSIS REPORT",
115
+ "=" * 50,
116
+ f"Total Samples: {analysis.total_samples}",
117
+ f"Accuracy: {analysis.accuracy:.4f} ({analysis.correct}/{analysis.total_samples})",
118
+ "",
119
+ "By Class:",
120
+ ]
121
+
122
+ for cls, stats in analysis.by_class.items():
123
+ acc = stats["correct"] / stats["total"] if stats["total"] > 0 else 0
124
+ lines.append(f" {cls}: {acc:.4f} ({stats['correct']}/{stats['total']})")
125
+
126
+ if analysis.by_uncertainty:
127
+ lines.append("")
128
+ lines.append("By Uncertainty:")
129
+ for level, stats in analysis.by_uncertainty.items():
130
+ lines.append(f" {level}: acc={stats['accuracy']:.4f}, n={stats['count']}")
131
+
132
+ if analysis.high_confidence_errors:
133
+ lines.append("")
134
+ lines.append(f"High Confidence Errors ({len(analysis.high_confidence_errors)}):")
135
+ for err in analysis.high_confidence_errors[:3]:
136
+ lines.append(f" idx={err['idx']}: pred={err['predicted']}, "
137
+ f"actual={err['actual']}, conf={err['confidence']:.3f}")
138
+
139
+ lines.append("=" * 50)
140
+ return "\n".join(lines)
src/manifold/evaluation/metrics.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Detection metrics for cheat evaluation."""
2
+ from __future__ import annotations
3
+
4
+ import numpy as np
5
+ from sklearn.metrics import (
6
+ accuracy_score,
7
+ precision_score,
8
+ recall_score,
9
+ f1_score,
10
+ roc_auc_score,
11
+ precision_recall_curve,
12
+ auc,
13
+ confusion_matrix,
14
+ )
15
+ from typing import Optional, Dict, Any, List
16
+ from dataclasses import dataclass, field
17
+
18
+
19
@dataclass
class CheatDetectionMetrics:
    """Accumulator for cheat detection evaluation metrics.

    Batches are added via :meth:`update`; summary metrics are produced on
    demand by :meth:`compute` and :meth:`compute_by_cheat_type`.

    Label convention: class 0 is "clean", any class > 0 counts as a
    cheater for the binary metrics.
    """

    predictions: List[int] = field(default_factory=list)  # predicted class ids
    labels: List[int] = field(default_factory=list)  # ground-truth class ids
    probabilities: List[np.ndarray] = field(default_factory=list)  # per-class probs
    uncertainties: List[float] = field(default_factory=list)  # per-sample scores
    cheat_types: List[str] = field(default_factory=list)  # per-sample tag; "none" = clean

    def update(
        self,
        preds: np.ndarray,
        labels: np.ndarray,
        probs: Optional[np.ndarray] = None,
        uncertainties: Optional[np.ndarray] = None,
        cheat_types: Optional[List[str]] = None,
    ) -> None:
        """Add a batch of predictions to the accumulator.

        Args:
            preds: Predicted class ids for the batch.
            labels: Ground-truth class ids for the batch.
            probs: Optional per-class probabilities [batch, num_classes].
            uncertainties: Optional per-sample uncertainty scores.
            cheat_types: Optional per-sample cheat-type tags.
        """
        self.predictions.extend(preds.tolist())
        self.labels.extend(labels.tolist())
        if probs is not None:
            self.probabilities.extend(probs.tolist())
        if uncertainties is not None:
            self.uncertainties.extend(uncertainties.tolist())
        if cheat_types is not None:
            self.cheat_types.extend(cheat_types)

    def reset(self) -> None:
        """Clear all accumulated data."""
        self.predictions.clear()
        self.labels.clear()
        self.probabilities.clear()
        self.uncertainties.clear()
        self.cheat_types.clear()

    def compute(self) -> Dict[str, float]:
        """Compute all metrics over the accumulated data.

        Returns:
            Dict of metric name -> value. Empty dict if nothing has been
            accumulated yet.
        """
        if not self.predictions:
            # Robustness fix: sklearn metrics raise on empty inputs, so
            # calling compute() before any update() used to crash.
            return {}

        preds = np.array(self.predictions)
        labels = np.array(self.labels)

        # Convert to binary (0=clean, 1/2=cheater)
        binary_preds = (preds > 0).astype(int)
        binary_labels = (labels > 0).astype(int)

        metrics = {
            "accuracy": accuracy_score(binary_labels, binary_preds),
            "precision": precision_score(binary_labels, binary_preds, zero_division=0),
            "recall": recall_score(binary_labels, binary_preds, zero_division=0),
            "f1": f1_score(binary_labels, binary_preds, zero_division=0),
        }

        # AUC-ROC / AUC-PR if probabilities available
        if self.probabilities:
            probs = np.array(self.probabilities)
            # 3-class outputs: cheat probability is the mass on classes 1+2.
            cheat_prob = (
                probs[:, 1] + probs[:, 2] if probs.shape[1] == 3 else probs[:, 1]
            )
            try:
                metrics["auc_roc"] = roc_auc_score(binary_labels, cheat_prob)
            except ValueError:
                # ROC is undefined when only one class is present.
                metrics["auc_roc"] = 0.0

            precision_arr, recall_arr, _ = precision_recall_curve(
                binary_labels, cheat_prob
            )
            metrics["auc_pr"] = auc(recall_arr, precision_arr)

        # Confusion matrix. Bug fix: pin the label order to [0, 1] so the
        # matrix is always 2x2 -- previously, a batch containing only one
        # class produced a 1x1 matrix and the TP/FP/TN/FN metrics were
        # silently skipped.
        cm = confusion_matrix(binary_labels, binary_preds, labels=[0, 1])
        tn, fp, fn, tp = cm.ravel()
        metrics["true_positives"] = int(tp)
        metrics["true_negatives"] = int(tn)
        metrics["false_positives"] = int(fp)
        metrics["false_negatives"] = int(fn)
        metrics["false_positive_rate"] = fp / (fp + tn) if (fp + tn) > 0 else 0

        return metrics

    def compute_by_cheat_type(self) -> Dict[str, Dict[str, float]]:
        """Compute recall (detection rate) per cheat type.

        Returns:
            Mapping cheat_type -> {"recall", "count"}; empty if no
            cheat-type tags were accumulated. The "none" tag (clean
            samples) is skipped.
        """
        if not self.cheat_types:
            return {}

        results = {}
        # Hoisted out of the loop: one array conversion instead of one per type.
        all_labels = np.array(self.labels)
        all_preds = np.array(self.predictions)

        for ctype in set(self.cheat_types):
            if ctype == "none":
                continue
            mask = [t == ctype for t in self.cheat_types]
            type_labels = all_labels[mask]
            type_preds = all_preds[mask]

            if len(type_labels) > 0:
                binary_preds = (type_preds > 0).astype(int)
                binary_labels = (type_labels > 0).astype(int)
                results[ctype] = {
                    "recall": recall_score(binary_labels, binary_preds, zero_division=0),
                    "count": len(type_labels),
                }

        return results
124
+
125
+
126
def compute_ece(probs: np.ndarray, labels: np.ndarray, n_bins: int = 10) -> float:
    """Expected Calibration Error.

    Samples are binned by predicted confidence (max class probability);
    ECE is the bin-size-weighted average of |accuracy - confidence|.

    Args:
        probs: Class probabilities [n_samples, n_classes].
        labels: Ground-truth class ids [n_samples].
        n_bins: Number of equal-width confidence bins on (0, 1].

    Returns:
        Scalar calibration error (0 = perfectly calibrated).
    """
    edges = np.linspace(0, 1, n_bins + 1)
    conf = probs.max(axis=1)
    hits = (probs.argmax(axis=1) == labels).astype(float)

    weighted_gap = 0.0
    # Half-open bins (lo, hi]; samples with confidence exactly 0 are
    # excluded, matching the original binning.
    for lo, hi in zip(edges[:-1], edges[1:]):
        in_bin = (conf > lo) & (conf <= hi)
        count = in_bin.sum()
        if count > 0:
            weighted_gap += count * abs(hits[in_bin].mean() - conf[in_bin].mean())

    return weighted_gap / len(labels)
src/manifold/models/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """MANIFOLD models."""
2
+
3
+ from manifold.models.manifold_lite import MANIFOLDLite
4
+
5
+ __all__ = ["MANIFOLDLite"]
src/manifold/models/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (283 Bytes). View file
 
src/manifold/models/__pycache__/manifold_lite.cpython-312.pyc ADDED
Binary file (10.3 kB). View file
 
src/manifold/models/components/__init__.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from manifold.models.components.cca import (
2
+ CounterfactualProbe,
3
+ CausalCounterfactualAttention,
4
+ )
5
+ from manifold.models.components.hse import (
6
+ HyperbolicSkillEmbedding,
7
+ initialize_skill_anchors,
8
+ )
9
+ from manifold.models.components.ihe import (
10
+ IHEBlock,
11
+ InformationHorizonEncoder,
12
+ )
13
+ from manifold.models.components.mdm import (
14
+ MotorDynamicsModule,
15
+ NeuralODEFunc,
16
+ fixed_euler_solve,
17
+ )
18
+ from manifold.models.components.mpl import (
19
+ MPLEncoder,
20
+ MPLDecoder,
21
+ ManifoldProjectionLayer,
22
+ )
23
+ from manifold.models.components.tiv import (
24
+ DomainClassifier,
25
+ GradientReversalFunction,
26
+ GradientReversalLayer,
27
+ TemporalInvariantVerifier,
28
+ )
29
+ from manifold.models.components.verdict import (
30
+ EvidentialHead,
31
+ SequencePooling,
32
+ compute_uncertainty,
33
+ dirichlet_strength,
34
+ )
35
+
36
+ __all__ = [
37
+ "CausalCounterfactualAttention",
38
+ "CounterfactualProbe",
39
+ "DomainClassifier",
40
+ "EvidentialHead",
41
+ "GradientReversalFunction",
42
+ "GradientReversalLayer",
43
+ "HyperbolicSkillEmbedding",
44
+ "IHEBlock",
45
+ "InformationHorizonEncoder",
46
+ "ManifoldProjectionLayer",
47
+ "MotorDynamicsModule",
48
+ "MPLDecoder",
49
+ "MPLEncoder",
50
+ "NeuralODEFunc",
51
+ "SequencePooling",
52
+ "TemporalInvariantVerifier",
53
+ "compute_uncertainty",
54
+ "dirichlet_strength",
55
+ "fixed_euler_solve",
56
+ "initialize_skill_anchors",
57
+ ]
src/manifold/models/components/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (1.24 kB). View file
 
src/manifold/models/components/__pycache__/cca.cpython-312.pyc ADDED
Binary file (5.99 kB). View file
 
src/manifold/models/components/__pycache__/hse.cpython-312.pyc ADDED
Binary file (5.59 kB). View file
 
src/manifold/models/components/__pycache__/ihe.cpython-312.pyc ADDED
Binary file (5.99 kB). View file
 
src/manifold/models/components/__pycache__/mdm.cpython-312.pyc ADDED
Binary file (7.18 kB). View file
 
src/manifold/models/components/__pycache__/mpl.cpython-312.pyc ADDED
Binary file (9.06 kB). View file
 
src/manifold/models/components/__pycache__/tiv.cpython-312.pyc ADDED
Binary file (7.47 kB). View file
 
src/manifold/models/components/__pycache__/verdict.cpython-312.pyc ADDED
Binary file (9.13 kB). View file
 
src/manifold/models/components/cca.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Causal Counterfactual Attention (CCA) for MANIFOLD."""
2
+
3
+ from __future__ import annotations
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from typing import Optional, Dict, Any
8
+ from manifold.models.layers.attention import MultiHeadLinearAttention
9
+
10
+
11
class CounterfactualProbe(nn.Module):
    """Learnable query vectors posing counterfactual "what if" questions.

    Each probe attends over the input sequence via scaled dot-product
    attention and yields one summary vector per probe.
    """

    def __init__(self, embed_dim: int = 256, num_probes: int = 16):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_probes = num_probes

        # Small-scale random init for the learnable probe queries.
        self.probes = nn.Parameter(torch.randn(num_probes, embed_dim) * 0.02)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Attend every probe over the sequence.

        Args:
            x: Input [batch, seq, embed_dim]

        Returns:
            Probe outputs [batch, num_probes, embed_dim]
        """
        dim = x.size(-1)

        # Similarity of every probe to every timestep: [batch, probes, seq].
        scores = torch.einsum("pd,bsd->bps", self.probes, x) * dim ** -0.5
        weights = F.softmax(scores, dim=-1)

        # Convex combination of sequence values per probe.
        return torch.einsum("bps,bsd->bpd", weights, x)
55
+
56
+
57
class CausalCounterfactualAttention(nn.Module):
    """
    Dual-path attention: factual (standard) + counterfactual (sparse probes).

    The factual path runs causal linear attention O(T) over the real
    sequence. The counterfactual path runs a handful of sparse learnable
    probes asking "what if" questions; the probe answers are pooled,
    broadcast to every position, and fused with the factual output.
    """

    def __init__(
        self,
        embed_dim: int = 256,
        num_cf_probes: int = 16,
        num_heads: int = 8,
        dropout: float = 0.1,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_cf_probes = num_cf_probes

        # Factual path: causal linear attention O(T).
        self.factual_attention = MultiHeadLinearAttention(
            embed_dim=embed_dim,
            num_heads=num_heads,
            dropout=dropout,
            causal=True,
            use_rotary=True,
        )

        # Counterfactual path: sparse learnable probes.
        self.cf_probes = CounterfactualProbe(
            embed_dim=embed_dim,
            num_probes=num_cf_probes,
        )

        # Projects probe answers before pooling.
        self.cf_proj = nn.Linear(embed_dim, embed_dim)

        # Learned pooling over the probe axis (num_probes -> 1).
        self.cf_to_seq = nn.Linear(num_cf_probes, 1)

        # Fuses concatenated factual + counterfactual features.
        self.combine = nn.Linear(embed_dim * 2, embed_dim)

        self.norm = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ) -> Dict[str, torch.Tensor]:
        """
        Args:
            x: Input [batch, seq, embed_dim]
            mask: Optional attention mask passed to the factual path

        Returns:
            Dict with:
                - "output": combined output [batch, seq, embed_dim]
                - "factual": factual attention output
                - "counterfactual": probe outputs [batch, num_probes, embed_dim]
        """
        seq_len = x.shape[1]

        # Factual path over the actual sequence.
        factual = self.factual_attention(x, mask=mask)["output"]

        # Counterfactual path: probe answers, then projection.
        probe_answers = self.cf_probes(x)        # [batch, probes, dim]
        projected = self.cf_proj(probe_answers)  # [batch, probes, dim]

        # Pool the probe axis with a learned linear map, then broadcast
        # the pooled vector to every sequence position.
        pooled = self.cf_to_seq(projected.transpose(1, 2)).squeeze(-1)  # [batch, dim]
        broadcast = pooled.unsqueeze(1).expand(-1, seq_len, -1)         # [batch, seq, dim]

        # Fuse both paths, apply dropout, then normalize.
        fused = self.combine(torch.cat([factual, broadcast], dim=-1))
        fused = self.norm(self.dropout(fused))

        return {
            "output": fused,
            "factual": factual,
            "counterfactual": probe_answers,
        }

    @classmethod
    def from_config(cls, config) -> "CausalCounterfactualAttention":
        """Create from ModelConfig."""
        return cls(
            embed_dim=config.embed_dim,
            num_cf_probes=config.num_cf_probes,
            num_heads=config.cca_heads,
            dropout=config.dropout,
        )
src/manifold/models/components/hse.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Hyperbolic Skill Embedding (HSE) for player skill hierarchy representation."""
2
+
3
+ from __future__ import annotations
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from typing import Optional, Dict, Any
8
+
9
+ from manifold.models.layers.hyperbolic import (
10
+ TangentSpaceProjection,
11
+ HyperbolicDistanceLayer,
12
+ hyperbolic_distance_tangent,
13
+ expmap0,
14
+ )
15
+
16
+
17
def initialize_skill_anchors(num_levels: int = 7, dim: int = 32) -> torch.Tensor:
    """
    Initialize skill level anchors in hyperbolic space.

    Anchors are random unit directions scaled to evenly spaced radii in
    [0.1, 0.8], so higher skill levels sit further from the origin
    (the hierarchy property).

    Args:
        num_levels: Number of skill levels (default 7: silver to pro)
        dim: Dimension of the embedding space

    Returns:
        Tensor of shape [num_levels, dim] with anchors at increasing distances
    """
    radii = torch.linspace(0.1, 0.8, num_levels).unsqueeze(-1)
    unit_dirs = F.normalize(torch.randn(num_levels, dim), dim=-1)
    return unit_dirs * radii
33
+
34
+
35
class HyperbolicSkillEmbedding(nn.Module):
    """
    Embed player behavior in hyperbolic space with skill hierarchy.

    Hyperbolic geometry naturally represents hierarchies:
    - Lower skill players cluster near origin
    - Higher skill players further out
    - Cheaters may appear "impossible" - high skill indicators
      but behavioral patterns inconsistent with skill level
    """

    def __init__(
        self,
        input_dim: int = 256,
        manifold_dim: int = 32,
        num_skill_levels: int = 7,
        init_curvature: float = 1.0,
    ):
        """
        Initialize HyperbolicSkillEmbedding.

        Args:
            input_dim: Dimension of input features
            manifold_dim: Dimension of hyperbolic manifold embedding
            num_skill_levels: Number of skill level anchors
            init_curvature: Initial curvature value (learnable)
        """
        super().__init__()
        self.input_dim = input_dim
        self.manifold_dim = manifold_dim
        self.num_skill_levels = num_skill_levels

        # Maps Euclidean features into the tangent space of the manifold.
        self.projection = TangentSpaceProjection(
            input_dim=input_dim,
            output_dim=manifold_dim,
            curvature=init_curvature,
        )

        # Learnable curvature; forward() uses abs().clamp(min=0.1) so the
        # effective curvature stays strictly positive.
        self.curvature = nn.Parameter(torch.tensor(init_curvature))

        # One learnable anchor per skill level, initialized at increasing
        # radii from the origin (see initialize_skill_anchors).
        self.skill_anchors = nn.Parameter(
            initialize_skill_anchors(num_skill_levels, manifold_dim)
        )

        self.distance_layer = HyperbolicDistanceLayer(
            dim=manifold_dim,
            num_anchors=num_skill_levels,
            curvature=init_curvature,
            use_tangent_approx=True,
        )
        # Alias the distance layer's anchors to this module's parameter so
        # both refer to (and train) the same tensor. NOTE(review): forward()
        # below calls hyperbolic_distance_tangent directly rather than
        # self.distance_layer -- confirm the layer is needed beyond sharing
        # the anchor parameter.
        self.distance_layer.anchors = self.skill_anchors

    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Forward pass through hyperbolic skill embedding.

        Args:
            x: Input features [batch, seq, input_dim]

        Returns:
            Dict with:
                - "embedding": tangent-space embedding [batch, seq, manifold_dim]
                - "skill_distances": distances to skill anchors [batch, seq, num_skill_levels]
                - "predicted_skill": softmax over negated distances (closest anchor wins)
                - "curvature": current learned curvature parameter
        """
        proj_out = self.projection(x)
        tangent = proj_out["tangent"]

        # Add an anchor axis so the distance broadcasts against all anchors.
        tangent_expanded = tangent.unsqueeze(-2)

        skill_distances = hyperbolic_distance_tangent(
            tangent_expanded,
            self.skill_anchors,
            # Keep curvature strictly positive for numerical stability.
            c=self.curvature.abs().clamp(min=0.1),
        )

        # Smaller distance -> higher probability of that skill level.
        predicted_skill = F.softmax(-skill_distances, dim=-1)

        return {
            "embedding": tangent,
            "skill_distances": skill_distances,
            "predicted_skill": predicted_skill,
            "curvature": self.curvature,
        }

    @classmethod
    def from_config(cls, config: Any) -> "HyperbolicSkillEmbedding":
        """
        Create HyperbolicSkillEmbedding from ModelConfig.

        Args:
            config: ModelConfig with HSE parameters (missing attributes
                fall back to the constructor defaults via getattr)

        Returns:
            Configured HyperbolicSkillEmbedding instance
        """
        return cls(
            input_dim=getattr(config, "embed_dim", 256),
            manifold_dim=getattr(config, "manifold_dim", 32),
            num_skill_levels=getattr(config, "num_skill_levels", 7),
            init_curvature=getattr(config, "init_curvature", 1.0),
        )
src/manifold/models/components/ihe.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Information Horizon Encoder - Causal transformer with linear attention."""
2
+
3
+ from __future__ import annotations
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from typing import Optional, Dict, Any, List
8
+
9
+ from manifold.models.layers.attention import MultiHeadLinearAttention, RotaryPositionEncoding
10
+
11
+
12
class IHEBlock(nn.Module):
    """
    Single IHE transformer block: causal linear attention followed by a
    position-wise feed-forward network.

    Uses a pre-norm residual layout (LayerNorm before each sub-layer)
    for training stability.
    """

    def __init__(
        self,
        embed_dim: int = 256,
        num_heads: int = 8,
        ff_dim: int = 1024,
        dropout: float = 0.1,
    ):
        super().__init__()

        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)

        self.attention = MultiHeadLinearAttention(
            embed_dim=embed_dim,
            num_heads=num_heads,
            dropout=dropout,
            causal=True,
            use_rotary=True,
        )

        self.ffn = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(ff_dim, embed_dim),
            nn.Dropout(dropout),
        )

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ) -> Dict[str, torch.Tensor]:
        """
        Run the block.

        Args:
            x: Input tensor [batch, seq, embed_dim]
            mask: Optional attention mask [batch, seq]

        Returns:
            Dict with 'output' and 'attention_weights' (always None here)
        """
        # Attention sub-layer: pre-norm, then residual add.
        x = x + self.attention(self.norm1(x), mask=mask)["output"]
        # Feed-forward sub-layer: pre-norm, then residual add.
        x = x + self.ffn(self.norm2(x))

        return {"output": x, "attention_weights": None}
+ }
73
+
74
+
75
class InformationHorizonEncoder(nn.Module):
    """
    Multi-layer causal transformer for encoding player action sequences.

    Stacks IHEBlock layers (linear attention, O(T)) and applies a final
    LayerNorm. Causal masking inside the blocks ensures actions cannot
    see future information.
    """

    def __init__(
        self,
        embed_dim: int = 256,
        num_layers: int = 4,
        num_heads: int = 8,
        ff_dim: int = 1024,
        dropout: float = 0.1,
        max_seq_len: int = 128,
    ):
        super().__init__()

        self.embed_dim = embed_dim
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.max_seq_len = max_seq_len

        # NOTE(review): this instance is not referenced in forward(); the
        # blocks enable rotary encoding internally (use_rotary=True).
        # Confirm whether this module-level encoder is still required.
        head_dim = embed_dim // num_heads
        self.pos_encoding = RotaryPositionEncoding(
            dim=head_dim,
            max_seq_len=max_seq_len,
        )

        self.layers = nn.ModuleList(
            IHEBlock(
                embed_dim=embed_dim,
                num_heads=num_heads,
                ff_dim=ff_dim,
                dropout=dropout,
            )
            for _ in range(num_layers)
        )

        self.final_norm = nn.LayerNorm(embed_dim)

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ) -> Dict[str, torch.Tensor]:
        """
        Encode an action sequence through the stacked causal blocks.

        Args:
            x: Input tensor [batch, seq, embed_dim]
            mask: Optional attention mask [batch, seq]

        Returns:
            Dict with 'encoding' (final-norm output) and
            'all_layer_outputs' (post-residual output of every block)
        """
        per_layer: List[torch.Tensor] = []
        hidden = x

        for block in self.layers:
            hidden = block(hidden, mask=mask)["output"]
            per_layer.append(hidden)

        return {
            "encoding": self.final_norm(hidden),
            "all_layer_outputs": per_layer,
        }

    @classmethod
    def from_config(cls, config: Any) -> "InformationHorizonEncoder":
        """
        Create InformationHorizonEncoder from ModelConfig.

        Args:
            config: ModelConfig instance with IHE parameters

        Returns:
            Configured InformationHorizonEncoder instance
        """
        return cls(
            embed_dim=config.embed_dim,
            num_layers=config.ihe_layers,
            num_heads=config.ihe_heads,
            ff_dim=config.ihe_ff_dim,
            dropout=config.ihe_dropout,
            max_seq_len=config.sequence_length,
        )
src/manifold/models/components/mdm.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Motor Dynamics Module for MANIFOLD.
2
+
3
+ Neural ODE with fixed Euler solver for modeling motor dynamics.
4
+ Integrates physics constraints to enforce human biomechanical limits.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ from typing import Optional, Dict, Any, TYPE_CHECKING
13
+
14
+ from manifold.models.layers.physics import PhysicsConstraintLayer
15
+
16
+ if TYPE_CHECKING:
17
+ from manifold.config import ModelConfig
18
+
19
+
20
class NeuralODEFunc(nn.Module):
    """MLP parameterizing the continuous-time dynamics dz/dt = f(z, t).

    Three linear layers with GELU activations, mapping the latent state
    back to its own dimensionality.
    """

    def __init__(self, hidden_dim: int = 512, input_dim: int = 256):
        super().__init__()

        # input_dim -> hidden_dim -> hidden_dim -> input_dim
        stages = [
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, t: torch.Tensor, z: torch.Tensor) -> torch.Tensor:
        """Compute dz/dt.

        Args:
            t: Current time (scalar tensor; unused, kept for the ODE
               solver's func(t, z) calling convention)
            z: Current state [batch, seq, input_dim] or [batch, input_dim]

        Returns:
            dz/dt with the same shape as z
        """
        return self.net(z)
49
+
50
+
51
def fixed_euler_solve(
    func: NeuralODEFunc,
    z0: torch.Tensor,
    t_span: tuple = (0.0, 1.0),
    num_steps: int = 4,
) -> torch.Tensor:
    """Integrate dz/dt = func(t, z) with fixed-step forward Euler.

    Far cheaper in memory than adaptive solvers such as dopri5. Each step
    applies z_{n+1} = z_n + dt * f(t_n, z_n).

    Args:
        func: Callable computing dz/dt, invoked as func(t, z)
        z0: Initial state [batch, ...]
        t_span: Integration interval (t0, t1)
        num_steps: Number of Euler steps (default 4 for memory efficiency)

    Returns:
        Final state z(t1) with the same shape as z0
    """
    start, stop = t_span
    dt = (stop - start) / num_steps

    state = z0
    t = start
    for _ in range(num_steps):
        # Time as a tensor on the state's device/dtype for the func interface.
        t_now = torch.tensor(t, device=state.device, dtype=state.dtype)
        state = state + dt * func(t_now, state)
        t += dt

    return state
82
+
83
+
84
class MotorDynamicsModule(nn.Module):
    """Neural ODE module for motor dynamics with physics constraints.

    Evolves input features with a fixed-step Euler solver (memory
    efficient compared to adaptive solvers) and optionally scores a
    trajectory against human biomechanical constraints (jerk, turn rate,
    acceleration limits).

    Args:
        input_dim: Dimension of input features (default 256)
        hidden_dim: Hidden dimension for the ODE function (default 512)
        num_steps: Number of Euler integration steps (default 4)
        use_physics_constraints: Whether to apply physics constraints (default True)
    """

    def __init__(
        self,
        input_dim: int = 256,
        hidden_dim: int = 512,
        num_steps: int = 4,
        use_physics_constraints: bool = True,
    ):
        super().__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_steps = num_steps
        self.use_physics_constraints = use_physics_constraints

        self.ode_func = NeuralODEFunc(hidden_dim=hidden_dim, input_dim=input_dim)
        self.physics = PhysicsConstraintLayer(learnable=True) if use_physics_constraints else None
        self.output_proj = nn.Linear(input_dim, input_dim)

    def forward(
        self,
        x: torch.Tensor,
        trajectory: Optional[torch.Tensor] = None,
    ) -> Dict[str, torch.Tensor]:
        """Evolve features through the learned dynamics.

        Args:
            x: Input tensor [batch, seq, input_dim]
            trajectory: Optional mouse-delta trajectory [batch, seq, 2]
                used for physics violation scoring

        Returns:
            Dict containing:
                - "output": Transformed features [batch, seq, input_dim]
                - "physics_violations": Constraint violations, or a zero
                  "total_violation" entry when constraints are disabled
        """
        evolved = fixed_euler_solve(
            func=self.ode_func,
            z0=x,
            t_span=(0.0, 1.0),
            num_steps=self.num_steps,
        )
        output = self.output_proj(evolved)
        result = {"output": output}

        if self.use_physics_constraints and self.physics is not None:
            if trajectory is None:
                # No real trajectory supplied: use the first two output
                # channels as a stand-in (detached, so no gradient flows
                # through this proxy path).
                trajectory = output[..., :2].detach()
            result["physics_violations"] = self.physics(trajectory)
        else:
            result["physics_violations"] = {"total_violation": torch.tensor(0.0, device=x.device)}

        return result

    @classmethod
    def from_config(cls, config: "ModelConfig") -> "MotorDynamicsModule":
        """Create MotorDynamicsModule from ModelConfig.

        Args:
            config: Model configuration object

        Returns:
            Configured MotorDynamicsModule instance
        """
        return cls(
            input_dim=config.embed_dim,
            hidden_dim=config.mdm_hidden,
            num_steps=config.mdm_steps,
            use_physics_constraints=True,
        )
+ )