LimmeDev committed on
Commit
6f1e643
·
verified ·
1 Parent(s): 454ecdd

Update for ZeroGPU with @spaces.GPU decorator

Browse files
Files changed (3) hide show
  1. README.md +2 -2
  2. app.py +149 -190
  3. requirements.txt +1 -1
README.md CHANGED
@@ -7,12 +7,12 @@ sdk: gradio
7
  sdk_version: 4.44.0
8
  app_file: app.py
9
  pinned: false
10
- hardware: h200
11
  ---
12
 
13
  # MANIFOLD - CS2 Cheat Detection Training
14
 
15
- Train the MANIFOLD (Motor-Aware Neural Inference for Faithfulness Of Latent Dynamics) model for CS2 cheat detection.
16
 
17
  ## Features
18
  - Synthetic data generation with realistic player behavior
 
7
  sdk_version: 4.44.0
8
  app_file: app.py
9
  pinned: false
10
+ hardware: zero-a10g
11
  ---
12
 
13
  # MANIFOLD - CS2 Cheat Detection Training
14
 
15
+ Train the MANIFOLD model for CS2 cheat detection using ZeroGPU (H200).
16
 
17
  ## Features
18
  - Synthetic data generation with realistic player behavior
app.py CHANGED
@@ -1,5 +1,5 @@
1
  #!/usr/bin/env python3
2
- """MANIFOLD Training Interface for Hugging Face Spaces."""
3
 
4
  import gradio as gr
5
  import torch
@@ -7,10 +7,8 @@ import numpy as np
7
  import json
8
  import time
9
  from pathlib import Path
10
- from threading import Thread
11
- import queue
12
 
13
- # Add src to path for manifold imports
14
  import sys
15
  sys.path.insert(0, str(Path(__file__).parent / "src"))
16
 
@@ -18,82 +16,47 @@ from manifold import MANIFOLDLite
18
  from manifold.config import ModelConfig, TrainingConfig
19
  from manifold.data.generator import SyntheticDataGenerator
20
  from manifold.data.dataset import MANIFOLDDataset, create_dataloader
21
- from manifold.training.trainer import MANIFOLDTrainer
22
- from manifold.training.callbacks import Callback, ProgressCallback
23
  from manifold.training.curriculum import CurriculumScheduler
 
24
 
25
- # Global state
26
- training_log = queue.Queue()
27
- is_training = False
28
  current_model = None
29
 
30
 
31
- class GradioCallback(Callback):
32
- """Callback that logs to Gradio interface."""
33
-
34
- def on_epoch_end(self, trainer, epoch_info):
35
- epoch = epoch_info["epoch"]
36
- stage = epoch_info.get("stage", {}).get("stage_name", "")
37
- train_loss = epoch_info.get("train", {}).get("loss", 0)
38
- val_loss = epoch_info.get("val", {}).get("loss", 0)
39
- val_acc = epoch_info.get("val", {}).get("accuracy", 0)
40
- lr = epoch_info.get("lr", 0)
41
-
42
- log_entry = {
43
- "epoch": epoch + 1,
44
- "stage": stage,
45
- "train_loss": f"{train_loss:.4f}",
46
- "val_loss": f"{val_loss:.4f}",
47
- "val_acc": f"{val_acc:.4f}",
48
- "lr": f"{lr:.2e}",
49
- }
50
- training_log.put(log_entry)
51
-
52
-
53
  def get_device_info():
54
- """Get GPU information."""
55
  if torch.cuda.is_available():
56
- gpu_name = torch.cuda.get_device_name(0)
57
- gpu_mem = torch.cuda.get_device_properties(0).total_memory / 1e9
58
- return f"GPU: {gpu_name} ({gpu_mem:.1f} GB)"
59
- return "CPU only (no GPU detected)"
60
 
61
 
62
  def generate_data(num_legit, num_cheaters, seed, progress=gr.Progress()):
63
- """Generate synthetic training data."""
64
  progress(0, desc="Initializing generator...")
65
-
66
- generator = SyntheticDataGenerator(seed=seed, engagements_per_session=200)
67
 
68
  all_features = []
69
  all_labels = []
70
  total = num_legit + num_cheaters
71
 
72
- # Generate legit players
73
  for i in progress.tqdm(range(num_legit), desc="Generating legit players"):
74
  session = generator.generate_player(is_cheater=False)
75
  all_features.append(session.to_tensor())
76
  all_labels.append(0)
77
 
78
- # Generate cheaters
79
  for i in progress.tqdm(range(num_cheaters), desc="Generating cheaters"):
80
  session = generator.generate_player(is_cheater=True)
81
  all_features.append(session.to_tensor())
82
  all_labels.append(2)
83
 
84
- # Convert and shuffle
85
  features = np.array(all_features)
86
  labels = np.array(all_labels)
87
 
88
- rng = np.random.default_rng(seed)
89
  indices = rng.permutation(total)
90
  features = features[indices]
91
  labels = labels[indices]
92
 
93
- # Split 90/10
94
  split_idx = int(total * 0.9)
95
 
96
- # Save to temp location
97
  data_dir = Path("/tmp/manifold_data")
98
  data_dir.mkdir(exist_ok=True)
99
 
@@ -102,195 +65,191 @@ def generate_data(num_legit, num_cheaters, seed, progress=gr.Progress()):
102
  np.save(data_dir / "val_features.npy", features[split_idx:])
103
  np.save(data_dir / "val_labels.npy", labels[split_idx:])
104
 
105
- return f"Generated {total} samples:\n- Train: {split_idx}\n- Val: {total - split_idx}\n- Features shape: {features.shape}"
106
 
107
 
108
- def train_model(batch_size, learning_rate, max_epochs, progress=gr.Progress()):
109
- """Train the MANIFOLD model."""
110
- global is_training, current_model
111
-
112
- if is_training:
113
- return "Training already in progress!"
114
 
115
- is_training = True
116
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
117
 
118
- try:
119
- # Load data
120
- data_dir = Path("/tmp/manifold_data")
121
- if not (data_dir / "train_features.npy").exists():
122
- is_training = False
123
- return "No data found! Generate data first."
124
-
125
- progress(0.1, desc="Loading data...")
126
- train_features = np.load(data_dir / "train_features.npy")
127
- train_labels = np.load(data_dir / "train_labels.npy")
128
- val_features = np.load(data_dir / "val_features.npy")
129
- val_labels = np.load(data_dir / "val_labels.npy")
130
-
131
- train_dataset = MANIFOLDDataset(data=train_features, labels=train_labels)
132
- val_dataset = MANIFOLDDataset(data=val_features, labels=val_labels)
133
-
134
- train_loader = create_dataloader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
135
- val_loader = create_dataloader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=0)
136
-
137
- progress(0.2, desc="Creating model...")
138
- model_config = ModelConfig()
139
- model = MANIFOLDLite.from_config(model_config)
 
 
 
 
 
 
 
 
 
 
 
140
 
141
- train_config = TrainingConfig(
142
- batch_size=batch_size,
143
- learning_rate=learning_rate,
144
- max_epochs=max_epochs,
145
- )
146
 
147
- callbacks = [GradioCallback()]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
 
149
- trainer = MANIFOLDTrainer(
150
- model=model,
151
- config=train_config,
152
- train_dataloader=train_loader,
153
- val_dataloader=val_loader,
154
- callbacks=callbacks,
155
- )
156
 
157
- progress(0.3, desc="Training...")
 
 
 
 
 
 
 
 
 
 
 
 
158
 
159
- # Run training
160
- history = trainer.train()
161
 
162
- # Save model
163
- save_path = Path("/tmp/manifold_model.pt")
164
- trainer.save_checkpoint(save_path)
165
- current_model = model
166
 
167
- is_training = False
168
- return f"Training complete!\nFinal val accuracy: {history['val'][-1].get('accuracy', 'N/A')}\nModel saved to {save_path}"
169
 
170
- except Exception as e:
171
- is_training = False
172
- return f"Training failed: {str(e)}"
173
-
174
-
175
- def get_training_logs():
176
- """Get accumulated training logs."""
177
- logs = []
178
- while not training_log.empty():
179
- try:
180
- logs.append(training_log.get_nowait())
181
- except:
182
- break
183
-
184
- if not logs:
185
- return "No logs yet. Start training to see progress."
186
-
187
- # Format as table
188
- header = "| Epoch | Stage | Train Loss | Val Loss | Val Acc | LR |\n|-------|-------|------------|----------|---------|----|\n"
189
- rows = "\n".join([
190
- f"| {l['epoch']} | {l['stage'][:20]} | {l['train_loss']} | {l['val_loss']} | {l['val_acc']} | {l['lr']} |"
191
- for l in logs
192
- ])
193
- return header + rows
194
 
195
 
 
196
  def test_inference(num_samples):
197
- """Test model inference."""
198
  global current_model
199
 
 
 
200
  if current_model is None:
201
- # Try to load saved model
202
  model_path = Path("/tmp/manifold_model.pt")
203
  if model_path.exists():
204
  current_model = MANIFOLDLite.from_config(ModelConfig())
205
  ckpt = torch.load(model_path, map_location="cpu")
206
  current_model.load_state_dict(ckpt["model_state_dict"])
207
  else:
208
- return "No model available! Train a model first."
209
 
210
- device = "cuda" if torch.cuda.is_available() else "cpu"
211
- current_model.to(device)
212
- current_model.eval()
213
-
214
- # Generate test samples
215
- generator = SyntheticDataGenerator(seed=12345)
216
 
 
217
  results = []
218
- for i in range(num_samples):
219
- is_cheater = i % 2 == 1 # Alternate
 
220
  session = generator.generate_player(is_cheater=is_cheater)
221
  features = torch.tensor(session.to_tensor(), dtype=torch.float32).unsqueeze(0).to(device)
222
 
223
  with torch.no_grad():
224
- outputs = current_model(features)
225
 
226
- pred_class = outputs["predicted_class"].item()
227
- uncertainty = outputs["uncertainty"].item()
228
- probs = outputs["verdict_probs"][0].cpu().numpy()
229
 
230
- class_names = ["Clean", "Suspicious", "Cheating"]
231
- results.append({
232
- "Sample": i + 1,
233
- "Actual": "Cheater" if is_cheater else "Legit",
234
- "Predicted": class_names[pred_class],
235
- "Confidence": f"{probs.max():.2%}",
236
- "Uncertainty": f"{uncertainty:.4f}",
237
- "Correct": "βœ“" if (pred_class > 0) == is_cheater else "βœ—",
238
- })
239
-
240
- # Format as markdown table
241
- header = "| # | Actual | Predicted | Confidence | Uncertainty | Correct |\n|---|--------|-----------|------------|-------------|---------|"
242
- rows = "\n".join([
243
- f"| {r['Sample']} | {r['Actual']} | {r['Predicted']} | {r['Confidence']} | {r['Uncertainty']} | {r['Correct']} |"
244
- for r in results
245
- ])
246
-
247
- correct = sum(1 for r in results if r["Correct"] == "βœ“")
248
- summary = f"\n\n**Accuracy: {correct}/{num_samples} ({100*correct/num_samples:.1f}%)**"
249
-
250
- return header + "\n" + rows + summary
251
 
252
 
253
- # Build Gradio interface
254
- with gr.Blocks(title="MANIFOLD Training") as demo:
255
- gr.Markdown("# 🎯 MANIFOLD - CS2 Cheat Detection Training")
256
- gr.Markdown(f"**Device:** {get_device_info()}")
257
 
258
  with gr.Tabs():
259
- with gr.TabItem("1. Generate Data"):
260
- gr.Markdown("Generate synthetic CS2 player behavior data for training.")
261
  with gr.Row():
262
- num_legit = gr.Slider(100, 10000, value=1000, step=100, label="Legit Players")
263
- num_cheaters = gr.Slider(100, 5000, value=500, step=100, label="Cheaters")
264
- seed = gr.Number(value=42, label="Random Seed")
265
- gen_btn = gr.Button("Generate Data", variant="primary")
266
- gen_output = gr.Textbox(label="Generation Status", lines=5)
267
  gen_btn.click(generate_data, [num_legit, num_cheaters, seed], gen_output)
268
 
269
- with gr.TabItem("2. Train Model"):
270
- gr.Markdown("Train the MANIFOLD model with curriculum learning.")
271
  with gr.Row():
272
- batch_size = gr.Slider(8, 128, value=32, step=8, label="Batch Size")
273
- learning_rate = gr.Number(value=3e-4, label="Learning Rate")
274
- max_epochs = gr.Slider(5, 100, value=20, step=5, label="Max Epochs")
275
- train_btn = gr.Button("Start Training", variant="primary")
276
- train_output = gr.Textbox(label="Training Status", lines=5)
277
- train_btn.click(train_model, [batch_size, learning_rate, max_epochs], train_output)
278
-
279
- gr.Markdown("### Training Logs")
280
- logs_output = gr.Markdown("No logs yet.")
281
- refresh_btn = gr.Button("Refresh Logs")
282
- refresh_btn.click(get_training_logs, [], logs_output)
283
-
284
- with gr.TabItem("3. Test Model"):
285
- gr.Markdown("Test the trained model on synthetic samples.")
286
- num_test = gr.Slider(5, 50, value=10, step=5, label="Number of Test Samples")
287
- test_btn = gr.Button("Run Inference", variant="primary")
288
- test_output = gr.Markdown("Click 'Run Inference' to test the model.")
289
  test_btn.click(test_inference, [num_test], test_output)
290
 
291
- gr.Markdown("---")
292
- gr.Markdown("**MANIFOLD** - Motor-Aware Neural Inference for Faithfulness Of Latent Dynamics")
293
-
294
 
295
  if __name__ == "__main__":
296
  demo.launch()
 
1
  #!/usr/bin/env python3
2
+ """MANIFOLD Training Interface for Hugging Face Spaces with ZeroGPU."""
3
 
4
  import gradio as gr
5
  import torch
 
7
  import json
8
  import time
9
  from pathlib import Path
10
+ import spaces
 
11
 
 
12
  import sys
13
  sys.path.insert(0, str(Path(__file__).parent / "src"))
14
 
 
16
  from manifold.config import ModelConfig, TrainingConfig
17
  from manifold.data.generator import SyntheticDataGenerator
18
  from manifold.data.dataset import MANIFOLDDataset, create_dataloader
19
+ from manifold.training.trainer import train_epoch, validate
 
20
  from manifold.training.curriculum import CurriculumScheduler
21
+ from manifold.training.losses import compute_total_loss
22
 
 
 
 
23
  current_model = None
24
 
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  def get_device_info():
 
27
  if torch.cuda.is_available():
28
+ return f"GPU: {torch.cuda.get_device_name(0)} ({torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB)"
29
+ return "CPU (GPU will be allocated when training starts)"
 
 
30
 
31
 
32
  def generate_data(num_legit, num_cheaters, seed, progress=gr.Progress()):
 
33
  progress(0, desc="Initializing generator...")
34
+ generator = SyntheticDataGenerator(seed=int(seed), engagements_per_session=200)
 
35
 
36
  all_features = []
37
  all_labels = []
38
  total = num_legit + num_cheaters
39
 
 
40
  for i in progress.tqdm(range(num_legit), desc="Generating legit players"):
41
  session = generator.generate_player(is_cheater=False)
42
  all_features.append(session.to_tensor())
43
  all_labels.append(0)
44
 
 
45
  for i in progress.tqdm(range(num_cheaters), desc="Generating cheaters"):
46
  session = generator.generate_player(is_cheater=True)
47
  all_features.append(session.to_tensor())
48
  all_labels.append(2)
49
 
 
50
  features = np.array(all_features)
51
  labels = np.array(all_labels)
52
 
53
+ rng = np.random.default_rng(int(seed))
54
  indices = rng.permutation(total)
55
  features = features[indices]
56
  labels = labels[indices]
57
 
 
58
  split_idx = int(total * 0.9)
59
 
 
60
  data_dir = Path("/tmp/manifold_data")
61
  data_dir.mkdir(exist_ok=True)
62
 
 
65
  np.save(data_dir / "val_features.npy", features[split_idx:])
66
  np.save(data_dir / "val_labels.npy", labels[split_idx:])
67
 
68
+ return f"βœ… Generated {total} samples:\n- Train: {split_idx}\n- Val: {total - split_idx}\n- Shape: {features.shape}"
69
 
70
 
71
+ @spaces.GPU(duration=300)
72
+ def train_model(batch_size, learning_rate, num_epochs):
73
+ global current_model
 
 
 
74
 
 
75
  device = "cuda" if torch.cuda.is_available() else "cpu"
76
+ gpu_info = f"Using: {torch.cuda.get_device_name(0)}" if torch.cuda.is_available() else "CPU only"
77
 
78
+ data_dir = Path("/tmp/manifold_data")
79
+ if not (data_dir / "train_features.npy").exists():
80
+ return "❌ No data found! Generate data first.", ""
81
+
82
+ train_features = np.load(data_dir / "train_features.npy")
83
+ train_labels = np.load(data_dir / "train_labels.npy")
84
+ val_features = np.load(data_dir / "val_features.npy")
85
+ val_labels = np.load(data_dir / "val_labels.npy")
86
+
87
+ train_dataset = MANIFOLDDataset(data=train_features, labels=train_labels)
88
+ val_dataset = MANIFOLDDataset(data=val_features, labels=val_labels)
89
+
90
+ train_loader = create_dataloader(train_dataset, batch_size=int(batch_size), shuffle=True, num_workers=0)
91
+ val_loader = create_dataloader(val_dataset, batch_size=int(batch_size), shuffle=False, num_workers=0)
92
+
93
+ model = MANIFOLDLite.from_config(ModelConfig())
94
+ model = model.to(device)
95
+
96
+ optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=0.01)
97
+ scaler = torch.amp.GradScaler(enabled=True)
98
+
99
+ scheduler = CurriculumScheduler()
100
+ logs = []
101
+
102
+ logs.append(f"πŸš€ {gpu_info}")
103
+ logs.append(f"πŸ“Š Train: {len(train_dataset)}, Val: {len(val_dataset)}")
104
+ logs.append(f"πŸ”§ Params: {model.get_num_params():,}")
105
+ logs.append("-" * 40)
106
+
107
+ global_step = 0
108
+
109
+ for epoch in range(int(num_epochs)):
110
+ stage_config = scheduler.get_stage_config()
111
 
112
+ for pg in optimizer.param_groups:
113
+ pg["lr"] = stage_config["learning_rate"]
 
 
 
114
 
115
+ model.train()
116
+ train_loss = 0
117
+ for batch in train_loader:
118
+ batch = {k: v.to(device) for k, v in batch.items()}
119
+
120
+ with torch.amp.autocast(device_type='cuda', dtype=torch.float16):
121
+ outputs = model(batch["features"], mask=batch.get("mask"), active_components=stage_config.get("components"))
122
+ loss, _ = compute_total_loss(outputs, {"labels": batch["labels"]}, stage_config["losses"], global_step)
123
+
124
+ scaler.scale(loss).backward()
125
+ scaler.unscale_(optimizer)
126
+ torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
127
+ scaler.step(optimizer)
128
+ scaler.update()
129
+ optimizer.zero_grad(set_to_none=True)
130
+
131
+ train_loss += loss.item()
132
+ global_step += 1
133
 
134
+ train_loss /= len(train_loader)
 
 
 
 
 
 
135
 
136
+ model.eval()
137
+ val_loss = 0
138
+ correct = 0
139
+ total = 0
140
+ with torch.no_grad():
141
+ for batch in val_loader:
142
+ batch = {k: v.to(device) for k, v in batch.items()}
143
+ outputs = model(batch["features"], mask=batch.get("mask"), active_components=stage_config.get("components"))
144
+ loss, _ = compute_total_loss(outputs, {"labels": batch["labels"]}, stage_config["losses"])
145
+ val_loss += loss.item()
146
+ if "predicted_class" in outputs:
147
+ correct += (outputs["predicted_class"] == batch["labels"]).sum().item()
148
+ total += batch["labels"].size(0)
149
 
150
+ val_loss /= len(val_loader)
151
+ val_acc = correct / total if total > 0 else 0
152
 
153
+ step_info = scheduler.step_epoch()
154
+ stage_name = step_info["stage_name"].split(":")[0] if ":" in step_info["stage_name"] else step_info["stage_name"]
 
 
155
 
156
+ logs.append(f"Epoch {epoch+1:2d} | {stage_name:8s} | Loss: {train_loss:.4f} / {val_loss:.4f} | Acc: {val_acc:.4f}")
 
157
 
158
+ if step_info.get("stage_changed"):
159
+ logs.append(f" β†’ Advanced to {scheduler.current_stage.name}")
160
+
161
+ save_path = Path("/tmp/manifold_model.pt")
162
+ torch.save({"model_state_dict": model.state_dict(), "config": ModelConfig()}, save_path)
163
+ current_model = model.cpu()
164
+
165
+ logs.append("-" * 40)
166
+ logs.append(f"βœ… Training complete! Final val accuracy: {val_acc:.4f}")
167
+
168
+ return "βœ… Training complete!", "\n".join(logs)
 
 
 
 
 
 
 
 
 
 
 
 
 
169
 
170
 
171
+ @spaces.GPU(duration=60)
172
  def test_inference(num_samples):
 
173
  global current_model
174
 
175
+ device = "cuda" if torch.cuda.is_available() else "cpu"
176
+
177
  if current_model is None:
 
178
  model_path = Path("/tmp/manifold_model.pt")
179
  if model_path.exists():
180
  current_model = MANIFOLDLite.from_config(ModelConfig())
181
  ckpt = torch.load(model_path, map_location="cpu")
182
  current_model.load_state_dict(ckpt["model_state_dict"])
183
  else:
184
+ return "❌ No model! Train first."
185
 
186
+ model = current_model.to(device)
187
+ model.eval()
 
 
 
 
188
 
189
+ generator = SyntheticDataGenerator(seed=99999)
190
  results = []
191
+
192
+ for i in range(int(num_samples)):
193
+ is_cheater = i % 2 == 1
194
  session = generator.generate_player(is_cheater=is_cheater)
195
  features = torch.tensor(session.to_tensor(), dtype=torch.float32).unsqueeze(0).to(device)
196
 
197
  with torch.no_grad():
198
+ outputs = model(features)
199
 
200
+ pred = outputs["predicted_class"].item()
201
+ conf = outputs["verdict_probs"][0].max().item()
202
+ unc = outputs["uncertainty"].item()
203
 
204
+ classes = ["Clean", "Suspicious", "Cheating"]
205
+ actual = "Cheater" if is_cheater else "Legit"
206
+ correct = "βœ“" if (pred > 0) == is_cheater else "βœ—"
207
+
208
+ results.append(f"| {i+1} | {actual} | {classes[pred]} | {conf:.1%} | {unc:.3f} | {correct} |")
209
+
210
+ current_model = model.cpu()
211
+
212
+ header = "| # | Actual | Predicted | Conf | Uncert | βœ“/βœ— |\n|---|--------|-----------|------|--------|-----|"
213
+ correct_count = sum(1 for r in results if "βœ“" in r)
214
+ footer = f"\n\n**Accuracy: {correct_count}/{num_samples} ({100*correct_count/num_samples:.1f}%)**"
215
+
216
+ return header + "\n" + "\n".join(results) + footer
 
 
 
 
 
 
 
 
217
 
218
 
219
+ with gr.Blocks(title="MANIFOLD Training", theme=gr.themes.Soft()) as demo:
220
+ gr.Markdown("# 🎯 MANIFOLD - CS2 Cheat Detection")
221
+ gr.Markdown(f"**{get_device_info()}** | ZeroGPU will allocate H200 on demand")
 
222
 
223
  with gr.Tabs():
224
+ with gr.TabItem("1️⃣ Generate Data"):
225
+ gr.Markdown("Generate synthetic CS2 player data")
226
  with gr.Row():
227
+ num_legit = gr.Slider(100, 10000, value=2000, step=100, label="Legit Players")
228
+ num_cheaters = gr.Slider(100, 5000, value=1000, step=100, label="Cheaters")
229
+ seed = gr.Number(value=42, label="Seed")
230
+ gen_btn = gr.Button("🎲 Generate Data", variant="primary")
231
+ gen_output = gr.Textbox(label="Status", lines=3)
232
  gen_btn.click(generate_data, [num_legit, num_cheaters, seed], gen_output)
233
 
234
+ with gr.TabItem("2️⃣ Train Model"):
235
+ gr.Markdown("Train with 4-stage curriculum learning (ZeroGPU: 5 min limit)")
236
  with gr.Row():
237
+ batch_size = gr.Slider(16, 128, value=64, step=16, label="Batch Size")
238
+ lr = gr.Number(value=3e-4, label="Learning Rate")
239
+ epochs = gr.Slider(5, 50, value=15, step=5, label="Epochs")
240
+ train_btn = gr.Button("πŸš€ Start Training", variant="primary")
241
+ train_status = gr.Textbox(label="Status", lines=2)
242
+ train_logs = gr.Textbox(label="Training Logs", lines=15)
243
+ train_btn.click(train_model, [batch_size, lr, epochs], [train_status, train_logs])
244
+
245
+ with gr.TabItem("3️⃣ Test Model"):
246
+ gr.Markdown("Test on synthetic samples")
247
+ num_test = gr.Slider(5, 30, value=10, step=5, label="Test Samples")
248
+ test_btn = gr.Button("πŸ” Run Inference", variant="primary")
249
+ test_output = gr.Markdown()
 
 
 
 
250
  test_btn.click(test_inference, [num_test], test_output)
251
 
252
+ gr.Markdown("---\n*MANIFOLD: Motor-Aware Neural Inference for Faithfulness Of Latent Dynamics*")
 
 
253
 
254
  if __name__ == "__main__":
255
  demo.launch()
requirements.txt CHANGED
@@ -1,9 +1,9 @@
1
  torch>=2.1.0
2
  numpy>=1.24.0
3
  scipy>=1.11.0
4
- pandas>=2.0.0
5
  pydantic>=2.0.0
6
  tqdm>=4.66.0
7
  einops>=0.7.0
8
  scikit-learn>=1.3.0
9
  gradio>=4.0.0
 
 
1
  torch>=2.1.0
2
  numpy>=1.24.0
3
  scipy>=1.11.0
 
4
  pydantic>=2.0.0
5
  tqdm>=4.66.0
6
  einops>=0.7.0
7
  scikit-learn>=1.3.0
8
  gradio>=4.0.0
9
+ spaces