Spaces:
Sleeping
Sleeping
Factor Studios
committed on
Upload test_ai_integration.py
Browse files- test_ai_integration.py +30 -30
test_ai_integration.py
CHANGED
|
@@ -230,39 +230,39 @@ def test_ai_integration():
|
|
| 230 |
if shared_vram is None:
|
| 231 |
shared_vram = VirtualVRAM()
|
| 232 |
shared_vram.storage = shared_storage
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 233 |
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
chip
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
continue
|
| 259 |
-
|
| 260 |
-
# Track total processing units
|
| 261 |
-
total_sms += chip.num_sms
|
| 262 |
-
total_cores += chip.num_sms * chip.cores_per_sm
|
| 263 |
|
| 264 |
# Store chip configuration in WebSocket storage
|
| 265 |
-
|
| 266 |
"num_sms": chip.num_sms,
|
| 267 |
"cores_per_sm": chip.cores_per_sm,
|
| 268 |
"total_cores": chip.num_sms * chip.cores_per_sm,
|
|
|
|
| 230 |
if shared_vram is None:
|
| 231 |
shared_vram = VirtualVRAM()
|
| 232 |
shared_vram.storage = shared_storage
|
| 233 |
+
|
| 234 |
+
for i in range(num_chips):
|
| 235 |
+
# Configure each chip with shared WebSocket storage
|
| 236 |
+
chip = Chip(chip_id=i, vram_size_gb=None, storage=shared_storage)
|
| 237 |
+
chips.append(chip)
|
| 238 |
|
| 239 |
+
# Connect chips in a ring topology
|
| 240 |
+
if i > 0:
|
| 241 |
+
chip.connect_chip(chips[i-1], optical_link)
|
| 242 |
+
|
| 243 |
+
# Initialize AI accelerator with shared resources
|
| 244 |
+
ai_accelerator = chip.ai_accelerator
|
| 245 |
+
ai_accelerator.vram = shared_vram
|
| 246 |
+
ai_accelerator.storage = shared_storage # Ensure storage is set
|
| 247 |
+
ai_accelerators.append(ai_accelerator)
|
| 248 |
+
|
| 249 |
+
# Verify WebSocket connection before loading model
|
| 250 |
+
if not shared_storage.wait_for_connection():
|
| 251 |
+
raise RuntimeError(f"Lost WebSocket connection during chip {i} initialization")
|
| 252 |
+
|
| 253 |
+
# Load model weights from WebSocket storage (no CPU transfer)
|
| 254 |
+
try:
|
| 255 |
+
ai_accelerator.load_model(model_id, None, None) # Model already in WebSocket storage
|
| 256 |
+
except Exception as e:
|
| 257 |
+
print(f"Warning: Failed to load model on chip {i}: {e}")
|
| 258 |
+
continue
|
| 259 |
+
|
| 260 |
+
# Track total processing units
|
| 261 |
+
total_sms += chip.num_sms
|
| 262 |
+
total_cores += chip.num_sms * chip.cores_per_sm
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 263 |
|
| 264 |
# Store chip configuration in WebSocket storage
|
| 265 |
+
shared_storage.store_state(f"chips/{i}/config", "state", {
|
| 266 |
"num_sms": chip.num_sms,
|
| 267 |
"cores_per_sm": chip.cores_per_sm,
|
| 268 |
"total_cores": chip.num_sms * chip.cores_per_sm,
|