Henry Barnes committed on
Commit ·
7cec0d4
1
Parent(s): 655116f
feat: rebuild as N1/N2 processor configurator
Browse files- N1 vs N2 specs comparison table
- Hardware constraint validation (cores, neurons, synapses)
- Local LIF simulator (no API key needed)
- Spike raster visualisation
- Cloud API tab for full-scale compute
- README.md +20 -6
- app.py +458 -175
- monitor_space.py +42 -0
- requirements.txt +1 -2
README.md
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
---
|
| 2 |
-
title: Catalyst
|
| 3 |
emoji: '🧠'
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: purple
|
|
@@ -9,13 +9,27 @@ python_version: "3.12"
|
|
| 9 |
app_file: app.py
|
| 10 |
pinned: false
|
| 11 |
license: mit
|
| 12 |
-
short_description:
|
| 13 |
---
|
| 14 |
|
| 15 |
-
# Catalyst
|
| 16 |
|
| 17 |
-
|
| 18 |
|
| 19 |
-
|
| 20 |
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Catalyst Neuromorphic — Processor Configurator
|
| 3 |
emoji: '🧠'
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: purple
|
|
|
|
| 9 |
app_file: app.py
|
| 10 |
pinned: false
|
| 11 |
license: mit
|
| 12 |
+
short_description: N1/N2 neuromorphic processor configurator
|
| 13 |
---
|
| 14 |
|
| 15 |
+
# Catalyst Neuromorphic — Processor Configurator
|
| 16 |
|
| 17 |
+
Explore the **N1** and **N2** spiking neuromorphic processors. Compare specs, configure networks with hardware constraint validation, and run local LIF simulations with spike raster visualisation.
|
| 18 |
|
| 19 |
+
**No API key required** — simulations run locally in the browser.
|
| 20 |
|
| 21 |
+
For full-scale hardware-accurate simulations, use the [Catalyst Cloud API](https://catalyst-neuromorphic.com/cloud).
|
| 22 |
+
|
| 23 |
+
## Features
|
| 24 |
+
|
| 25 |
+
- **Processor comparison** — N1 (Loihi 1 parity) vs N2 (Loihi 2 parity) side-by-side
|
| 26 |
+
- **Hardware constraint validation** — check if your network fits on the target processor
|
| 27 |
+
- **Live LIF simulation** — configure neurons, connections, and topology
|
| 28 |
+
- **Spike raster visualisation** — see firing patterns across populations
|
| 29 |
+
|
| 30 |
+
## Links
|
| 31 |
+
|
| 32 |
+
- [Website](https://catalyst-neuromorphic.com)
|
| 33 |
+
- [Cloud API](https://catalyst-neuromorphic.com/cloud)
|
| 34 |
+
- [Python SDK](https://pypi.org/project/catalyst-cloud/)
|
| 35 |
+
- [N1 Paper (Zenodo)](https://zenodo.org/records/18727094)
|
app.py
CHANGED
|
@@ -1,224 +1,507 @@
|
|
| 1 |
-
"""Catalyst
|
| 2 |
|
| 3 |
-
|
| 4 |
-
|
|
|
|
| 5 |
"""
|
| 6 |
|
| 7 |
import gradio as gr
|
| 8 |
-
import requests
|
| 9 |
import numpy as np
|
| 10 |
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
-
def signup(email: str) -> str:
|
| 15 |
-
"""Sign up for a free API key."""
|
| 16 |
-
if not email or "@" not in email:
|
| 17 |
-
return "Please enter a valid email address."
|
| 18 |
-
try:
|
| 19 |
-
resp = requests.post(f"{API_URL}/v1/signup", json={"email": email, "tier": "free"}, timeout=15)
|
| 20 |
-
if resp.status_code == 200:
|
| 21 |
-
data = resp.json()
|
| 22 |
-
return f"Your API key: {data['api_key']}\n\nSave this — it's shown only once.\nFree tier: {data['limits']['max_neurons']} neurons, {data['limits']['max_timesteps']} timesteps, {data['limits']['max_jobs_per_day']} jobs/day."
|
| 23 |
-
else:
|
| 24 |
-
return f"Error: {resp.json().get('detail', resp.text)}"
|
| 25 |
-
except Exception as e:
|
| 26 |
-
return f"Connection error: {e}"
|
| 27 |
|
|
|
|
| 28 |
|
| 29 |
-
def
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
"""
|
| 33 |
-
if not api_key or not api_key.startswith("cn_live_"):
|
| 34 |
-
return "Enter a valid API key (starts with cn_live_)", None
|
| 35 |
|
| 36 |
-
|
|
|
|
|
|
|
| 37 |
|
| 38 |
-
|
| 39 |
-
|
|
|
|
|
|
|
| 40 |
connections = []
|
| 41 |
|
| 42 |
if hidden_size > 0:
|
| 43 |
-
populations.append({"label": "hidden", "size": hidden_size,
|
|
|
|
| 44 |
connections.append({
|
| 45 |
"source": "input", "target": "hidden",
|
| 46 |
-
"topology": topology, "weight": weight, "
|
| 47 |
})
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
})
|
| 54 |
-
elif output_size > 0:
|
| 55 |
-
populations.append({"label": "output", "size": output_size, "params": {"threshold": 1000}})
|
| 56 |
connections.append({
|
| 57 |
-
"source":
|
| 58 |
-
"topology": topology, "weight": weight, "
|
| 59 |
})
|
| 60 |
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
return f"Total neurons ({total}) exceeds free tier limit (1024). Reduce sizes.", None
|
| 64 |
-
|
| 65 |
-
try:
|
| 66 |
-
# Create network
|
| 67 |
-
resp = requests.post(f"{API_URL}/v1/networks", headers=headers,
|
| 68 |
-
json={"populations": populations, "connections": connections}, timeout=15)
|
| 69 |
-
if resp.status_code != 200:
|
| 70 |
-
return f"Network error: {resp.json().get('detail', resp.text)}", None
|
| 71 |
-
|
| 72 |
-
network_id = resp.json()["network_id"]
|
| 73 |
-
|
| 74 |
-
# Submit job
|
| 75 |
-
resp = requests.post(f"{API_URL}/v1/jobs", headers=headers, json={
|
| 76 |
-
"network_id": network_id,
|
| 77 |
-
"timesteps": timesteps,
|
| 78 |
-
"stimuli": [{"population": "input", "current": input_current}],
|
| 79 |
-
}, timeout=15)
|
| 80 |
-
if resp.status_code != 200:
|
| 81 |
-
return f"Job error: {resp.json().get('detail', resp.text)}", None
|
| 82 |
-
|
| 83 |
-
job_id = resp.json()["job_id"]
|
| 84 |
-
|
| 85 |
-
# Poll for completion
|
| 86 |
-
import time
|
| 87 |
-
for _ in range(60):
|
| 88 |
-
time.sleep(0.5)
|
| 89 |
-
resp = requests.get(f"{API_URL}/v1/jobs/{job_id}", headers=headers, timeout=15)
|
| 90 |
-
job = resp.json()
|
| 91 |
-
if job["status"] == "completed":
|
| 92 |
-
break
|
| 93 |
-
if job["status"] == "failed":
|
| 94 |
-
return f"Simulation failed: {job.get('error_message', 'Unknown error')}", None
|
| 95 |
-
else:
|
| 96 |
-
return "Timeout waiting for simulation to complete.", None
|
| 97 |
-
|
| 98 |
-
result = job["result"]
|
| 99 |
-
|
| 100 |
-
# Get spike trains
|
| 101 |
-
resp = requests.get(f"{API_URL}/v1/jobs/{job_id}/spikes", headers=headers, timeout=15)
|
| 102 |
-
spikes = resp.json()["spike_trains"]
|
| 103 |
-
|
| 104 |
-
# Build raster plot
|
| 105 |
-
import matplotlib
|
| 106 |
-
matplotlib.use("Agg")
|
| 107 |
-
import matplotlib.pyplot as plt
|
| 108 |
-
|
| 109 |
-
fig, ax = plt.subplots(figsize=(10, 5))
|
| 110 |
-
fig.patch.set_facecolor("#0a0a0a")
|
| 111 |
-
ax.set_facecolor("#0a0a0a")
|
| 112 |
-
|
| 113 |
-
colors = {"input": "#4A9EFF", "hidden": "#FF6B6B", "output": "#50C878"}
|
| 114 |
-
neuron_offset = 0
|
| 115 |
-
yticks = []
|
| 116 |
-
yticklabels = []
|
| 117 |
-
|
| 118 |
-
for pop_label in [p["label"] for p in populations]:
|
| 119 |
-
pop_size = next(p["size"] for p in populations if p["label"] == pop_label)
|
| 120 |
-
pop_spikes = spikes.get(pop_label, {})
|
| 121 |
-
color = colors.get(pop_label, "#FFFFFF")
|
| 122 |
-
|
| 123 |
-
for neuron_str, times in pop_spikes.items():
|
| 124 |
-
neuron_idx = int(neuron_str)
|
| 125 |
-
y = neuron_offset + neuron_idx
|
| 126 |
-
ax.scatter(times, [y] * len(times), s=1, c=color, marker="|", linewidths=0.5)
|
| 127 |
-
|
| 128 |
-
mid = neuron_offset + pop_size // 2
|
| 129 |
-
yticks.append(mid)
|
| 130 |
-
yticklabels.append(f"{pop_label}\n({pop_size})")
|
| 131 |
-
neuron_offset += pop_size
|
| 132 |
-
|
| 133 |
-
ax.set_xlabel("Timestep", color="white", fontsize=11)
|
| 134 |
-
ax.set_ylabel("Neuron", color="white", fontsize=11)
|
| 135 |
-
ax.set_title("Spike Raster Plot", color="white", fontsize=13, fontweight="bold")
|
| 136 |
-
ax.set_yticks(yticks)
|
| 137 |
-
ax.set_yticklabels(yticklabels, fontsize=9)
|
| 138 |
-
ax.tick_params(colors="white")
|
| 139 |
-
ax.spines["bottom"].set_color("#333")
|
| 140 |
-
ax.spines["left"].set_color("#333")
|
| 141 |
-
ax.spines["top"].set_visible(False)
|
| 142 |
-
ax.spines["right"].set_visible(False)
|
| 143 |
-
ax.set_xlim(-1, timesteps + 1)
|
| 144 |
-
ax.set_ylim(-1, neuron_offset)
|
| 145 |
-
|
| 146 |
-
plt.tight_layout()
|
| 147 |
-
|
| 148 |
-
summary = (
|
| 149 |
-
f"Total spikes: {result['total_spikes']}\n"
|
| 150 |
-
f"Timesteps: {result['timesteps']}\n"
|
| 151 |
-
f"Compute time: {job['compute_seconds']:.4f}s\n\n"
|
| 152 |
-
f"Firing rates:\n" +
|
| 153 |
-
"\n".join(f" {k}: {v:.4f}" for k, v in result["firing_rates"].items())
|
| 154 |
-
)
|
| 155 |
-
|
| 156 |
-
return summary, fig
|
| 157 |
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 163 |
|
| 164 |
with gr.Blocks(
|
| 165 |
-
title="Catalyst
|
| 166 |
theme=gr.themes.Base(
|
| 167 |
primary_hue="blue",
|
| 168 |
neutral_hue="slate",
|
|
|
|
| 169 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
| 170 |
) as demo:
|
| 171 |
gr.Markdown("""
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
|
|
|
| 175 |
""")
|
| 176 |
|
| 177 |
-
with gr.Tab("
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
signup_output = gr.Textbox(label="Result", lines=4)
|
| 181 |
-
signup_btn.click(signup, inputs=[email_input], outputs=[signup_output])
|
| 182 |
|
| 183 |
-
|
| 184 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 185 |
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
output_size = gr.Slider(0, 200, value=20, step=1, label="Output neurons (0 = skip)")
|
| 190 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 191 |
with gr.Row():
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 198 |
|
| 199 |
with gr.Row():
|
| 200 |
-
timesteps = gr.Slider(10,
|
| 201 |
-
input_current = gr.Slider(
|
|
|
|
| 202 |
|
| 203 |
-
run_btn = gr.Button("
|
| 204 |
|
| 205 |
with gr.Row():
|
| 206 |
-
|
| 207 |
-
|
|
|
|
|
|
|
|
|
|
| 208 |
|
| 209 |
run_btn.click(
|
| 210 |
-
|
| 211 |
-
inputs=[
|
| 212 |
-
|
| 213 |
-
|
|
|
|
| 214 |
)
|
| 215 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 216 |
gr.Markdown("""
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
""")
|
| 223 |
|
| 224 |
demo.launch()
|
|
|
|
| 1 |
+
"""Catalyst Neuromorphic — Interactive Processor Configurator & Simulator.
|
| 2 |
|
| 3 |
+
Explore the N1 and N2 neuromorphic processors, configure spiking neural
|
| 4 |
+
networks with hardware-accurate constraints, and run local simulations
|
| 5 |
+
with live spike raster visualisation.
|
| 6 |
"""
|
| 7 |
|
| 8 |
import gradio as gr
|
|
|
|
| 9 |
import numpy as np
|
| 10 |
|
| 11 |
+
# ── Processor specs ──────────────────────────────────────────────────────────
|
| 12 |
+
|
| 13 |
+
# The two generations share the same physical fabric (cores, fan-in,
# compartments); N2 differs in its learning ISA width, supported neuron
# models, and feature set.
_BASE_SPEC = {
    "cores": 128,
    "neurons_per_core": 1024,
    "synapses_per_core": 131072,
    "total_neurons": 131072,
    "dendrites": 4,
    "graded_spike_bits": 8,
    "max_axon_delay": 63,
}

# Per-processor spec sheets keyed by product name ("N1" / "N2").
PROCESSORS = {
    "N1": {
        **_BASE_SPEC,
        "learning_opcodes": 14,
        "parity": "Intel Loihi 1",
        "neuron_models": ["LIF"],
        "features": [
            "Dendritic compartments (4 per neuron)",
            "Graded spikes (8-bit payload)",
            "On-chip learning (14-opcode ISA, STDP)",
            "Axonal delays (up to 63 timesteps)",
            "Programmable synaptic plasticity",
        ],
    },
    "N2": {
        **_BASE_SPEC,
        "learning_opcodes": 18,
        "parity": "Intel Loihi 2",
        "neuron_models": ["LIF", "CUBA", "ALIF", "Izhikevich", "Custom"],
        "features": [
            "Programmable neuron models (microcode engine)",
            "CUBA, LIF, ALIF, Izhikevich built-in",
            "Custom neuron models via microcode",
            "Graded spikes (8-bit payload)",
            "On-chip learning (18-opcode ISA)",
            "Dendritic compartments (4 per neuron)",
            "Axonal delays (up to 63 timesteps)",
            "Multi-chip scalability",
            "Three-factor learning rules",
        ],
    },
}
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ── Local LIF simulator ─────────────────────────────────────────────────────
|
| 60 |
+
|
| 61 |
+
def simulate_lif(populations, connections, timesteps, dt=1.0):
    """Run a simple leaky integrate-and-fire (LIF) simulation.

    Parameters
    ----------
    populations : list[dict]
        Each dict carries "label", "size", and optionally "threshold",
        "leak", and (first population only) "input_current".
    connections : list[dict]
        Each dict carries "source"/"target" labels and optionally
        "topology" ("all_to_all", "one_to_one", or "random_sparse"),
        "weight", and "probability" (random_sparse connection probability).
    timesteps : int
        Number of discrete simulation steps.
    dt : float
        Kept for interface compatibility; the update rule is per-step,
        so this value is currently unused.

    Returns
    -------
    tuple
        (spike_times, pop_offsets) where spike_times maps population
        label -> {str(neuron_idx): [t, ...]} and pop_offsets maps label
        -> index of the population's first neuron in the flat arrays.

    Notes
    -----
    Uses the global NumPy RNG (sparse connectivity and per-step noise);
    results are stochastic unless the caller seeds ``np.random``.
    """
    # Flatten populations into one contiguous state vector.
    pop_offsets = {}
    total = 0
    for p in populations:
        pop_offsets[p["label"]] = total
        total += p["size"]

    voltage = np.zeros(total)
    threshold = np.full(total, 1000.0)
    leak = np.full(total, 50.0)
    refrac = np.zeros(total, dtype=int)

    # Apply per-population parameter overrides.
    for p in populations:
        off = pop_offsets[p["label"]]
        sz = p["size"]
        threshold[off : off + sz] = p.get("threshold", 1000)
        leak[off : off + sz] = p.get("leak", 50)

    # Dense weight matrix, W[target, source].
    W = np.zeros((total, total))
    for c in connections:
        src_off = pop_offsets[c["source"]]
        src_sz = next(p["size"] for p in populations if p["label"] == c["source"])
        tgt_off = pop_offsets[c["target"]]
        tgt_sz = next(p["size"] for p in populations if p["label"] == c["target"])

        topo = c.get("topology", "random_sparse")
        w = c.get("weight", 500)
        prob = c.get("probability", 0.3)

        if topo == "all_to_all":
            W[tgt_off : tgt_off + tgt_sz, src_off : src_off + src_sz] = w
        elif topo == "one_to_one":
            n = min(src_sz, tgt_sz)
            W[tgt_off + np.arange(n), src_off + np.arange(n)] = w
        else:  # random_sparse
            mask = np.random.random((tgt_sz, src_sz)) < prob
            W[tgt_off : tgt_off + tgt_sz, src_off : src_off + src_sz] = mask * w

    # Stimulus: constant current driven into the first population only.
    stim_pop = populations[0]
    stim_off = pop_offsets[stim_pop["label"]]
    stim_current = np.zeros(total)
    stim_current[stim_off : stim_off + stim_pop["size"]] = stim_pop.get("input_current", 800)

    spike_times = {p["label"]: {} for p in populations}

    # Spike vector from the previous step, carried forward instead of
    # rescanning the whole spike_times history each step (the rescan was
    # O(neurons x history) per timestep and dominated long runs).
    prev_fired = np.zeros(total)

    # Reverse lookup tables: flat index -> (population label, local index),
    # so each fired neuron is recorded without a per-spike population scan.
    labels = np.empty(total, dtype=object)
    local_ids = np.zeros(total, dtype=int)
    for p in populations:
        off = pop_offsets[p["label"]]
        labels[off : off + p["size"]] = p["label"]
        local_ids[off : off + p["size"]] = np.arange(p["size"])

    for t in range(timesteps):
        # Neurons still in their refractory window take no input.
        active = refrac <= 0

        # Multiplicative leak; `leak` is in units of 1/4096 per step.
        voltage = voltage * (1.0 - leak / 4096.0)

        # Synaptic drive from spikes emitted on the previous step.
        synaptic = W @ prev_fired

        # Integrate stimulus + synaptic input, plus small Gaussian noise.
        voltage += (stim_current + synaptic) * active
        voltage += np.random.randn(total) * 20 * active

        # Spike detection and recording.
        fired = (voltage >= threshold) & active
        for idx in np.where(fired)[0]:
            spike_times[labels[idx]].setdefault(str(local_ids[idx]), []).append(t)

        # Reset fired neurons and advance refractory counters.
        voltage[fired] = 0.0
        refrac[fired] = 3  # refractory period (steps)
        refrac = np.maximum(refrac - 1, 0)
        prev_fired = fired.astype(float)

    return spike_times, pop_offsets
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def make_raster(populations, spike_times, timesteps):
    """Render spike trains as a dark-themed raster plot.

    Returns (fig, total_spikes): the matplotlib Figure and the total
    number of spikes plotted across all populations.
    """
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(12, 5), dpi=100)
    fig.patch.set_facecolor("#0d1117")
    ax.set_facecolor("#0d1117")

    palette = ["#4A9EFF", "#FF6B6B", "#50C878", "#FFD93D", "#C084FC"]

    spike_count = 0
    base = 0  # y-offset of the current population's first neuron
    for pop_idx, pop in enumerate(populations):
        shade = palette[pop_idx % len(palette)]
        trains = spike_times.get(pop["label"], {})

        for neuron_key, fire_times in trains.items():
            row = base + int(neuron_key)
            ax.scatter(fire_times, [row] * len(fire_times), s=1.5, c=shade,
                       marker="|", linewidths=0.6)
            spike_count += len(fire_times)

        # Label each population band at its vertical midpoint, just left
        # of the axes (x in axes fraction, y in data coordinates).
        ax.annotate(
            f'{pop["label"]}\n({pop["size"]})',
            xy=(-0.01, base + pop["size"] // 2),
            xycoords=("axes fraction", "data"),
            fontsize=8,
            color=shade,
            ha="right",
            va="center",
        )
        base += pop["size"]

    ax.set_xlabel("Timestep", color="#8b949e", fontsize=10)
    ax.set_title("Spike Raster", color="white", fontsize=12, fontweight="bold", pad=10)
    ax.tick_params(colors="#8b949e", labelsize=8)
    for side in ("bottom", "left"):
        ax.spines[side].set_color("#30363d")
    for side in ("top", "right"):
        ax.spines[side].set_visible(False)
    ax.set_xlim(-1, timesteps + 1)
    ax.set_ylim(-1, base)
    ax.set_yticks([])

    plt.tight_layout()
    return fig, spike_count
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
# ── Hardware constraint validation ───────────────────────────────────────────
|
| 217 |
+
|
| 218 |
+
def validate_hardware(processor, populations, connections):
    """Check whether the configured network fits on the selected processor.

    Returns (report, fits): a markdown resource-mapping report and a bool
    that is True when enough cores are available for the neuron count.
    """
    spec = PROCESSORS[processor]
    total_neurons = sum(p["size"] for p in populations)
    max_neurons = spec["total_neurons"]

    # Ceiling division; a network always occupies at least one core.
    per_core = spec["neurons_per_core"]
    cores_needed = max(1, (total_neurons + per_core - 1) // per_core)
    cores_available = spec["cores"]

    # Estimated synapse count per connection topology.
    sizes = {p["label"]: p["size"] for p in populations}
    total_synapses = 0
    for conn in connections:
        fan_src = sizes[conn["source"]]
        fan_tgt = sizes[conn["target"]]
        shape = conn.get("topology", "random_sparse")
        if shape == "all_to_all":
            total_synapses += fan_src * fan_tgt
        elif shape == "one_to_one":
            total_synapses += min(fan_src, fan_tgt)
        else:  # random_sparse: expected value under the connection probability
            total_synapses += int(fan_src * fan_tgt * conn.get("probability", 0.3))

    fits = cores_needed <= cores_available
    utilisation = (cores_needed / cores_available) * 100

    parts = [
        f"### Hardware Mapping: {processor}\n\n",
        "| Resource | Used | Available | Status |\n",
        "|----------|------|-----------|--------|\n",
        f"| Neurons | {total_neurons:,} | {max_neurons:,} | "
        f"{'OK' if total_neurons <= max_neurons else 'OVER'} |\n",
        f"| Cores | {cores_needed} | {cores_available} | {'OK' if fits else 'OVER'} |\n",
        f"| Synapses | {total_synapses:,} | {spec['synapses_per_core'] * cores_available:,} | est. |\n",
        f"| Utilisation | {utilisation:.1f}% | | |\n\n",
    ]
    if fits:
        parts.append(
            f"**Network fits on {processor}.** "
            f"Using {cores_needed}/{cores_available} cores ({utilisation:.1f}%)."
        )
    else:
        parts.append(
            f"**Network does NOT fit on {processor}.** "
            f"Needs {cores_needed} cores but only {cores_available} available. "
            "Reduce neuron count."
        )
    return "".join(parts), fits
|
| 257 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
|
| 259 |
+
# ── Main interface ───────────────────────────────────────────────────────────
|
| 260 |
|
| 261 |
+
def run_demo(processor, num_cores, neurons_per_core, neuron_model,
             hidden_size, output_size, topology, weight, probability,
             timesteps, input_current):
    """Build the network from the UI controls, validate it against the
    selected processor, and run a local LIF simulation.

    Returns (markdown_report, figure_or_None); the figure is None when
    the network exceeds the in-browser demo cap.
    """
    # Requested input-layer size, capped for demo performance. (The
    # population below caps again at 512, so 2048 is a loose outer bound.)
    requested = num_cores * neurons_per_core
    input_size = min(requested, 2048)

    populations = [
        {"label": "input", "size": min(input_size, 512), "threshold": 1000,
         "leak": 50, "input_current": input_current},
    ]
    connections = []

    def _connect(src_label, tgt_label):
        # All connections share the topology/weight/probability controls.
        connections.append({
            "source": src_label, "target": tgt_label,
            "topology": topology, "weight": weight, "probability": probability,
        })

    if hidden_size > 0:
        populations.append({"label": "hidden", "size": hidden_size,
                            "threshold": 1000, "leak": 50})
        _connect("input", "hidden")

    if output_size > 0:
        populations.append({"label": "output", "size": output_size,
                            "threshold": 1000, "leak": 50})
        # Feed the output layer from the hidden layer when one exists.
        _connect("hidden" if hidden_size > 0 else "input", "output")

    # Hardware-constraint check; the report is shown in every case.
    hw_report, fits = validate_hardware(processor, populations, connections)

    # Cap simulation size for responsiveness
    total = sum(p["size"] for p in populations)
    if total > 2000:
        return hw_report + "\n\n*Demo capped at 2,000 neurons for browser performance. Full scale available via Cloud API.*", None

    # Simulate and render the spike raster.
    spike_times, _ = simulate_lif(populations, connections, timesteps)
    fig, total_spikes = make_raster(populations, spike_times, timesteps)

    stats_lines = [
        "\n\n---\n### Simulation Results\n",
        f"- **Total spikes**: {total_spikes:,}\n",
        f"- **Timesteps**: {timesteps}\n",
        f"- **Neuron model**: {neuron_model}\n",
    ]
    for pop in populations:
        n_spikes = sum(len(ts) for ts in spike_times[pop["label"]].values())
        denom = pop["size"] * timesteps
        rate = n_spikes / denom if denom > 0 else 0
        stats_lines.append(
            f"- **{pop['label']}**: {n_spikes:,} spikes ({rate:.3f} spikes/neuron/step)\n"
        )

    return hw_report + "".join(stats_lines), fig
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def get_neuron_models(processor):
    """Repopulate the neuron-model dropdown for the chosen processor."""
    models = PROCESSORS[processor]["neuron_models"]
    # Reset the selection to the first (always-supported) model.
    return gr.update(choices=models, value=models[0])
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def get_processor_info(processor):
    """Build a markdown spec sheet for the selected processor."""
    spec = PROCESSORS[processor]
    rows = [
        f"## Catalyst {processor}\n\n",
        f"**Parity**: {spec['parity']}\n\n",
        "| Spec | Value |\n|------|-------|\n",
        f"| Cores | {spec['cores']} |\n",
        f"| Neurons/core | {spec['neurons_per_core']:,} |\n",
        f"| Total neurons | {spec['total_neurons']:,} |\n",
        f"| Synapses/core | {spec['synapses_per_core']:,} |\n",
        f"| Dendrites | {spec['dendrites']} compartments |\n",
        f"| Graded spikes | {spec['graded_spike_bits']}-bit |\n",
        f"| Learning opcodes | {spec['learning_opcodes']} |\n",
        f"| Max axon delay | {spec['max_axon_delay']} timesteps |\n",
        f"| Neuron models | {', '.join(spec['neuron_models'])} |\n\n",
        "### Key Features\n\n",
    ]
    rows.extend(f"- {feature}\n" for feature in spec["features"])
    return "".join(rows)
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
# ── Gradio app ───────────────────────────────────────────────────────────────
|
| 347 |
|
| 348 |
with gr.Blocks(
|
| 349 |
+
title="Catalyst Neuromorphic — Processor Configurator",
|
| 350 |
theme=gr.themes.Base(
|
| 351 |
primary_hue="blue",
|
| 352 |
neutral_hue="slate",
|
| 353 |
+
font=gr.themes.GoogleFont("Inter"),
|
| 354 |
),
|
| 355 |
+
css="""
|
| 356 |
+
.gradio-container { max-width: 1100px !important; }
|
| 357 |
+
.dark { background: #0d1117 !important; }
|
| 358 |
+
""",
|
| 359 |
) as demo:
|
| 360 |
gr.Markdown("""
|
| 361 |
+
# Catalyst Neuromorphic — Processor Configurator
|
| 362 |
+
|
| 363 |
+
Explore the **N1** and **N2** spiking neuromorphic processors.
|
| 364 |
+
Configure networks, validate hardware constraints, and run simulations — all in the browser.
|
| 365 |
""")
|
| 366 |
|
| 367 |
+
with gr.Tab("Processors"):
|
| 368 |
+
gr.Markdown("""
|
| 369 |
+
### Compare the N1 and N2 neuromorphic processors
|
|
|
|
|
|
|
| 370 |
|
| 371 |
+
| | **N1** | **N2** |
|
| 372 |
+
|---|---|---|
|
| 373 |
+
| **Parity** | Intel Loihi 1 | Intel Loihi 2 |
|
| 374 |
+
| **Cores** | 128 | 128 |
|
| 375 |
+
| **Neurons/core** | 1,024 | 1,024 |
|
| 376 |
+
| **Total neurons** | 131,072 | 131,072 |
|
| 377 |
+
| **Neuron models** | LIF | LIF, CUBA, ALIF, Izhikevich, Custom |
|
| 378 |
+
| **Learning** | 14-opcode ISA, STDP | 18-opcode ISA, three-factor |
|
| 379 |
+
| **Dendrites** | 4 compartments | 4 compartments |
|
| 380 |
+
| **Graded spikes** | 8-bit | 8-bit |
|
| 381 |
+
| **Max axon delay** | 63 timesteps | 63 timesteps |
|
| 382 |
+
| **Key advance** | Foundation | Programmable neuron microcode engine |
|
| 383 |
|
| 384 |
+
The **N1** is a complete neuromorphic processor with full Loihi 1 parity — 128 cores, on-chip learning, dendritic computation.
|
| 385 |
+
|
| 386 |
+
The **N2** adds a **programmable microcode engine** for custom neuron models. Instead of hardwired LIF, you can program arbitrary neuron dynamics — CUBA, ALIF, Izhikevich, or anything you design.
|
|
|
|
| 387 |
|
| 388 |
+
Both have been validated on FPGA. Both are fully open-design.
|
| 389 |
+
|
| 390 |
+
**Papers**: [N1 Paper (Zenodo)](https://zenodo.org/records/18727094) | [N2 Paper](/papers/catalyst-n2.pdf)
|
| 391 |
+
|
| 392 |
+
**Website**: [catalyst-neuromorphic.com](https://catalyst-neuromorphic.com)
|
| 393 |
+
""")
|
| 394 |
+
|
| 395 |
+
with gr.Tab("Configure & Simulate"):
|
| 396 |
with gr.Row():
|
| 397 |
+
with gr.Column(scale=1):
|
| 398 |
+
processor = gr.Radio(
|
| 399 |
+
["N1", "N2"], value="N2", label="Processor",
|
| 400 |
+
info="Select which processor to target",
|
| 401 |
+
)
|
| 402 |
+
neuron_model = gr.Dropdown(
|
| 403 |
+
["LIF", "CUBA", "ALIF", "Izhikevich", "Custom"],
|
| 404 |
+
value="LIF", label="Neuron Model",
|
| 405 |
+
info="N2 supports programmable models",
|
| 406 |
+
)
|
| 407 |
+
num_cores = gr.Slider(1, 128, value=4, step=1,
|
| 408 |
+
label="Cores", info="How many cores to use")
|
| 409 |
+
neurons_per_core = gr.Slider(1, 1024, value=64, step=1,
|
| 410 |
+
label="Neurons per core")
|
| 411 |
+
|
| 412 |
+
with gr.Column(scale=1):
|
| 413 |
+
hidden_size = gr.Slider(0, 512, value=128, step=1,
|
| 414 |
+
label="Hidden neurons", info="0 = direct input→output")
|
| 415 |
+
output_size = gr.Slider(0, 256, value=64, step=1,
|
| 416 |
+
label="Output neurons", info="0 = no output layer")
|
| 417 |
+
topology = gr.Dropdown(
|
| 418 |
+
["all_to_all", "one_to_one", "random_sparse"],
|
| 419 |
+
value="random_sparse", label="Connection Topology",
|
| 420 |
+
)
|
| 421 |
+
weight = gr.Slider(100, 3000, value=800, step=50,
|
| 422 |
+
label="Synaptic Weight")
|
| 423 |
+
probability = gr.Slider(0.01, 1.0, value=0.3, step=0.01,
|
| 424 |
+
label="Connection Probability",
|
| 425 |
+
info="For random_sparse topology")
|
| 426 |
|
| 427 |
with gr.Row():
|
| 428 |
+
timesteps = gr.Slider(10, 500, value=200, step=10, label="Timesteps")
|
| 429 |
+
input_current = gr.Slider(100, 5000, value=800, step=100,
|
| 430 |
+
label="Input Current")
|
| 431 |
|
| 432 |
+
run_btn = gr.Button("Simulate", variant="primary", size="lg")
|
| 433 |
|
| 434 |
with gr.Row():
|
| 435 |
+
hw_report = gr.Markdown(label="Hardware Report")
|
| 436 |
+
raster_plot = gr.Plot(label="Spike Raster")
|
| 437 |
+
|
| 438 |
+
# Events
|
| 439 |
+
processor.change(get_neuron_models, inputs=[processor], outputs=[neuron_model])
|
| 440 |
|
| 441 |
run_btn.click(
|
| 442 |
+
run_demo,
|
| 443 |
+
inputs=[processor, num_cores, neurons_per_core, neuron_model,
|
| 444 |
+
hidden_size, output_size, topology, weight, probability,
|
| 445 |
+
timesteps, input_current],
|
| 446 |
+
outputs=[hw_report, raster_plot],
|
| 447 |
)
|
| 448 |
|
| 449 |
+
with gr.Tab("Cloud API"):
|
| 450 |
+
gr.Markdown("""
|
| 451 |
+
### Run at scale with the Catalyst Cloud API
|
| 452 |
+
|
| 453 |
+
The simulator above runs locally in the browser for small networks.
|
| 454 |
+
For **full-scale simulations** (131K+ neurons, hardware-accurate timing, on-chip learning),
|
| 455 |
+
use the Catalyst Cloud API.
|
| 456 |
+
|
| 457 |
+
**Install the Python SDK:**
|
| 458 |
+
```bash
|
| 459 |
+
pip install catalyst-cloud
|
| 460 |
+
```
|
| 461 |
+
|
| 462 |
+
**Quick start:**
|
| 463 |
+
```python
|
| 464 |
+
from catalyst_cloud import Client
|
| 465 |
+
|
| 466 |
+
client = Client("cn_live_your_key_here")
|
| 467 |
+
|
| 468 |
+
# Create a network
|
| 469 |
+
net = client.create_network(
|
| 470 |
+
populations=[
|
| 471 |
+
{"label": "input", "size": 700},
|
| 472 |
+
{"label": "hidden", "size": 512, "params": {"threshold": 1000}},
|
| 473 |
+
],
|
| 474 |
+
connections=[
|
| 475 |
+
{"source": "input", "target": "hidden",
|
| 476 |
+
"topology": "random_sparse", "weight": 500, "p": 0.3},
|
| 477 |
+
],
|
| 478 |
+
)
|
| 479 |
+
|
| 480 |
+
# Run simulation
|
| 481 |
+
result = client.simulate(
|
| 482 |
+
network_id=net["network_id"],
|
| 483 |
+
timesteps=250,
|
| 484 |
+
stimuli=[{"population": "input", "current": 5000}],
|
| 485 |
+
)
|
| 486 |
+
|
| 487 |
+
print(f"Total spikes: {result['total_spikes']}")
|
| 488 |
+
```
|
| 489 |
+
|
| 490 |
+
### Links
|
| 491 |
+
|
| 492 |
+
- [Sign up for free](https://catalyst-neuromorphic.com/cloud)
|
| 493 |
+
- [API Documentation](https://catalyst-neuromorphic.com/cloud/docs)
|
| 494 |
+
- [Pricing](https://catalyst-neuromorphic.com/cloud/pricing)
|
| 495 |
+
- [PyPI: catalyst-cloud](https://pypi.org/project/catalyst-cloud/)
|
| 496 |
+
- [GitHub: catalyst-cloud-python](https://github.com/catalyst-neuromorphic/catalyst-cloud-python)
|
| 497 |
+
""")
|
| 498 |
+
|
| 499 |
gr.Markdown("""
|
| 500 |
+
---
|
| 501 |
+
[Website](https://catalyst-neuromorphic.com) |
|
| 502 |
+
[Research](https://catalyst-neuromorphic.com/research) |
|
| 503 |
+
[Cloud API](https://catalyst-neuromorphic.com/cloud) |
|
| 504 |
+
[GitHub](https://github.com/catalyst-neuromorphic)
|
| 505 |
""")
|
| 506 |
|
| 507 |
demo.launch()
|
monitor_space.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Monitor HF Space health for 1 hour after deployment."""
|
| 2 |
+
import requests
|
| 3 |
+
import time
|
| 4 |
+
import datetime
|
| 5 |
+
|
| 6 |
+
SPACE_URL = "https://mrwabbit-catalyst-cloud.hf.space/"
|
| 7 |
+
CHECK_INTERVAL = 120 # seconds (every 2 minutes)
|
| 8 |
+
TOTAL_DURATION = 3600 # 1 hour
|
| 9 |
+
|
| 10 |
+
start = time.time()
|
| 11 |
+
checks = 0
|
| 12 |
+
failures = 0
|
| 13 |
+
|
| 14 |
+
print(f"[{datetime.datetime.now():%H:%M:%S}] Starting HF Space monitoring for 1 hour...")
|
| 15 |
+
print(f"URL: {SPACE_URL}")
|
| 16 |
+
print(f"Check interval: {CHECK_INTERVAL}s")
|
| 17 |
+
print("-" * 60)
|
| 18 |
+
|
| 19 |
+
while time.time() - start < TOTAL_DURATION:
|
| 20 |
+
checks += 1
|
| 21 |
+
elapsed = int(time.time() - start)
|
| 22 |
+
try:
|
| 23 |
+
r = requests.get(SPACE_URL, timeout=30)
|
| 24 |
+
status = r.status_code
|
| 25 |
+
ok = 200 <= status < 400
|
| 26 |
+
if ok:
|
| 27 |
+
print(f"[{datetime.datetime.now():%H:%M:%S}] Check #{checks} ({elapsed//60}m): OK (HTTP {status})")
|
| 28 |
+
else:
|
| 29 |
+
failures += 1
|
| 30 |
+
print(f"[{datetime.datetime.now():%H:%M:%S}] Check #{checks} ({elapsed//60}m): FAIL (HTTP {status})")
|
| 31 |
+
except Exception as e:
|
| 32 |
+
failures += 1
|
| 33 |
+
print(f"[{datetime.datetime.now():%H:%M:%S}] Check #{checks} ({elapsed//60}m): ERROR ({e})")
|
| 34 |
+
|
| 35 |
+
time.sleep(CHECK_INTERVAL)
|
| 36 |
+
|
| 37 |
+
print("-" * 60)
|
| 38 |
+
print(f"Monitoring complete. {checks} checks, {failures} failures.")
|
| 39 |
+
if failures == 0:
|
| 40 |
+
print("RESULT: Space is HEALTHY - no issues detected in 1 hour.")
|
| 41 |
+
else:
|
| 42 |
+
print(f"RESULT: {failures} failures detected - investigate!")
|
requirements.txt
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
-
gradio=
|
| 2 |
-
requests
|
| 3 |
numpy
|
| 4 |
matplotlib
|
|
|
|
| 1 |
+
gradio>=5.0,<6.0
|
|
|
|
| 2 |
numpy
|
| 3 |
matplotlib
|