| repo (string, 20 values) | path (string, 6–94 chars) | lang (string, 5 values) | n_chars (int64, 81–200k) | sha256 (string, 64 chars) | content (string, 81–200k chars) |
|---|---|---|---|---|---|
eren23/synapse | synapse/synapse-esp32/build.rs | rs | 278 | d8340add2983eab773e12a0ede217afe65aca9fb4ed257e925c55b923396eea3 | fn main() {
// Only emit ESP-IDF sysenv when cross-compiling for espidf targets.
// Host builds (cargo test -p synapse-esp32) skip this entirely.
if std::env::var("CARGO_CFG_TARGET_OS").as_deref() == Ok("espidf") {
embuild::espidf::sysenv::output();
}
}
|
eren23/synapse | synapse/synapse-esp32/examples/lewm_rollout_bench.rs | rs | 2,610 | 0f1c2d3103a49da78393e4c2cbae57dfd8d6dd349bc5a107d0a3dedd65029e5a | //! Benchmark: sequential vs fused rollout latency.
use synapse_esp32::model::Esp32LeWM;
fn det(len: usize, seed: u32) -> Vec<f32> {
(0..len)
.map(|i| {
let m = seed.wrapping_mul(1_664_525).wrapping_add((i as u32).wrapping_mul(1_013_904_223));
let centered = (m % 2_001).wrapping_sub... |
eren23/synapse | synapse/synapse-esp32/examples/lewm_probe.rs | rs | 7,950 | 35af2e49b5d50efb9c1b31cf835a92e0e4c7a5644d6775d2c3c9b711e8052782 | use std::path::PathBuf;
use synapse_inference::ops::pure_rust_ops::{gelu, layernorm, matmul_t};
use synapse_inference::quantization::QuantizedQ4LeWM;
fn main() {
let mut model_path: Option<PathBuf> = None;
let args: Vec<String> = std::env::args().collect();
let mut i = 1;
while i < args.len() {
... |
eren23/synapse | synapse/synapse-esp32/examples/lewm_golden.rs | rs | 2,234 | 4b5e45df030c1ab8bb6fe2515b29de27b3d194485ee90caa57bf8bc90a87776a | use std::path::PathBuf;
use synapse_esp32::model::Esp32LeWM;
fn main() {
let mut model_path: Option<PathBuf> = None;
let mut steps: usize = 5;
let args: Vec<String> = std::env::args().collect();
let mut i = 1;
while i < args.len() {
match args[i].as_str() {
"--model" | "-m" =>... |
eren23/synapse | synapse/synapse-esp32/examples/lewm_encode_probe.rs | rs | 4,354 | 2c389456a0bfc5ad3246a4d8ab84af538c9fc2a939dcf21a56fb046d8daa0cc5 | use std::path::PathBuf;
use synapse_esp32::model::Esp32LeWM;
use synapse_inference::ops::patch_embed::patch_embed;
use synapse_inference::ops::pure_rust_ops::layernorm;
use synapse_inference::quantization::FullyQuantizedLeWM;
fn main() {
let mut model_path: Option<PathBuf> = None;
let args: Vec<String> = std... |
eren23/synapse | synapse/synapse-esp32/src/lib.rs | rs | 539 | 1d99d73f5951e8e4c2fa708ca2346ac09712065ab53e86caf57e765cd47a7091 | //! Synapse ESP32-P4: multi-model inference on a $10 RISC-V microcontroller.
//!
//! Supported models:
//! - LeWM (world model): encode, predict, rollout
//! - Mamba Q4 (language model): text generation
//! - RWKV-7 Q4 (language model): text generation
//!
//! Architecture:
//! Phone camera / text -> WiFi HTTP ... |
eren23/synapse | synapse/synapse-esp32/src/server.rs | rs | 11,352 | 8ab471455ccdf45d4dc59017105781f93d827f5ed01e323872028852d5e5a71b | //! HTTP server for inference over WiFi.
//!
//! Endpoints:
//! POST /encode -- image -> latent (LeWM only)
//! POST /predict -- latent + action -> next latent (LeWM only)
//! POST /rollout -- latent + actions -> trajectory (LeWM only)
//! POST /llm/generate -- prompt tokens -> generated tokens (Mamba/R... |
eren23/synapse | synapse/synapse-esp32/src/main.rs | rs | 7,658 | 04f0632cdf7ab85519862b6c6d5626b09a309528b7b43ee36b040237a4776bbf | //! ESP32-P4 multi-model inference server.
//!
//! On real hardware (--features esp32):
//! Connects to WiFi, starts HTTP server, serves inference endpoints.
//!
//! On host (default, --features host-test):
//! Runs a quick smoke test of all model types and server handlers.
#[cfg(all(feature = "host-test", feature... |
eren23/synapse | synapse/synapse-esp32/src/model.rs | rs | 30,094 | 9f80fb4e38ec612923b5d9ea7e8c87c5d06f8b5504f0a048ef4788e516ed465c | //! Model loading and inference for ESP32.
//!
//! Supports multiple model types:
//! - LEWM world model (encode/predict/rollout) – f32 or Q4 quantized
//! - Mamba Q4 language model (text generation)
//! - RWKV-7 Q4 language model (text generation)
use std::time::Instant;
use synapse_inference::models::ssm::mamba::blo... |
eren23/synapse | synapse/crates/synapse-train/src/checkpoint.rs | rs | 5,829 | 9ee879152bb84cbcd45154f33aee04c3ddd2ecf29586662695b906198edd69ae | use std::collections::BTreeMap;
use std::io::{self, Cursor, Read, Write};
const MAGIC: &[u8; 4] = b"SYNP";
const VERSION: u32 = 1;
/// Model state dictionary: parameter name -> (shape, data).
pub type StateDict = BTreeMap<String, (Vec<usize>, Vec<f32>)>;
/// Serialize a state dict to a writer in a binary format.
///... |
eren23/synapse | synapse/crates/synapse-train/src/lib.rs | rs | 450 | 191bedb6a82d22c72757b28723a3b84ceaba3addb2d4d8c2394dd8ede9ac2a0a | pub mod callback;
pub mod checkpoint;
pub mod metrics;
pub mod progress;
pub mod trainer;
pub use callback::{CallbackAction, EarlyStopping, ModelCheckpoint, TrainerCallback};
pub use checkpoint::{load_checkpoint, load_from_bytes, save_checkpoint, save_to_bytes, StateDict};
pub use metrics::{Accuracy, ConfusionMatrix, ... |
eren23/synapse | synapse/crates/synapse-train/src/callback.rs | rs | 5,718 | a90880593b99e307723e2f2aa8eedf516b8dcacf54a98558e0a722611c55e0ec | use crate::trainer::{EpochResult, TrainHistory};
/// Action returned by callbacks to control the training loop.
pub enum CallbackAction {
Continue,
Stop,
}
/// Trait for hooks into the training loop.
pub trait TrainerCallback {
fn on_epoch_start(&mut self, _epoch: usize) {}
fn on_epoch_end(&mut self, ... |
eren23/synapse | synapse/crates/synapse-train/src/metrics.rs | rs | 6,906 | 425fb5254be83725f0770fa806eb96932417dfe10b3f418fa4aae5388494312d | /// Tracks a running mean of scalar values.
pub struct RunningMean {
sum: f64,
count: usize,
}
impl RunningMean {
pub fn new() -> Self {
RunningMean { sum: 0.0, count: 0 }
}
pub fn update(&mut self, value: f32) {
self.sum += value as f64;
self.count += 1;
}
pub fn ... |
eren23/synapse | synapse/crates/synapse-train/src/progress.rs | rs | 3,411 | 5a4b91a51ccd13435e8db1b82fdca8aca10af5a2c15bd4dc395c04e00eecea83 | use std::time::{Duration, Instant};
/// Tracks progress through epochs and batches, providing ETA estimates.
pub struct ProgressTracker {
total_epochs: usize,
current_epoch: usize,
total_batches: usize,
current_batch: usize,
start_time: Instant,
epoch_start: Instant,
}
impl ProgressTracker {
... |
eren23/synapse | synapse/crates/synapse-train/src/trainer.rs | rs | 5,051 | 50a26206f49fbabd1d735d5fddfa31adca1190833af87465205acdf7b4fa2eb4 | use std::time::Instant;
use synapse_autograd::Tensor;
use crate::callback::{CallbackAction, TrainerCallback};
use crate::metrics::RunningMean;
use crate::progress::ProgressTracker;
/// Configuration for the training loop.
pub struct TrainerConfig {
pub epochs: usize,
}
/// Result of a single training epoch.
pub... |
eren23/synapse | synapse/crates/synapse-data/src/text_dataset.rs | rs | 6,656 | 72c2e5fd7c7620ea8b11843b7f34280cf101bf07a4813430ef513d5cd027cde7 | use crate::collate::pad_sequences;
use crate::dataset::Dataset;
use crate::tokenizer::{WhitespaceTokenizer, PAD_ID};
use crate::Tensor;
/// A text classification dataset that loads tab-separated `label\ttext` lines.
///
/// Each sample is stored as `(token_ids, label)` and returned as
/// `[token_ids_tensor, label_ten... |
eren23/synapse | synapse/crates/synapse-data/src/collate.rs | rs | 5,132 | 153fdfe5f7a194fe2b6a80cf028962dde6807ef733e4140e4b6ff40999001da4 | use crate::Tensor;
/// Default collate function: given a batch of samples (each a `Vec<Tensor>`),
/// stack corresponding tensors along a new leading batch dimension.
///
/// For N samples each containing K tensors, produces K tensors each with
/// a new leading dimension of size N.
///
/// Example: 4 samples of [feat... |
eren23/synapse | synapse/crates/synapse-data/src/lib.rs | rs | 5,660 | 10706b57178ed6f5aa4f0960111969369563c657d631877786bfef75380e0258 | pub mod collate;
pub mod dataloader;
pub mod dataset;
pub mod sampler;
pub mod text_dataset;
pub mod tokenizer;
pub mod transform;
use std::fmt;
/// A simple N-dimensional tensor backed by contiguous f32 data in row-major order.
#[derive(Clone)]
pub struct Tensor {
data: Vec<f32>,
shape: Vec<usize>,
}
impl f... |
eren23/synapse | synapse/crates/synapse-data/src/dataloader.rs | rs | 13,867 | 862066b4523f9fc61074b0efd80fe701c19b4ed69016c7c31959523f5625200d | use std::sync::mpsc::{sync_channel, Receiver};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use crate::collate::default_collate;
use crate::dataset::Dataset;
use crate::sampler::{RandomSampler, Sampler, SequentialSampler};
use crate::Tensor;
/// A configurable data loader that batches dataset samples with... |
eren23/synapse | synapse/crates/synapse-data/src/sampler.rs | rs | 5,793 | 2343c247ae95734aaf2f52e3d4ee78303c50b054da3f03de9a72529c8b04c0fb | use rand::rngs::StdRng;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
/// A sampler produces a sequence of dataset indices.
pub trait Sampler: Send {
/// Reset the sampler for a new epoch, returning an iterator over indices.
fn indices(&mut self) -> Vec<usize>;
fn len(&self) -> usize;
}
/// Yi... |
eren23/synapse | synapse/crates/synapse-data/src/transform.rs | rs | 4,863 | e6c4fc5d2f69652d42bd657a6a4786b5852173b0b19ed3b4258a1e3a4422550f | use crate::Tensor;
use rand::Rng;
/// A transform modifies tensor data.
pub trait Transform: Send + Sync {
fn apply(&self, tensor: &Tensor) -> Tensor;
}
/// Normalize: `(x - mean) / std` element-wise.
pub struct Normalize {
pub mean: f32,
pub std: f32,
}
impl Normalize {
pub fn new(mean: f32, std: f3... |
eren23/synapse | synapse/crates/synapse-data/src/tokenizer.rs | rs | 10,711 | b0fd225d387793da21b62164daf1a1cecbd2a0e442e8cfad498542a837cbfb7e | use std::collections::HashMap;
/// Special token IDs reserved at the start of every vocabulary.
pub const PAD_ID: usize = 0;
pub const UNK_ID: usize = 1;
pub const BOS_ID: usize = 2;
pub const EOS_ID: usize = 3;
const PAD_TOKEN: &str = "<PAD>";
const UNK_TOKEN: &str = "<UNK>";
const BOS_TOKEN: &str = "<BOS>";
const E... |
eren23/synapse | synapse/crates/synapse-data/src/dataset.rs | rs | 2,926 | 950bfc56071649f4c4908bbb11484fb41587a789dbea61e844a6e8b64b2e8731 | use crate::Tensor;
/// A dataset provides indexed access to samples.
/// Each sample is a `Vec<Tensor>` (e.g., [features, labels]).
pub trait Dataset: Send + Sync {
fn len(&self) -> usize;
fn get(&self, index: usize) -> Vec<Tensor>;
fn is_empty(&self) -> bool {
self.len() == 0
}
}
/// A datase... |
eren23/synapse | synapse/crates/synapse-code-tokenizer/tests/cross_validation.rs | rs | 3,730 | 6088795ba95615e756cfa033a387c5be37c8b2752f8bcdeeb8e970e2d2794b2f | //! Byte-for-byte validation against the Python FNV-1a reference
//! (scripts/ast_tokenizer_fnv.py). If Python-produced tokens match exactly,
//! the Rust port is drop-in equivalent.
use synapse_code_tokenizer::tokenize;
/// Each case: (rust_source, expected_tokens from Python ast_tokenizer_fnv.py)
const CASES: &[(&s... |
eren23/synapse | synapse/crates/synapse-code-tokenizer/src/lib.rs | rs | 30,968 | 3879bd33c71a8f7a9a01bdd7d4f5c4644192c3ad0bc214dde374efd79a8b9d2d | //! Python AST tokenizer for Code WM – matches `ast_tokenizer.ast_tokenize()`
//! from the training tap, implemented in Rust via rustpython-parser.
//!
//! This removes the Python runtime dependency from Code WM inference and
//! enables tokenization on any target Rust compiles to (WASM, ESP32, native).
//!
//! ## Voca... |
eren23/synapse | synapse/crates/synapse-inference/build.rs | rs | 364 | 32f8e9ffe0c2d45a78de2f242bb4ca538df950123401deb8719ddb471b0b9dbb | fn main() {
// Link Apple Accelerate framework for cblas_sgemm – only when targeting macOS.
// Uses TARGET env var (not cfg!) because build.rs runs on the host.
let target = std::env::var("TARGET").unwrap_or_default();
if target.contains("apple") && !target.contains("wasm") {
println!("cargo:rus... |
eren23/synapse | synapse/crates/synapse-inference/tests/cdt_q4_parity.rs | rs | 4,003 | 56962672f9fb5807fcb9d06d9eb96440ccc284e4c2f8957d7b31a69f4ad0e5fd | //! Sanity test for in-memory Q4_0 quantization of the CodeDeltaTok head.
//!
//! Loads the trained fp32 head, quantizes it in memory, compares delta and
//! recon against the fp32 reference computed by `cdt_trained_parity.rs`'s
//! fixture. Q4 is expected to drift – we assert cosine ≥ 0.98 and
//! ‖Δ‖ / ‖ref‖ < 0.2 on... |
eren23/synapse | synapse/crates/synapse-inference/tests/unixcoder_parity.rs | rs | 6,394 | 9f2eaf38c80e0e2840e3df3e3aeec120c61f550474d2398412a4efe3bf9ec065 | //! HuggingFace-parity test for the UniXcoder (RoBERTa) encoder.
//!
//! Loads `microsoft/unixcoder-base` from the HF cache (or from a directory
//! pointed at by the `UNIXCODER_BASE_DIR` env var) and runs the same
//! 16 reference code snippets that
//! `scripts/export_unixcoder_reference.py export` pushed into
//! `t... |
eren23/synapse | synapse/crates/synapse-inference/tests/cdt_trained_parity.rs | rs | 4,116 | dd138d8972bd4160d2808c4d6aae8ecbbba81c0463e989693650c8940cf1f5a6 | //! Parity test for a trained CodeDeltaTok checkpoint.
//!
//! Complements `cdt_parity.rs` (which uses a random-init model): loads the
//! converted `cdt_paper.safetensors` built with `convert-cdt` and asserts
//! that Synapse's forward pass reproduces the PyTorch model's delta + recon
//! on the 8 reference (h_b, h_a)... |
eren23/synapse | synapse/crates/synapse-inference/tests/cdt_parity.rs | rs | 4,272 | 1438b07aa763b6e9bf4190ea9b012dc698fd849cd43aca445aa925f5d5e55a3f | //! Parity test for the CodeDeltaTok head.
//!
//! No trained weights are needed: `scripts/export_unixcoder_reference.py
//! random-cdt` dumps a fresh random-initialized PyTorch CDT model plus its
//! outputs on a handful of synthetic (h_b, h_a) pairs. We load the same
//! state dict into the Rust port and assert forwa... |
eren23/synapse | synapse/crates/synapse-inference/tests/unixcoder_q4_parity.rs | rs | 4,594 | 92ed886b54c59eaafc2630def593b845dd8d4683399d1dfdef3165536c7b773e | //! Q4_0-quantized UniXcoder vs fp32 reference.
//!
//! Loads `microsoft/unixcoder-base`, builds an fp32 [`RoBERTaEncoder`],
//! quantizes to a [`Q4RoBERTaEncoder`], and asserts that the CLS features
//! on the parity fixture stay within cosine ≥ 0.98 / rel-L2 < 0.20 of the
//! stored HF reference. Skips if either HF c... |
eren23/synapse | synapse/crates/synapse-inference/tests/code_wm_cross_backend.rs | rs | 3,902 | a95c8654c446ea84b37ee03823f229fbe6dc52d47e69c0381a8957b30286eaa6 | //! Verify Code WM produces identical outputs with pure-rust and zig-ffi backends.
//! The pure-rust path is what WASM/ESP32 use, so matching zig-ffi proves
//! cross-platform zero drift.
//!
//! Run both:
//! cargo test -p synapse-inference --test code_wm_cross_backend # zig-ffi (default)
//! cargo test -p synaps... |
eren23/synapse | synapse/crates/synapse-inference/examples/lfm25_inference.rs | rs | 6,164 | e914aa3063516a0ab536e0e3efd3360e8c80ef0bdaca17c94891a860b39bb9f7 | //! Load LFM2.5-350M from GGUF and benchmark CPU vs Metal GPU inference.
//!
//! Usage: cargo run --release --features metal -p synapse-inference --example lfm25_inference -- <path-to-gguf>
use std::path::Path;
use synapse_inference::models::ssm::hybrid::config::HybridConfig;
use synapse_inference::models::ssm::hybri... |
eren23/synapse | synapse/crates/synapse-inference/examples/unixcoder_embed.rs | rs | 7,832 | 05be165568b6d618d2698800efe7324ab548e944dbed698c661355b2026f7ac6 | //! Encode code snippets with `microsoft/unixcoder-base` in pure Rust.
//!
//! Reproduces the feature-extraction call used by the codewm3 paper's
//! tap (`collectors/precompute_backbone_features.py`): CLS pool of the
//! frozen UniXcoder encoder, 768-dim f32.
//!
//! Usage:
//!
//! ```bash
//! # Point at a local UniXc... |
eren23/synapse | synapse/crates/synapse-inference/examples/lfm25_chat.rs | rs | 8,080 | 6b492f2855ac1c12dae827005ece5d57caa22f0ef2bd97bc79d1ea1aaad6889a | //! Interactive chat with LFM2.5-350M.
//!
//! Usage:
//! cargo run --release -p synapse-inference --example lfm25_chat -- <model-dir>
//! cargo run --release --features metal -p synapse-inference --example lfm25_chat -- <model-dir>
//!
//! Example:
//! cargo run --release --features metal -p synapse-inference --... |
eren23/synapse | synapse/crates/synapse-inference/examples/unixcoder_bench.rs | rs | 7,305 | 1606eb440688770d2e748dfeec9e10242414b8373679f1cbc6d72dac87ede9ab | //! Micro-benchmark: UniXcoder CLS latency + (optional) CDT encode/decode.
//!
//! Measures steady-state per-snippet latency on a single CPU thread – the
//! quantity the paper reports as `encoding latency` in
//! `sections/experiments.tex`.
//!
//! Usage:
//! ```bash
//! cargo run --release -p synapse-inference --exam... |
eren23/synapse | synapse/crates/synapse-inference/src/chat_template.rs | rs | 15,269 | 486db7cd3ecc6320ba1235dc39405de59733af35f3907da7c9fd4b100dac3f66 | //! Chat template support for HuggingFace-style `tokenizer_config.json`.
//!
//! Many HuggingFace models ship a Jinja2 `chat_template` field that describes
//! how to format a list of `{role, content}` messages into a single prompt
//! string. This module parses that field and renders it via `minijinja`.
use std::pat... |
eren23/synapse | synapse/crates/synapse-inference/src/lib.rs | rs | 2,061 | 2ae0e15ebff3072c8af1369b22f4a43a1c7f6e179f913c34665aa7f4182d52b9 | pub mod capabilities;
#[cfg(not(target_os = "espidf"))]
pub mod chat_template;
pub mod config;
#[cfg(feature = "diffusion")]
pub mod diffusion;
#[cfg(not(target_os = "espidf"))]
pub mod engine;
pub mod generation;
pub mod kv_cache;
#[cfg(feature = "metal")]
pub mod metal;
pub mod models;
#[cfg(not(target_os = "espidf")... |
eren23/synapse | synapse/crates/synapse-inference/src/lib_tests.rs | rs | 30,229 | 9c539941b4043f3db5060aec8cbf013b1e3f86cbd93842abbfc63e100373b18d | mod config_tests {
use crate::config::*;
use crate::registry;
const QWEN3_JSON: &str = include_str!("../../../configs/qwen3_0.6b.json");
const LLAMA_JSON: &str = include_str!("../../../configs/llama3.2_1b.json");
const MISTRAL_JSON: &str = include_str!("../../../configs/mistral_7b.json");
// ─... |
eren23/synapse | synapse/crates/synapse-inference/src/model_adapter.rs | rs | 6,073 | 4b8315c9f34c498c0d4c27c56170cbe97045dfdacc144bb6cf0aeeaa435efd77 | use std::error::Error;
use std::fmt;
use crate::chat_template::{ChatMessage, ChatTemplate, ChatTemplateOptions};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ThinkingMode {
#[default]
Auto,
Disabled,
}
impl ThinkingMode {
pub fn parse_cli(value: &str) -> Result<Self, String> {
... |
eren23/synapse | synapse/crates/synapse-inference/src/capabilities.rs | rs | 8,967 | 686c8b5e70bbae5f0b7812b80942f3c7de16162b90fdf953485fd8f3ff0b13de | use serde::{Deserialize, Serialize};
#[cfg(feature = "zig-ffi")]
use synapse_core::{capability_summary, CapabilityRuntimeProfile, CapabilitySupportLevel};
const STATUS_MANIFEST_JSON: &str = include_str!("../../../status/public_status.json");
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde... |
eren23/synapse | synapse/crates/synapse-inference/src/weight_loading/converter.rs | rs | 4,287 | d37dba49b03d596f0b785540c1646244511152bddc6be9de96028f87a4500091 | /// Convert f16 (IEEE 754 half-precision) bit patterns to f32 values.
pub fn f16_to_f32(data: &[u16]) -> Vec<f32> {
data.iter().map(|&bits| f16_bits_to_f32(bits)).collect()
}
/// Convert bf16 (bfloat16) bit patterns to f32 values.
pub fn bf16_to_f32(data: &[u16]) -> Vec<f32> {
data.iter().map(|&bits| bf16_bits... |
eren23/synapse | synapse/crates/synapse-inference/src/weight_loading/gguf.rs | rs | 29,002 | a15bbb5faf4352eb00358498d8eb44f7a8ceb5e14ed84fbd2adcd0d8e2a53a23 | use std::collections::HashMap;
use std::fs::File;
use std::path::Path;
use super::converter::f16_to_f32;
use super::{AlignedBuffer, RawTensor, WeightError};
use memmap2::Mmap;
const GGUF_MAGIC: u32 = 0x46554747; // "GGUF" little-endian
const GGUF_DEFAULT_ALIGNMENT: usize = 32;
// GGML tensor types
const GGML_TYPE_F3... |
eren23/synapse | synapse/crates/synapse-inference/src/weight_loading/mod.rs | rs | 7,854 | c6c9e39bb7c576489058d0d5f0f3729ffd5d1dd2ed9663b5a8519ceae5aaeeba | pub mod converter;
#[cfg(not(target_os = "espidf"))]
pub mod gguf;
#[cfg(not(target_os = "espidf"))]
pub mod safetensors;
pub mod weight_map;
pub use converter::{bf16_to_f32, f16_to_f32, transpose};
#[cfg(not(target_os = "espidf"))]
pub use gguf::{load_gguf, load_gguf_with_raw_q4, parse_gguf, RawQ4Tensor};
#[cfg(not(t... |
eren23/synapse | synapse/crates/synapse-inference/src/weight_loading/safetensors.rs | rs | 23,641 | 9f6f1f39003dc030ce5fb8fdcbdd77bc531ee48730a9bf2b1aecba02be7351d4 | use std::collections::HashMap;
use std::fs::File;
use std::path::Path;
use super::converter::{bf16_bits_to_f32, f16_bits_to_f32};
use super::{AlignedBuffer, RawTensor, WeightError};
use memmap2::Mmap;
use serde::Deserialize;
/// Represents the `model.safetensors.index.json` file used by sharded checkpoints.
#[derive(... |
eren23/synapse | synapse/crates/synapse-inference/src/weight_loading/weight_map.rs | rs | 61,602 | 22da17d221ac9f2745949c948e7908f1e2d063b2c0102756b0a09b1fceaab120 | use std::collections::{HashMap, HashSet};
use super::WeightError;
/// A rule mapping a source name pattern to a target name pattern.
///
/// Supports `{i}` as a placeholder for layer indices.
#[derive(Debug, Clone)]
pub struct MappingRule {
pub source: String,
pub target: String,
}
fn rule(source: &str, targ... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/code_wm_forward.rs | rs | 9,189 | cdf7ad050a5bee1217610fd87e1c69d95eabad20687a40c6f053448b99e1fd28 | //! GPU-accelerated Code WM encoder – single Metal command buffer, zero CPU-GPU sync.
//!
//! All encoder_loops encoded into ONE command buffer. GPU processes them back-to-back.
//! For 128-d models, CPU BLAS outperforms GPU due to dispatch overhead. This path
//! becomes competitive at model_dim >= 256 where GPU paral... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/hybrid_gpu_buffers.rs | rs | 23,669 | 25518949148d35f9d0a532267f11f81a4fd94aac1b8e2ae3280b618dbd1ad96d | //! GPU-resident model buffers for hybrid LIV Conv + GQA decode.
//!
//! Pre-uploads all weights to Metal shared buffers at init, allocates persistent
//! scratch buffers, conv state buffers, and a GPU-side KV cache (for GQA layers
//! only), so that the decode loop can encode ALL layers into a single command
//! buffe... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/dispatch.rs | rs | 15,090 | 356350e6084271c8cc0a30150c256e84fb600eebbb3737b31d2e84e60aa8939d | use super::buffer::BufferPool;
use super::device::MetalBackend;
use std::cell::RefCell;
/// GPU dispatch threshold: operations with M*N*K > this use Metal GPU.
/// Empirically tuned for Apple M-series: GPU kernel launch overhead
/// dominates for smaller operations.
const GPU_DISPATCH_THRESHOLD: usize = 1_000_000;
///... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/device.rs | rs | 6,741 | dd2825140eb289354ac740da85e44973ef54360d1d97627aa9e95b8d1cb711dd | use ::metal::{CommandQueue, CompileOptions, ComputePipelineState, Device, Library};
use std::collections::HashMap;
/// Errors from Metal backend operations.
#[derive(Debug)]
pub enum MetalError {
/// No Metal-compatible GPU found.
NoDevice,
/// Shader compilation failed.
ShaderCompilation(String),
... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/hybrid_gpu_forward.rs | rs | 28,858 | 5b16c017232385e6d3a53c4087f603370456936a1d2cd02a969ff88e5eb4787c | //! GPU-native forward pass for hybrid LIV Conv + GQA decoder layers.
//!
//! Encodes ALL layers into a single Metal command buffer with zero CPU-GPU
//! round trips. Each layer dispatches either the LIV Conv path or the GQA path
//! based on `layer_kinds[i]`.
use super::device::MetalBackend;
use super::hybrid_gpu_buf... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/lewm_forward.rs | rs | 33,319 | 8809f0136d7f68a2d97512ca754c0411d54486efc00ae25f1d88060c43ffb973 | //! GPU-accelerated LEWM predict_next using a single Metal command buffer.
//!
//! Encodes all 6 adaLN predictor layers into one command buffer so the GPU
//! processes them back-to-back with zero CPU-GPU synchronization between layers.
//! This targets sub-0.76ms latency (PyTorch MPS baseline) for seq_len=3, hidden=19... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/mod.rs | rs | 461 | 8a62c67da60701754b2f5138617c839983d352d9e13dc6a1edcfd50373a9ca0f | mod buffer;
mod device;
pub mod dispatch;
pub mod gpu_buffers;
pub mod gpu_forward;
pub mod hybrid_gpu_buffers;
pub mod hybrid_gpu_forward;
pub mod lewm_forward;
pub mod code_wm_forward;
pub use buffer::BufferPool;
pub use device::{MetalBackend, MetalError};
pub use dispatch::ComputeBackend;
pub use gpu_buffers::Metal... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/tests.rs | rs | 66,587 | 5935b36fe1aa749e63416b95e796eca43f9b8e5d47e3ea41b9ac077a9860c879 | use super::*;
use crate::metal::device::KERNEL_NAMES;
use crate::models::ssm::hybrid::layer::conv1d_step_single;
/// Helper: get MetalBackend or skip test on non-Apple hardware.
fn get_backend() -> Option<MetalBackend> {
match MetalBackend::new() {
Ok(b) => Some(b),
Err(MetalError::NoDevice) => {
... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/gpu_forward.rs | rs | 36,479 | 80142330c0d236a8df439fba6de8fa46457aec9bd35d716caa8eac7f870c08e6 | //! GPU-native forward pass for decoder layers.
//!
//! Phase 1: Keeps Q/K/V projection in one command buffer (CMD1), and chains
//! the entire FFN sub-layer (O proj → residual → norm → gate/up → swiglu → down → residual)
//! into a second command buffer (CMD2). This halves commit+wait from 4 to 2 per layer.
//!
//! At... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/buffer.rs | rs | 4,221 | 3dc750e03dc88cfee3cc6932264ea499d17208ada07c5143a77bf02c5c911380 | use ::metal::{Buffer, Device, MTLResourceOptions};
use std::collections::HashMap;
/// GPU buffer pool that reuses Metal buffers by size to avoid repeated allocations.
///
/// Tracks allocated vs reused buffers and organizes free buffers by byte size
/// for O(1) lookup on reuse.
pub struct BufferPool {
device: Dev... |
eren23/synapse | synapse/crates/synapse-inference/src/metal/gpu_buffers.rs | rs | 16,126 | 106d9305c01a7dcf77ce4ca10d9d1b5ca8f9018018399c9777a06c3c29ae8392 | //! GPU-resident model buffers for Phase 3 all-layers-in-one-command-buffer decode.
//!
//! Pre-uploads all weights to Metal shared buffers at init, allocates persistent
//! scratch buffers and a GPU-side KV cache, so that the decode loop can encode
//! ALL layers into a single command buffer with zero CPU-GPU round tr... |
eren23/synapse | synapse/crates/synapse-inference/src/config/quantization.rs | rs | 309 | 7d07629740c68da6eaad4ada9901fd2f75e92555a7e4f954bb384f0bda13feac | use serde::{Deserialize, Serialize};
/// Configuration for weight quantization.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type")]
pub enum QuantConfig {
F32,
F16,
INT8 {
calibration_method: String,
calibration_samples: usize,
},
Ternary,
}
|
eren23/synapse | synapse/crates/synapse-inference/src/config/norm.rs | rs | 411 | 9655781bc6cf1b458286cac97a6b3e0f840f70a4113ab89ef539514a65a5d29f | use serde::{Deserialize, Serialize};
/// Configuration for the normalization layer variant.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type")]
pub enum NormConfig {
RMSNorm { eps: f64 },
LayerNorm { eps: f64 },
}
impl NormConfig {
pub fn eps(&self) -> f64 {
match sel... |
eren23/synapse | synapse/crates/synapse-inference/src/config/model_config.rs | rs | 11,107 | ca91116cc46b8fab229ce00cbbffa48c3754d3434be028f93b6c20050335e6b8 | use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;
use super::attention::AttentionConfig;
use super::ffn::FFNConfig;
use super::norm::NormConfig;
use super::position::{self, PositionConfig};
use super::quantization::QuantConfig;
/// Top-level model configuration, deserializable from JSON.
#[derive... |
eren23/synapse | synapse/crates/synapse-inference/src/config/ffn.rs | rs | 588 | 0ad8b65d8f05a64bafef33cd7a75742b764d69e2b8a204e26289d51b074c4487 | use serde::{Deserialize, Serialize};
/// Configuration for the feed-forward network variant.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type")]
pub enum FFNConfig {
SwiGLU { intermediate_size: usize },
GELU { intermediate_size: usize },
GeGLU { intermediate_size: usize },
}
... |
eren23/synapse | synapse/crates/synapse-inference/src/config/attention.rs | rs | 2,007 | b4aea501e526281e9f90a49e0bca087511ee00d3726c97ec1566beef11260fe2 | use serde::{Deserialize, Serialize};
/// Configuration for the attention mechanism variant.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type")]
pub enum AttentionConfig {
/// Grouped-Query Attention: fewer KV heads than query heads.
GQA {
num_heads: usize,
num_kv_h... |
eren23/synapse | synapse/crates/synapse-inference/src/config/mod.rs | rs | 321 | b37b67e8eb238550bce9c66275764732fd9d0529f4ca90484983937b0602aebf | pub mod attention;
pub mod ffn;
pub mod model_config;
pub mod norm;
pub mod position;
pub mod quantization;
pub use attention::AttentionConfig;
pub use ffn::FFNConfig;
pub use model_config::{ArchitectureConfig, ModelConfig};
pub use norm::NormConfig;
pub use position::PositionConfig;
pub use quantization::QuantConfig;... |
eren23/synapse | synapse/crates/synapse-inference/src/config/position.rs | rs | 1,870 | 0ba806a820334df445aab2360bb9f9120fd27842f0a6103df2d622da1d496aff | use serde::{Deserialize, Serialize};
/// How RoPE pairs dimensions for rotation.
///
//! - **RotateHalf**: pairs `(i, i + d/2)` – used by Qwen3, LLaMA 3, Mistral.
//! - **Interleaved**: pairs `(2i, 2i + 1)` – used by GPT-NeoX, some older models.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub ... |
eren23/synapse | synapse/crates/synapse-inference/src/tokenizer/mod.rs | rs | 19,576 | 535cf5ae2bd77c2e6a7faa44bff732fa76f4b881a58ed20479b012525ab8a2e5 | use serde_json::Value;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::Path;
#[derive(Debug)]
pub enum TokenizerError {
Io(std::io::Error),
Json(serde_json::Error),
Invalid(String),
}
impl std::fmt::Display for TokenizerError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> st... |
eren23/synapse | synapse/crates/synapse-inference/src/models/mod.rs | rs | 1,505 | 15eb5885c7f66d246916109869bce7d5df59338413ccdb0c0222d1450537e886 | pub mod lm;
pub mod ssm;
pub mod text_encoder;
pub mod traits;
pub mod vision;
// Flatten LM types into the `models::` namespace (public API surface).
pub use lm::{CausalLM, DecoderLayer, LoadResult, ModelBuilder, ModelOutput};
// Re-export traits at models:: level
pub use traits::{Model, ModelState};
// Re-export t... |
eren23/synapse | synapse/crates/synapse-inference/src/models/traits.rs | rs | 3,507 | e8a32d5a743e3900eae31446de92cfe57fda361e360664399b7becf2db23b0c5 | use super::lm::causal_lm::ModelOutput;
use crate::config::ModelConfig;
use crate::kv_cache::KVCache;
/// Unified model state that works across all architecture families.
///
/// Transformers use KV cache. SSMs use recurrent state. Diffusion is stateless.
pub enum ModelState {
/// KV cache for transformer models (a... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/deltanet_state.rs | rs | 3,950 | db75c7bf0ae3eea2e518e54afb20ff5957ce154ca33538d3ee3d475adcc1ea7c | /// State for a single DeltaNet (linear attention) layer.
///
/// The memory state S has shape [num_heads, head_dim, head_dim].
/// Unlike KV cache, this does NOT grow with sequence length.
pub struct DeltaNetLayerState {
/// Memory state: [num_heads * head_dim * head_dim]
pub memory: Vec<f32>,
/// Conv1d r... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/mod.rs | rs | 873 | 8238ed34c89402d21b9ffd20e5a45d5501895ec03ba07accf333a291bd7f7e32 | //! SSM (State Space Model) and RNN-based architectures.
pub mod deltanet;
pub mod deltanet_state;
pub mod hybrid;
pub mod mamba;
pub mod rwkv;
pub use deltanet::{deltanet_seq, deltanet_step, l2_normalize};
pub use deltanet_state::DeltaNetLayerState;
pub use hybrid::config::HybridConfig;
pub use hybrid::layer::{Delta... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/deltanet.rs | rs | 10,081 | 04bdb64c56d81bb93f53a28d4dfbccce9e99f9db33f2c8d384db394c5527c06d | //! Gated DeltaNet kernel for linear-attention hybrid models (e.g. Qwen3.5).
//!
//! Implements the Gated DeltaNet recurrence:
//!
//! ```text
//! S_t = alpha_t * S_{t-1} + beta_t * outer(v_t, k_t)
//! o_t = S_t @ q_t
//! ```
//!
//! where `alpha` is a per-step decay gate and `beta` is a per-step write gate,
//! and `q... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/mamba/selective_scan.rs | rs | 8,697 | d40d81ac8a8d5b35df65dafa42aebf18d8379125b87df179c6c3b083eb0d13c7 | /// Softplus activation used to ensure `delta > 0`.
///
/// `softplus(x) = log(1 + exp(x))`
pub fn compute_delta(raw_dt: &[f32]) -> Vec<f32> {
raw_dt
.iter()
.map(|&x| {
if x >= 20.0 {
x // numerical stability: log(1+exp(x)) ≈ x for large x
} else {
... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/mamba/config.rs | rs | 2,173 | 677fb2feaa827ef591775bbf75079c783bee155d9d8d9d4697aea84d99b99774 | /// Configuration for a Mamba SSM model.
#[derive(Debug, Clone)]
pub struct MambaConfig {
pub d_model: usize,
pub d_state: usize,
pub d_conv: usize,
pub expand: usize,
pub dt_rank: usize,
pub num_layers: usize,
pub vocab_size: usize,
pub norm_eps: f64,
}
impl MambaConfig {
pub fn d_... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/mamba/mod.rs | rs | 85 | b22bccaa47c4291aa7f800b6743e21e0a362458043c24de3d170f342dfbc8166 | pub mod block;
pub mod config;
pub mod model;
pub mod selective_scan;
pub mod state;
|
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/mamba/state.rs | rs | 3,644 | af347d2d8d4316368321490196d347d48ea9772af1bc6ff292e8d357cba29824 | /// Per-layer recurrent state for a single Mamba block.
///
/// Holds the SSM hidden state `h` of shape `[d_inner, d_state]` and the
/// rolling convolution buffer of shape `[d_inner, d_conv]`.
pub struct MambaLayerState {
/// SSM hidden state, layout: `[d_inner * d_state]` (row-major).
pub ssm_state: Vec<f32>,... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/mamba/block.rs | rs | 12,253 | 28a3f65f9661e5ade71d5e325278f0a8887842ee0ec7998e91ed30dd540d5d89 | //! MambaBlock: a single Mamba layer.
//!
//! Implements the full Mamba block forward pass:
//! RMSNorm → in_proj (split x, z) → Conv1d → SiLU → SSM → Gate (silu(z)*y) → out_proj → residual.
use crate::ops::activation::{silu, batched_softplus};
use crate::ops::matmul::matmul_t;
use crate::ops::pure_rust_ops::rmsnorm;
... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/mamba/model.rs | rs | 20,762 | bef79895fe56621e2a7eadcb656a65d998dcf4ebf403c2d63e36050eb33ddf5a | //! MambaModel: a full Mamba language model implementing the `Model` trait.
//!
//! Uses internal `RecurrentState` (not KV cache). The `Model` trait takes
//! `&mut ModelState` params but MambaModel ignores them (passes `Recurrent`
//! variant) and uses its own state via `RefCell<RecurrentState>`.
use std::cell::RefCe... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/rwkv/wkv.rs | rs | 9,588 | 76a20df1951db7ad0ec723bef18a3b3053ba2560ed42e1c0b562d97afbebd4f0 | //! WKV7 kernel for RWKV-7 "Goose" inference.
//!
//! Implements the RWKV-7 linear attention recurrence where the state matrix
//! accumulates outer products of keys and values with learnable decay and
//! a feedback term. This is NOT standard softmax attention.
//!
//! Recurrence per head per token:
//! vk = outer(k... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/rwkv/config.rs | rs | 3,335 | cad485fb4143c6f5749585a48f9ab0073298beb33ac37df7251752915466a9ec | /// Configuration for an RWKV-7 "Goose" model.
#[derive(Debug, Clone)]
pub struct RwkvConfig {
pub hidden_size: usize, // d_model (e.g., 768 for 0.1B, 1024 for 0.4B)
pub num_heads: usize, // number of attention heads
pub head_size: usize, // per-head dimension (typically 64)
pub nu... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/rwkv/state.rs | rs | 4,677 | a56a412f50f8bae454a72130b661788c9891860abfe1d1ee970ab522bc09aacd | /// Per-layer recurrent state for a single RWKV block.
///
/// Holds the WKV accumulator state of shape `[num_heads, head_size, head_size]`
/// and the previous token vectors for token shift in time mixing and channel mixing.
pub struct RwkvLayerState {
/// WKV numerator accumulator: `[num_heads * head_size * head_... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/rwkv/block.rs | rs | 19,180 | f9a88599d7cdb49bbe9cbb0685c7f4ec702f3fc88b95171ee53f2e13659f56db | //! RwkvBlock: a single RWKV-7 "Goose" layer.
//!
//! Implements the full RWKV-7 block with:
//! - 6 per-component token shift lerps (x_r, x_k, x_v, x_w, x_a, x_g)
//! - Low-rank decay (w0 + tanh(xw @ w1) @ w2), alpha (a0 + (xa @ a1) @ a2), gate (g1, g2)
//! - Key modulation (k_k, k_a) and R-K coupling (r_k)
//! - WKV7... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/rwkv/model.rs | rs | 24,147 | 560ec94129985869c8efedbe49e997bed4c5db239a8136853500b0b7a71c094b | //! RwkvModel: a full RWKV-7 language model implementing the `Model` trait.
//!
//! Uses internal `RwkvState` (not KV cache). The `Model` trait takes
//! `&mut ModelState` params but RwkvModel ignores them (passes `Recurrent`
//! variant) and uses its own state via `RefCell<RwkvState>`.
use std::cell::RefCell;
use std... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/hybrid/config.rs | rs | 9,600 | 1363fee05676edc7b4e0ce55eb3bd69b39702bd9b2cbb1c98558248ca792204a | //! Configuration for hybrid models that combine different layer types
//! (DeltaNet, GQA, LIV Conv) in an interleaved pattern.
//!
//! Supports two layer-pattern modes:
//! 1. **Interval** (Qwen3.5): `full_attention_interval = 4` → `[DN,DN,DN,GQA] x N`
//! 2. **Explicit** (LFM2.5): `layer_types = Some(vec![Conv,Conv,G... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/hybrid/layer.rs | rs | 50,256 | 96f08c618bd77a71ff1ee0e8f90380028b960a26795a8008b3641d9332e31fea | //! Decoder layers for hybrid models: DeltaNet, GQA, and LIV Conv.
//!
//! Each layer type is self-contained with its own weights and forward methods,
//! avoiding coupling with the transformer model builder infrastructure.
//!
//! Shared helpers (`conv1d_step_single`, `swiglu_ffn_forward`) are free
//! functions reuse... |
eren23/synapse | synapse/crates/synapse-inference/src/models/ssm/hybrid/model.rs | rs | 47,457 | a94c4cbf40725b79ba0cfaf65c5971726938b1d6602984fb0139b37f0f05d61e | //! HybridModel: a generalized hybrid model implementing the `Model` trait.
//!
//! Supports arbitrary mixes of DeltaNet, GQA, and LIV Conv layers in any
//! interleaving pattern. Each layer type manages its own state variant.
use std::cell::RefCell;
use crate::config::ModelConfig;
use crate::models::lm::causal_lm::M... |
eren23/synapse | synapse/crates/synapse-inference/src/models/lm/causal_lm.rs | rs | 30,108 | a79227f787045563c9adc4eb3a0211bc5115671e463163ab104ea4a51987e7fa | use std::collections::{HashMap, HashSet};
use crate::config::ModelConfig;
use crate::kv_cache::KVCache;
#[cfg(feature = "metal")]
use crate::models::lm::decoder_layer::apply_norm_dispatch;
use crate::ops::matmul::matmul_t;
use crate::ops::norm::apply_norm;
use crate::registry::NormVariant;
use crate::weight_loading::{... |
eren23/synapse | synapse/crates/synapse-inference/src/models/lm/mod.rs | rs | 223 | 8b796bdc6a0975c5de5472b1b534516f7cd544c68c4471fea7f0a84407091fdb | pub mod builder;
pub mod causal_lm;
pub mod decoder_layer;
pub use builder::ModelBuilder;
pub use causal_lm::{CausalLM, LoadResult, ModelOutput};
pub use decoder_layer::DecoderLayer;
#[cfg(test)]
mod decoder_layer_tests;
|
eren23/synapse | synapse/crates/synapse-inference/src/models/lm/decoder_layer.rs | rs | 26,500 | d7ff05ff9dd3583968ebe2ba6c07e319ae559b9ab3f1f289c60cdc882c2c5a05 | use crate::config::position::RoPEStyle;
use crate::kv_cache::KVCacheLayer;
use crate::ops::activation::{gelu, is_gated_ffn, softmax_slice};
use crate::ops::matmul::matmul_t;
#[cfg(feature = "metal")]
use crate::ops::norm::layernorm;
use crate::ops::norm::{apply_headwise_rmsnorm, apply_norm};
use crate::ops::rope::apply... |
eren23/synapse | synapse/crates/synapse-inference/src/models/lm/builder.rs | rs | 4,933 | 037629480bdc974fd2c1e0e75a14d7a75c1c8c8cdb11fe788fa6499251d9c017 | use crate::config::position::{RoPEScaling, RoPEStyle};
use crate::config::{ModelConfig, PositionConfig};
use crate::registry::{create_attention, create_ffn, create_norm};
use crate::weight_loading::AlignedBuffer;
use super::causal_lm::CausalLM;
use super::decoder_layer::DecoderLayer;
/// Assembles a [`CausalLM`] from... |
eren23/synapse | synapse/crates/synapse-inference/src/models/lm/decoder_layer_tests.rs | rs | 35,991 | d37395025c7f7de0c5bdf39502fdc55b10939371cf81855feff919b237e5d93b | use super::*;
use crate::config::position::RoPEStyle;
use crate::ops::activation::{gelu, silu};
use crate::ops::matmul::matmul_t;
use crate::ops::norm::{apply_headwise_rmsnorm, apply_norm, rmsnorm};
use crate::ops::rope::apply_rope_inplace;
// ── Test-only naive reference implementations ────────────────────────
/// ... |
eren23/synapse | synapse/crates/synapse-inference/src/models/text_encoder/roberta_q4.rs | rs | 8,773 | 70d501f0ae8f3f5c958bb7e8a98d2f6cacb560ff28bfbe3dfb3e23491cc8e4a3 | //! Q4_0-quantized RoBERTa / UniXcoder encoder.
//!
//! Mirrors [`super::roberta::RoBERTaEncoder`] with every per-layer linear
//! (Q/K/V/O + intermediate.up/output.down) replaced by [`Q4Linear`].
//! Embeddings stay fp32 – they're already table-lookup-bound and the
//! padding-aware position id cumsum trick is easier ... |
eren23/synapse | synapse/crates/synapse-inference/src/models/text_encoder/code_deltatok_q4.rs | rs | 9,889 | 14a92e33ea970d812cbc1de499aef00e2b07fee079b06e6f8d317e95b44bf96f | //! Q4_0-quantized CodeDeltaTok head.
//!
//! In-memory quantization of [`super::code_deltatok::CodeDeltaTokHead`]:
//! every `Linear`-style weight matrix (per-block attention Q/K/V/O plus
//! SwiGLU gate/up/down and the final `out_proj`) is compressed into
//! [`Q4Linear`] blocks (32-element rows, f32 scale + 16 nibbl... |
eren23/synapse | synapse/crates/synapse-inference/src/models/text_encoder/unixcoder.rs | rs | 796 | d73eca6821117afbae33561549236fcf1b0d7c4b37929603b734dc4ac5229e6d | //! `microsoft/unixcoder-base` preset for [`super::roberta::RoBERTaConfig`].
use super::roberta::RoBERTaConfig;
/// Configuration for `microsoft/unixcoder-base`.
///
/// Matches the HuggingFace `config.json` exactly (2026-04 snapshot). UniXcoder
/// is architecturally a RoBERTa-base encoder, differing only in vocabul... |
eren23/synapse | synapse/crates/synapse-inference/src/models/text_encoder/roberta.rs | rs | 21,707 | 562baebed8a6f53cf5c8e749c958a46278bae60e5c6828f612467daa8a5f299f | //! Post-norm bidirectional encoder matching HuggingFace RoBERTa numerics.
//!
//! A [`RoBERTaEncoder`] is a stack of [`RoBERTaLayer`] blocks wrapped by
//! [`RoBERTaEmbeddings`]. The embeddings reproduce RoBERTa's padding-index
//! quirk (position ids start at `pad_token_id + 1` for the first real token);
//! the laye... |
eren23/synapse | synapse/crates/synapse-inference/src/models/text_encoder/code_deltatok.rs | rs | 17,771 | 7fdfd9940a96720dd7d3b7d051de2fa3f3d0bd7a4190383651299ee62bf86b45 | //! CodeDeltaTok head – rides on top of a frozen UniXcoder backbone to
//! compress each (before, after) code pair into a single dense 768-dim
//! delta token plus a reconstruction of the after-state.
//!
//! Port of `architectures/code_deltatok/code_deltatok.py` in the
//! crucible-community-tap. The reference Python ... |
eren23/synapse | synapse/crates/synapse-inference/src/models/text_encoder/mod.rs | rs | 1,127 | 47aa1c45c9454177586aa4db4092b61fc3c1a1dca378635d8a3f7b3652084f98 | //! Encoder-only text transformers (RoBERTa family, UniXcoder, CodeBERT).
//!
//! Unlike [`crate::models::vision::vit::EncoderLayer`] which is pre-norm,
//! these use the HuggingFace RoBERTa post-norm convention
//! (`attn → add → LN → FFN → add → LN`) and require padding-aware attention
//! so the `[CLS]` feature matc... |
eren23/synapse | synapse/crates/synapse-inference/src/models/vision/lewm.rs | rs | 103,627 | dd221ccc6df3e6105fd96d53938b348d73e01c5815a2dafdf944aa84945717ff | //! LeWorldModel (LeWM) – JEPA-based world model for planning in latent space.
//!
//! Architecture: ViT encoder → projector → DiT predictor (adaLN) → pred_proj.
//! The predictor uses adaptive layer normalization (adaLN) conditioned on the
//! action embedding, following the DiT/LeJEPA design.
//!
//! Paper: <https://... |
eren23/synapse | synapse/crates/synapse-inference/src/models/vision/mod.rs | rs | 872 | 61721250e0ebd6ebf660bc487f67efbe70c29d9c9477a3a4b68a2d69467a2a1f | pub mod clip;
pub mod code_wm;
pub mod jepa;
pub mod lewm;
pub mod vit;
pub mod world_model;
pub use clip::{parse_clip_config, parse_clip_config_json, CLIPConfig, CLIPModel};
pub use code_wm::{AttentionPooling, CodeWorldModel, CodeWorldModelConfig, GeluKind, PoolMode};
pub use jepa::{JEPAConfig, JEPAModel};
pub use le... |
eren23/synapse | synapse/crates/synapse-inference/src/models/vision/code_wm.rs | rs | 42,851 | fad59f4db1dba0cc76a78616941968f3d9d134b59f1477cfe5515420ab440ae1 | //! CodeWorldModel (CWM) – text-token world model for code edits.
//!
//! Architecture: weight-shared looped encoder + tiny action MLP + 2-block
//! looped predictor. Unlike LEWM (which uses DiT-style adaLN), CWM uses
//! vanilla pre-norm transformer blocks with standard PyTorch MultiheadAttention.
//!
//! Pipeline (pe... |
eren23/synapse | synapse/crates/synapse-inference/src/models/vision/clip.rs | rs | 45,235 | fda86b8890f3905ca316676b807e40852ecc16b211a28e150d38a47f1d3d5762 | //! CLIP (Contrastive Language-Image Pre-training) dual-encoder model.
//!
//! Architecture: ViT image encoder + bidirectional text encoder,
//! aligned via learned projection into a shared embedding space.
//! Outputs paired embeddings for image-text similarity scoring.
use std::collections::{HashMap, HashSet};
use ... |
eren23/synapse | synapse/crates/synapse-inference/src/models/vision/world_model.rs | rs | 23,392 | 0d06a4cd4a2b1613bd7d4047e885419739186f9473a5745c837f3882c28ea37d | //! World Model (Latent Dynamics) for planning in latent space.
//!
//! Architecture: ViT visual encoder → latent state projection → dynamics
//! transformer (state + action) → predicted future states.
//! Enables multi-step rollout for model-based planning.
use std::collections::HashMap;
use crate::config::{Attentio... |
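The `deltanet.rs` preview above quotes the Gated DeltaNet recurrence (`S_t = alpha_t * S_{t-1} + beta_t * outer(v_t, k_t)`, `o_t = S_t @ q_t`). The sketch below illustrates a single-head step of that recurrence in standalone Rust; the function name, the row-major `[head_dim * head_dim]` state layout, and the per-step scalar gates are assumptions of this sketch, not the crate's actual `deltanet_step` API.

```rust
// Minimal sketch of one Gated DeltaNet step for a single head.
//   S_t = alpha_t * S_{t-1} + beta_t * outer(v_t, k_t)
//   o_t = S_t @ q_t
fn deltanet_step_sketch(
    state: &mut [f32], // S: [head_dim * head_dim], row-major, updated in place
    q: &[f32],         // query  [head_dim]
    k: &[f32],         // key    [head_dim]
    v: &[f32],         // value  [head_dim]
    alpha: f32,        // per-step decay gate
    beta: f32,         // per-step write gate
) -> Vec<f32> {
    let d = q.len();
    assert_eq!(state.len(), d * d);
    // S_t = alpha * S_{t-1} + beta * outer(v, k)
    for i in 0..d {
        for j in 0..d {
            state[i * d + j] = alpha * state[i * d + j] + beta * v[i] * k[j];
        }
    }
    // o_t = S_t @ q_t
    (0..d)
        .map(|i| (0..d).map(|j| state[i * d + j] * q[j]).sum())
        .collect()
}

fn main() {
    let d = 4;
    let mut state = vec![0.0f32; d * d];
    let (q, k, v) = (vec![1.0; d], vec![0.5; d], vec![2.0; d]);
    let o = deltanet_step_sketch(&mut state, &q, &k, &v, 0.9, 0.1);
    println!("{o:?}");
}
```

Note how, unlike a KV cache, the state is a fixed `[head_dim, head_dim]` matrix that does not grow with sequence length, which is the property the deltanet_state.rs preview calls out.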