| repo (string, 6-65 chars) | file_url (string, 81-311 chars) | file_path (string, 6-227 chars) | content (string, 0-32.8k chars) | language (string, 1 class) | license (string, 7 classes) | commit_sha (string, 40 chars) | retrieved_at (date, 2026-01-04 15:31:58 to 2026-01-04 20:25:31) | truncated (bool, 2 classes) |
|---|---|---|---|---|---|---|---|---|
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/gemma3.rs | candle-transformers/src/models/gemma3.rs | //! Gemma LLM architecture (Google) inference implementation.
//!
//! See ["Introducing Gemma 3: The most capable model you can run on a single GPU or TPU"](https://blog.google/technology/developers/gemma-3/)
//!
//! Based on implementations from HuggingFace transformers.
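//!
//! A minimal, hypothetical usage sketch (not part of the upstream docs); the file
//! names, dtype and token ids below are assumptions:
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::gemma3::{Config, Model};
//!
//! let device = Device::Cpu;
//! let config: Config = serde_json::from_str(&std::fs::read_to_string("config.json")?)?;
//! let vb = unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)? };
//! let mut model = Model::new(false, &config, vb)?; // use_flash_attn = false
//! let input_ids = Tensor::new(&[[2u32, 651, 6037]], &device)?; // (batch, seq_len)
//! let logits = model.forward(&input_ids, 0)?; // logits for the last position only
//! ```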
use std::sync::Arc;
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{linear_b as linear, Activation, Linear, VarBuilder};
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub attention_bias: bool,
pub head_dim: usize,
pub hidden_activation: Activation,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_theta: f64,
pub rope_local_base_freq: f64,
pub vocab_size: usize,
pub final_logit_softcapping: Option<f64>,
pub attn_logit_softcapping: Option<f64>,
pub query_pre_attn_scalar: usize,
pub sliding_window: usize,
pub sliding_window_pattern: usize,
pub max_position_embeddings: usize,
}
#[derive(Debug, Clone)]
struct RmsNorm {
weight: Tensor,
eps: f64,
}
impl RmsNorm {
fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(dim, "weight")?;
Ok(Self { weight, eps })
}
}
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
x_normed
.to_dtype(x_dtype)?
.broadcast_mul(&(&self.weight + 1.0)?)
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(
dtype: DType,
cfg: &Config,
dev: &Device,
sliding_window: Option<usize>,
) -> Result<Self> {
let dim = cfg.head_dim;
let max_seq_len = cfg.max_position_embeddings;
let rope_freq = if sliding_window.is_some() {
cfg.rope_local_base_freq
} else {
cfg.rope_theta
};
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / rope_freq.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("gate_proj"))?;
let up_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("up_proj"))?;
let down_proj = linear(intermediate_sz, hidden_sz, false, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_activation,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
enum KvCache {
Normal(candle_nn::kv_cache::KvCache),
Rotating(candle_nn::kv_cache::RotatingKvCache),
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
q_norm: RmsNorm,
k_norm: RmsNorm,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
attn_logit_softcapping: Option<f64>,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: KvCache,
use_flash_attn: bool,
}
impl Attention {
fn new(
rotary_emb: Arc<RotaryEmbedding>,
use_flash_attn: bool,
cfg: &Config,
sliding_window: Option<usize>,
vb: VarBuilder,
) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = cfg.head_dim;
let bias = cfg.attention_bias;
let q_proj = linear(hidden_sz, num_heads * head_dim, bias, vb.pp("q_proj"))?;
let k_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("k_proj"))?;
let v_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("v_proj"))?;
let o_proj = linear(num_heads * head_dim, hidden_sz, bias, vb.pp("o_proj"))?;
let q_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("q_norm"))?;
let k_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("k_norm"))?;
let kv_cache = if let Some(sliding_window) = sliding_window {
KvCache::Rotating(candle_nn::kv_cache::RotatingKvCache::new(2, sliding_window))
} else {
KvCache::Normal(candle_nn::kv_cache::KvCache::new(
2,
cfg.max_position_embeddings,
))
};
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
q_norm,
k_norm,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
attn_logit_softcapping: cfg.attn_logit_softcapping,
rotary_emb,
kv_cache,
use_flash_attn,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let query_states = self.q_norm.forward(&query_states)?;
let key_states = self.k_norm.forward(&key_states)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &mut self.kv_cache {
KvCache::Normal(cache) => cache.append(&key_states, &value_states)?,
KvCache::Rotating(cache) => cache.append(&key_states, &value_states)?,
};
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = query_states.transpose(1, 2)?;
let k = key_states.transpose(1, 2)?;
let v = value_states.transpose(1, 2)?;
let scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, scale, attention_mask.is_some())?.transpose(1, 2)?
} else {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match self.attn_logit_softcapping {
None => attn_weights,
Some(sc) => ((attn_weights / sc)?.tanh()? * sc)?,
};
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, ()))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
match &mut self.kv_cache {
KvCache::Normal(c) => c.reset(),
KvCache::Rotating(c) => c.reset(),
}
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: RmsNorm,
pre_feedforward_layernorm: RmsNorm,
post_feedforward_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
sliding_window: Option<usize>,
}
impl DecoderLayer {
fn new(
use_flash_attn: bool,
cfg: &Config,
vb: VarBuilder,
sliding_window: Option<usize>,
) -> Result<Self> {
let rotary_emb = Arc::new(RotaryEmbedding::new(
vb.dtype(),
cfg,
vb.device(),
sliding_window,
)?);
let self_attn = Attention::new(
rotary_emb,
use_flash_attn,
cfg,
sliding_window,
vb.pp("self_attn"),
)?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let pre_feedforward_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("pre_feedforward_layernorm"),
)?;
let post_feedforward_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_feedforward_layernorm"),
)?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
pre_feedforward_layernorm,
post_feedforward_layernorm,
post_attention_layernorm,
sliding_window,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = xs.apply(&self.post_attention_layernorm)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.pre_feedforward_layernorm)?;
let xs = xs.apply(&self.mlp)?;
let xs = xs.apply(&self.post_feedforward_layernorm)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
fn prepare_decoder_attention_mask(
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
sliding_window: Option<usize>,
dtype: DType,
device: &Device,
) -> Result<Tensor> {
let mask: Vec<_> = if let Some(sliding_window) = sliding_window {
(0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect()
} else {
(0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0f32 }))
.collect()
};
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(dtype)
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
final_logit_softcapping: Option<f64>,
device: Device,
dtype: DType,
hidden_size: usize,
sliding_window: usize,
}
impl Model {
pub fn new(use_flash_attn: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let sliding_window = (layer_idx + 1) % cfg.sliding_window_pattern > 0;
let layer = DecoderLayer::new(
use_flash_attn,
cfg,
vb_l.pp(layer_idx),
sliding_window.then_some(cfg.sliding_window),
)?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = Linear::new(embed_tokens.embeddings().clone(), None);
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
final_logit_softcapping: cfg.final_logit_softcapping,
device: vb.device().clone(),
dtype: vb.dtype(),
hidden_size: cfg.hidden_size,
sliding_window: cfg.sliding_window,
})
}
fn create_attention_masks(
&self,
batch_size: usize,
seq_len: usize,
seqlen_offset: usize,
) -> Result<(Option<Tensor>, Option<Tensor>)> {
if seq_len <= 1 {
return Ok((None, None));
}
let mask = prepare_decoder_attention_mask(
batch_size,
seq_len,
seqlen_offset,
None,
self.dtype,
&self.device,
)?;
let sliding_mask = prepare_decoder_attention_mask(
batch_size,
seq_len,
seqlen_offset,
Some(self.sliding_window),
self.dtype,
&self.device,
)?;
Ok((Some(mask), Some(sliding_mask)))
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let xs = self.embed_tokens.forward(input_ids)?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
let (attention_mask, sliding_attention_mask) =
self.create_attention_masks(b_size, seq_len, seqlen_offset)?;
for layer in self.layers.iter_mut() {
let mask = if layer.sliding_window.is_some() {
&sliding_attention_mask
} else {
&attention_mask
};
xs = layer.forward(&xs, mask.as_ref(), seqlen_offset)?
}
let logits = xs
.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)?;
let logits = match self.final_logit_softcapping {
None => logits,
Some(sc) => ((logits / sc)?.tanh()? * sc)?,
};
Ok(logits)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/llama2_c_weights.rs | candle-transformers/src/models/llama2_c_weights.rs | //! Llama2 inference implementation.
//!
//! See ["LLaMA 2: Open Foundation and Fine-Tuned Chat Models"](https://arxiv.org/abs/2307.09288)
//!
//! Based on the [llama2.c](https://github.com/karpathy/llama2.c) implementation
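//!
//! A hypothetical loading sketch (the checkpoint name is an assumption); the binary
//! format is a 7-field i32 header followed by a sequence of f32 tensors:
//!
//! ```ignore
//! use candle::Device;
//! use candle_transformers::models::llama2_c::Config;
//! use candle_transformers::models::llama2_c_weights::TransformerWeights;
//!
//! let device = Device::Cpu;
//! let mut file = std::fs::File::open("stories15M.bin")?;
//! let config = Config::from_reader(&mut file)?;
//! let weights = TransformerWeights::from_reader(&mut file, &config, &device)?;
//! let vb = weights.var_builder(&config, &device)?; // feed this into the llama2_c model
//! ```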
use byteorder::{LittleEndian, ReadBytesExt};
use candle::{DType, Device, IndexOp, Result, Shape, Tensor};
use candle_nn::VarBuilder;
use super::llama2_c::Config;
pub struct TransformerWeights {
// token embedding table
token_embedding_table: Tensor, // (vocab_size, dim)
// weights for rmsnorms
rms_att_weight: Tensor, // (layer, dim) rmsnorm weights
rms_ffn_weight: Tensor, // (layer, dim)
// weights for matmuls
wq: Tensor, // (layer, dim, dim)
wk: Tensor, // (layer, dim, dim)
wv: Tensor, // (layer, dim, dim)
wo: Tensor, // (layer, dim, dim)
// weights for ffn
w1: Tensor, // (layer, hidden_dim, dim)
w2: Tensor, // (layer, dim, hidden_dim)
w3: Tensor, // (layer, hidden_dim, dim)
// final rmsnorm
rms_final_weight: Tensor, // (dim,)
// freq_cis for RoPE relative positional embeddings
freq_cis_real: Tensor, // (seq_len, head_size/2)
freq_cis_imag: Tensor, // (seq_len, head_size/2)
}
fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> {
let mut buf = [0u8; 4];
r.read_exact(&mut buf)?;
Ok(i32::from_le_bytes(buf))
}
fn read_tensor<R: std::io::Read, S: Into<Shape>>(
r: &mut R,
shape: S,
dev: &Device,
) -> Result<Tensor> {
let shape = shape.into();
let mut data_t = vec![0f32; shape.elem_count()];
r.read_f32_into::<LittleEndian>(&mut data_t)?;
let tensor = Tensor::from_vec(data_t, shape, dev)?;
Ok(tensor)
}
impl Config {
pub fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> {
let dim = read_i32(r)? as usize;
let hidden_dim = read_i32(r)? as usize;
let n_layers = read_i32(r)? as usize;
let n_heads = read_i32(r)? as usize;
let n_kv_heads = read_i32(r)? as usize;
let vocab_size = read_i32(r)? as usize;
let seq_len = read_i32(r)? as usize;
Ok(Self {
dim,
hidden_dim,
n_layers,
n_heads,
n_kv_heads,
vocab_size,
seq_len,
norm_eps: 1e-5,
})
}
pub fn head_size(&self) -> usize {
self.dim / self.n_heads
}
}
impl TransformerWeights {
pub fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> {
let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?;
let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?;
let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?;
let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?;
let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?;
let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?;
let rms_final_weight = read_tensor(r, c.dim, dev)?;
let head_size = c.head_size();
let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?;
let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?;
Ok(Self {
token_embedding_table,
rms_att_weight,
wq,
wk,
wv,
wo,
rms_ffn_weight,
w1,
w2,
w3,
rms_final_weight,
freq_cis_real,
freq_cis_imag,
})
}
pub fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder<'static>> {
// TODO: As of 2023-08-04, gemm is slower than expected when multiplying a matrix of
// size (1, k) with the transpose of a matrix of size (k, n) as it ends up transposing the
// second matrix back. We detect this case here and as a temporary hack make the weight
// matrix column major rather than row major. This ends up speeding up text generation from
// 120 token/s to 220 token/s on a Ryzen 2600X.
let tr = device.is_cpu() && !candle::utils::has_mkl();
let tr = |x: Tensor| if tr { x.t()?.contiguous()?.t() } else { Ok(x) };
let mut ws = std::collections::HashMap::new();
let mut insert = |name: &str, t: Tensor| {
ws.insert(name.to_string(), t);
};
insert("rot.freq_cis_real", self.freq_cis_real.clone());
insert("rot.freq_cis_imag", self.freq_cis_imag.clone());
insert(
"model.embed_tokens.weight",
self.token_embedding_table.clone(),
);
insert("lm_head.weight", tr(self.token_embedding_table.clone())?);
insert("model.norm.weight", self.rms_final_weight.clone());
for layer in 0..cfg.n_layers {
ws.insert(
format!("model.layers.{layer}.self_attn.q_proj.weight"),
tr(self.wq.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.self_attn.k_proj.weight"),
tr(self.wk.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.self_attn.v_proj.weight"),
tr(self.wv.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.self_attn.o_proj.weight"),
tr(self.wo.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.mlp.gate_proj.weight"),
tr(self.w1.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.mlp.down_proj.weight"),
tr(self.w2.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.mlp.up_proj.weight"),
tr(self.w3.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.input_layernorm.weight"),
self.rms_att_weight.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.post_attention_layernorm.weight"),
self.rms_ffn_weight.i(layer)?,
);
}
let vb = VarBuilder::from_tensors(ws, DType::F32, device);
Ok(vb)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mamba.rs | candle-transformers/src/models/mamba.rs | //! Mamba inference implementation.
//!
//! See ["Mamba: Linear-Time Sequence Modeling with Selective State Spaces"](https://arxiv.org/abs/2312.00752)
//!
//! Based on reference implementation from the AlbertMamba project
//! A fast implementation of mamba for inference only.
//! Based on Laurent Mazare's rust implementation: [mamba.rs](https://github.com/LaurentMazare/mamba.rs)
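//!
//! A hypothetical single-step decoding sketch (file names and the token id are
//! assumptions); the model consumes one token per call and updates `State` in place:
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::mamba::{Config, Model, State};
//!
//! let device = Device::Cpu;
//! let config: Config = serde_json::from_str(&std::fs::read_to_string("config.json")?)?;
//! let vb = unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)? };
//! let model = Model::new(&config, vb)?;
//! let mut state = State::new(1, &config, DType::F32, &device)?;
//! let token = Tensor::new(&[42u32], &device)?; // shape (batch,)
//! let logits = model.forward(&token, &mut state)?;
//! ```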
use crate::models::with_tracing::{linear, linear_no_bias, Linear};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{RmsNorm, VarBuilder};
const D_CONV: usize = 4;
const D_STATE: usize = 16;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub d_model: usize,
pub n_layer: usize,
pub vocab_size: usize,
pub pad_vocab_size_multiple: usize,
}
impl Config {
fn vocab_size(&self) -> usize {
let pad = self.pad_vocab_size_multiple;
self.vocab_size.div_ceil(pad) * pad
}
fn dt_rank(&self) -> usize {
self.d_model.div_ceil(16)
}
fn d_inner(&self) -> usize {
self.d_model * 2
}
}
pub struct State {
pub hs: Vec<Tensor>,
pub prev_xs: Vec<[Tensor; D_CONV]>,
pub pos: usize,
}
impl State {
pub fn new(batch_size: usize, cfg: &Config, dtype: DType, device: &Device) -> Result<Self> {
let mut hs = Vec::with_capacity(cfg.n_layer);
let mut prev_xs = Vec::with_capacity(cfg.n_layer);
for _i in 0..cfg.n_layer {
let h = Tensor::zeros((batch_size, cfg.d_inner(), D_STATE), dtype, device)?;
let x = Tensor::zeros((batch_size, cfg.d_inner()), dtype, device)?;
hs.push(h);
prev_xs.push([x.clone(), x.clone(), x.clone(), x.clone()]);
}
Ok(Self {
hs,
prev_xs,
pos: 0,
})
}
}
#[derive(Clone, Debug)]
pub struct MambaBlock {
in_proj: Linear,
conv1d_bias: Tensor,
conv1d_weights: [Tensor; D_CONV],
x_proj: Linear,
dt_proj: Linear,
a_log: Tensor,
d: Tensor,
out_proj: Linear,
dt_rank: usize,
layer_index: usize,
d_inner: usize,
}
impl MambaBlock {
pub fn new(layer_index: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let d_inner = cfg.d_inner();
let dt_rank = cfg.dt_rank();
let in_proj = linear_no_bias(cfg.d_model, d_inner * 2, vb.pp("in_proj"))?;
let x_proj = linear_no_bias(d_inner, dt_rank + D_STATE * 2, vb.pp("x_proj"))?;
let dt_proj = linear(dt_rank, d_inner, vb.pp("dt_proj"))?;
let a_log = vb.get((d_inner, D_STATE), "A_log")?;
let d = vb.get(d_inner, "D")?;
let out_proj = linear_no_bias(d_inner, cfg.d_model, vb.pp("out_proj"))?;
let conv1d_bias = vb.get(d_inner, "conv1d.bias")?;
let conv1d_weight = vb.get((d_inner, 1, D_CONV), "conv1d.weight")?;
let conv1d_weights = [
conv1d_weight.i((.., 0, 0))?,
conv1d_weight.i((.., 0, 1))?,
conv1d_weight.i((.., 0, 2))?,
conv1d_weight.i((.., 0, 3))?,
];
Ok(Self {
in_proj,
conv1d_bias,
conv1d_weights,
x_proj,
dt_proj,
a_log,
d,
out_proj,
dt_rank,
layer_index,
d_inner,
})
}
pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let (b_sz, _dim) = xs.dims2()?;
let li = self.layer_index;
let mut xs = xs.apply(&self.in_proj)?.chunk(2, D::Minus1)?;
let proj_for_silu = xs.remove(1);
state.prev_xs[li][state.pos % D_CONV] = xs.remove(0);
let mut proj_for_conv = self.conv1d_bias.broadcast_as((b_sz, self.d_inner))?;
for d_c in 0..D_CONV {
proj_for_conv = (proj_for_conv
+ self.conv1d_weights[d_c]
.broadcast_mul(&state.prev_xs[li][(d_c + 1 + state.pos) % D_CONV])?)?;
}
let proj_for_conv = candle_nn::ops::silu(&proj_for_conv)?;
// SSM + Selection, we're doing inference here so only need the last step of
// the sequence.
// Algorithm 3.2 on page 6, https://arxiv.org/pdf/2312.00752.pdf
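// Informal summary of the steps below: delta = softplus(dt_proj(delta)), A = -exp(A_log),
// h_t = exp(delta * A) * h_{t-1} + (delta * B) * x_t, y_t = C . h_t + D * x_t,
// finally gated by silu(z) before out_proj.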
let x_proj = self.x_proj.forward(&proj_for_conv)?;
let delta = x_proj.narrow(D::Minus1, 0, self.dt_rank)?.contiguous()?;
let b = x_proj.narrow(D::Minus1, self.dt_rank, D_STATE)?;
let c = x_proj.narrow(D::Minus1, self.dt_rank + D_STATE, D_STATE)?;
let delta = delta.apply(&self.dt_proj)?;
// softplus: delta = log(1 + exp(delta))
let delta = (delta.exp()? + 1.)?.log()?;
let a = self.a_log.to_dtype(delta.dtype())?.exp()?.neg()?;
let d = self.d.to_dtype(delta.dtype())?;
// Selective scan part
// Eqn (2a), page 3, h_t = Ab h_{t-1} + Bb x_t
let delta = delta
.unsqueeze(D::Minus1)?
.broadcast_as((b_sz, self.d_inner, D_STATE))?;
let a = a.broadcast_as((b_sz, self.d_inner, D_STATE))?;
let b = b.broadcast_as((b_sz, self.d_inner, D_STATE))?;
let proj_for_conv_b =
proj_for_conv
.unsqueeze(D::Minus1)?
.broadcast_as((b_sz, self.d_inner, D_STATE))?;
state.hs[li] = ((&state.hs[li] * (&delta * &a)?.exp()?)? + &delta * &b * &proj_for_conv_b)?;
let ss = (state.hs[li]
.matmul(&c.unsqueeze(D::Minus1)?)?
.squeeze(D::Minus1)?
+ proj_for_conv.broadcast_mul(&d)?)?;
let ys = (ss * candle_nn::ops::silu(&proj_for_silu))?;
ys.apply(&self.out_proj)
}
}
#[derive(Clone, Debug)]
pub struct ResidualBlock {
mixer: MambaBlock,
norm: RmsNorm,
}
impl ResidualBlock {
pub fn new(layer_index: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let norm = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm"))?;
let mixer = MambaBlock::new(layer_index, cfg, vb.pp("mixer"))?;
Ok(Self { mixer, norm })
}
fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
self.mixer.forward(&xs.apply(&self.norm)?, state)? + xs
}
}
// https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L56
#[derive(Clone, Debug)]
pub struct Model {
embedding: candle_nn::Embedding,
layers: Vec<ResidualBlock>,
norm_f: RmsNorm,
lm_head: Linear,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embedding = candle_nn::embedding(cfg.vocab_size(), cfg.d_model, vb.pp("embedding"))?;
let mut layers = Vec::with_capacity(cfg.n_layer);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.n_layer {
let layer = ResidualBlock::new(layer_idx, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm_f = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm_f"))?;
let lm_head = Linear::from_weights(embedding.embeddings().clone(), None);
Ok(Self {
embedding,
layers,
norm_f,
lm_head,
dtype: vb.dtype(),
})
}
pub fn forward(&self, input_ids: &Tensor, state: &mut State) -> Result<Tensor> {
let _b_size = input_ids.dims1()?;
let mut xs = self.embedding.forward(input_ids)?;
for layer in self.layers.iter() {
xs = layer.forward(&xs, state)?
}
state.pos += 1;
xs.apply(&self.norm_f)?.apply(&self.lm_head)
}
pub fn dtype(&self) -> DType {
self.dtype
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_blip_text.rs | candle-transformers/src/models/quantized_blip_text.rs | //! Quantized BLIP text module implementation.
//!
//! Provides the text decoder portion of the BLIP model with 8-bit quantization.
//! Uses a BERT-style transformer architecture for text processing.
//!
//! Key components:
//! - Text embeddings layer with position embeddings
//! - Multi-head self attention layers
//! - Cross-attention for vision-text fusion
//! - Layer normalization and feed-forward layers
//! - Quantized linear transformations
//!
//! References:
//! - [BLIP Paper](https://arxiv.org/abs/2201.12086)
//! - [Hugging Face Implementation](https://huggingface.co/docs/transformers/model_doc/blip)
//!
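//! A hypothetical direct-use sketch; this module is normally driven by the full
//! quantized BLIP model, and the gguf file name, the `text_decoder` prefix and the
//! `image_embeds` / `token_ids` tensors below are assumptions:
//!
//! ```ignore
//! use candle::Device;
//! use candle_transformers::models::quantized_blip_text::{Config, TextLMHeadModel, VarBuilder};
//!
//! let device = Device::Cpu;
//! let cfg: Config = serde_json::from_str(&std::fs::read_to_string("text_config.json")?)?;
//! let vb = VarBuilder::from_gguf("blip-image-captioning-large.gguf", &device)?;
//! let mut model = TextLMHeadModel::new(&cfg, vb.pp("text_decoder"))?;
//! // `image_embeds` would come from the BLIP vision tower, `token_ids` from the tokenizer.
//! let logits = model.forward(&token_ids, &image_embeds)?;
//! ```
//!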
use crate::models::with_tracing::QMatMul;
use crate::quantized_nn::{layer_norm, linear, Embedding, Linear};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{Module, Result, Tensor, D};
use candle_nn::LayerNorm;
pub type Config = super::blip_text::Config;
#[derive(Debug, Clone)]
struct TextEmbeddings {
word_embeddings: Embedding,
position_embeddings: Embedding,
layer_norm: LayerNorm,
position_ids: Tensor,
}
impl TextEmbeddings {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let word_embeddings =
Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("word_embeddings"))?;
let position_embeddings = Embedding::new(
cfg.max_position_embeddings,
cfg.hidden_size,
vb.pp("position_embeddings"),
)?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
let position_ids =
Tensor::arange(0, cfg.max_position_embeddings as u32, vb.device())?.unsqueeze(0)?;
Ok(Self {
word_embeddings,
position_embeddings,
layer_norm,
position_ids,
})
}
fn forward(&self, xs: &Tensor, past_kv_len: usize) -> Result<Tensor> {
let seq_len = xs.dim(1)?;
let position_ids = self.position_ids.narrow(1, past_kv_len, seq_len)?;
let embeddings = self.word_embeddings.forward(xs)?;
let position_embeddings = self.position_embeddings.forward(&position_ids)?;
(embeddings + position_embeddings)?.apply(&self.layer_norm)
}
}
#[derive(Debug, Clone)]
struct TextSelfAttention {
query: Linear,
key: Linear,
value: Linear,
attention_head_size: usize,
num_attention_heads: usize,
attention_scale: f64,
kv_cache: Option<(Tensor, Tensor)>,
}
impl TextSelfAttention {
fn new(cfg: &Config, is_cross_attention: bool, vb: VarBuilder) -> Result<Self> {
let num_attention_heads = cfg.num_attention_heads;
let attention_head_size = cfg.hidden_size / num_attention_heads;
let all_head_size = cfg.num_attention_heads * attention_head_size;
let query = linear(cfg.hidden_size, all_head_size, vb.pp("query"))?;
let in_size = if is_cross_attention {
cfg.encoder_hidden_size
} else {
cfg.hidden_size
};
let key = linear(in_size, all_head_size, vb.pp("key"))?;
let value = linear(in_size, all_head_size, vb.pp("value"))?;
let attention_scale = 1f64 / (attention_head_size as f64).sqrt();
Ok(Self {
query,
key,
value,
attention_head_size,
num_attention_heads,
attention_scale,
kv_cache: None,
})
}
fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, _) = xs.dims3()?;
xs.reshape((
b_size,
seq_len,
self.num_attention_heads,
self.attention_head_size,
))?
.permute((0, 2, 1, 3))
}
fn reset_kv_cache(&mut self) {
self.kv_cache = None
}
fn forward(
&mut self,
xs: &Tensor,
encoder_hidden_states: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let query = self
.transpose_for_scores(&self.query.forward(xs)?)?
.contiguous()?;
let (key, value) = match encoder_hidden_states {
None => {
let key = self.transpose_for_scores(&self.key.forward(xs)?)?;
let value = self.transpose_for_scores(&self.value.forward(xs)?)?;
let (key, value) = match &self.kv_cache {
None => (key, value),
Some((prev_key, prev_value)) => {
let key = Tensor::cat(&[prev_key, &key], 2)?;
let value = Tensor::cat(&[prev_value, &value], 2)?;
(key, value)
}
};
self.kv_cache = Some((key.clone(), value.clone()));
(key, value)
}
Some(xs) => {
let key = self.transpose_for_scores(&self.key.forward(xs)?)?;
let value = self.transpose_for_scores(&self.value.forward(xs)?)?;
// no kv-cache in this case, but the results could probably be memoized.
(key, value)
}
};
let key = key.contiguous()?;
let value = value.contiguous()?;
let attention_scores = query.matmul(&key.t()?)?;
let attention_scores = (attention_scores * self.attention_scale)?;
let attention_scores = match attention_mask {
Some(mask) => attention_scores.broadcast_add(mask)?,
None => attention_scores,
};
let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;
attention_probs
.matmul(&value)?
.permute((0, 2, 1, 3))?
.flatten_from(D::Minus2)
}
}
#[derive(Debug, Clone)]
struct TextSelfOutput {
dense: Linear,
layer_norm: LayerNorm,
}
impl TextSelfOutput {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self { dense, layer_norm })
}
fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
(xs.apply(&self.dense) + input_tensor)?.apply(&self.layer_norm)
}
}
#[derive(Debug, Clone)]
struct TextAttention {
self_: TextSelfAttention,
output: TextSelfOutput,
}
impl TextAttention {
fn new(cfg: &Config, is_cross_attention: bool, vb: VarBuilder) -> Result<Self> {
let self_ = TextSelfAttention::new(cfg, is_cross_attention, vb.pp("self"))?;
let output = TextSelfOutput::new(cfg, vb.pp("output"))?;
Ok(Self { self_, output })
}
fn reset_kv_cache(&mut self) {
self.self_.reset_kv_cache()
}
fn forward(
&mut self,
xs: &Tensor,
encoder_hidden_states: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let self_outputs = self
.self_
.forward(xs, encoder_hidden_states, attention_mask)?;
self.output.forward(&self_outputs, xs)
}
}
#[derive(Debug, Clone)]
struct TextIntermediate {
dense: Linear,
intermediate_act_fn: candle_nn::Activation,
}
impl TextIntermediate {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("dense"))?;
Ok(Self {
dense,
intermediate_act_fn: cfg.hidden_act,
})
}
}
impl Module for TextIntermediate {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.dense)?.apply(&self.intermediate_act_fn)
}
}
#[derive(Debug, Clone)]
struct TextOutput {
dense: Linear,
layer_norm: LayerNorm,
}
impl TextOutput {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self { dense, layer_norm })
}
fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
(xs.apply(&self.dense)? + input_tensor)?.apply(&self.layer_norm)
}
}
#[derive(Debug, Clone)]
struct TextLayer {
attention: TextAttention,
cross_attention: Option<TextAttention>,
intermediate: TextIntermediate,
output: TextOutput,
}
impl TextLayer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = TextAttention::new(cfg, false, vb.pp("attention"))?;
let cross_attention = if cfg.is_decoder {
Some(TextAttention::new(cfg, true, vb.pp("crossattention"))?)
} else {
None
};
let intermediate = TextIntermediate::new(cfg, vb.pp("intermediate"))?;
let output = TextOutput::new(cfg, vb.pp("output"))?;
Ok(Self {
attention,
cross_attention,
intermediate,
output,
})
}
fn reset_kv_cache(&mut self) {
self.attention.reset_kv_cache();
if let Some(ca) = &mut self.cross_attention {
ca.reset_kv_cache()
}
}
fn forward(
&mut self,
xs: &Tensor,
encoder_hidden_states: &Tensor,
attention_mask: &Tensor,
) -> Result<Tensor> {
let attention_output = self.attention.forward(xs, None, Some(attention_mask))?;
let attention_output = match &mut self.cross_attention {
Some(ca) => ca.forward(&attention_output, Some(encoder_hidden_states), None)?,
None => candle::bail!("expected some cross-attn"),
};
let intermediate_output = self.intermediate.forward(&attention_output)?;
self.output.forward(&intermediate_output, &attention_output)
}
}
#[derive(Debug, Clone)]
struct TextEncoder {
layers: Vec<TextLayer>,
}
impl TextEncoder {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("layer");
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
for i in 0..cfg.num_hidden_layers {
let layer = TextLayer::new(cfg, vb.pp(i))?;
layers.push(layer)
}
Ok(Self { layers })
}
fn reset_kv_cache(&mut self) {
self.layers.iter_mut().for_each(|l| l.reset_kv_cache())
}
fn forward(
&mut self,
xs: &Tensor,
encoder_hidden_states: &Tensor,
attention_mask: &Tensor,
) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, encoder_hidden_states, attention_mask)?
}
Ok(xs)
}
}
#[derive(Debug, Clone)]
pub struct TextPooler {
dense: Linear,
}
impl TextPooler {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
Ok(Self { dense })
}
}
impl Module for TextPooler {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.narrow(D::Minus1, 0, 1)?
.squeeze(D::Minus1)?
.apply(&self.dense)?
.tanh()
}
}
#[derive(Debug, Clone)]
struct TextPredictionHeadTransform {
dense: Linear,
transform_act_fn: candle_nn::Activation,
layer_norm: LayerNorm,
}
impl TextPredictionHeadTransform {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self {
dense,
transform_act_fn: cfg.hidden_act,
layer_norm,
})
}
}
impl Module for TextPredictionHeadTransform {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.dense)?
.apply(&self.transform_act_fn)?
.apply(&self.layer_norm)
}
}
#[derive(Debug, Clone)]
struct TextLMPredictionHead {
transform: TextPredictionHeadTransform,
decoder: Linear,
}
impl TextLMPredictionHead {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let transform = TextPredictionHeadTransform::new(cfg, vb.pp("transform"))?;
let weight = QMatMul::new(cfg.hidden_size, cfg.vocab_size, vb.pp("decoder"))?;
let bias = vb.get(cfg.vocab_size, "bias")?.dequantize(vb.device())?;
let decoder = Linear::from_weights(weight, Some(bias));
Ok(Self { transform, decoder })
}
}
impl Module for TextLMPredictionHead {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.transform)?.apply(&self.decoder)
}
}
#[derive(Debug, Clone)]
struct TextOnlyMLMHead {
predictions: TextLMPredictionHead,
}
impl TextOnlyMLMHead {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let predictions = TextLMPredictionHead::new(cfg, vb.pp("predictions"))?;
Ok(Self { predictions })
}
}
impl Module for TextOnlyMLMHead {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.predictions.forward(xs)
}
}
#[derive(Debug, Clone)]
struct TextModel {
embeddings: TextEmbeddings,
encoder: TextEncoder,
past_kv_len: usize,
// We do not need the pooler for caption generation
}
impl TextModel {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embeddings = TextEmbeddings::new(cfg, vb.pp("embeddings"))?;
let encoder = TextEncoder::new(cfg, vb.pp("encoder"))?;
Ok(Self {
embeddings,
encoder,
past_kv_len: 0,
})
}
fn forward(
&mut self,
input_ids: &Tensor,
encoder_hidden_states: &Tensor,
attention_mask: &Tensor,
) -> Result<Tensor> {
let (_b_sz, seq_len) = input_ids.dims2()?;
let embedding_output = self.embeddings.forward(input_ids, self.past_kv_len)?;
let sequence_output =
self.encoder
.forward(&embedding_output, encoder_hidden_states, attention_mask)?;
self.past_kv_len += seq_len;
// We're interested in the sequence-output rather than the pooled-output.
Ok(sequence_output)
}
fn reset_kv_cache(&mut self) {
self.past_kv_len = 0;
self.encoder.reset_kv_cache();
}
}
#[derive(Debug, Clone)]
pub struct TextLMHeadModel {
bert: TextModel,
cls: TextOnlyMLMHead,
}
impl TextLMHeadModel {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let bert = TextModel::new(cfg, vb.pp("bert"))?;
let cls = TextOnlyMLMHead::new(cfg, vb.pp("cls"))?;
Ok(Self { bert, cls })
}
pub fn forward(
&mut self,
input_ids: &Tensor,
encoder_hidden_states: &Tensor,
) -> Result<Tensor> {
let seq_len = input_ids.dim(1)?;
let mask: Vec<_> = (0..seq_len)
.flat_map(|i| (0..seq_len).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 }))
.collect();
let mask = Tensor::from_vec(mask, (seq_len, seq_len), input_ids.device())?;
let sequence_output = self.bert.forward(input_ids, encoder_hidden_states, &mask)?;
let prediction_scores = self.cls.forward(&sequence_output)?;
// return_logits is false so we don't discard the last sequence element.
Ok(prediction_scores)
}
pub fn reset_kv_cache(&mut self) {
self.bert.reset_kv_cache()
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/parler_tts.rs | candle-transformers/src/models/parler_tts.rs | //! Parler Model implementation for parler_tts text-to-speech synthesis
//!
//! Implements a transformer-based decoder architecture that generates discrete audio
//! tokens from text. The model converts text into audio segments using multiple
//! codebooks of quantized audio tokens.
//!
//! The model architecture includes:
//! - Multi-head attention layers for text and audio processing
//! - Feed-forward networks
//! - Layer normalization
//! - Positional embeddings
//! - Multiple codebook prediction heads
//!
//! The implementation follows the original parler_tts architecture while focusing
//! on audio token generation for text-to-speech synthesis.
//!
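//! A hypothetical generation sketch; the file names, sampling parameters and the
//! `prompt_tokens` / `description_tokens` tensors (produced by the matching tokenizer
//! as `(1, seq_len)` u32 tensors) are assumptions:
//!
//! ```ignore
//! use candle::{DType, Device};
//! use candle_nn::VarBuilder;
//! use candle_transformers::generation::LogitsProcessor;
//! use candle_transformers::models::parler_tts::{Config, Model};
//!
//! let device = Device::Cpu;
//! let config: Config = serde_json::from_str(&std::fs::read_to_string("config.json")?)?;
//! let vb = unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)? };
//! let mut model = Model::new(&config, vb)?;
//! let lp = LogitsProcessor::new(42, Some(0.7), None); // seed, temperature, top_p
//! let audio_codes = model.generate(&prompt_tokens, &description_tokens, lp, 512)?;
//! // `audio_codes` holds one row of discrete tokens per codebook; decoding them with
//! // the DAC audio decoder yields a waveform.
//! ```
//!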
use crate::generation::LogitsProcessor;
use crate::models::t5;
use candle::{IndexOp, Result, Tensor};
use candle_nn::{layer_norm, linear_b as linear, Activation, LayerNorm, Linear, VarBuilder};
#[derive(serde::Deserialize, Debug, Clone)]
pub struct DecoderConfig {
pub vocab_size: usize,
pub max_position_embeddings: usize,
pub num_hidden_layers: usize,
pub ffn_dim: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: Option<usize>,
pub num_cross_attention_key_value_heads: Option<usize>,
pub activation_function: Activation,
pub hidden_size: usize,
pub scale_embedding: bool,
pub num_codebooks: usize,
pub pad_token_id: usize,
pub bos_token_id: usize,
pub eos_token_id: usize,
pub tie_word_embeddings: bool,
pub rope_embeddings: bool,
pub rope_theta: f64,
}
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub decoder_start_token_id: u32,
pub pad_token_id: u32,
pub decoder: DecoderConfig,
pub text_encoder: t5::Config,
pub vocab_size: usize,
pub audio_encoder: crate::models::dac::Config,
}
#[derive(Debug, Clone)]
pub struct Attention {
k_proj: Linear,
v_proj: Linear,
q_proj: Linear,
out_proj: Linear,
is_causal: bool,
kv_cache: Option<(Tensor, Tensor)>,
scaling: f64,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
}
impl Attention {
fn new(
num_kv_heads: usize,
is_causal: bool,
cfg: &DecoderConfig,
vb: VarBuilder,
) -> Result<Self> {
if cfg.rope_embeddings {
candle::bail!("rope embeddings are not supported");
}
let embed_dim = cfg.hidden_size;
let head_dim = embed_dim / cfg.num_attention_heads;
let kv_out_dim = num_kv_heads * head_dim;
let k_proj = linear(embed_dim, kv_out_dim, false, vb.pp("k_proj"))?;
let v_proj = linear(embed_dim, kv_out_dim, false, vb.pp("v_proj"))?;
let q_proj = linear(embed_dim, embed_dim, false, vb.pp("q_proj"))?;
let out_proj = linear(embed_dim, embed_dim, false, vb.pp("out_proj"))?;
Ok(Self {
k_proj,
v_proj,
q_proj,
out_proj,
is_causal,
kv_cache: None,
scaling: (head_dim as f64).powf(-0.5),
num_heads: cfg.num_attention_heads,
num_kv_heads,
num_kv_groups: cfg.num_attention_heads / num_kv_heads,
head_dim,
})
}
fn forward(
&mut self,
xs: &Tensor,
key_value_states: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let (b_sz, tgt_len, _) = xs.dims3()?;
let query_states = (xs.apply(&self.q_proj)? * self.scaling)?
.reshape((b_sz, tgt_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let key_states = match key_value_states {
Some(states) => states.apply(&self.k_proj)?,
None => xs.apply(&self.k_proj)?,
};
let key_states = key_states
.reshape((b_sz, (), self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let value_states = match key_value_states {
Some(states) => states.apply(&self.v_proj)?,
None => xs.apply(&self.v_proj)?,
};
let value_states = value_states
.reshape((b_sz, (), self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
if self.is_causal {
self.kv_cache = Some((key_states.clone(), value_states.clone()));
}
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_weights = query_states.matmul(&key_states.transpose(2, 3)?)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&value_states)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, tgt_len, ()))?
.apply(&self.out_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
pub struct DecoderLayer {
self_attn: Attention,
self_attn_layer_norm: LayerNorm,
encoder_attn: Attention,
encoder_attn_layer_norm: LayerNorm,
fc1: Linear,
fc2: Linear,
final_layer_norm: LayerNorm,
activation: Activation,
}
impl DecoderLayer {
fn new(cfg: &DecoderConfig, vb: VarBuilder) -> Result<Self> {
let kv_heads = cfg.num_key_value_heads.unwrap_or(cfg.num_attention_heads);
let kv_heads_cross = cfg.num_cross_attention_key_value_heads.unwrap_or(kv_heads);
let self_attn = Attention::new(kv_heads, true, cfg, vb.pp("self_attn"))?;
let encoder_attn = Attention::new(kv_heads_cross, false, cfg, vb.pp("encoder_attn"))?;
let self_attn_layer_norm =
layer_norm(cfg.hidden_size, 1e-5, vb.pp("self_attn_layer_norm"))?;
let encoder_attn_layer_norm =
layer_norm(cfg.hidden_size, 1e-5, vb.pp("encoder_attn_layer_norm"))?;
let fc1 = linear(cfg.hidden_size, cfg.ffn_dim, false, vb.pp("fc1"))?;
let fc2 = linear(cfg.ffn_dim, cfg.hidden_size, false, vb.pp("fc2"))?;
let final_layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb.pp("final_layer_norm"))?;
Ok(Self {
self_attn,
self_attn_layer_norm,
encoder_attn,
encoder_attn_layer_norm,
fc1,
fc2,
final_layer_norm,
activation: cfg.activation_function,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
encoder_xs: &Tensor,
encoder_attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
// Self attention
let residual = xs;
let xs = xs.apply(&self.self_attn_layer_norm)?;
let xs = self.self_attn.forward(&xs, None, attention_mask)?;
let xs = (residual + xs)?;
// Cross attention
let residual = &xs;
let xs = xs.apply(&self.encoder_attn_layer_norm)?;
let xs = self
.encoder_attn
.forward(&xs, Some(encoder_xs), encoder_attention_mask)?;
let xs = (residual + xs)?;
// Fully connected
let residual = &xs;
let xs = xs
.apply(&self.final_layer_norm)?
.apply(&self.fc1)?
.apply(&self.activation)?
.apply(&self.fc2)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache();
self.encoder_attn.clear_kv_cache();
}
}
#[derive(Debug, Clone)]
pub struct Decoder {
embed_tokens: Vec<candle_nn::Embedding>,
embed_positions: Tensor,
layers: Vec<DecoderLayer>,
layer_norm: LayerNorm,
num_codebooks: usize,
hidden_size: usize,
lm_heads: Vec<Linear>,
dtype: candle::DType,
}
impl Decoder {
pub fn new(cfg: &DecoderConfig, vb: VarBuilder) -> Result<Self> {
let vb_d = vb.pp("model.decoder");
let mut embed_tokens = Vec::with_capacity(cfg.num_codebooks);
let vb_e = vb_d.pp("embed_tokens");
for embed_idx in 0..cfg.num_codebooks {
let e = candle_nn::embedding(cfg.vocab_size + 1, cfg.hidden_size, vb_e.pp(embed_idx))?;
embed_tokens.push(e)
}
let embed_positions = vb_d.get(
(cfg.max_position_embeddings, cfg.hidden_size),
"embed_positions.weights",
)?;
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_d.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb_d.pp("layer_norm"))?;
let mut lm_heads = Vec::with_capacity(cfg.num_codebooks);
let vb_l = vb.pp("lm_heads");
for lm_idx in 0..cfg.num_codebooks {
let lm_head = linear(cfg.hidden_size, cfg.vocab_size, false, vb_l.pp(lm_idx))?;
lm_heads.push(lm_head)
}
Ok(Self {
embed_tokens,
embed_positions,
layers,
layer_norm,
num_codebooks: cfg.num_codebooks,
lm_heads,
hidden_size: cfg.hidden_size,
dtype: vb.dtype(),
})
}
pub fn forward(
&mut self,
input_ids: &Tensor,
prompt_hidden_states: Option<&Tensor>,
attention_mask: Option<&Tensor>,
encoder_xs: &Tensor,
encoder_attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Vec<Tensor>> {
let (b_sz, num_codebooks, seq_len) = input_ids.dims3()?;
if num_codebooks != self.num_codebooks {
candle::bail!("unexpected num codebooks in input {:?}", input_ids.shape())
}
let mut inputs_embeds = Tensor::zeros(
(b_sz, seq_len, self.hidden_size),
self.dtype,
input_ids.device(),
)?;
for (idx, embs) in self.embed_tokens.iter().enumerate() {
let e = input_ids.i((.., idx))?.apply(embs)?;
inputs_embeds = (inputs_embeds + e)?
}
let inputs_embeds = match prompt_hidden_states {
None => inputs_embeds,
Some(pis) => Tensor::cat(&[pis, &inputs_embeds], 1)?,
};
let embed_positions = self
.embed_positions
.i(seqlen_offset..seqlen_offset + inputs_embeds.dim(1)?)?;
let mut xs = (inputs_embeds + embed_positions.unsqueeze(0))?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask, encoder_xs, encoder_attention_mask)?;
}
let xs = xs.apply(&self.layer_norm)?;
let mut lm_logits = Vec::with_capacity(self.num_codebooks);
for lm_head in self.lm_heads.iter() {
let logits = xs.apply(lm_head)?;
lm_logits.push(logits)
}
Ok(lm_logits)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
#[derive(Debug, Clone)]
pub struct Model {
pub embed_prompts: candle_nn::Embedding,
pub enc_to_dec_proj: Option<Linear>,
pub decoder: Decoder,
pub text_encoder: t5::T5EncoderModel,
pub decoder_start_token_id: u32,
pub pad_token_id: u32,
pub audio_encoder: crate::models::dac::Model,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let text_encoder = t5::T5EncoderModel::load(vb.pp("text_encoder"), &cfg.text_encoder)?;
let decoder = Decoder::new(&cfg.decoder, vb.pp("decoder"))?;
let embed_prompts = candle_nn::embedding(
cfg.vocab_size,
cfg.decoder.hidden_size,
vb.pp("embed_prompts"),
)?;
let enc_to_dec_proj = if cfg.text_encoder.d_model != cfg.decoder.hidden_size {
let proj = linear(
cfg.text_encoder.d_model,
cfg.decoder.hidden_size,
true,
vb.pp("enc_to_dec_proj"),
)?;
Some(proj)
} else {
None
};
let audio_encoder =
crate::models::dac::Model::new(&cfg.audio_encoder, vb.pp("audio_encoder.model"))?;
Ok(Self {
decoder,
text_encoder,
embed_prompts,
enc_to_dec_proj,
decoder_start_token_id: cfg.decoder_start_token_id,
pad_token_id: cfg.pad_token_id,
audio_encoder,
})
}
/// Note that the returned tensor uses the CPU device.
pub fn generate(
&mut self,
prompt_tokens: &Tensor,
description_tokens: &Tensor,
mut lp: LogitsProcessor,
max_steps: usize,
) -> Result<Tensor> {
self.decoder.clear_kv_cache();
self.text_encoder.clear_kv_cache();
let encoded = self.text_encoder.forward(description_tokens)?;
let encoded = match self.enc_to_dec_proj.as_ref() {
None => encoded,
Some(proj) => encoded.apply(proj)?,
};
let prompt_hidden_states = prompt_tokens.apply(&self.embed_prompts)?;
let num_codebooks = self.decoder.num_codebooks;
let mut audio_tokens = vec![self.decoder_start_token_id; num_codebooks];
let mut all_audio_tokens = vec![vec![]; num_codebooks];
let prompt_len = prompt_hidden_states.dim(1)?;
for step in 0..max_steps {
let input_ids = Tensor::from_slice(
audio_tokens.as_slice(),
(1, num_codebooks, 1),
prompt_tokens.device(),
)?;
let (prompt_hidden_states, pos) = if step == 0 {
(Some(&prompt_hidden_states), 0)
} else {
(None, step + prompt_len)
};
let causal_mask = if pos == 0 {
self.prepare_causal_mask(prompt_len + 1, prompt_len + 1, input_ids.device())?
} else {
self.prepare_causal_mask(1, pos + 1, input_ids.device())?
};
let logits = self.decoder.forward(
&input_ids,
prompt_hidden_states,
Some(&causal_mask),
&encoded,
None,
pos,
)?;
for (logit_idx, logit) in logits.iter().enumerate() {
if logit_idx > step {
break;
}
if audio_tokens[logit_idx] != self.pad_token_id {
let logit = logit.i((0, logit.dim(1)? - 1))?;
let token = lp.sample(&logit)?;
audio_tokens[logit_idx] = token
}
}
if audio_tokens.iter().all(|v| v == &self.pad_token_id) {
break;
}
for (cb_idx, &token) in audio_tokens.iter().enumerate() {
if token != self.decoder_start_token_id && token != self.pad_token_id {
all_audio_tokens[cb_idx].push(token)
}
}
}
let min_len = all_audio_tokens.iter().map(|v| v.len()).min().unwrap_or(0);
all_audio_tokens.iter_mut().for_each(|v| {
v.resize(min_len, 0);
});
let all_audio_tokens = Tensor::new(all_audio_tokens, &candle::Device::Cpu)?;
Ok(all_audio_tokens)
}
fn prepare_causal_mask(
&self,
q_len: usize,
kv_len: usize,
device: &candle::Device,
) -> Result<Tensor> {
let mask: Vec<_> = (0..q_len)
.flat_map(|i| {
(0..kv_len).map(move |j| {
if i + kv_len < j + q_len {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
Tensor::from_slice(&mask, (q_len, kv_len), device)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/convnext.rs | candle-transformers/src/models/convnext.rs | //! ConvNeXt implementation.
//!
//! This candle implementation uses a pre-trained ConvNeXt network for inference. The
//! classification head has been trained on the ImageNet dataset and returns logits over
//! the ImageNet classes, from which top-5 class probabilities can be derived.
//!
//! Original code:
//! - 💻 [ConvNeXt](https://github.com/facebookresearch/ConvNeXt/)
//! - 💻 [ConvNeXt-V2](https://github.com/facebookresearch/ConvNeXt-V2/)
//! - 💻 [timm](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/convnext.py)
//! - 📝 [Paper](https://arxiv.org/abs/2201.03545) A ConvNet for the 2020s
//! - 📝 [Paper](https://arxiv.org/abs/2301.00808) ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders
//!
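//! A hypothetical classification sketch; the weight file and the `image` tensor
//! (a normalized `(batch, 3, 224, 224)` input) are assumptions:
//!
//! ```ignore
//! use candle::{DType, Device, D};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::convnext;
//!
//! let device = Device::Cpu;
//! let vb = unsafe { VarBuilder::from_mmaped_safetensors(&["convnext_tiny.safetensors"], DType::F32, &device)? };
//! let model = convnext::convnext(&convnext::Config::tiny(), 1000, vb)?;
//! let logits = image.apply(&model)?;
//! let probs = candle_nn::ops::softmax(&logits, D::Minus1)?;
//! ```
//!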
use candle::shape::ShapeWithOneHole;
use candle::{Result, D};
use candle_nn::{conv2d, layer_norm, linear, Conv2dConfig, Func, VarBuilder};
#[derive(Clone)]
pub struct Config {
blocks: [usize; 4],
channels: [usize; 4],
use_conv_mlp: bool,
}
impl Config {
pub fn atto() -> Self {
Self {
blocks: [2, 2, 6, 2],
channels: [40, 80, 160, 320],
use_conv_mlp: true,
}
}
pub fn femto() -> Self {
Self {
blocks: [2, 2, 6, 2],
channels: [48, 96, 192, 384],
use_conv_mlp: true,
}
}
pub fn pico() -> Self {
Self {
blocks: [2, 2, 6, 2],
channels: [64, 128, 256, 512],
use_conv_mlp: true,
}
}
pub fn nano() -> Self {
Self {
blocks: [2, 2, 8, 2],
channels: [80, 160, 320, 640],
use_conv_mlp: true,
}
}
pub fn tiny() -> Self {
Self {
blocks: [3, 3, 9, 3],
channels: [96, 192, 384, 768],
use_conv_mlp: false,
}
}
pub fn small() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [96, 192, 384, 768],
use_conv_mlp: false,
}
}
pub fn base() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [128, 256, 512, 1024],
use_conv_mlp: false,
}
}
pub fn large() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [192, 384, 768, 1536],
use_conv_mlp: false,
}
}
pub fn xlarge() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [256, 512, 1024, 2048],
use_conv_mlp: false,
}
}
pub fn huge() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [352, 704, 1408, 2816],
use_conv_mlp: false,
}
}
}
// Layer norm for data in channels-last format.
fn layer_norm_cl(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm(dim, 1e-6, vb)?;
Ok(Func::new(move |xs| xs.apply(&norm)))
}
// Layer norm for data in channels-first format.
fn layer_norm_cf(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm(dim, 1e-6, vb)?;
Ok(Func::new(move |xs| {
let xs = xs
.permute((0, 2, 3, 1))?
.apply(&norm)?
.permute((0, 3, 1, 2))?;
Ok(xs)
}))
}
// Global response normalization layer
// Based on https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/grn.py
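// As in the ConvNeXt-V2 paper: gx = ||x||_2 over the spatial dims,
// nx = gx / (mean_over_channels(gx) + 1e-6), y = gamma * (x * nx) + beta + x.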
fn convnext2_grn(dim: usize, channels_last: bool, vb: VarBuilder) -> Result<Func<'static>> {
let (shape, spatial_dim, channel_dim) = if channels_last {
((1, 1, 1, ()).into_shape(dim)?, [1, 2], 3)
} else {
((1, (), 1, 1).into_shape(dim)?, [2, 3], 1)
};
let gamma = vb.get(dim, "weight")?.reshape(&shape)?;
let beta = vb.get(dim, "bias")?.reshape(&shape)?;
Ok(Func::new(move |xs| {
let residual = xs;
let gx = xs
.sqr()?
.sum_keepdim(spatial_dim)?
.mean_keepdim(spatial_dim)?
.sqrt()?;
let gxmean = gx.mean_keepdim(channel_dim)?;
let nx = gx.broadcast_div(&(gxmean + 1e-6)?)?;
let xs = xs
.broadcast_mul(&nx)?
.broadcast_mul(&gamma)?
.broadcast_add(&beta)?;
xs + residual
}))
}
// Initial downsampling via a patchify layer.
fn convnext_stem(out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: 4,
..Default::default()
};
let patchify = conv2d(3, out_channels, 4, conv2d_cfg, vb.pp(0))?;
let norm = layer_norm_cf(out_channels, vb.pp(1))?;
Ok(Func::new(move |xs| xs.apply(&patchify)?.apply(&norm)))
}
// Downsampling applied between stages (at the start of every stage after the first).
fn convnext_downsample(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: 2,
..Default::default()
};
let norm = layer_norm_cf(dim / 2, vb.pp(0))?;
let conv = conv2d(dim / 2, dim, 2, conv2d_cfg, vb.pp(1))?;
Ok(Func::new(move |xs| xs.apply(&norm)?.apply(&conv)))
}
// MLP block from the original paper with optional GRN layer (v2 models).
fn convnext_mlp(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let fc1 = linear(dim, 4 * dim, vb.pp("fc1"))?;
let fc2 = linear(4 * dim, dim, vb.pp("fc2"))?;
let grn = convnext2_grn(4 * dim, true, vb.pp("grn"));
Ok(Func::new(move |xs| {
let mut xs = xs.apply(&fc1)?.gelu_erf()?;
if let Ok(g) = &grn {
xs = xs.apply(g)?;
}
xs = xs.apply(&fc2)?;
Ok(xs)
}))
}
// MLP block using pointwise convolutions, with optional GRN layer (v2 models).
fn convnext_conv_mlp(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let fc1 = conv2d(dim, 4 * dim, 1, conv2d_cfg, vb.pp("fc1"))?;
let fc2 = conv2d(4 * dim, dim, 1, conv2d_cfg, vb.pp("fc2"))?;
let grn = convnext2_grn(4 * dim, false, vb.pp("grn"));
Ok(Func::new(move |xs| {
let mut xs = xs.apply(&fc1)?.gelu_erf()?;
if let Ok(g) = &grn {
xs = xs.apply(g)?;
}
xs = xs.apply(&fc2)?;
Ok(xs)
}))
}
// A block consisting of a depthwise convolution, an MLP and optional layer scaling (the gamma parameter, present in v1 models only).
fn convnext_block(dim: usize, use_conv_mlp: bool, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
groups: dim,
padding: 3,
..Default::default()
};
let conv_dw = conv2d(dim, dim, 7, conv2d_cfg, vb.pp("conv_dw"))?;
let gamma = vb.get(dim, "gamma");
let (mlp, norm) = if use_conv_mlp {
(
convnext_conv_mlp(dim, vb.pp("mlp"))?,
layer_norm_cf(dim, vb.pp("norm"))?,
)
} else {
(
convnext_mlp(dim, vb.pp("mlp"))?,
layer_norm_cl(dim, vb.pp("norm"))?,
)
};
Ok(Func::new(move |xs| {
let residual = xs;
let mut xs = xs.apply(&conv_dw)?;
xs = if use_conv_mlp {
xs.apply(&norm)?.apply(&mlp)?
} else {
xs.permute((0, 2, 3, 1))?
.apply(&norm)?
.apply(&mlp)?
.permute((0, 3, 1, 2))?
};
if let Ok(g) = &gamma {
xs = xs.broadcast_mul(&g.reshape((1, (), 1, 1))?)?;
};
xs + residual
}))
}
// Each stage consists of its blocks, preceded (for all stages but the first) by a layer that downsamples the previous stage's output.
fn convnext_stage(cfg: &Config, stage_idx: usize, vb: VarBuilder) -> Result<Func<'static>> {
let nblocks = cfg.blocks[stage_idx];
let mut blocks = Vec::with_capacity(nblocks);
let dim = cfg.channels[stage_idx];
if stage_idx > 0 {
blocks.push(convnext_downsample(dim, vb.pp("downsample"))?);
}
for block_idx in 0..nblocks {
blocks.push(convnext_block(
dim,
cfg.use_conv_mlp,
vb.pp(format!("blocks.{block_idx}")),
)?);
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for block in blocks.iter() {
xs = xs.apply(block)?
}
Ok(xs)
}))
}
// Classification head.
fn convnext_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm_cl(outputs, vb.pp("norm"))?;
let linear = linear(outputs, nclasses, vb.pp("fc"))?;
Ok(Func::new(move |xs| xs.apply(&norm)?.apply(&linear)))
}
// Build a convnext model for a given configuration.
fn convnext_model(
config: &Config,
nclasses: Option<usize>,
vb: VarBuilder,
) -> Result<Func<'static>> {
let head = match nclasses {
None => None,
Some(nclasses) => {
let head = convnext_head(config.channels[3], nclasses, vb.pp("head"))?;
Some(head)
}
};
let stem = convnext_stem(config.channels[0], vb.pp("stem"))?;
let vb = vb.pp("stages");
let stage1 = convnext_stage(config, 0, vb.pp(0))?;
let stage2 = convnext_stage(config, 1, vb.pp(1))?;
let stage3 = convnext_stage(config, 2, vb.pp(2))?;
let stage4 = convnext_stage(config, 3, vb.pp(3))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&stem)?
.apply(&stage1)?
.apply(&stage2)?
.apply(&stage3)?
.apply(&stage4)?
.mean(D::Minus2)?
.mean(D::Minus1)?;
match &head {
None => Ok(xs),
Some(head) => xs.apply(head),
}
}))
}
pub fn convnext(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
convnext_model(cfg, Some(nclasses), vb)
}
pub fn convnext_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
convnext_model(cfg, None, vb)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/hiera.rs | candle-transformers/src/models/hiera.rs | //! Hiera inference implementation based on timm.
//!
//! - 💻 [Hiera](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/hiera.py)
//! - 📝 [Paper](https://arxiv.org/abs/2306.00989). Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles
use candle::{Result, D};
use candle_nn::{conv2d, layer_norm, linear, ops::softmax, Conv2dConfig, Func, VarBuilder};
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
channels: usize,
heads: usize,
stages: [usize; 4],
}
impl Config {
pub fn tiny() -> Self {
Self {
channels: 96,
heads: 1,
stages: [1, 2, 7, 2],
}
}
pub fn small() -> Self {
Self {
channels: 96,
heads: 1,
stages: [1, 2, 11, 2],
}
}
pub fn base() -> Self {
Self {
channels: 96,
heads: 1,
stages: [2, 3, 16, 3],
}
}
pub fn base_plus() -> Self {
Self {
channels: 112,
heads: 2,
stages: [2, 3, 16, 3],
}
}
pub fn large() -> Self {
Self {
channels: 144,
heads: 2,
stages: [2, 6, 36, 4],
}
}
pub fn huge() -> Self {
Self {
channels: 256,
heads: 4,
stages: [2, 6, 36, 4],
}
}
}
const NUM_TOKENS: usize = 56 * 56;
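// Patch embedding: a 7x7, stride-4 convolution (56x56 = NUM_TOKENS tokens assuming the
// standard 224x224 input), flattened to (batch, tokens, channels) and combined with a
// learned positional embedding.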
fn hiera_embeddings(channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv_cfg = Conv2dConfig {
stride: 4,
padding: 3,
..Default::default()
};
let proj = conv2d(3, channels, 7, conv_cfg, vb.pp("patch_embed.proj"))?;
let pos_embed = vb.get((1, NUM_TOKENS, channels), "pos_embed")?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&proj)?;
let (b, c, _, _) = xs.dims4()?;
let xs = xs.reshape((b, c, ()))?.transpose(1, 2)?;
let xs = xs.broadcast_add(&pos_embed)?;
Ok(xs)
}))
}
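// Reorders the 56x56 token grid for windowed attention: three times in a row, every 2x2
// neighbourhood is moved into the batch dimension, so that the stage-wise `max` pooling
// and the local attention windows operate on contiguous runs of tokens.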
fn hiera_unroll() -> Result<Func<'static>> {
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
let (mut b, _, c) = xs.dims3()?;
let mut size = 56;
xs = xs.reshape((b, size, size, c))?;
for _ in 0..3 {
size /= 2;
let new_shape = &[b, size, 2, size, 2, c];
xs = xs.reshape(new_shape)?;
xs = xs.permute((0, 2, 4, 1, 3, 5))?;
xs = xs.flatten(0, 2)?;
b *= 4;
}
xs = xs.reshape(((), NUM_TOKENS, c))?;
Ok(xs)
}))
}
fn hiera_mlp(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let fc1 = linear(in_channels, out_channels, vb.pp("fc1"))?;
let fc2 = linear(out_channels, in_channels, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&fc1)?.gelu()?.apply(&fc2)?;
Ok(xs)
}))
}
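// Attention with optional local windowing (used in the early stages) and optional query
// pooling: when q_stride > 1 the queries are max-pooled in groups of q_stride before the
// attention scores are computed.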
fn hiera_attention(
in_channels: usize,
out_channels: usize,
heads: usize,
q_stride: usize,
window_size: usize,
use_mask_attention: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let head_dim = out_channels / heads;
let scale = (head_dim as f64).powf(-0.5);
let proj = linear(out_channels, out_channels, vb.pp("proj"))?;
let qkv = linear(in_channels, out_channels * 3, vb.pp("qkv"))?;
Ok(Func::new(move |xs| {
let (b, n, _) = xs.dims3()?;
let num_windows = if use_mask_attention {
n / (q_stride * window_size)
} else {
1
};
let qkv = xs.apply(&qkv)?;
let ec = qkv.elem_count();
let s = ec / (b * num_windows * 3 * heads * head_dim);
let qkv = qkv
.reshape((b, s, num_windows, 3, heads, head_dim))?
.permute((3, 0, 4, 2, 1, 5))?;
let mut q = qkv.get(0)?;
let k = qkv.get(1)?;
let v = qkv.get(2)?;
if q_stride > 1 {
let ec = q.elem_count();
let s = ec / (b * num_windows * q_stride * heads * head_dim);
q = q
.reshape((b, heads, num_windows, q_stride, s, head_dim))?
.max(3)?;
}
let q = (q * scale)?;
// Q, K and V are 6 dimensional with the first dimension being 1.
// Squeeze them for the attention calculation since 6 dimensional matmuls are not supported.
let att = q
.squeeze(0)?
.matmul(&k.squeeze(0)?.transpose(D::Minus2, D::Minus1)?)?;
let att = softmax(&att, D::Minus1)?;
let xs = att.matmul(&v.squeeze(0)?)?.unsqueeze(0)?;
let xs = xs.transpose(1, 3)?.reshape((b, (), out_channels))?;
let xs = xs.apply(&proj)?;
Ok(xs)
}))
}
fn hiera_block(
heads: usize,
in_channels: usize,
out_channels: usize,
q_stride: usize,
window_size: usize,
use_mask_attention: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let norm1 = layer_norm(in_channels, 1e-6, vb.pp("norm1"))?;
let norm2 = layer_norm(out_channels, 1e-6, vb.pp("norm2"))?;
let proj = linear(in_channels, out_channels, vb.pp("proj"));
let stride = 4;
let mlp = hiera_mlp(out_channels, out_channels * 4, vb.pp("mlp"))?;
let attn = hiera_attention(
in_channels,
out_channels,
heads,
q_stride,
window_size,
use_mask_attention,
vb.pp("attn"),
)?;
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
let xs_norm = xs.apply_t(&norm1, false)?;
if let Ok(p) = &proj {
xs = xs_norm.apply(p)?;
let (a, _, d) = xs.dims3()?;
xs = xs.reshape((a, stride, (), d))?.max(1)?;
}
let xs = (xs + &xs_norm.apply(&attn)?)?;
let xs = (&xs + &xs.apply_t(&norm2, false)?.apply(&mlp)?)?;
Ok(xs)
}))
}
fn hiera_blocks(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
let nblocks = cfg.stages.iter().sum();
let mut blocks = Vec::with_capacity(nblocks);
let mut out_channels = cfg.channels;
let mut in_channels = out_channels;
let mut heads = cfg.heads;
let mut b = 0;
let mut q_stride = 1;
let mut window_size = 64;
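    // Stage schedule: local (masked) attention is only used in the first two stages; after
    // each stage the channels and heads double, the window shrinks by 4x, and the first
    // block of the next stage pools queries with q_stride = 4.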
for s in 0..4 {
let use_mask_attention = s < 2;
for _ in 0..cfg.stages[s] {
blocks.push(hiera_block(
heads,
in_channels,
out_channels,
q_stride,
window_size,
use_mask_attention,
vb.pp(b),
)?);
b += 1;
in_channels = out_channels;
q_stride = 1;
}
q_stride = 4;
out_channels *= 2;
heads *= 2;
window_size /= 4;
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for block in blocks.iter() {
xs = xs.apply(block)?
}
Ok(xs)
}))
}
fn hiera_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm(outputs, 1e-6, vb.pp("norm"))?;
let linear = linear(outputs, nclasses, vb.pp("fc"))?;
Ok(Func::new(move |xs| {
xs.apply_t(&norm, false)?.apply(&linear)
}))
}
// Build a hiera model for a given configuration.
fn hiera_model(cfg: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let outputs = cfg.channels * 8;
let head = hiera_head(outputs, nclasses, vb.pp("head"))?;
Some(head)
}
};
let embeddings = hiera_embeddings(cfg.channels, vb.clone())?;
let unroll = hiera_unroll()?;
let blocks = hiera_blocks(cfg, vb.pp("blocks"))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&embeddings)?
.apply(&unroll)?
.apply(&blocks)?
.mean(1)?;
match &cls {
None => Ok(xs),
Some(cls) => xs.apply(cls),
}
}))
}
pub fn hiera(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
hiera_model(cfg, Some(nclasses), vb)
}
pub fn hiera_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
hiera_model(cfg, None, vb)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/deepseek2.rs | candle-transformers/src/models/deepseek2.rs | #![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)]
use std::{f32::consts::PI, sync::Arc};
use candle::{
shape::Dim, CpuStorage, CustomOp1, DType, Device, Error, IndexOp, Layout, Result, Shape,
Tensor, WithDType, D,
};
use candle_nn::{embedding, rms_norm, Activation, Embedding, Linear, Module, RmsNorm, VarBuilder};
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use serde::Deserialize;
struct NonZero {}
impl NonZero {
// Sequential version
fn nonzero<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Vec<u32> {
let n = layout.dims().len();
let mut result = Vec::new();
let mut indices = vec![0u32; n];
for (i, v) in vs.iter().enumerate() {
if !v.is_zero() {
let mut idx = i;
for (dim_index, dim) in layout.dims().iter().enumerate().rev() {
let d = idx % dim;
indices[dim_index] = u32::try_from(d).unwrap();
idx /= dim;
}
result.extend_from_slice(&indices);
}
}
result
}
}
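// The op returns the multi-dimensional indices of the non-zero elements as a
// (count, ndims) u32 tensor, similar in spirit to torch.nonzero.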
impl CustomOp1 for NonZero {
fn name(&self) -> &'static str {
"nonzero"
}
fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
if !layout.is_contiguous() {
return Err(Error::RequiresContiguous { op: "nonzero" });
}
let result = match storage {
candle::CpuStorage::U8(vs) => self.nonzero(vs, layout),
candle::CpuStorage::U32(vs) => self.nonzero(vs, layout),
candle::CpuStorage::I16(vs) => self.nonzero(vs, layout),
candle::CpuStorage::I32(vs) => self.nonzero(vs, layout),
candle::CpuStorage::I64(vs) => self.nonzero(vs, layout),
candle::CpuStorage::BF16(vs) => self.nonzero(vs, layout),
candle::CpuStorage::F16(vs) => self.nonzero(vs, layout),
candle::CpuStorage::F32(vs) => self.nonzero(vs, layout),
candle::CpuStorage::F64(vs) => self.nonzero(vs, layout),
candle::CpuStorage::F8E4M3(vs) => self.nonzero(vs, layout),
// Dummy types don't support nonzero operation
candle::CpuStorage::F6E2M3(_) => {
return Err(
candle::Error::UnsupportedDTypeForOp(candle::DType::F6E2M3, "nonzero").bt(),
)
}
candle::CpuStorage::F6E3M2(_) => {
return Err(
candle::Error::UnsupportedDTypeForOp(candle::DType::F6E3M2, "nonzero").bt(),
)
}
candle::CpuStorage::F4(_) => {
return Err(candle::Error::UnsupportedDTypeForOp(candle::DType::F4, "nonzero").bt())
}
candle::CpuStorage::F8E8M0(_) => {
return Err(
candle::Error::UnsupportedDTypeForOp(candle::DType::F8E8M0, "nonzero").bt(),
)
}
};
let index_len = layout.dims().len();
let result_len = result.len() / index_len;
let result = CpuStorage::U32(result);
let shape = Shape::from_dims(&[result_len, index_len]);
Ok((result, shape))
}
}
pub trait NonZeroOp {
fn nonzero(&self) -> Result<Tensor>;
}
impl NonZeroOp for Tensor {
fn nonzero(&self) -> Result<Tensor> {
if !self.is_contiguous() {
return Err(candle::Error::RequiresContiguous { op: "nonzero" });
}
let original_device = self.device();
self.to_device(&candle::Device::Cpu)?
.apply_op1_no_bwd(&NonZero {})?
.to_device(original_device)
}
}
pub struct TopKOutput {
pub values: Tensor,
pub indices: Tensor,
}
pub trait TopKLastDimOp {
/// Topk in the last dim. `values` retains a gradient but `indices` has none w.r.t self.
/// This expects a contiguous tensor.
/// Note: this implements torch.topk with sorted=True.
fn topk(&self, topk: usize) -> Result<TopKOutput>;
/// Topk in the last dim. `values` retains a gradient but `indices` has none w.r.t self.
/// This expects a contiguous tensor.
/// Note: this implements torch.topk with sorted=False.
fn topk_unsorted(&self, topk: usize) -> Result<TopKOutput>;
}
impl TopKLastDimOp for Tensor {
fn topk(&self, topk: usize) -> Result<TopKOutput> {
// Sorted descending
let sorted_indices = self.arg_sort_last_dim(false)?;
let topk_indices = sorted_indices.narrow(D::Minus1, 0, topk)?.contiguous()?;
Ok(TopKOutput {
values: self.gather(&topk_indices, D::Minus1)?,
indices: topk_indices,
})
}
fn topk_unsorted(&self, topk: usize) -> Result<TopKOutput> {
// Sorted descending
let sorted_indices_all = self.arg_sort_last_dim(false)?;
let topk_indices_sorted = sorted_indices_all
.narrow(D::Minus1, 0, topk)?
.contiguous()?;
let topk_values_sorted = self.gather(&topk_indices_sorted, D::Minus1)?;
// Reorder the indices ascending
let reorder_indices = topk_indices_sorted.arg_sort_last_dim(true)?;
let topk_indices_unsorted = topk_indices_sorted.gather(&reorder_indices, D::Minus1)?;
let topk_values_unsorted = topk_values_sorted.gather(&reorder_indices, D::Minus1)?;
Ok(TopKOutput {
values: topk_values_unsorted,
indices: topk_indices_unsorted,
})
}
}
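// Worked example: for [0.3, 0.1, 0.4, 0.2] with topk = 2, `topk` yields values [0.4, 0.3]
// with indices [2, 0]; `topk_unsorted` returns the same elements ordered by ascending
// index, i.e. indices [0, 2] with values [0.3, 0.4].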
pub trait SplitOp {
fn split<D: Dim>(&self, splits: &[usize], dim: D) -> Result<Vec<Tensor>>;
}
impl SplitOp for Tensor {
fn split<D: Dim>(&self, splits: &[usize], dim: D) -> Result<Vec<Tensor>> {
let dim = dim.to_index(self.shape(), "split")?;
let mut split_res = Vec::new();
let mut index = 0;
for split in splits {
split_res.push(self.narrow(dim, index, *split)?);
index += *split;
}
Ok(split_res)
}
}
pub trait BincountOp {
fn bincount(&self, minlength: u32) -> Result<Vec<u32>>;
}
fn bincount(values: &[u32], minlength: u32) -> Vec<u32> {
// Find the maximum value in `values` (or zero if empty)
let max_val = values.par_iter().max().copied().unwrap_or(0);
// The final size of the bin counts must be at least `minlength`
// and large enough to include the largest value in `values`.
let result_len = (max_val + 1).max(minlength);
// Each thread creates a local histogram (`fold`),
// and then they are merged together (`reduce`).
values
.par_iter()
.fold(
// Create a local histogram
|| vec![0u32; result_len as usize],
// Update the local histogram
|mut local_counts, &val| {
local_counts[val as usize] += 1;
local_counts
},
)
// Merge histograms from all threads
.reduce(
// Identity (empty histogram)
|| vec![0u32; result_len as usize],
// Combine two histograms
|mut global_counts, local_counts| {
for (g, l) in global_counts.iter_mut().zip(local_counts) {
*g += l;
}
global_counts
},
)
}
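// A minimal illustrative check of the parallel histogram above (not part of the upstream
// file): the result is padded up to `minlength` and empty inputs yield all-zero counts.
#[cfg(test)]
mod bincount_tests {
    use super::bincount;
    #[test]
    fn bincount_pads_to_minlength() {
        // 0 appears once, 1 twice, 3 once; minlength = 6 pads with trailing zeros.
        assert_eq!(bincount(&[0, 1, 1, 3], 6), vec![1, 2, 0, 1, 0, 0]);
        // An empty input still yields `minlength` zero counts.
        assert_eq!(bincount(&[], 3), vec![0, 0, 0]);
    }
}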
impl BincountOp for Tensor {
fn bincount(&self, minlength: u32) -> Result<Vec<u32>> {
let values = self.to_vec1::<u32>()?;
Ok(bincount(&values, minlength))
}
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[doc(hidden)]
#[macro_export]
macro_rules! serde_default_fn {
($t:ty, $name:ident, $v:expr) => {
fn $name() -> $t {
$v
}
};
}
serde_default_fn!(f64, routed_scaling_factor, 1.0);
serde_default_fn!(TopkMethod, topk_method, TopkMethod::Greedy);
serde_default_fn!(usize, moe_layer_freq, 1);
serde_default_fn!(usize, first_k_dense_replace, 0);
serde_default_fn!(bool, norm_topk_prob, false);
serde_default_fn!(ScoringFunc, scoring_func, ScoringFunc::Softmax);
serde_default_fn!(Activation, hidden_act, Activation::Silu);
serde_default_fn!(bool, tie_word_embeddings, false);
#[derive(Deserialize, Clone, Debug)]
enum TopkMethod {
#[serde(rename = "greedy")]
Greedy,
#[serde(rename = "group_limited_greedy")]
GroupLimitedGreedy,
}
#[derive(Deserialize, Clone, Debug)]
enum ScoringFunc {
#[serde(rename = "softmax")]
Softmax,
}
#[derive(Deserialize, Clone, Debug)]
pub struct DeepSeekV2Config {
pub(crate) vocab_size: usize,
pub(crate) hidden_size: usize,
pub(crate) intermediate_size: usize,
pub(crate) moe_intermediate_size: usize,
pub(crate) num_hidden_layers: usize,
pub(crate) num_attention_heads: usize,
pub(crate) n_shared_experts: Option<usize>,
pub(crate) n_routed_experts: Option<usize>,
#[serde(default = "routed_scaling_factor")]
pub(crate) routed_scaling_factor: f64,
#[serde(default = "topk_method")]
topk_method: TopkMethod,
pub(crate) num_experts_per_tok: Option<usize>,
#[serde(default = "moe_layer_freq")]
pub(crate) moe_layer_freq: usize,
#[serde(default = "first_k_dense_replace")]
pub(crate) first_k_dense_replace: usize,
    // The first `first_k_dense_replace` layers use a dense MLP instead of the MoE block.
#[serde(default = "norm_topk_prob")]
pub(crate) norm_topk_prob: bool,
#[serde(default = "scoring_func")]
scoring_func: ScoringFunc,
#[serde(default = "hidden_act")]
pub(crate) hidden_act: Activation,
pub(crate) max_position_embeddings: usize,
pub(crate) rms_norm_eps: f64,
#[serde(default = "tie_word_embeddings")]
pub(crate) tie_word_embeddings: bool,
pub(crate) rope_theta: f32,
pub(crate) rope_scaling: Option<DeepSeekV2RopeScaling>,
pub(crate) attention_bias: bool,
pub(crate) q_lora_rank: Option<usize>,
pub(crate) qk_rope_head_dim: usize,
pub(crate) kv_lora_rank: usize,
pub(crate) v_head_dim: usize,
pub(crate) qk_nope_head_dim: usize,
pub(crate) n_group: usize,
pub(crate) topk_group: usize,
}
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ScaledRopeType {
#[serde(alias = "su")]
#[serde(alias = "longrope")]
Su,
#[serde(alias = "yarn")]
Yarn,
#[serde(alias = "dynamic")]
Dynamic,
#[serde(alias = "linear")]
Linear,
}
#[derive(Debug, Clone)]
pub struct DeepSeekV2RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum DeepSeekV2RopeScaling {
Yarn {
original_max_position_embeddings: usize,
beta_fast: f32,
beta_slow: f32,
mscale: f32,
mscale_all_dim: f32,
factor: f32,
#[serde(rename = "type")]
scaling_type: ScaledRopeType,
},
LinearOrDynamic {
#[serde(rename = "type")]
scaling_type: ScaledRopeType,
factor: f64,
},
}
pub struct DeepSeekV2RopeConfig {
pub rope_scaling: Option<DeepSeekV2RopeScaling>,
pub max_position_embeddings: usize,
pub rope_theta: f32,
pub qk_rope_head_dim: usize,
}
impl DeepSeekV2RotaryEmbedding {
fn new_unscaled(cfg: &DeepSeekV2RopeConfig, dtype: DType, dev: &Device) -> Result<Self> {
let max_seq_len = cfg.max_position_embeddings;
let dim = cfg.qk_rope_head_dim;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
let sin = freqs.sin()?.to_dtype(dtype)?;
let cos = freqs.cos()?.to_dtype(dtype)?;
Ok(Self { sin, cos })
}
fn yarn_find_correction_dim(
num_rot: f32,
dim: usize,
base: f32,
max_position_embeddings: usize,
) -> f32 {
(dim as f32 * (max_position_embeddings as f32 / (num_rot * 2. * PI)).ln())
/ (2. * base.ln())
}
fn yarn_find_correction_range(
low_rot: f32,
high_rot: f32,
dim: usize,
base: f32,
max_position_embeddings: usize,
) -> (f32, f32) {
let low =
Self::yarn_find_correction_dim(low_rot, dim, base, max_position_embeddings).floor();
let high =
Self::yarn_find_correction_dim(high_rot, dim, base, max_position_embeddings).ceil();
(low.max(0.), high.min(dim as f32 - 1.))
}
fn yarn_linear_ramp_mask(min: f32, mut max: f32, dim: usize, dev: &Device) -> Result<Tensor> {
if min == max {
// https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite/blob/604d5664dddd88a0433dbae533b7fe9472482de0/modeling_deepseek.py#L255
max += 0.001;
}
let linear_func =
((Tensor::arange(0f32, dim as f32, dev)? - min as f64)? / (max as f64 - min as f64))?;
linear_func.clamp(0., 1.)
}
pub(crate) fn yarn_get_mscale(scale: f32, mscale: f32) -> f32 {
if scale <= 1. {
return 1.;
}
0.1 * mscale * scale.ln() + 1.
}
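    // YaRN scaling: interpolated (factor-scaled) inverse frequencies are blended with the
    // original ones using a linear ramp between the beta_fast/beta_slow correction dims,
    // and the cached sin/cos tables are rescaled by the derived mscale factor.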
#[allow(clippy::too_many_arguments)]
fn new_yarn(
cfg: &DeepSeekV2RopeConfig,
dtype: DType,
dev: &Device,
original_max_position_embeddings: usize,
beta_fast: f32,
beta_slow: f32,
factor: f32,
mscale: f32,
mscale_all_dim: f32,
) -> Result<Self> {
let freq_extra: Vec<_> = (0..cfg.qk_rope_head_dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f32 / cfg.qk_rope_head_dim as f32))
.collect();
let freq_extra_len = freq_extra.len();
let freq_extra = Tensor::from_vec(freq_extra, freq_extra_len, dev)?;
let freq_inter: Vec<_> = (0..cfg.qk_rope_head_dim)
.step_by(2)
.map(|i| 1f32 / (factor * cfg.rope_theta.powf(i as f32 / cfg.qk_rope_head_dim as f32)))
.collect();
let freq_inter_len = freq_inter.len();
let freq_inter = Tensor::from_vec(freq_inter, (1, freq_inter_len), dev)?;
let (low, high) = Self::yarn_find_correction_range(
beta_fast,
beta_slow,
cfg.qk_rope_head_dim,
cfg.rope_theta,
original_max_position_embeddings,
);
let inv_freq_mask =
(1. - Self::yarn_linear_ramp_mask(low, high, cfg.qk_rope_head_dim / 2, dev)?)?;
let inv_freq = freq_inter
.broadcast_mul(&(1. - &inv_freq_mask)?)?
.broadcast_add(&freq_extra.broadcast_mul(&inv_freq_mask)?)?;
let t = Tensor::arange(0u32, cfg.max_position_embeddings as u32, dev)?
.to_dtype(DType::F32)?
.reshape((cfg.max_position_embeddings, 1))?;
let freqs = t.matmul(&inv_freq)?;
let mscale =
Self::yarn_get_mscale(factor, mscale) / Self::yarn_get_mscale(factor, mscale_all_dim);
let sin = (freqs.sin()? * mscale as f64)?.to_dtype(dtype)?;
let cos = (freqs.cos()? * mscale as f64)?.to_dtype(dtype)?;
Ok(Self { sin, cos })
}
pub fn new(cfg: &DeepSeekV2RopeConfig, dtype: DType, dev: &Device) -> Result<Self> {
match &cfg.rope_scaling {
Some(DeepSeekV2RopeScaling::LinearOrDynamic {
scaling_type: _,
factor: _,
}) => candle::bail!("linear and dynamic rope are not implemented yet!"),
Some(DeepSeekV2RopeScaling::Yarn {
original_max_position_embeddings,
beta_fast,
beta_slow,
factor,
mscale,
mscale_all_dim,
scaling_type: _,
}) => Self::new_yarn(
cfg,
dtype,
dev,
*original_max_position_embeddings,
*beta_fast,
*beta_slow,
*factor,
*mscale,
*mscale_all_dim,
),
None => Self::new_unscaled(cfg, dtype, dev),
}
}
pub fn forward(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope_i(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope_i(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
impl DeepSeekV2Config {
pub(crate) fn q_head_dim(&self) -> usize {
self.qk_rope_head_dim + self.qk_nope_head_dim
}
fn softmax_scale(&self) -> f32 {
let mut softmax_scale = 1.0 / (self.q_head_dim() as f32).sqrt();
if let Some(DeepSeekV2RopeScaling::Yarn {
mscale_all_dim,
factor,
..
}) = self.rope_scaling
{
let mscale = DeepSeekV2RotaryEmbedding::yarn_get_mscale(factor, mscale_all_dim);
softmax_scale = softmax_scale * mscale * mscale;
}
softmax_scale
}
}
enum QProj {
Plain(Linear),
Lora { a: Linear, norm: RmsNorm, b: Linear },
}
impl QProj {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::Lora { a, norm, b } => b.forward(&norm.forward(&a.forward(xs)?)?),
Self::Plain(lin) => lin.forward(xs),
}
}
}
struct Attention {
q: QProj,
kv_a_proj_with_mqa: Linear,
kv_a_layernorm: RmsNorm,
kv_b_proj: Linear,
o_proj: Linear,
rotary_emb: Arc<DeepSeekV2RotaryEmbedding>,
cfg: DeepSeekV2Config,
q_head_dim: usize,
softmax_scale: f64,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(
rotary_emb: Arc<DeepSeekV2RotaryEmbedding>,
cfg: &DeepSeekV2Config,
vb: VarBuilder,
) -> Result<Self> {
let q_head_dim = cfg.q_head_dim();
let q = match cfg.q_lora_rank {
Some(lora_rank) => {
let a = candle_nn::linear_b(
cfg.hidden_size,
lora_rank,
cfg.attention_bias,
vb.pp("q_a_proj"),
)?;
let norm = rms_norm(lora_rank, cfg.rms_norm_eps, vb.pp("q_a_layernorm"))?;
let b = candle_nn::linear_no_bias(
lora_rank,
cfg.num_attention_heads * q_head_dim,
vb.pp("q_b_proj"),
)?;
QProj::Lora { a, norm, b }
}
None => QProj::Plain(candle_nn::linear_no_bias(
cfg.hidden_size,
cfg.num_attention_heads * q_head_dim,
vb.pp("q_proj"),
)?),
};
let kv_a_proj_with_mqa = candle_nn::linear_b(
cfg.hidden_size,
cfg.kv_lora_rank + cfg.qk_rope_head_dim,
cfg.attention_bias,
vb.pp("kv_a_proj_with_mqa"),
)?;
let kv_a_layernorm = rms_norm(cfg.kv_lora_rank, cfg.rms_norm_eps, vb.pp("kv_a_layernorm"))?;
let kv_b_proj = candle_nn::linear_no_bias(
cfg.kv_lora_rank,
cfg.num_attention_heads * (q_head_dim - cfg.qk_rope_head_dim + cfg.v_head_dim),
vb.pp("kv_b_proj"),
)?;
let o_proj = candle_nn::linear_b(
cfg.num_attention_heads * cfg.v_head_dim,
cfg.hidden_size,
cfg.attention_bias,
vb.pp("o_proj"),
)?;
Ok(Self {
q,
kv_a_proj_with_mqa,
kv_a_layernorm,
kv_b_proj,
o_proj,
rotary_emb,
cfg: cfg.clone(),
q_head_dim,
softmax_scale: cfg.softmax_scale() as f64,
kv_cache: None,
})
}
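    // Multi-head latent attention: queries and keys are split into a non-rotary part
    // (qk_nope_head_dim) and a rotary part (qk_rope_head_dim); keys and values are
    // decompressed from the low-rank latent (kv_lora_rank) and only the rotary part
    // goes through RoPE.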
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (bs, seq_len, _) = xs.dims3()?;
let q = {
let q = self.q.forward(xs)?;
q.reshape((bs, seq_len, self.cfg.num_attention_heads, self.q_head_dim))?
.transpose(1, 2)?
};
let q_split = q.split(
&[self.cfg.qk_nope_head_dim, self.cfg.qk_rope_head_dim],
D::Minus1,
)?;
let q_nope = q_split[0].clone();
let q_pe = q_split[1].clone();
let compressed_kv = self.kv_a_proj_with_mqa.forward(xs)?;
let ckv_split = compressed_kv.split(
&[self.cfg.kv_lora_rank, self.cfg.qk_rope_head_dim],
D::Minus1,
)?;
let compressed_kv = ckv_split[0].clone();
let k_pe = {
let k_pe = ckv_split[1].clone();
k_pe.reshape((bs, seq_len, 1, self.cfg.qk_rope_head_dim))?
.transpose(1, 2)?
};
let kv = {
let kv = self
.kv_b_proj
.forward(&self.kv_a_layernorm.forward(&compressed_kv)?)?;
kv.reshape((
bs,
seq_len,
self.cfg.num_attention_heads,
self.cfg.qk_nope_head_dim + self.cfg.v_head_dim,
))?
.transpose(1, 2)?
};
let kv_split = kv.split(&[self.cfg.qk_nope_head_dim, self.cfg.v_head_dim], D::Minus1)?;
let k_nope = kv_split[0].clone();
let v = kv_split[1].clone();
let (q_pe, k_pe) = self.rotary_emb.forward(&q_pe, &k_pe, seqlen_offset)?;
let q = Tensor::cat(&[q_nope, q_pe], D::Minus1)?;
let k = Tensor::cat(&[k_nope, k_pe.repeat((1, q.dim(1)?, 1, 1))?], D::Minus1)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &k], 2)?;
let value_states = Tensor::cat(&[prev_v, &v], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
let attn_out = {
let att = (q.contiguous()?.matmul(&k.t()?.contiguous()?)? * self.softmax_scale)?;
let att = match attention_mask {
Some(mask) => att.broadcast_add(mask)?,
None => att,
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
            // Convert to contiguous as matmul doesn't support strided value tensors for now.
att.matmul(&v.contiguous()?)?
};
let attn_out = if attention_mask.is_some() {
attn_out.transpose(1, 2)?.reshape((bs, seq_len, ()))?
} else {
attn_out.reshape((bs, seq_len, ()))?
};
self.o_proj.forward(&attn_out)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
struct Mlp {
gate: Linear,
up: Linear,
down: Linear,
act: Activation,
}
impl Mlp {
fn new(
cfg: &DeepSeekV2Config,
vb: VarBuilder,
hidden_size: Option<usize>,
intermediate_size: Option<usize>,
) -> Result<Self> {
let hidden_size = hidden_size.unwrap_or(cfg.hidden_size);
let intermediate_size = intermediate_size.unwrap_or(cfg.intermediate_size);
Ok(Self {
gate: candle_nn::linear_no_bias(hidden_size, intermediate_size, vb.pp("gate_proj"))?,
up: candle_nn::linear_no_bias(hidden_size, intermediate_size, vb.pp("up_proj"))?,
down: candle_nn::linear_no_bias(intermediate_size, hidden_size, vb.pp("down_proj"))?,
act: cfg.hidden_act,
})
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = self.gate.forward(xs)?.apply(&self.act)?;
let rhs = self.up.forward(xs)?;
self.down.forward(&(&lhs * &rhs)?)
}
}
struct MoeGate {
weight: Tensor,
cfg: DeepSeekV2Config,
top_k: usize,
n_routed_experts: usize,
}
impl MoeGate {
fn new(cfg: &DeepSeekV2Config, vb: VarBuilder, n_routed_experts: usize) -> Result<Self> {
let weight = vb.get((n_routed_experts, cfg.hidden_size), "weight")?;
Ok(Self {
weight,
cfg: cfg.clone(),
top_k: cfg.num_experts_per_tok.unwrap(),
n_routed_experts,
})
}
/// (topk_idx, topk_weight)
fn forward(&self, xs: &Tensor) -> Result<(Tensor, Tensor)> {
let (bs, seq_len, h) = xs.dims3()?;
// Compute gating score
let xs = xs.reshape(((), h))?;
let logits = xs
.to_dtype(DType::F32)?
.broadcast_matmul(&self.weight.t()?.to_dtype(DType::F32)?)?;
let scores = match self.cfg.scoring_func {
ScoringFunc::Softmax => candle_nn::ops::softmax_last_dim(&logits)?,
};
// Select top-k experts
let (mut topk_weight, topk_idx) = match self.cfg.topk_method {
TopkMethod::Greedy => {
let TopKOutput { values, indices } = scores.topk_unsorted(self.top_k)?;
(values, indices)
}
TopkMethod::GroupLimitedGreedy => {
// (n, n_group)
let group_scores = scores
.reshape((bs * seq_len, self.cfg.n_group, ()))?
.max(D::Minus1)?;
// (n, topk_group)
let group_idx = scores.topk_unsorted(self.cfg.topk_group)?.indices;
// (n, n_group)
let group_mask = group_scores.zeros_like()?.scatter_add(
&group_idx,
&group_idx.ones_like()?.to_dtype(group_scores.dtype())?,
1,
)?;
// (n, e)
let score_mask = group_mask
.unsqueeze(D::Minus1)?
.expand((
bs * seq_len,
self.cfg.n_group,
self.n_routed_experts / self.cfg.n_group,
))?
.reshape((bs, seq_len, ()))?;
// (n, e)
// Invert the mask
let tmp_scores = masked_fill(&score_mask, &(1. - &score_mask.ne(0.)?)?, 0.)?;
let TopKOutput { values, indices } = tmp_scores.topk_unsorted(self.top_k)?;
(values, indices)
}
};
if self.top_k > 1 && self.cfg.norm_topk_prob {
let denominator = (topk_weight.sum_keepdim(D::Minus1)? + 1e-20)?;
topk_weight = (topk_weight / denominator)?;
} else {
topk_weight = (topk_weight * self.cfg.routed_scaling_factor)?;
}
Ok((topk_idx, topk_weight))
}
}
struct Moe {
experts: Vec<Mlp>,
shared_experts: Option<Mlp>,
gate: MoeGate,
}
impl Moe {
fn new(
cfg: &DeepSeekV2Config,
vb: VarBuilder,
n_shared_experts: Option<usize>,
n_routed_experts: usize,
) -> Result<Self> {
let mut experts = Vec::with_capacity(n_routed_experts);
for i in 0..n_routed_experts {
let vb_e = vb.pp("experts").pp(i);
experts.push(Mlp::new(cfg, vb_e, None, Some(cfg.moe_intermediate_size))?);
}
let shared_experts = if let Some(n_shared_experts) = n_shared_experts {
let intermediate_size = cfg.moe_intermediate_size * n_shared_experts;
Some(Mlp::new(
cfg,
vb.pp("shared_experts"),
None,
Some(intermediate_size),
)?)
} else {
None
};
let gate = MoeGate::new(cfg, vb.pp("gate"), n_routed_experts)?;
Ok(Self {
experts,
shared_experts,
gate,
})
}
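    // Expert dispatch: for every expert with at least one routed token, gather the rows
    // assigned to it (via nonzero/index_select), run the expert MLP, scale by the routing
    // weights and scatter the result back with index_add.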
fn moe_infer(&self, xs: &Tensor, topk_ids: &Tensor, topk_weight: &Tensor) -> Result<Tensor> {
let mut y = xs.zeros_like()?;
let counts = topk_ids
.flatten_all()?
.bincount(self.experts.len() as u32)?;
for (i, expert) in self.experts.iter().enumerate() {
if counts[i] == 0 {
continue;
}
let idx_top = topk_ids.eq(i as f64)?.nonzero()?.t()?;
let idx = &idx_top.i(0)?.contiguous()?;
let top = &idx_top.i(1)?.contiguous()?;
y = y.index_add(
idx,
&expert.forward(&xs.index_select(idx, 0)?)?.broadcast_mul(
&topk_weight
.index_select(idx, 0)?
.gather(&top.unsqueeze(1)?, 1)?
.squeeze(1)?
.unsqueeze(D::Minus1)?
.to_dtype(xs.dtype())?,
)?,
0,
)?;
}
Ok(y)
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let identity = xs.clone();
let orig_shape = xs.shape();
let (topk_idx, topk_weight) = self.gate.forward(xs)?;
let xs = xs.reshape(((), xs.dim(D::Minus1)?))?;
let mut y = self
.moe_infer(&xs, &topk_idx, &topk_weight)?
.reshape(orig_shape)?;
if let Some(ref shared_experts) = self.shared_experts {
y = (y + shared_experts.forward(&identity)?)?;
}
Ok(y)
}
}
enum MoeOrMlp {
Moe(Box<Moe>),
Mlp(Box<Mlp>),
}
impl MoeOrMlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::Mlp(mlp) => mlp.forward(xs),
Self::Moe(moe) => moe.forward(xs),
}
}
}
struct DecoderLayer {
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
attn: Attention,
moe_or_mlp: MoeOrMlp,
}
impl DecoderLayer {
fn new(
rotary_emb: Arc<DeepSeekV2RotaryEmbedding>,
cfg: &DeepSeekV2Config,
vb: VarBuilder,
layer_idx: usize,
) -> Result<Self> {
let attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let input_layernorm =
rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = rms_norm(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
let moe_or_mlp = if cfg.n_routed_experts.is_some()
&& layer_idx >= cfg.first_k_dense_replace
&& layer_idx.is_multiple_of(cfg.moe_layer_freq)
{
MoeOrMlp::Moe(
Moe::new(
cfg,
vb.pp("mlp"),
cfg.n_shared_experts,
cfg.n_routed_experts.unwrap(),
)?
.into(),
)
} else {
MoeOrMlp::Mlp(Mlp::new(cfg, vb.pp("mlp"), None, None)?.into())
};
Ok(Self {
input_layernorm,
post_attention_layernorm,
attn,
moe_or_mlp,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self
.moe_or_mlp
.forward(&xs.apply(&self.post_attention_layernorm)?)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.attn.clear_kv_cache();
}
}
pub struct DeepSeekV2 {
lm_head: Linear,
embed_tokens: Embedding,
norm: RmsNorm,
layers: Vec<DecoderLayer>,
dtype: DType,
device: Device,
}
impl DeepSeekV2 {
pub fn new(cfg: &DeepSeekV2Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens = embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let lm_head = if !cfg.tie_word_embeddings {
candle_nn::linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
} else {
candle_nn::Linear::new(embed_tokens.embeddings().clone(), None)
};
let norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let rope_cfg = DeepSeekV2RopeConfig {
rope_scaling: cfg.rope_scaling.clone(),
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_llama2_c.rs | candle-transformers/src/models/quantized_llama2_c.rs | //! Quantized Llama2 model implementation.
//!
//! This provides an 8-bit quantized implementation of Meta's LLaMA2 language model
//! for reduced memory usage and faster inference.
//!
//! Key characteristics:
//! - Decoder-only transformer architecture
//! - RoPE position embeddings
//! - Grouped Query Attention
//! - 8-bit quantization of weights
//!
//! References:
//! - [LLaMA2 Paper](https://arxiv.org/abs/2307.09288)
//! - [LLaMA2 Technical Report](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/)
//!
use super::llama2_c::{Cache, Config};
use crate::quantized_nn::{linear_no_bias as linear, Embedding, Linear, RmsNorm};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{DType, IndexOp, Module, Result, Tensor, D};
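// SiLU activation: x * sigmoid(x), computed here as x / (1 + exp(-x)).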
fn silu(xs: &Tensor) -> Result<Tensor> {
xs / (xs.neg()?.exp()? + 1.0)?
}
#[derive(Debug, Clone)]
struct CausalSelfAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
n_head: usize,
n_key_value_head: usize,
head_dim: usize,
}
impl CausalSelfAttention {
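    // Rotary embedding over interleaved channel pairs: the last dimension is viewed as
    // (n_embd / 2, 2) and each (x0, x1) pair is rotated by the cached cos/sin values for
    // its position.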
fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> {
let (b_sz, seq_len, h, n_embd) = x.dims4()?;
let cos = cache.cos.i(index_pos..index_pos + seq_len)?;
let sin = cache.sin.i(index_pos..index_pos + seq_len)?;
let cos = cos.unsqueeze(1)?;
let sin = sin.unsqueeze(1)?;
let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?;
let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?;
let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?;
let x0 = x.narrow(D::Minus1, 0, 1)?;
let x1 = x.narrow(D::Minus1, 1, 1)?;
let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?;
let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?;
let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?;
Ok(rope)
}
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut Cache,
) -> Result<Tensor> {
let (b_sz, seq_len, n_embd) = x.dims3()?;
let q = self.q_proj.forward(x)?;
let k = self.k_proj.forward(x)?;
let v = self.v_proj.forward(x)?;
let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?;
let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?;
let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?;
let q = self.apply_rotary_emb(&q, index_pos, cache)?;
let mut k = self.apply_rotary_emb(&k, index_pos, cache)?;
if cache.use_kv_cache {
if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] {
k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?;
v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?;
}
cache.kvs[block_idx] = Some((k.clone(), v.clone()))
}
let k = self.repeat_kv(k)?;
let v = self.repeat_kv(v)?;
let q = q.transpose(1, 2)?.contiguous()?;
let k = k.transpose(1, 2)?.contiguous()?;
let v = v.transpose(1, 2)?.contiguous()?;
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = if seq_len <= 1 {
att
} else {
let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?;
masked_fill(&att, &mask, f32::NEG_INFINITY)?
};
let att = candle_nn::ops::softmax(&att, D::Minus1)?;
        // Convert to contiguous as matmul doesn't support strided value tensors for now.
let y = att.matmul(&v.contiguous()?)?;
let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
let y = self.o_proj.forward(&y)?;
Ok(y)
}
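    // Grouped-query attention: each key/value head is repeated n_head / n_key_value_head
    // times so that it lines up with the query heads.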
fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
let n_rep = self.n_head / self.n_key_value_head;
if n_rep == 1 {
Ok(x)
} else {
let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?;
let x = x
.unsqueeze(3)?
.expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))?
.reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?;
Ok(x)
}
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let size_in = cfg.dim;
let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads;
let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads;
let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?;
let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?;
let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?;
let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
n_head: cfg.n_heads,
n_key_value_head: cfg.n_kv_heads,
head_dim: cfg.dim / cfg.n_heads,
})
}
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Clone)]
struct Mlp {
c_fc1: Linear,
c_fc2: Linear,
c_proj: Linear,
}
impl Mlp {
fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self {
Self {
c_fc1,
c_fc2,
c_proj,
}
}
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?;
self.c_proj.forward(&x)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let h_size = cfg.dim;
let i_size = cfg.hidden_dim;
let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?;
let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?;
let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?;
Ok(Self::new(c_fc1, c_fc2, c_proj))
}
}
#[derive(Debug, Clone)]
struct Block {
rms_1: RmsNorm,
attn: CausalSelfAttention,
rms_2: RmsNorm,
mlp: Mlp,
}
impl Block {
fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self {
Self {
rms_1,
attn,
rms_2,
mlp,
}
}
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut Cache,
) -> Result<Tensor> {
let residual = x;
let x = self.rms_1.forward(x)?;
let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?;
let residual = &x;
let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?;
Ok(x)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?;
let mlp = Mlp::load(vb.pp("mlp"), cfg)?;
let input_layernorm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm =
RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?;
Ok(Self::new(
input_layernorm,
attn,
post_attention_layernorm,
mlp,
))
}
}
#[derive(Debug, Clone)]
pub struct QLlama {
wte: Embedding,
blocks: Vec<Block>,
ln_f: RmsNorm,
lm_head: Linear,
pub config: Config,
}
impl QLlama {
pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> {
let (_b_sz, _seq_len) = x.dims2()?;
let mut x = self.wte.forward(x)?;
for (block_idx, block) in self.blocks.iter().enumerate() {
x = block.forward(&x, index_pos, block_idx, cache)?;
}
let x = self.ln_f.forward(&x)?;
let logits = self.lm_head.forward(&x)?;
logits.to_dtype(DType::F32)
}
pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> {
let wte = Embedding::new(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?;
let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?;
let ln_f = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?;
let blocks: Vec<_> = (0..cfg.n_layers)
.map(|i| Block::load(vb.pp(format!("model.layers.{i}")), &cfg).unwrap())
.collect();
Ok(Self {
wte,
blocks,
ln_f,
lm_head,
config: cfg,
})
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/blip_text.rs | candle-transformers/src/models/blip_text.rs | //! Implementation of BLIP text encoder/decoder.
//!
//! - 📝 [Paper](https://arxiv.org/abs/2201.12086). BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation
//!
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-BLIP-Image-Captioning)
//! - 💻 [GH Link](https://github.com/salesforce/BLIP)
//! - 🤗 [HF Link](https://huggingface.co/Salesforce/blip-image-captioning-base)
//!
use super::with_tracing::{linear, Embedding, Linear};
use candle::{Module, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, VarBuilder};
use serde::Deserialize;
#[derive(Debug, Clone, Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub encoder_hidden_size: usize,
pub intermediate_size: usize,
pub projection_dim: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub max_position_embeddings: usize,
pub hidden_act: candle_nn::Activation,
pub layer_norm_eps: f64,
pub is_decoder: bool,
}
#[derive(Debug, Clone)]
struct TextEmbeddings {
word_embeddings: Embedding,
position_embeddings: Embedding,
layer_norm: LayerNorm,
position_ids: Tensor,
}
impl TextEmbeddings {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let word_embeddings =
Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("word_embeddings"))?;
let position_embeddings = Embedding::new(
cfg.max_position_embeddings,
cfg.hidden_size,
vb.pp("position_embeddings"),
)?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
let position_ids =
Tensor::arange(0, cfg.max_position_embeddings as u32, vb.device())?.unsqueeze(0)?;
Ok(Self {
word_embeddings,
position_embeddings,
layer_norm,
position_ids,
})
}
fn forward(&self, xs: &Tensor, past_kv_len: usize) -> Result<Tensor> {
let seq_len = xs.dim(1)?;
let position_ids = self.position_ids.narrow(1, past_kv_len, seq_len)?;
let embeddings = self.word_embeddings.forward(xs)?;
let position_embeddings = self.position_embeddings.forward(&position_ids)?;
(embeddings + position_embeddings)?.apply(&self.layer_norm)
}
}
#[derive(Debug, Clone)]
struct TextSelfAttention {
query: Linear,
key: Linear,
value: Linear,
attention_head_size: usize,
num_attention_heads: usize,
attention_scale: f64,
kv_cache: Option<(Tensor, Tensor)>,
}
impl TextSelfAttention {
fn new(cfg: &Config, is_cross_attention: bool, vb: VarBuilder) -> Result<Self> {
let num_attention_heads = cfg.num_attention_heads;
let attention_head_size = cfg.hidden_size / num_attention_heads;
let all_head_size = cfg.num_attention_heads * attention_head_size;
let query = linear(cfg.hidden_size, all_head_size, vb.pp("query"))?;
let in_size = if is_cross_attention {
cfg.encoder_hidden_size
} else {
cfg.hidden_size
};
let key = linear(in_size, all_head_size, vb.pp("key"))?;
let value = linear(in_size, all_head_size, vb.pp("value"))?;
let attention_scale = 1f64 / (attention_head_size as f64).sqrt();
Ok(Self {
query,
key,
value,
attention_head_size,
num_attention_heads,
attention_scale,
kv_cache: None,
})
}
fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, _) = xs.dims3()?;
xs.reshape((
b_size,
seq_len,
self.num_attention_heads,
self.attention_head_size,
))?
.permute((0, 2, 1, 3))
}
fn reset_kv_cache(&mut self) {
self.kv_cache = None
}
fn forward(
&mut self,
xs: &Tensor,
encoder_hidden_states: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let query = self
.transpose_for_scores(&self.query.forward(xs)?)?
.contiguous()?;
let (key, value) = match encoder_hidden_states {
None => {
let key = self.transpose_for_scores(&self.key.forward(xs)?)?;
let value = self.transpose_for_scores(&self.value.forward(xs)?)?;
let (key, value) = match &self.kv_cache {
None => (key, value),
Some((prev_key, prev_value)) => {
let key = Tensor::cat(&[prev_key, &key], 2)?;
let value = Tensor::cat(&[prev_value, &value], 2)?;
(key, value)
}
};
self.kv_cache = Some((key.clone(), value.clone()));
(key, value)
}
Some(xs) => {
let key = self.transpose_for_scores(&self.key.forward(xs)?)?;
let value = self.transpose_for_scores(&self.value.forward(xs)?)?;
// no kv-cache in this case, but the results could probably be memoized.
(key, value)
}
};
let key = key.contiguous()?;
let value = value.contiguous()?;
let attention_scores = query.matmul(&key.t()?)?;
let attention_scores = (attention_scores * self.attention_scale)?;
let attention_scores = match attention_mask {
Some(mask) => attention_scores.broadcast_add(mask)?,
None => attention_scores,
};
let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;
attention_probs
.matmul(&value)?
.permute((0, 2, 1, 3))?
.flatten_from(D::Minus2)
}
}
#[derive(Debug, Clone)]
struct TextSelfOutput {
dense: Linear,
layer_norm: LayerNorm,
}
impl TextSelfOutput {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self { dense, layer_norm })
}
fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
(xs.apply(&self.dense) + input_tensor)?.apply(&self.layer_norm)
}
}
#[derive(Debug, Clone)]
struct TextAttention {
self_: TextSelfAttention,
output: TextSelfOutput,
}
impl TextAttention {
fn new(cfg: &Config, is_cross_attention: bool, vb: VarBuilder) -> Result<Self> {
let self_ = TextSelfAttention::new(cfg, is_cross_attention, vb.pp("self"))?;
let output = TextSelfOutput::new(cfg, vb.pp("output"))?;
Ok(Self { self_, output })
}
fn reset_kv_cache(&mut self) {
self.self_.reset_kv_cache()
}
fn forward(
&mut self,
xs: &Tensor,
encoder_hidden_states: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let self_outputs = self
.self_
.forward(xs, encoder_hidden_states, attention_mask)?;
self.output.forward(&self_outputs, xs)
}
}
#[derive(Debug, Clone)]
struct TextIntermediate {
dense: Linear,
intermediate_act_fn: candle_nn::Activation,
}
impl TextIntermediate {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("dense"))?;
Ok(Self {
dense,
intermediate_act_fn: cfg.hidden_act,
})
}
}
impl Module for TextIntermediate {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.dense)?.apply(&self.intermediate_act_fn)
}
}
#[derive(Debug, Clone)]
struct TextOutput {
dense: Linear,
layer_norm: LayerNorm,
}
impl TextOutput {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self { dense, layer_norm })
}
fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
(xs.apply(&self.dense)? + input_tensor)?.apply(&self.layer_norm)
}
}
#[derive(Debug, Clone)]
struct TextLayer {
attention: TextAttention,
cross_attention: Option<TextAttention>,
intermediate: TextIntermediate,
output: TextOutput,
}
impl TextLayer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = TextAttention::new(cfg, false, vb.pp("attention"))?;
let cross_attention = if cfg.is_decoder {
Some(TextAttention::new(cfg, true, vb.pp("crossattention"))?)
} else {
None
};
let intermediate = TextIntermediate::new(cfg, vb.pp("intermediate"))?;
let output = TextOutput::new(cfg, vb.pp("output"))?;
Ok(Self {
attention,
cross_attention,
intermediate,
output,
})
}
fn reset_kv_cache(&mut self) {
self.attention.reset_kv_cache();
if let Some(ca) = &mut self.cross_attention {
ca.reset_kv_cache()
}
}
fn forward(
&mut self,
xs: &Tensor,
encoder_hidden_states: &Tensor,
attention_mask: &Tensor,
) -> Result<Tensor> {
let attention_output = self.attention.forward(xs, None, Some(attention_mask))?;
let attention_output = match &mut self.cross_attention {
Some(ca) => ca.forward(&attention_output, Some(encoder_hidden_states), None)?,
None => candle::bail!("expected some cross-attn"),
};
let intermediate_output = self.intermediate.forward(&attention_output)?;
self.output.forward(&intermediate_output, &attention_output)
}
}
#[derive(Debug, Clone)]
struct TextEncoder {
layers: Vec<TextLayer>,
}
impl TextEncoder {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("layer");
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
for i in 0..cfg.num_hidden_layers {
let layer = TextLayer::new(cfg, vb.pp(i))?;
layers.push(layer)
}
Ok(Self { layers })
}
fn reset_kv_cache(&mut self) {
self.layers.iter_mut().for_each(|l| l.reset_kv_cache())
}
fn forward(
&mut self,
xs: &Tensor,
encoder_hidden_states: &Tensor,
attention_mask: &Tensor,
) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, encoder_hidden_states, attention_mask)?
}
Ok(xs)
}
}
#[derive(Debug, Clone)]
pub struct TextPooler {
dense: Linear,
}
impl TextPooler {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
Ok(Self { dense })
}
}
impl Module for TextPooler {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.narrow(D::Minus1, 0, 1)?
.squeeze(D::Minus1)?
.apply(&self.dense)?
.tanh()
}
}
#[derive(Debug, Clone)]
struct TextPredictionHeadTransform {
dense: Linear,
transform_act_fn: candle_nn::Activation,
layer_norm: LayerNorm,
}
impl TextPredictionHeadTransform {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self {
dense,
transform_act_fn: cfg.hidden_act,
layer_norm,
})
}
}
impl Module for TextPredictionHeadTransform {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.dense)?
.apply(&self.transform_act_fn)?
.apply(&self.layer_norm)
}
}
#[derive(Debug, Clone)]
struct TextLMPredictionHead {
transform: TextPredictionHeadTransform,
decoder: Linear,
}
impl TextLMPredictionHead {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let transform = TextPredictionHeadTransform::new(cfg, vb.pp("transform"))?;
let weight = vb.get((cfg.vocab_size, cfg.hidden_size), "decoder.weight")?;
let bias = vb.get(cfg.vocab_size, "bias")?;
let decoder = Linear::from_weights(weight, Some(bias));
Ok(Self { transform, decoder })
}
}
impl Module for TextLMPredictionHead {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.transform)?.apply(&self.decoder)
}
}
#[derive(Debug, Clone)]
struct TextOnlyMLMHead {
predictions: TextLMPredictionHead,
}
impl TextOnlyMLMHead {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let predictions = TextLMPredictionHead::new(cfg, vb.pp("predictions"))?;
Ok(Self { predictions })
}
}
impl Module for TextOnlyMLMHead {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.predictions.forward(xs)
}
}
#[derive(Debug, Clone)]
struct TextModel {
embeddings: TextEmbeddings,
encoder: TextEncoder,
past_kv_len: usize,
// We do not need the pooler for caption generation
}
impl TextModel {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embeddings = TextEmbeddings::new(cfg, vb.pp("embeddings"))?;
let encoder = TextEncoder::new(cfg, vb.pp("encoder"))?;
Ok(Self {
embeddings,
encoder,
past_kv_len: 0,
})
}
fn forward(
&mut self,
input_ids: &Tensor,
encoder_hidden_states: &Tensor,
attention_mask: &Tensor,
) -> Result<Tensor> {
let (_b_sz, seq_len) = input_ids.dims2()?;
let embedding_output = self.embeddings.forward(input_ids, self.past_kv_len)?;
let sequence_output =
self.encoder
.forward(&embedding_output, encoder_hidden_states, attention_mask)?;
self.past_kv_len += seq_len;
// We're interested in the sequence-output rather than the pooled-output.
Ok(sequence_output)
}
fn reset_kv_cache(&mut self) {
self.past_kv_len = 0;
self.encoder.reset_kv_cache();
}
}
#[derive(Debug, Clone)]
pub struct TextLMHeadModel {
bert: TextModel,
cls: TextOnlyMLMHead,
}
impl TextLMHeadModel {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let bert = TextModel::new(cfg, vb.pp("bert"))?;
let cls = TextOnlyMLMHead::new(cfg, vb.pp("cls"))?;
Ok(Self { bert, cls })
}
pub fn forward(
&mut self,
input_ids: &Tensor,
encoder_hidden_states: &Tensor,
) -> Result<Tensor> {
let seq_len = input_ids.dim(1)?;
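        // Causal mask for autoregressive decoding: position i may only attend to positions
        // j <= i, future positions get -inf added to the attention scores.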
let mask: Vec<_> = (0..seq_len)
.flat_map(|i| (0..seq_len).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 }))
.collect();
let mask = Tensor::from_vec(mask, (seq_len, seq_len), input_ids.device())?;
let sequence_output = self.bert.forward(input_ids, encoder_hidden_states, &mask)?;
let prediction_scores = self.cls.forward(&sequence_output)?;
// return_logits is false so we don't discard the last sequence element.
Ok(prediction_scores)
}
pub fn reset_kv_cache(&mut self) {
self.bert.reset_kv_cache()
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/phi.rs | candle-transformers/src/models/phi.rs | //! Microsoft Phi model implementation
//!
//! The Phi series are decoder-only transformers designed for code and language tasks.
//!
//! Key characteristics:
//! - Decoder-only transformer architecture
//! - RoPE embeddings
//! - Layer normalization
//! - QK normalization
//!
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-phi1-phi2-wasm-demo)
//! - 🤗 [HF Link](https://huggingface.co/microsoft/phi-2)
//!
use crate::models::with_tracing::{layer_norm, linear, Embedding, LayerNorm, Linear};
/// Phi model.
/// https://huggingface.co/microsoft/phi-2
/// There is an alternative implementation of the phi model in mixformers.rs.
/// This corresponds to the model update made with the following commit:
/// https://huggingface.co/microsoft/phi-2/commit/cb2f4533604d8b67de604e7df03bfe6f3ca22869
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use serde::Deserialize;
// https://huggingface.co/microsoft/phi-2/blob/main/configuration_phi.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub(crate) vocab_size: usize,
pub(crate) hidden_size: usize,
pub(crate) intermediate_size: usize,
pub(crate) num_hidden_layers: usize,
pub(crate) num_attention_heads: usize,
pub(crate) num_key_value_heads: Option<usize>,
pub(crate) hidden_act: Activation,
pub(crate) max_position_embeddings: usize,
pub(crate) layer_norm_eps: f64,
pub(crate) tie_word_embeddings: bool,
pub(crate) rope_theta: f32,
pub(crate) partial_rotary_factor: f64,
pub(crate) qk_layernorm: bool,
}
impl Config {
fn num_key_value_heads(&self) -> usize {
self.num_key_value_heads.unwrap_or(self.num_attention_heads)
}
fn head_dim(&self) -> usize {
self.hidden_size / self.num_attention_heads
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
dim: usize,
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(cfg: &Config, dev: &Device) -> Result<Self> {
let dim = (cfg.partial_rotary_factor * cfg.head_dim() as f64) as usize;
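        // Only a fraction of each head is rotated; e.g. with the phi-2 defaults
        // (head_dim = 80, partial_rotary_factor = 0.4) the first 32 dimensions get RoPE
        // and the remaining 48 are passed through unchanged in `apply_rotary_emb`.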
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let t = Tensor::arange(0u32, cfg.max_position_embeddings as u32, dev)?
.to_dtype(DType::F32)?
.reshape((cfg.max_position_embeddings, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
dim,
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (_b_size, _num_heads, seq_len, _headdim) = xs.dims4()?;
let xs_rot = xs.i((.., .., .., ..self.dim))?.contiguous()?;
let xs_pass = xs.i((.., .., .., self.dim..))?;
let c = self.cos.narrow(0, seqlen_offset, seq_len)?;
let s = self.sin.narrow(0, seqlen_offset, seq_len)?;
let xs_rot = candle_nn::rotary_emb::rope(&xs_rot, &c, &s)?;
Tensor::cat(&[&xs_rot, &xs_pass], D::Minus1)
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
fc1: Linear,
fc2: Linear,
act: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?;
let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?;
Ok(Self {
fc1,
fc2,
// This does not match the mixformers implementation where Gelu is used rather than
// GeluNew.
act: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
}
}
#[derive(Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
dense: Linear,
kv_cache: Option<(Tensor, Tensor)>,
q_layernorm: Option<LayerNorm>,
k_layernorm: Option<LayerNorm>,
rotary_emb: RotaryEmbedding,
softmax_scale: f64,
num_heads: usize,
num_kv_heads: usize,
head_dim: usize,
span: tracing::Span,
}
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device)
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
impl Attention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads();
let head_dim = cfg.head_dim();
let q_proj = linear(cfg.hidden_size, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear(cfg.hidden_size, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear(cfg.hidden_size, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let dense = linear(num_heads * head_dim, cfg.hidden_size, vb.pp("dense"))?;
// Alternative rope scalings are not supported.
let rotary_emb = RotaryEmbedding::new(cfg, vb.device())?;
let (q_layernorm, k_layernorm) = if cfg.qk_layernorm {
let q_layernorm = layer_norm(head_dim, cfg.layer_norm_eps, vb.pp("q_layernorm"))?;
let k_layernorm = layer_norm(head_dim, cfg.layer_norm_eps, vb.pp("k_layernorm"))?;
(Some(q_layernorm), Some(k_layernorm))
} else {
(None, None)
};
let softmax_scale = 1f64 / (head_dim as f64).sqrt();
Ok(Self {
q_proj,
k_proj,
v_proj,
dense,
kv_cache: None,
q_layernorm,
k_layernorm,
rotary_emb,
softmax_scale,
num_heads,
num_kv_heads,
head_dim,
span: tracing::span!(tracing::Level::TRACE, "attention"),
})
}
fn repeat_kv(&self, xs: Tensor) -> Result<Tensor> {
crate::utils::repeat_kv(xs, self.num_heads / self.num_kv_heads)
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_size, seq_len, _n_embd) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = match &self.q_layernorm {
None => query_states,
Some(ln) => query_states.apply(ln)?,
};
let key_states = match &self.k_layernorm {
None => key_states,
Some(ln) => key_states.apply(ln)?,
};
let query_states = query_states
.reshape((b_size, seq_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_size, seq_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_size, seq_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
// Rotary embeddings.
let seqlen_offset = match &self.kv_cache {
None => 0,
Some((prev_k, _)) => prev_k.dim(2)?,
};
let query_states = self
.rotary_emb
.apply_rotary_emb(&query_states, seqlen_offset)?;
let key_states = self
.rotary_emb
.apply_rotary_emb(&key_states, seqlen_offset)?;
// KV cache.
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &key_states], 2)?;
let v = Tensor::cat(&[prev_v, &value_states], 2)?;
(k, v)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
// Repeat kv.
let key_states = self.repeat_kv(key_states)?.contiguous()?;
let value_states = self.repeat_kv(value_states)?.contiguous()?;
let attn_weights = (query_states
.to_dtype(DType::F32)?
.contiguous()?
.matmul(&key_states.to_dtype(DType::F32)?.t()?)?
* self.softmax_scale)?;
let attn_weights = match mask {
None => attn_weights,
Some(mask) => masked_fill(
&attn_weights,
&mask.broadcast_left((b_size, self.num_heads))?,
f32::NEG_INFINITY,
)?,
};
let attn_weights =
candle_nn::ops::softmax_last_dim(&attn_weights)?.to_dtype(value_states.dtype())?;
let attn_output = attn_weights.matmul(&value_states)?;
let attn_output = attn_output
.transpose(1, 2)?
.reshape((b_size, seq_len, ()))?;
attn_output.apply(&self.dense)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: LayerNorm,
span: tracing::Span,
}
impl DecoderLayer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm = layer_norm(
cfg.hidden_size,
cfg.layer_norm_eps,
vb.pp("input_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let residual = xs;
let xs = xs.apply(&self.input_layernorm)?;
let attn_outputs = self.self_attn.forward(&xs, mask)?;
let feed_forward_hidden_states = self.mlp.forward(&xs)?;
attn_outputs + feed_forward_hidden_states + residual
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Clone)]
pub struct Model {
embed_tokens: Embedding,
layers: Vec<DecoderLayer>,
final_layernorm: LayerNorm,
lm_head: Linear,
span: tracing::Span,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let final_layernorm = layer_norm(
cfg.hidden_size,
cfg.layer_norm_eps,
vb_m.pp("final_layernorm"),
)?;
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_m = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(cfg, vb_m.pp(layer_idx))?;
layers.push(layer)
}
let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
final_layernorm,
lm_head,
span: tracing::span!(tracing::Level::TRACE, "model"),
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_size, seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.embed_tokens)?;
let mask = if seq_len <= 1 {
None
} else {
Some(get_mask(seq_len, xs.device())?)
};
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, mask.as_ref())?;
}
xs.apply(&self.final_layernorm)?
.narrow(1, seq_len - 1, 1)?
.apply(&self.lm_head)?
.squeeze(1)
}
pub fn clear_kv_cache(&mut self) {
self.layers.iter_mut().for_each(|b| b.clear_kv_cache())
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mixformer.rs | candle-transformers/src/models/mixformer.rs | //! MixFormer (Microsoft's Phi Architecture)
//!
//! See "Textbooks Are All You Need II: phi-1.5 technical report", Lin et al. 2023
//! - [Arxiv](https://arxiv.org/abs/2309.05463)
//! - [HF Link](https://huggingface.co/microsoft/phi-1_5)
//!
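//! A minimal construction sketch; the weight file name is an assumption and the maintained
//! example (including the quantized variants) lives in candle-examples:
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::mixformer::{Config, MixFormerSequentialForCausalLM};
//!
//! let device = Device::Cpu;
//! let cfg = Config::v1_5();
//! let vb = unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)? };
//! // `new` targets the original sequential checkpoint layout (`layers.{i}`),
//! // while `new_v2` targets the later layout with a separate `lm_head`.
//! let mut model = MixFormerSequentialForCausalLM::new(&cfg, vb)?;
//! let tokens = Tensor::new(&[[1u32, 2, 3]], &device)?;
//! let logits = model.forward(&tokens)?;
//! ```
//!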
use crate::models::with_tracing::{linear, Embedding as E, Linear};
/// MixFormer model.
/// https://huggingface.co/microsoft/phi-1_5
/// https://arxiv.org/abs/2309.05463
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use serde::Deserialize;
const MAX_SEQ_LEN: usize = 4096;
// https://huggingface.co/microsoft/phi-1_5/blob/d38e6f954ec29b96fe2cf033937dad64e279b5d9/configuration_mixformer_sequential.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub(crate) vocab_size: usize,
pub(crate) n_positions: usize,
pub(crate) n_embd: usize,
pub(crate) n_layer: usize,
pub(crate) n_inner: Option<usize>,
pub(crate) n_head: usize,
pub(crate) rotary_dim: usize,
pub(crate) activation_function: Activation,
pub(crate) layer_norm_epsilon: f64,
pub(crate) tie_word_embeddings: bool,
pub(crate) pad_vocab_size_multiple: usize,
}
impl Config {
pub fn v1() -> Self {
Self {
vocab_size: 50304,
n_positions: 2048,
n_embd: 1024,
n_layer: 20,
n_inner: None,
n_head: 16,
rotary_dim: usize::min(32, 1024 / 16),
activation_function: Activation::Gelu,
layer_norm_epsilon: 1e-5,
tie_word_embeddings: false,
pad_vocab_size_multiple: 64,
}
}
pub fn v1_5() -> Self {
Self {
vocab_size: 51200,
n_positions: 2048,
n_embd: 2048,
n_layer: 24,
n_inner: None,
n_head: 32,
rotary_dim: usize::min(32, 2048 / 32),
activation_function: Activation::Gelu,
layer_norm_epsilon: 1e-5,
tie_word_embeddings: false,
pad_vocab_size_multiple: 64,
}
}
pub fn v2() -> Self {
Self {
vocab_size: 51200,
n_positions: 2048,
n_embd: 2560,
n_layer: 32,
n_inner: None,
n_head: 32,
rotary_dim: usize::min(32, 2560 / 32),
activation_function: Activation::Gelu,
layer_norm_epsilon: 1e-5,
tie_word_embeddings: false,
pad_vocab_size_multiple: 64,
}
}
// https://huggingface.co/teknium/Puffin-Phi-v2/blob/main/config.json
pub fn puffin_phi_v2() -> Self {
Self {
vocab_size: 50304,
n_positions: 2048,
n_embd: 2048,
n_layer: 24,
n_inner: None,
n_head: 32,
rotary_dim: usize::min(32, 2048 / 32),
activation_function: Activation::Gelu,
layer_norm_epsilon: 1e-5,
tie_word_embeddings: false,
pad_vocab_size_multiple: 64,
}
}
// https://huggingface.co/teknium/Phi-Hermes-1.3B/blob/main/config.json
pub fn phi_hermes_1_3b() -> Self {
Self {
vocab_size: 50304,
n_positions: 2048,
n_embd: 2048,
n_layer: 24,
n_inner: None,
n_head: 32,
rotary_dim: usize::min(32, 2048 / 32),
activation_function: Activation::NewGelu,
layer_norm_epsilon: 1e-5,
tie_word_embeddings: false,
pad_vocab_size_multiple: 64,
}
}
}
#[derive(Debug, Clone)]
struct Embedding {
wte: E,
}
impl Embedding {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let wte = E::new(cfg.vocab_size, cfg.n_embd, vb.pp("wte"))?;
Ok(Self { wte })
}
}
impl Module for Embedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.wte.forward(xs)
}
}
fn get_mask(size: usize, dtype: DType, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| if j > i { f32::NEG_INFINITY } else { 0. }))
.collect();
Tensor::from_slice(&mask, (size, size), device)?.to_dtype(dtype)
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dim: usize, max_seq_len: usize, dtype: DType, dev: &Device) -> Result<Self> {
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?.to_dtype(dtype)?,
cos: freqs.cos()?.to_dtype(dtype)?,
})
}
fn apply_rotary_emb_qkv(
&self,
qkv: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor, Tensor)> {
let (_b_size, seqlen, three, _, _headdim) = qkv.dims5()?;
if three != 3 {
candle::bail!("unexpected shape for qkv {:?}", qkv.shape())
}
let (_rotary_seqlen, rotary_dim) = self.cos.dims2()?;
let rotary_dim = rotary_dim * 2;
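        // `cos`/`sin` hold one value per (even, odd) dimension pair, so the slice of the
        // head that is actually rotated spans twice their trailing dimension.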
let q_rot = qkv.i((.., .., 0, .., ..rotary_dim))?.contiguous()?;
let q_pass = qkv.i((.., .., 0, .., rotary_dim..))?;
let k_rot = qkv.i((.., .., 1, .., ..rotary_dim))?.contiguous()?;
let k_pass = qkv.i((.., .., 1, .., rotary_dim..))?;
let c = self.cos.narrow(0, seqlen_offset, seqlen)?;
let s = self.sin.narrow(0, seqlen_offset, seqlen)?;
let q_rot = candle_nn::rotary_emb::rope_thd(&q_rot, &c, &s)?;
let k_rot = candle_nn::rotary_emb::rope_thd(&k_rot, &c, &s)?;
let q = Tensor::cat(&[&q_rot, &q_pass], D::Minus1)?;
let k = Tensor::cat(&[&k_rot, &k_pass], D::Minus1)?;
let v = qkv.i((.., .., 2))?;
Ok((q, k, v))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
fc1: Linear,
fc2: Linear,
act: Activation,
span: tracing::Span,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let n_inner = cfg.n_inner.unwrap_or(4 * cfg.n_embd);
let fc1 = linear(cfg.n_embd, n_inner, vb.pp("fc1"))?;
let fc2 = linear(n_inner, cfg.n_embd, vb.pp("fc2"))?;
Ok(Self {
fc1,
fc2,
act: cfg.activation_function,
span: tracing::span!(tracing::Level::TRACE, "mlp"),
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
}
}
#[derive(Debug, Clone)]
struct CausalLMHead {
ln: candle_nn::LayerNorm,
linear: Linear,
}
impl CausalLMHead {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln = candle_nn::layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?;
let linear = linear(cfg.n_embd, cfg.vocab_size, vb.pp("linear"))?;
Ok(Self { ln, linear })
}
}
impl Module for CausalLMHead {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.ln)?
.apply(&self.linear)?
.to_dtype(DType::F32)
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MHA {
wqkv: Linear,
out_proj: Linear,
rotary_emb: RotaryEmbedding,
kv_cache: Option<(Tensor, Tensor)>,
head_dim: usize,
softmax_scale: f64,
span: tracing::Span,
span_rope: tracing::Span,
span_mask: tracing::Span,
span_softmax: tracing::Span,
}
impl MHA {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let head_dim = cfg.n_embd / cfg.n_head;
let op_size = cfg.n_embd;
let wqkv = linear(cfg.n_embd, 3 * op_size, vb.pp("Wqkv"))?;
let out_proj = linear(op_size, cfg.n_embd, vb.pp("out_proj"))?;
let rotary_emb =
RotaryEmbedding::new(cfg.rotary_dim, MAX_SEQ_LEN, vb.dtype(), vb.device())?;
let softmax_scale = 1f64 / (head_dim as f64).sqrt();
Ok(Self {
wqkv,
out_proj,
head_dim,
kv_cache: None,
rotary_emb,
softmax_scale,
span: tracing::span!(tracing::Level::TRACE, "mha"),
span_rope: tracing::span!(tracing::Level::TRACE, "rope"),
span_mask: tracing::span!(tracing::Level::TRACE, "mask"),
span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_size, seq_len, _n_embd) = xs.dims3()?;
let qkv = self
.wqkv
.forward(xs)?
.reshape((b_size, seq_len, 3, (), self.head_dim))?;
let seqlen_offset = match &self.kv_cache {
None => 0,
Some((prev_k, _)) => prev_k.dim(1)?,
};
// In the python implementation, a single tensor is returned with the third axis of size 3.
let (q, k, v) = {
let _enter = self.span_rope.enter();
self.rotary_emb.apply_rotary_emb_qkv(&qkv, seqlen_offset)?
};
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 1)?;
let v = Tensor::cat(&[prev_v, &v], 1)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
// scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
let q = q.transpose(1, 2)?.flatten_to(1)?; // b*h, t, d
let k = k.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d
let v = v.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d
let attn_weights = (q.matmul(&k.t()?)? * self.softmax_scale)?; // b*h, t, s
// causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=scores.device), 1)
// scores = scores + causal_mask.to(dtype=scores.dtype)
let attn_weights = match mask {
None => attn_weights,
Some(mask) => {
let _enter = self.span_mask.enter();
attn_weights.broadcast_add(mask)?
}
};
let attn_weights = {
let _enter = self.span_softmax.enter();
candle_nn::ops::softmax_last_dim(&attn_weights)?
};
// output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
// attn_weights: b*h,t,s, v: b*h,s,d
let attn_output = attn_weights.matmul(&v)?;
// b*h,t,d
let attn_output = attn_output
.reshape((b_size, (), seq_len, self.head_dim))?
.transpose(1, 2)?
.flatten_from(D::Minus2)?;
attn_output.apply(&self.out_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct ParallelBlock {
ln: candle_nn::LayerNorm,
mixer: MHA,
mlp: MLP,
span: tracing::Span,
}
impl ParallelBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln = candle_nn::layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?;
let mixer = MHA::new(cfg, vb.pp("mixer"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
Ok(Self {
ln,
mixer,
mlp,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let residual = xs;
let xs = xs.apply(&self.ln)?;
let attn_outputs = self.mixer.forward(&xs, mask)?;
let feed_forward_hidden_states = self.mlp.forward(&xs)?;
attn_outputs + feed_forward_hidden_states + residual
}
fn clear_kv_cache(&mut self) {
self.mixer.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct MixFormerSequentialForCausalLM {
embedding: Embedding,
blocks: Vec<ParallelBlock>,
head: CausalLMHead,
span: tracing::Span,
}
impl MixFormerSequentialForCausalLM {
pub fn new_v2(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_head = vb.pp("lm_head");
let vb = vb.pp("transformer");
let embedding = Embedding::new(cfg, vb.pp("embd"))?;
let mut blocks = Vec::new();
for i in 0..cfg.n_layer {
let block = ParallelBlock::new(cfg, vb.pp("h").pp(i))?;
blocks.push(block)
}
let head = CausalLMHead::new(cfg, vb_head)?;
Ok(Self {
embedding,
blocks,
head,
span: tracing::span!(tracing::Level::TRACE, "mixformer"),
})
}
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("layers");
let embedding = Embedding::new(cfg, vb.pp(0))?;
let mut blocks = Vec::new();
for i in 0..cfg.n_layer {
let block = ParallelBlock::new(cfg, vb.pp(i + 1))?;
blocks.push(block)
}
let head = CausalLMHead::new(cfg, vb.pp(cfg.n_layer + 1))?;
Ok(Self {
embedding,
blocks,
head,
span: tracing::span!(tracing::Level::TRACE, "mixformer"),
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_size, seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.embedding)?;
let mask = if seq_len <= 1 {
None
} else {
Some(get_mask(seq_len, xs.dtype(), xs.device())?)
};
for block in self.blocks.iter_mut() {
xs = block.forward(&xs, mask.as_ref())?
}
xs.narrow(1, seq_len - 1, 1)?.apply(&self.head)?.squeeze(1)
}
pub fn forward_with_img(
&mut self,
bos_token: &Tensor,
xs: &Tensor,
img_embeds: &Tensor,
) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = xs.apply(&self.embedding)?;
let bos_token = bos_token.apply(&self.embedding)?;
// Python implementation sequence order is <bos token embedding><img embedding><rest of text embedding>
// https://github.com/vikhyat/moondream/blob/a9d788a20d1543fb1479edc54106e88cff7759d3/moondream/moondream.py#L43-L56
let mut xs = Tensor::cat(&[bos_token, img_embeds.clone(), xs], 1)?;
let (_b_size, seq_len, _embds) = xs.dims3()?;
let mask = Some(get_mask(seq_len, xs.dtype(), xs.device())?);
for block in self.blocks.iter_mut() {
xs = block.forward(&xs, mask.as_ref())?
}
let xs = xs
.narrow(1, seq_len - 1, 1)?
.apply(&self.head)?
.squeeze(1)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
self.blocks.iter_mut().for_each(|b| b.clear_kv_cache())
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/siglip.rs | candle-transformers/src/models/siglip.rs | //! Siglip model implementation.
//!
//! SigLIP is a CLIP-style architecture that pairs a vision transformer with a text
//! transformer for zero-shot classification and retrieval, trained with a pairwise
//! sigmoid loss rather than a softmax contrastive loss.
//!
//! References:
//! - 🤗 [Model Card](https://huggingface.co/google/siglip-base-patch16-224)
//!
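//! A minimal scoring sketch; the weight file name and the dummy input shapes are
//! assumptions, and images must be preprocessed to the resolution the checkpoint expects:
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::siglip::{Config, Model};
//!
//! let device = Device::Cpu;
//! let config = Config::base_patch16_224();
//! let vb = unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)? };
//! let model = Model::new(&config, vb)?;
//! let pixel_values = Tensor::zeros((1, 3, 224, 224), DType::F32, &device)?; // preprocessed image batch
//! let input_ids = Tensor::zeros((2, 64), DType::U32, &device)?; // padded token ids for two prompts
//! let (logits_per_text, logits_per_image) = model.forward(&pixel_values, &input_ids)?;
//! ```
//!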
use crate::models::clip::div_l2_norm;
use candle::{IndexOp, Module, Result, Tensor, D};
use candle_nn::{layer_norm, linear, LayerNorm, Linear, VarBuilder};
fn default_text_vocab_size() -> usize {
32000
}
fn default_text_hidden_size() -> usize {
768
}
fn default_text_intermediate_size() -> usize {
3072
}
fn default_text_num_hidden_layers() -> usize {
12
}
fn default_text_num_attention_heads() -> usize {
12
}
fn default_text_max_position_embeddings() -> usize {
64
}
fn default_text_layer_norm_eps() -> f64 {
1e-6
}
fn default_text_pad_token_id() -> u32 {
1
}
fn default_text_bos_token_id() -> u32 {
49406
}
fn default_text_eos_token_id() -> u32 {
49407
}
fn default_text_hidden_act() -> candle_nn::Activation {
candle_nn::Activation::GeluPytorchTanh
}
// https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/configuration_siglip.py#L27
#[derive(serde::Deserialize, Clone, Debug)]
pub struct TextConfig {
#[serde(default = "default_text_vocab_size")]
pub vocab_size: usize,
#[serde(default = "default_text_hidden_size")]
pub hidden_size: usize,
#[serde(default = "default_text_intermediate_size")]
pub intermediate_size: usize,
#[serde(default = "default_text_num_hidden_layers")]
pub num_hidden_layers: usize,
#[serde(default = "default_text_num_attention_heads")]
pub num_attention_heads: usize,
#[serde(default = "default_text_max_position_embeddings")]
pub max_position_embeddings: usize,
#[serde(default = "default_text_hidden_act")]
pub hidden_act: candle_nn::Activation,
#[serde(default = "default_text_layer_norm_eps")]
pub layer_norm_eps: f64,
#[serde(default = "default_text_pad_token_id")]
pub pad_token_id: u32,
#[serde(default = "default_text_bos_token_id")]
pub bos_token_id: u32,
#[serde(default = "default_text_eos_token_id")]
pub eos_token_id: u32,
}
fn default_vision_hidden_size() -> usize {
768
}
fn default_vision_intermediate_size() -> usize {
3072
}
fn default_vision_num_hidden_layers() -> usize {
12
}
fn default_vision_num_attention_heads() -> usize {
12
}
fn default_vision_num_channels() -> usize {
3
}
fn default_vision_image_size() -> usize {
224
}
fn default_vision_patch_size() -> usize {
16
}
fn default_vision_layer_norm_eps() -> f64 {
1e-6
}
fn default_vision_hidden_act() -> candle_nn::Activation {
candle_nn::Activation::GeluPytorchTanh
}
// https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/configuration_siglip.py#L132
#[derive(serde::Deserialize, Clone, Debug)]
pub struct VisionConfig {
#[serde(default = "default_vision_hidden_size")]
pub hidden_size: usize,
#[serde(default = "default_vision_intermediate_size")]
pub intermediate_size: usize,
#[serde(default = "default_vision_num_hidden_layers")]
pub num_hidden_layers: usize,
#[serde(default = "default_vision_num_attention_heads")]
pub num_attention_heads: usize,
#[serde(default = "default_vision_num_channels")]
pub num_channels: usize,
#[serde(default = "default_vision_image_size")]
pub image_size: usize,
#[serde(default = "default_vision_batch_size")]
pub patch_size: usize,
#[serde(default = "default_vision_hidden_act")]
pub hidden_act: candle_nn::Activation,
#[serde(default = "default_vision_layer_norm_eps")]
pub layer_norm_eps: f64,
}
trait TransformerConfig {
fn hidden_size(&self) -> usize;
fn intermediate_size(&self) -> usize;
fn num_attention_heads(&self) -> usize;
fn num_hidden_layers(&self) -> usize;
fn layer_norm_eps(&self) -> f64;
fn hidden_act(&self) -> candle_nn::Activation;
}
impl TransformerConfig for TextConfig {
fn hidden_size(&self) -> usize {
self.hidden_size
}
fn intermediate_size(&self) -> usize {
self.intermediate_size
}
fn num_attention_heads(&self) -> usize {
self.num_attention_heads
}
fn num_hidden_layers(&self) -> usize {
self.num_hidden_layers
}
fn layer_norm_eps(&self) -> f64 {
self.layer_norm_eps
}
fn hidden_act(&self) -> candle_nn::Activation {
self.hidden_act
}
}
impl TransformerConfig for VisionConfig {
fn hidden_size(&self) -> usize {
self.hidden_size
}
fn intermediate_size(&self) -> usize {
self.intermediate_size
}
fn num_attention_heads(&self) -> usize {
self.num_attention_heads
}
fn num_hidden_layers(&self) -> usize {
self.num_hidden_layers
}
fn layer_norm_eps(&self) -> f64 {
self.layer_norm_eps
}
fn hidden_act(&self) -> candle_nn::Activation {
self.hidden_act
}
}
impl VisionConfig {
pub fn paligemma_3b_224() -> Self {
Self {
// https://huggingface.co/google/paligemma-3b-pt-224/blob/main/config.json
patch_size: 14,
num_attention_heads: 16,
num_hidden_layers: 27,
hidden_size: 1152,
intermediate_size: 4304,
image_size: 224, // num_image_tokens: (224 / 14)^2 = 256
// Default values.
num_channels: 3,
hidden_act: candle_nn::Activation::GeluPytorchTanh,
layer_norm_eps: 1e-6,
}
}
pub fn paligemma_3b_448() -> Self {
Self {
// https://huggingface.co/google/paligemma-3b-pt-448/blob/main/config.json
patch_size: 14,
num_attention_heads: 16,
num_hidden_layers: 27,
hidden_size: 1152,
intermediate_size: 4304,
image_size: 448, // num_image_tokens: (448 / 14)^2 = 1024
// Default values.
num_channels: 3,
hidden_act: candle_nn::Activation::GeluPytorchTanh,
layer_norm_eps: 1e-6,
}
}
pub fn paligemma_3b_896() -> Self {
Self {
            // https://huggingface.co/google/paligemma-3b-pt-896/blob/main/config.json
patch_size: 14,
num_attention_heads: 16,
num_hidden_layers: 27,
hidden_size: 1152,
intermediate_size: 4304,
image_size: 896, // num_image_tokens: (896 / 14)^2 = 4096
// Default values.
num_channels: 3,
hidden_act: candle_nn::Activation::GeluPytorchTanh,
layer_norm_eps: 1e-6,
}
}
pub fn num_patches(&self) -> usize {
(self.image_size / self.patch_size).pow(2)
}
}
// https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/configuration_siglip.py#L228
#[derive(serde::Deserialize, Clone, Debug)]
pub struct Config {
pub text_config: TextConfig,
pub vision_config: VisionConfig,
}
impl Config {
pub fn base_patch16_224() -> Self {
let text_config = TextConfig {
// https://huggingface.co/google/siglip-base-patch16-224/blob/main/config.json
hidden_size: 768,
intermediate_size: 3072,
num_attention_heads: 12,
vocab_size: 32000,
// Default values.
pad_token_id: 1,
bos_token_id: 49406,
eos_token_id: 49407,
layer_norm_eps: 1e-6,
hidden_act: candle_nn::Activation::GeluPytorchTanh,
max_position_embeddings: 64,
num_hidden_layers: 12,
};
let vision_config = VisionConfig {
patch_size: 16,
// Default values.
hidden_size: 768,
intermediate_size: 3072,
num_hidden_layers: 12,
num_attention_heads: 12,
num_channels: 3,
image_size: 224,
hidden_act: candle_nn::Activation::GeluPytorchTanh,
layer_norm_eps: 1e-6,
};
Self {
text_config,
vision_config,
}
}
}
#[derive(Clone, Debug)]
struct MultiheadAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
out_proj: Linear,
num_heads: usize,
}
impl MultiheadAttention {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let w_in_proj = vb.get((3 * h, h), "in_proj_weight")?.chunk(3, 0)?;
let b_in_proj = vb.get(3 * h, "in_proj_bias")?.chunk(3, 0)?;
let q_proj = Linear::new(w_in_proj[0].clone(), Some(b_in_proj[0].clone()));
let k_proj = Linear::new(w_in_proj[1].clone(), Some(b_in_proj[1].clone()));
let v_proj = Linear::new(w_in_proj[2].clone(), Some(b_in_proj[2].clone()));
let out_proj = linear(h, h, vb.pp("out_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
out_proj,
num_heads,
})
}
fn separate_heads(&self, x: &Tensor) -> Result<Tensor> {
let (b, n, c) = x.dims3()?;
x.reshape((b, n, self.num_heads, c / self.num_heads))?
.transpose(1, 2)?
.contiguous()
}
fn recombine_heads(&self, x: &Tensor) -> Result<Tensor> {
let (b, n_heads, n_tokens, c_per_head) = x.dims4()?;
x.transpose(1, 2)?
.reshape((b, n_tokens, n_heads * c_per_head))
}
fn forward(&self, q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let q = self.q_proj.forward(&q.contiguous()?)?;
let k = self.k_proj.forward(&k.contiguous()?)?;
let v = self.v_proj.forward(&v.contiguous()?)?;
let q = self.separate_heads(&q)?;
let k = self.separate_heads(&k)?;
let v = self.separate_heads(&v)?;
let (_, _, _, c_per_head) = q.dims4()?;
let attn = (q.matmul(&k.t()?)? / (c_per_head as f64).sqrt())?;
let attn = candle_nn::ops::softmax_last_dim(&attn)?;
let out = attn.matmul(&v)?;
self.recombine_heads(&out)?.apply(&self.out_proj)
}
}
#[derive(Debug, Clone)]
struct MultiheadAttentionPoolingHead {
probe: Tensor,
attention: MultiheadAttention,
layernorm: LayerNorm,
mlp: Mlp,
}
impl MultiheadAttentionPoolingHead {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let mlp = Mlp::new(cfg, vb.pp("mlp"))?;
let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("layernorm"))?;
let probe = vb.get((1, 1, cfg.hidden_size), "probe")?;
let attention = MultiheadAttention::new(cfg, vb.pp("attention"))?;
Ok(Self {
probe,
attention,
layernorm,
mlp,
})
}
}
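// Attention pooling: a learned `probe` token acts as the single query attending over all
// patch embeddings, producing one pooled vector per image.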
impl Module for MultiheadAttentionPoolingHead {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let batch_size = xs.dim(0)?;
let probe = self.probe.repeat((batch_size, 1, 1))?;
let xs = self.attention.forward(&probe, xs, xs)?;
let residual = &xs;
let xs = xs.apply(&self.layernorm)?.apply(&self.mlp)?;
(xs + residual)?.i((.., 0))
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
out_proj: Linear,
num_heads: usize,
head_dim: usize,
scale: f64,
}
impl Attention {
fn new<C: TransformerConfig>(cfg: &C, vb: VarBuilder) -> Result<Self> {
let embed_dim = cfg.hidden_size();
let q_proj = linear(embed_dim, embed_dim, vb.pp("q_proj"))?;
let k_proj = linear(embed_dim, embed_dim, vb.pp("k_proj"))?;
let v_proj = linear(embed_dim, embed_dim, vb.pp("v_proj"))?;
let out_proj = linear(embed_dim, embed_dim, vb.pp("out_proj"))?;
let num_heads = cfg.num_attention_heads();
let head_dim = embed_dim / num_heads;
Ok(Self {
q_proj,
k_proj,
v_proj,
out_proj,
num_heads,
head_dim,
scale: (head_dim as f64).powf(-0.5),
})
}
fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
let (batch_size, q_len, _) = xs.dims3()?;
let query_states = xs.apply(&self.q_proj)?;
let key_states = xs.apply(&self.k_proj)?;
let value_states = xs.apply(&self.v_proj)?;
let shape = (batch_size, q_len, self.num_heads, self.head_dim);
let query_states = query_states.reshape(shape)?.transpose(1, 2)?.contiguous()?;
let key_states = key_states.reshape(shape)?.transpose(1, 2)?.contiguous()?;
let value_states = value_states.reshape(shape)?.transpose(1, 2)?.contiguous()?;
let attn_weights = (query_states.matmul(&key_states.t()?)? * self.scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
// The original implementation upcasts to f32 but candle_nn::ops::softmax should handle this properly.
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_outputs = attn_weights
.matmul(&value_states)?
.transpose(1, 2)?
.reshape((batch_size, q_len, ()))?
.apply(&self.out_proj)?;
Ok(attn_outputs)
}
}
// https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/modeling_siglip.py#L599
#[derive(Debug, Clone)]
struct Mlp {
fc1: Linear,
fc2: Linear,
activation_fn: candle_nn::Activation,
}
impl Mlp {
fn new<C: TransformerConfig>(cfg: &C, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size();
let intermediate_size = cfg.intermediate_size();
let fc1 = candle_nn::linear(hidden_size, intermediate_size, vb.pp("fc1"))?;
let fc2 = candle_nn::linear(intermediate_size, hidden_size, vb.pp("fc2"))?;
Ok(Self {
fc1,
fc2,
activation_fn: cfg.hidden_act(),
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &candle::Tensor) -> Result<candle::Tensor> {
xs.apply(&self.fc1)?
.apply(&self.activation_fn)?
.apply(&self.fc2)
}
}
// https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/modeling_siglip.py#L614
#[derive(Debug, Clone)]
struct EncoderLayer {
self_attn: Attention,
layer_norm1: LayerNorm,
mlp: Mlp,
layer_norm2: LayerNorm,
}
impl EncoderLayer {
fn new<C: TransformerConfig>(cfg: &C, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size();
let layer_norm_eps = cfg.layer_norm_eps();
let self_attn = Attention::new(cfg, vb.pp("self_attn"))?;
let layer_norm1 = layer_norm(hidden_size, layer_norm_eps, vb.pp("layer_norm1"))?;
let mlp = Mlp::new(cfg, vb.pp("mlp"))?;
let layer_norm2 = layer_norm(hidden_size, layer_norm_eps, vb.pp("layer_norm2"))?;
Ok(Self {
self_attn,
layer_norm1,
mlp,
layer_norm2,
})
}
fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
let residual = xs;
let xs = xs.apply(&self.layer_norm1)?;
let xs = self.self_attn.forward(&xs, attention_mask)?;
let xs = (residual + xs)?;
let residual = &xs;
let xs = xs.apply(&self.layer_norm2)?.apply(&self.mlp)?;
let xs = (xs + residual)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct Encoder {
layers: Vec<EncoderLayer>,
}
impl Encoder {
fn new<C: TransformerConfig>(cfg: &C, vb: VarBuilder) -> Result<Self> {
let mut layers = vec![];
let vb = vb.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers() {
let layer = EncoderLayer::new(cfg, vb.pp(layer_idx))?;
layers.push(layer)
}
Ok(Self { layers })
}
fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs, attention_mask)?
}
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct VisionEmbeddings {
patch_embedding: candle_nn::Conv2d,
position_embedding: Tensor,
patch_size: usize,
base_num_patches_per_side: usize,
}
impl VisionEmbeddings {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let conv2d_cfg = candle_nn::Conv2dConfig {
stride: cfg.patch_size,
..Default::default()
};
let patch_embedding = candle_nn::conv2d(
cfg.num_channels,
cfg.hidden_size,
cfg.patch_size,
conv2d_cfg,
vb.pp("patch_embedding"),
)?;
let num_patches_per_side = cfg.image_size / cfg.patch_size;
let embedder = candle_nn::embedding(
num_patches_per_side.pow(2),
cfg.hidden_size(),
vb.pp("position_embedding"),
)?;
let position_embedding = embedder.embeddings();
let position_embedding = position_embedding
.reshape((
1,
num_patches_per_side,
num_patches_per_side,
cfg.hidden_size(),
))?
.permute((0, 3, 1, 2))?;
Ok(Self {
patch_embedding,
position_embedding,
patch_size: cfg.patch_size,
base_num_patches_per_side: num_patches_per_side,
})
}
}
impl Module for VisionEmbeddings {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
//embed tokens
let (_batch, _channels, _height, _width) = xs.dims4()?;
let embeddings = xs.apply(&self.patch_embedding)?;
// interpolate position embeddings for the current image size (if needed)
let num_patches_h = _height / self.patch_size;
let num_patches_w = _width / self.patch_size;
let resized_position_embedding = if num_patches_w == self.base_num_patches_per_side
&& num_patches_h == self.base_num_patches_per_side
{
self.position_embedding.clone()
} else {
self.position_embedding
.interpolate2d(num_patches_h, num_patches_w)?
};
// Add position embeddings to tokens and flatten from 2D patches to 1D sequence
let embeddings = embeddings
.broadcast_add(&resized_position_embedding)?
.flatten_from(2)?
.transpose(1, 2)?;
Ok(embeddings)
}
}
#[derive(Debug, Clone)]
struct VisionTransformer {
embeddings: VisionEmbeddings,
encoder: Encoder,
post_layernorm: LayerNorm,
head: Option<MultiheadAttentionPoolingHead>,
}
impl VisionTransformer {
fn new(cfg: &VisionConfig, use_head: bool, vb: VarBuilder) -> Result<Self> {
let embeddings = VisionEmbeddings::new(cfg, vb.pp("embeddings"))?;
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let post_layernorm =
layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_layernorm"))?;
let head = if use_head {
Some(MultiheadAttentionPoolingHead::new(cfg, vb.pp("head"))?)
} else {
None
};
Ok(Self {
embeddings,
encoder,
post_layernorm,
head,
})
}
}
impl Module for VisionTransformer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.apply(&self.embeddings)?;
let xs = self.encoder.forward(&xs, None)?;
let xs = xs.apply(&self.post_layernorm)?;
match self.head.as_ref() {
None => Ok(xs),
Some(h) => xs.apply(h),
}
}
}
#[derive(Debug, Clone)]
pub struct VisionModel {
vision_model: VisionTransformer,
}
impl VisionModel {
pub fn new(cfg: &VisionConfig, use_head: bool, vb: VarBuilder) -> Result<Self> {
let vision_model = VisionTransformer::new(cfg, use_head, vb)?;
Ok(Self { vision_model })
}
}
impl Module for VisionModel {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.vision_model)
}
}
#[derive(Debug, Clone)]
struct TextEmbeddings {
token_embedding: candle_nn::Embedding,
position_embedding: candle_nn::Embedding,
position_ids: Tensor,
}
impl TextEmbeddings {
fn new(cfg: &TextConfig, vb: VarBuilder) -> Result<Self> {
let token_embedding =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("token_embedding"))?;
let position_embedding = candle_nn::embedding(
cfg.max_position_embeddings,
cfg.hidden_size,
vb.pp("position_embedding"),
)?;
let position_ids =
Tensor::arange(0u32, cfg.max_position_embeddings as u32, vb.device())?.unsqueeze(0)?;
Ok(Self {
token_embedding,
position_embedding,
position_ids,
})
}
}
impl Module for TextEmbeddings {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let seq_length = input_ids.dim(D::Minus1)?;
let inputs_embeds = self.token_embedding.forward(input_ids)?;
let position_ids = self.position_ids.narrow(1, 0, seq_length)?;
let position_embedding = self.position_embedding.forward(&position_ids)?;
inputs_embeds.broadcast_add(&position_embedding)
}
}
#[derive(Debug, Clone)]
pub struct TextTransformer {
embeddings: TextEmbeddings,
encoder: Encoder,
final_layer_norm: LayerNorm,
pub head: Linear,
}
impl TextTransformer {
fn new(cfg: &TextConfig, vb: VarBuilder) -> Result<Self> {
let embeddings = TextEmbeddings::new(cfg, vb.pp("embeddings"))?;
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let final_layer_norm = layer_norm(
cfg.hidden_size,
cfg.layer_norm_eps,
vb.pp("final_layer_norm"),
)?;
let head = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("head"))?;
Ok(Self {
embeddings,
encoder,
final_layer_norm,
head,
})
}
}
impl Module for TextTransformer {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let (_bsz, seq_len) = input_ids.dims2()?;
let input_ids = self.embeddings.forward(input_ids)?;
let input_ids = self.encoder.forward(&input_ids, None)?;
let last_hidden_state = self.final_layer_norm.forward(&input_ids)?;
last_hidden_state
.i((.., seq_len - 1, ..))?
.contiguous()?
.apply(&self.head)
}
}
#[derive(Debug, Clone)]
pub struct TextModel {
pub text_model: TextTransformer,
}
impl TextModel {
pub fn new(cfg: &TextConfig, vb: VarBuilder) -> Result<Self> {
let text_model = TextTransformer::new(cfg, vb)?;
Ok(Self { text_model })
}
}
impl Module for TextModel {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.text_model)
}
}
#[derive(Clone, Debug)]
pub struct Model {
text_model: TextModel,
vision_model: VisionModel,
logit_bias: Tensor,
logit_scale: Tensor,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let text_model = TextModel::new(&cfg.text_config, vb.pp("text_model"))?;
let vision_model = VisionModel::new(&cfg.vision_config, true, vb.pp("vision_model"))?;
let logit_scale = vb.get(&[1], "logit_scale")?;
let logit_bias = vb.get(&[1], "logit_bias")?;
Ok(Self {
text_model,
vision_model,
logit_bias,
logit_scale,
})
}
pub fn get_text_features(&self, input_ids: &Tensor) -> Result<Tensor> {
input_ids.apply(&self.text_model)
}
pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> {
pixel_values.apply(&self.vision_model)
}
pub fn forward(&self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<(Tensor, Tensor)> {
let image_features = self.get_image_features(pixel_values)?;
let text_features = self.get_text_features(input_ids)?;
let image_features_normalized = div_l2_norm(&image_features)?;
let text_features_normalized = div_l2_norm(&text_features)?;
let logits_per_text = text_features_normalized.matmul(&image_features_normalized.t()?)?;
let logit_scale = self.logit_scale.exp()?;
let logits_per_text = logits_per_text
.broadcast_mul(&logit_scale)?
.broadcast_add(&self.logit_bias)?;
let logits_per_image = logits_per_text.t()?;
Ok((logits_per_text, logits_per_image))
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/granite.rs | candle-transformers/src/models/granite.rs | //! Granite is a Long Context Transformer Language Model.
//!
//! A high-performance transformer model optimized for efficient processing
//! of very long context sequences.
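//!
//! A minimal usage sketch; the file names are assumptions, and `index_pos` must be advanced
//! by the number of tokens already fed through the cache:
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::granite::{Cache, Granite, GraniteConfig};
//!
//! let device = Device::Cpu;
//! let cfg: GraniteConfig = serde_json::from_str(&std::fs::read_to_string("config.json")?)?;
//! let config = cfg.into_config(false); // use_flash_attn = false
//! let vb = unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)? };
//! let granite = Granite::load(vb, &config)?;
//! let mut cache = Cache::new(true, DType::F32, &config, &device)?;
//! let tokens = Tensor::new(&[[1u32, 2, 3]], &device)?;
//! let logits = granite.forward(&tokens, 0, &mut cache)?; // index_pos = 0 for the prompt
//! ```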
use super::with_tracing::{linear_no_bias as linear, Linear, RmsNorm};
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{embedding, Embedding, Module, VarBuilder};
use std::{collections::HashMap, f32::consts::PI};
pub const DEFAULT_MAX_SEQ_LEN: usize = 4096;
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub enum GraniteRopeType {
#[serde(rename = "granite")]
Granite,
#[default]
#[serde(rename = "default")]
Default,
}
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub struct GraniteRopeConfig {
pub factor: f32,
pub low_freq_factor: f32,
pub high_freq_factor: f32,
pub original_max_position_embeddings: usize,
pub rope_type: GraniteRopeType,
}
#[derive(Debug, Clone, serde::Deserialize)]
#[serde(untagged)]
pub enum GraniteEosToks {
Single(u32),
Multiple(Vec<u32>),
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct GraniteConfig {
pub hidden_size: usize,
pub intermediate_size: usize,
pub vocab_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: Option<usize>,
pub rms_norm_eps: f64,
#[serde(default = "default_rope")]
pub rope_theta: f32,
pub bos_token_id: Option<u32>,
pub eos_token_id: Option<GraniteEosToks>,
pub rope_scaling: Option<GraniteRopeConfig>,
pub max_position_embeddings: usize,
}
impl GraniteConfig {
pub fn num_key_value_heads(&self) -> usize {
self.num_key_value_heads.unwrap_or(self.num_attention_heads)
}
}
fn default_rope() -> f32 {
10_000.0
}
impl GraniteConfig {
pub fn into_config(self, use_flash_attn: bool) -> Config {
Config {
hidden_size: self.hidden_size,
intermediate_size: self.intermediate_size,
vocab_size: self.vocab_size,
num_hidden_layers: self.num_hidden_layers,
num_attention_heads: self.num_attention_heads,
num_key_value_heads: self.num_key_value_heads(),
rms_norm_eps: self.rms_norm_eps,
rope_theta: self.rope_theta,
use_flash_attn,
bos_token_id: self.bos_token_id,
eos_token_id: self.eos_token_id,
rope_scaling: self.rope_scaling,
max_position_embeddings: self.max_position_embeddings,
}
}
}
#[derive(Debug, Clone)]
pub struct Config {
pub hidden_size: usize,
pub intermediate_size: usize,
pub vocab_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub use_flash_attn: bool,
pub rms_norm_eps: f64,
pub rope_theta: f32,
pub bos_token_id: Option<u32>,
pub eos_token_id: Option<GraniteEosToks>,
pub rope_scaling: Option<GraniteRopeConfig>,
pub max_position_embeddings: usize,
}
#[derive(Debug, Clone)]
pub struct Cache {
masks: HashMap<usize, Tensor>,
pub use_kv_cache: bool,
kvs: Vec<Option<(Tensor, Tensor)>>,
cos: Tensor,
sin: Tensor,
device: Device,
}
fn calculate_default_inv_freq(cfg: &Config) -> Vec<f32> {
let head_dim = cfg.hidden_size / cfg.num_attention_heads;
(0..head_dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f32 / head_dim as f32))
.collect()
}
impl Cache {
pub fn new(use_kv_cache: bool, dtype: DType, config: &Config, device: &Device) -> Result<Self> {
// precompute freqs_cis
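        // When `rope_scaling` is set (llama3-style scaling), each inverse frequency is
        // adjusted based on its wavelength relative to the original training context:
        // long wavelengths are divided by `factor`, short wavelengths are kept as-is,
        // and the band in between is linearly interpolated via `smooth`.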
let theta = match &config.rope_scaling {
None
| Some(GraniteRopeConfig {
rope_type: GraniteRopeType::Default,
..
}) => calculate_default_inv_freq(config),
Some(rope_scaling) => {
let low_freq_wavelen = rope_scaling.original_max_position_embeddings as f32
/ rope_scaling.low_freq_factor;
let high_freq_wavelen = rope_scaling.original_max_position_embeddings as f32
/ rope_scaling.high_freq_factor;
calculate_default_inv_freq(config)
.into_iter()
.map(|freq| {
let wavelen = 2. * PI / freq;
if wavelen < high_freq_wavelen {
freq
} else if wavelen > low_freq_wavelen {
freq / rope_scaling.factor
} else {
let smooth = (rope_scaling.original_max_position_embeddings as f32
/ wavelen
- rope_scaling.low_freq_factor)
/ (rope_scaling.high_freq_factor - rope_scaling.low_freq_factor);
(1. - smooth) * freq / rope_scaling.factor + smooth * freq
}
})
.collect::<Vec<_>>()
}
};
let theta = Tensor::new(theta, device)?;
let idx_theta = Tensor::arange(0, config.max_position_embeddings as u32, device)?
.to_dtype(DType::F32)?
.reshape((config.max_position_embeddings, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
let cos = idx_theta.cos()?.to_dtype(dtype)?;
let sin = idx_theta.sin()?.to_dtype(dtype)?;
Ok(Self {
masks: HashMap::new(),
use_kv_cache,
kvs: vec![None; config.num_hidden_layers],
device: device.clone(),
cos,
sin,
})
}
fn mask(&mut self, t: usize) -> Result<Tensor> {
if let Some(mask) = self.masks.get(&t) {
Ok(mask.clone())
} else {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), &self.device)?;
self.masks.insert(t, mask.clone());
Ok(mask)
}
}
}
#[derive(Debug, Clone)]
struct CausalSelfAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_attention_heads: usize,
num_key_value_heads: usize,
head_dim: usize,
use_flash_attn: bool,
span: tracing::Span,
span_rot: tracing::Span,
max_position_embeddings: usize,
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
impl CausalSelfAttention {
fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> {
let _enter = self.span_rot.enter();
let (_b_sz, _, seq_len, _hidden_size) = x.dims4()?;
let cos = cache.cos.narrow(0, index_pos, seq_len)?;
let sin = cache.sin.narrow(0, index_pos, seq_len)?;
candle_nn::rotary_emb::rope(x, &cos, &sin)
}
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut Cache,
) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_sz, seq_len, hidden_size) = x.dims3()?;
let q = self.q_proj.forward(x)?;
let k = self.k_proj.forward(x)?;
let v = self.v_proj.forward(x)?;
let q = q
.reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let mut v = v
.reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?;
let q = self.apply_rotary_emb(&q, index_pos, cache)?;
let mut k = self.apply_rotary_emb(&k, index_pos, cache)?;
if cache.use_kv_cache {
if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] {
k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?;
v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?;
                // k/v have shape (b_sz, num_key_value_heads, seq_len, head_dim), so the
                // sequence dimension is axis 2; keep only the most recent
                // `max_position_embeddings` positions in the cache.
                let k_seq_len = k.dims()[2];
                if k_seq_len > self.max_position_embeddings {
                    k = k
                        .narrow(
                            2,
                            k_seq_len - self.max_position_embeddings,
                            self.max_position_embeddings,
                        )?
                        .contiguous()?
                }
                let v_seq_len = v.dims()[2];
                if v_seq_len > self.max_position_embeddings {
                    v = v
                        .narrow(
                            2,
                            v_seq_len - self.max_position_embeddings,
                            self.max_position_embeddings,
                        )?
                        .contiguous()?
                }
}
cache.kvs[block_idx] = Some((k.clone(), v.clone()))
}
let k = self.repeat_kv(k)?;
let v = self.repeat_kv(v)?;
let y = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = q.transpose(1, 2)?;
let k = k.transpose(1, 2)?;
let v = v.transpose(1, 2)?;
let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)?.transpose(1, 2)?
} else {
let in_dtype = q.dtype();
let q = q.to_dtype(DType::F32)?;
let k = k.to_dtype(DType::F32)?;
let v = v.to_dtype(DType::F32)?;
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = if seq_len == 1 {
att
} else {
let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?;
masked_fill(&att, &mask, f32::NEG_INFINITY)?
};
let att = candle_nn::ops::softmax(&att, D::Minus1)?;
            // Convert to contiguous as matmul doesn't support strided value tensors for now.
att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?
};
let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, hidden_size])?;
let y = self.o_proj.forward(&y)?;
Ok(y)
}
fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
crate::utils::repeat_kv(x, self.num_attention_heads / self.num_key_value_heads)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "attn");
let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
let size_in = cfg.hidden_size;
let size_q = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_attention_heads;
let size_kv = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_key_value_heads;
let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?;
let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?;
let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?;
let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_attention_heads: cfg.num_attention_heads,
num_key_value_heads: cfg.num_key_value_heads,
head_dim: cfg.hidden_size / cfg.num_attention_heads,
use_flash_attn: cfg.use_flash_attn,
span,
span_rot,
max_position_embeddings: cfg.max_position_embeddings,
})
}
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Clone)]
struct Mlp {
c_fc1: Linear,
c_fc2: Linear,
c_proj: Linear,
span: tracing::Span,
}
impl Mlp {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let x = (candle_nn::ops::silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?;
self.c_proj.forward(&x)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "mlp");
let h_size = cfg.hidden_size;
let i_size = cfg.intermediate_size;
let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?;
let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?;
let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?;
Ok(Self {
c_fc1,
c_fc2,
c_proj,
span,
})
}
}
#[derive(Debug, Clone)]
struct Block {
rms_1: RmsNorm,
attn: CausalSelfAttention,
rms_2: RmsNorm,
mlp: Mlp,
span: tracing::Span,
}
impl Block {
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut Cache,
) -> Result<Tensor> {
let _enter = self.span.enter();
let residual = x;
let x = self.rms_1.forward(x)?;
let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?;
let residual = &x;
let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?;
Ok(x)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "block");
let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?;
let mlp = Mlp::load(vb.pp("mlp"), cfg)?;
let rms_1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let rms_2 = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
rms_1,
attn,
rms_2,
mlp,
span,
})
}
}
#[derive(Debug, Clone)]
pub struct Granite {
wte: Embedding,
blocks: Vec<Block>,
ln_f: RmsNorm,
lm_head: Linear,
}
impl Granite {
pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> {
let (_b_sz, seq_len) = x.dims2()?;
let mut x = self.wte.forward(x)?;
for (block_idx, block) in self.blocks.iter().enumerate() {
x = block.forward(&x, index_pos, block_idx, cache)?;
}
let x = self.ln_f.forward(&x)?;
let x = x.i((.., seq_len - 1, ..))?.contiguous()?;
let logits = self.lm_head.forward(&x)?;
logits.to_dtype(DType::F32)
}
pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let wte = embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?;
let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
let ln_f = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?;
let blocks: Vec<_> = (0..cfg.num_hidden_layers)
.map(|i| Block::load(vb.pp(format!("model.layers.{i}")), cfg).unwrap())
.collect();
Ok(Self {
wte,
blocks,
ln_f,
lm_head,
})
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_phi.rs | candle-transformers/src/models/quantized_phi.rs | //! Phi2 model implementation with quantization support.
//!
//! Phi2 is a 2.7B parameter language model using a scaled-up Transformer decoder architecture.
//! This implementation provides quantization for reduced memory and compute usage.
//!
//! Key characteristics:
//! - Decoder-only transformer architecture
//! - Rotary positional embeddings (RoPE) applied to a subset of each head's dimensions
//! - LayerNorm before the attention and feed-forward blocks
//! - Fused QKV projection and a GELU feed-forward network
//! - Weights loaded as quantized tensors from GGUF files
//!
//! References:
//! - [Phi-1.5 Technical Report](https://arxiv.org/abs/2309.05463)
//! - [Model Card](https://huggingface.co/microsoft/phi-2)
//!
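//! A minimal loading sketch; the file name is an assumption and `from_gguf` is assumed to
//! mirror the constructor used by candle's other GGUF-based models (see candle-examples
//! for the maintained flow):
//!
//! ```ignore
//! use candle::quantized::gguf_file;
//! use candle::Device;
//! use candle_transformers::models::quantized_phi::ModelWeights;
//!
//! let device = Device::Cpu;
//! let mut file = std::fs::File::open("phi-2.Q4_K_M.gguf")?;
//! let content = gguf_file::Content::read(&mut file)?;
//! let mut model = ModelWeights::from_gguf(content, &mut file, &device)?;
//! ```
//!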
use std::collections::HashMap;
use candle::quantized::gguf_file;
use candle::quantized::QTensor;
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{Embedding, LayerNorm};
pub const MAX_SEQ_LEN: usize = 4096;
#[derive(Debug, Clone)]
struct QLinear {
inner: candle::quantized::QMatMul,
bias: Tensor,
span: tracing::Span,
}
impl QLinear {
fn new<R: std::io::Read + std::io::Seek>(
ct: &gguf_file::Content,
r: &mut R,
name: &str,
device: &Device,
) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
let w = ct.tensor(r, &format!("{name}.weight"), device)?;
let b = ct.tensor(r, &format!("{name}.bias"), device)?;
let inner = candle::quantized::QMatMul::from_qtensor(w)?;
let bias = b.dequantize(device)?;
Ok(Self { inner, bias, span })
}
}
impl Module for QLinear {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)?.broadcast_add(&self.bias)
}
}
#[derive(Debug, Clone)]
struct Mlp {
ffn_up: QLinear,
ffn_down: QLinear,
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.ffn_up)?.gelu()?.apply(&self.ffn_down)
}
}
#[derive(Debug, Clone)]
struct LayerWeights {
attn_qkv: QLinear,
attn_output: QLinear,
attn_norm: LayerNorm,
mlp: Mlp,
n_head: usize,
n_kv_head: usize,
head_dim: usize,
cos: Tensor,
sin: Tensor,
rope_dim: usize,
neg_inf: Tensor,
kv_cache: Option<(Tensor, Tensor)>,
span_attn: tracing::Span,
span_rot: tracing::Span,
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> {
let shape = mask.shape();
let m = mask.where_cond(&on_true.broadcast_as(shape.dims())?, on_false)?;
Ok(m)
}
impl LayerWeights {
fn apply_rotary_emb(&self, xs: &Tensor, index_pos: usize) -> Result<Tensor> {
let _enter = self.span_rot.enter();
let (_b_sz, _n_head, seq_len, _n_embd) = xs.dims4()?;
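// Partial rotary embeddings: only the first `rope_dim` features are rotated, the rest pass through unchanged.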
let xs_rot = xs.i((.., .., .., ..self.rope_dim))?;
let xs_pass = xs.i((.., .., .., self.rope_dim..))?;
let cos = self.cos.narrow(0, index_pos, seq_len)?;
let sin = self.sin.narrow(0, index_pos, seq_len)?;
let xs_rot = candle_nn::rotary_emb::rope(&xs_rot.contiguous()?, &cos, &sin)?;
Tensor::cat(&[&xs_rot, &xs_pass], D::Minus1)
}
fn forward_attn(
&mut self,
x: &Tensor,
mask: Option<&Tensor>,
index_pos: usize,
) -> Result<Tensor> {
let _enter = self.span_attn.enter();
let (b_sz, seq_len, n_embd) = x.dims3()?;
let qkv =
self.attn_qkv
.forward(x)?
.reshape((b_sz, seq_len, 3, self.n_head, self.head_dim))?;
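// Split the fused QKV projection along dim 2: index 0 -> queries, 1 -> keys, 2 -> values.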
let q = qkv.i((.., .., 0))?.transpose(1, 2)?;
let k = qkv.i((.., .., 1))?.transpose(1, 2)?;
let v = qkv.i((.., .., 2))?.transpose(1, 2)?;
// This call to contiguous ensures that the fast kernel can be called below. It's
// actually a no-op except when processing the initial prompt so has no significant
// impact on performance.
let v = v.contiguous()?;
let q = self.apply_rotary_emb(&q, index_pos)?.contiguous()?;
let k = self.apply_rotary_emb(&k, index_pos)?;
let (k, v) = match &self.kv_cache {
None => (k.contiguous()?, v.contiguous()?),
Some((k_cache, v_cache)) => {
if index_pos == 0 {
(k.contiguous()?, v.contiguous()?)
} else {
let k = Tensor::cat(&[k_cache, &k], 2)?;
let v = Tensor::cat(&[v_cache, &v], 2)?;
(k.contiguous()?, v.contiguous()?)
}
}
};
self.kv_cache = Some((k.clone(), v.clone()));
let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?;
let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?;
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = match mask {
None => att,
Some(mask) => {
let mask = mask.broadcast_as(att.shape())?;
masked_fill(&att, &mask, &self.neg_inf)?
}
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
// Convert to contiguous as matmul doesn't support strided vs for now.
let y = att.matmul(&v.contiguous()?)?;
let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
let y = self.attn_output.forward(&y)?;
Ok(y)
}
}
#[derive(Debug, Clone)]
pub struct ModelWeights {
tok_embeddings: Embedding,
layers: Vec<LayerWeights>,
output_norm: LayerNorm,
output: QLinear,
masks: HashMap<usize, Tensor>,
span: tracing::Span,
span_output: tracing::Span,
}
fn precomput_freqs_cis(
head_dim: usize,
freq_base: f32,
device: &Device,
) -> Result<(Tensor, Tensor)> {
let theta: Vec<_> = (0..head_dim)
.step_by(2)
.map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32))
.collect();
let theta = Tensor::new(theta.as_slice(), device)?;
let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)?
.to_dtype(DType::F32)?
.reshape((MAX_SEQ_LEN, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
let cos = idx_theta.cos()?;
let sin = idx_theta.sin()?;
Ok((cos, sin))
}
fn layer_norm(w: QTensor, b: QTensor, eps: f64) -> Result<LayerNorm> {
let w = w.dequantize(&w.device())?;
let b = b.dequantize(&b.device())?;
let ln = LayerNorm::new(w, b, eps);
Ok(ln)
}
impl ModelWeights {
pub fn from_gguf<R: std::io::Seek + std::io::Read>(
ct: gguf_file::Content,
reader: &mut R,
device: &Device,
) -> Result<Self> {
let md_get = |s: &str| match ct.metadata.get(s) {
None => candle::bail!("cannot find {s} in metadata"),
Some(v) => Ok(v),
};
// Parameter extraction from metadata.
let head_count = md_get("phi2.attention.head_count")?.to_u32()? as usize;
let head_count_kv = md_get("phi2.attention.head_count_kv")?.to_u32()? as usize;
let block_count = md_get("phi2.block_count")?.to_u32()? as usize;
let embedding_length = md_get("phi2.embedding_length")?.to_u32()? as usize;
let rope_dim = md_get("phi2.rope.dimension_count")?.to_u32()? as usize;
let ln_eps = md_get("phi2.attention.layer_norm_epsilon")?.to_f32()? as f64;
let (cos, sin) = precomput_freqs_cis(rope_dim, 10_000., device)?;
let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?;
let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?;
let tok_embeddings = tok_embeddings.dequantize(device)?;
let output_norm = layer_norm(
ct.tensor(reader, "output_norm.weight", device)?,
ct.tensor(reader, "output_norm.bias", device)?,
ln_eps,
)?;
let output = QLinear::new(&ct, reader, "output", device)?;
let mut layers = Vec::with_capacity(block_count);
for layer_idx in 0..block_count {
let prefix = format!("blk.{layer_idx}");
let ffn_up = QLinear::new(&ct, reader, &format!("{prefix}.ffn_up"), device)?;
let ffn_down = QLinear::new(&ct, reader, &format!("{prefix}.ffn_down"), device)?;
let mlp = Mlp { ffn_up, ffn_down };
let attn_norm = layer_norm(
ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?,
ct.tensor(reader, &format!("{prefix}.attn_norm.bias"), device)?,
ln_eps,
)?;
let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
layers.push(LayerWeights {
attn_qkv: QLinear::new(&ct, reader, &format!("{prefix}.attn_qkv"), device)?,
attn_output: QLinear::new(&ct, reader, &format!("{prefix}.attn_output"), device)?,
attn_norm,
mlp,
n_head: head_count,
n_kv_head: head_count_kv,
head_dim: embedding_length / head_count,
cos: cos.clone(),
sin: sin.clone(),
rope_dim,
neg_inf: neg_inf.clone(),
kv_cache: None,
span_attn,
span_rot,
})
}
let span = tracing::span!(tracing::Level::TRACE, "model");
let span_output = tracing::span!(tracing::Level::TRACE, "output");
Ok(Self {
tok_embeddings: Embedding::new(tok_embeddings, embedding_length),
layers,
output_norm,
output,
masks: HashMap::new(),
span,
span_output,
})
}
fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> {
if let Some(mask) = self.masks.get(&t) {
Ok(mask.clone())
} else {
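// Upper-triangular causal mask: entry (i, j) is 1 when j > i, i.e. the positions to mask out.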
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), device)?;
self.masks.insert(t, mask.clone());
Ok(mask)
}
}
pub fn forward(&mut self, xs: &Tensor, index_pos: usize) -> Result<Tensor> {
let (_b_sz, seq_len) = xs.dims2()?;
let mask = if seq_len == 1 {
None
} else {
Some(self.mask(seq_len, xs.device())?)
};
let _enter = self.span.enter();
let mut xs = self.tok_embeddings.forward(xs)?;
for layer in self.layers.iter_mut() {
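// Phi-2 block: attention and MLP are computed in parallel from the same normalized input
// and both are added to the residual.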
let residual = &xs;
let xs_norm = xs.apply(&layer.attn_norm)?;
let attn_outputs = layer.forward_attn(&xs_norm, mask.as_ref(), index_pos)?;
let feed_forward_hidden_states = layer.mlp.forward(&xs_norm)?;
xs = (attn_outputs + feed_forward_hidden_states + residual)?
}
let xs = xs.apply(&self.output_norm)?.i((.., seq_len - 1, ..))?;
let _enter = self.span_output.enter();
self.output.forward(&xs)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/gemma.rs | candle-transformers/src/models/gemma.rs | //! Gemma inference implementation.
//!
//! See ["Gemma: Open Models Based on Gemini Technology"](https://blog.google/technology/developers/gemma-open-ai-model/)
//!
//! Based on the reference implementations from Google and PyTorch.
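//!
//! A minimal usage sketch (assumes a populated `VarBuilder` and pre-tokenized input;
//! weight loading, sampling and the `run` wrapper are illustrative, not part of this module):
//!
//! ```ignore
//! use candle_transformers::models::gemma::{Config, Model};
//! # fn run(cfg: &Config, vb: candle_nn::VarBuilder, tokens: &candle::Tensor) -> candle::Result<()> {
//! let mut model = Model::new(/* use_flash_attn */ false, cfg, vb)?;
//! // `tokens` has shape (batch, seq_len); logits are returned for the last position only.
//! let logits = model.forward(tokens, 0)?;
//! # Ok(())
//! # }
//! ```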
use std::sync::Arc;
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{linear_b as linear, Activation, Linear, VarBuilder};
fn default_max_position_embeddings() -> usize {
4096
}
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub attention_bias: bool,
pub head_dim: usize,
// The code gemma configs include both hidden_act and hidden_activation.
pub hidden_act: Option<Activation>,
pub hidden_activation: Option<Activation>,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_theta: f64,
pub vocab_size: usize,
#[serde(default = "default_max_position_embeddings")]
pub max_position_embeddings: usize,
}
impl Config {
fn hidden_act(&self) -> Result<Activation> {
match (self.hidden_act, self.hidden_activation) {
(None, Some(act)) | (Some(act), None) => Ok(act),
(Some(_), Some(_)) => candle::bail!("both hidden_act and hidden_activation are set"),
(None, None) => candle::bail!("none of hidden_act and hidden_activation are set"),
}
}
}
#[derive(Debug, Clone)]
struct RmsNorm {
weight: Tensor,
eps: f64,
}
impl RmsNorm {
fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(dim, "weight")?;
Ok(Self { weight, eps })
}
}
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
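// Gemma stores the norm weight as an offset from one, hence the `weight + 1` scaling below.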
x_normed
.to_dtype(x_dtype)?
.broadcast_mul(&(&self.weight + 1.0)?)
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.head_dim;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("gate_proj"))?;
let up_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("up_proj"))?;
let down_proj = linear(intermediate_sz, hidden_sz, false, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act()?,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
use_flash_attn: bool,
}
impl Attention {
fn new(
rotary_emb: Arc<RotaryEmbedding>,
use_flash_attn: bool,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = cfg.head_dim;
let bias = cfg.attention_bias;
let q_proj = linear(hidden_sz, num_heads * head_dim, bias, vb.pp("q_proj"))?;
let k_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("k_proj"))?;
let v_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("v_proj"))?;
let o_proj = linear(num_heads * head_dim, hidden_sz, bias, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
rotary_emb,
kv_cache: None,
use_flash_attn,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
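// Grouped-query attention: repeat each KV head so that every query head has a matching key/value head.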
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = query_states.transpose(1, 2)?;
let k = key_states.transpose(1, 2)?;
let v = value_states.transpose(1, 2)?;
let scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, scale, attention_mask.is_some())?.transpose(1, 2)?
} else {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, ()))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(
rotary_emb: Arc<RotaryEmbedding>,
use_flash_attn: bool,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, use_flash_attn, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
device: Device,
dtype: DType,
hidden_size: usize,
}
impl Model {
pub fn new(use_flash_attn: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer =
DecoderLayer::new(rotary_emb.clone(), use_flash_attn, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
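// The LM head is tied to the token embedding weights.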
let lm_head = Linear::new(embed_tokens.embeddings().clone(), None);
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
device: vb.device().clone(),
dtype: vb.dtype(),
hidden_size: cfg.hidden_size,
})
}
pub fn embed_tokens(&self) -> &candle_nn::Embedding {
&self.embed_tokens
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let xs = self.embed_tokens.forward(input_ids)?;
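// Gemma scales the token embeddings by sqrt(hidden_size) before the decoder stack.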
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn forward_embeds(
&mut self,
xs: &Tensor,
attn_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (_, seq_len, _) = xs.dims3()?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attn_mask, seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
// Forward the model and return the hidden states without the lm_head
pub fn forward_embeds_without_projection(
&mut self,
xs: &Tensor,
attn_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (_, _, _) = xs.dims3()?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attn_mask, seqlen_offset)?
}
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/glm4.rs | candle-transformers/src/models/glm4.rs | //! GLM-4 inference implementation.
//!
//! GLM-4 is an open bilingual (Chinese and English) language model family from THUDM,
//! the successor to ChatGLM.
//!
//! Based on the implementation from [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B)
use crate::models::with_tracing::{linear_b as linear, Linear};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
use serde::de::{self, Deserializer, Visitor};
use serde::Deserialize;
use std::fmt;
#[derive(Debug, Clone)]
pub enum EosTokenId {
Single(u32),
Multiple(Vec<u32>),
}
impl<'de> Deserialize<'de> for EosTokenId {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct EosTokenIdVisitor;
impl<'de> Visitor<'de> for EosTokenIdVisitor {
type Value = EosTokenId;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("an integer or a list of integers")
}
fn visit_u64<E>(self, value: u64) -> std::result::Result<Self::Value, E>
where
E: de::Error,
{
if value <= u32::MAX as u64 {
Ok(EosTokenId::Single(value as u32))
} else {
Err(de::Error::custom("value too large for u32"))
}
}
fn visit_seq<A>(self, mut seq: A) -> std::result::Result<Self::Value, A::Error>
where
A: serde::de::SeqAccess<'de>,
{
let mut values = Vec::new();
while let Some(value) = seq.next_element::<u32>()? {
values.push(value);
}
Ok(EosTokenId::Multiple(values))
}
}
deserializer.deserialize_any(EosTokenIdVisitor)
}
}
fn default_one() -> usize {
1
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub num_layers: usize,
pub padded_vocab_size: usize,
pub hidden_size: usize,
pub ffn_hidden_size: usize,
pub kv_channels: usize,
pub num_attention_heads: usize,
pub seq_length: usize,
pub layernorm_epsilon: f64,
pub rmsnorm: bool,
pub apply_residual_connection_post_layernorm: bool,
pub post_layer_norm: bool,
pub add_bias_linear: bool,
pub add_qkv_bias: bool,
pub bias_dropout_fusion: bool,
pub multi_query_attention: bool,
pub multi_query_group_num: usize,
pub apply_query_key_layer_scaling: bool,
pub attention_softmax_in_fp32: bool,
pub fp32_residual_connection: bool,
#[serde(default = "default_one")]
pub rope_ratio: usize,
pub eos_token_id: Option<EosTokenId>,
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
cache: Tensor,
}
impl RotaryEmbedding {
fn new(cfg: &Config, dtype: DType, dev: &Device) -> Result<Self> {
let rotary_dim = cfg.kv_channels;
let n_elem = rotary_dim / 2;
let base = 10_000f64 * cfg.rope_ratio as f64;
let inv_freq: Vec<_> = (0..n_elem)
.step_by(2)
.map(|i| 1f32 / base.powf(i as f64 / n_elem as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, cfg.seq_length as u32, dev)?
.to_dtype(dtype)?
.reshape((cfg.seq_length, 1))?;
let freqs = t.matmul(&inv_freq)?;
let cache = Tensor::stack(&[&freqs.cos()?, &freqs.sin()?], D::Minus1)?;
Ok(Self { cache })
}
fn apply(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (seqlen, _b, np, _hn) = xs.dims4()?;
let cache = self.cache.narrow(0, seqlen_offset, seqlen)?;
let rot_dim = cache.dim(D::Minus2)? * 2;
let (xs, xs_pass) = (
xs.narrow(D::Minus1, 0, rot_dim)?,
xs.narrow(D::Minus1, rot_dim, rot_dim)?,
);
let xshaped = xs.reshape((seqlen, (), np, rot_dim / 2, 2))?;
let cache = cache.reshape((seqlen, (), 1, rot_dim / 2, 2))?;
let (xshaped0, xshaped1) = (
xshaped.i((.., .., .., .., 0))?,
xshaped.i((.., .., .., .., 1))?,
);
let (cache0, cache1) = (cache.i((.., .., .., .., 0))?, cache.i((.., .., .., .., 1))?);
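// Rotate each (even, odd) feature pair by the cached angle: (x0*cos - x1*sin, x1*cos + x0*sin).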
let xs_out = Tensor::stack(
&[
(xshaped0.broadcast_mul(&cache0)? - xshaped1.broadcast_mul(&cache1)?)?,
(xshaped1.broadcast_mul(&cache0)? + xshaped0.broadcast_mul(&cache1)?)?,
],
D::Minus1,
)?;
let xs_out = xs_out.flatten_from(3)?;
Tensor::cat(&[xs_out, xs_pass], D::Minus1)
}
}
#[derive(Debug, Clone)]
struct CoreAttention {
coeff: Option<f64>,
norm_factor: f64,
dtype: DType,
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32, dtype: DType) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true.to_dtype(dtype)?, on_false)?;
Ok(m)
}
impl CoreAttention {
fn new(layer_number: usize, cfg: &Config, dtype: DType) -> Result<Self> {
let norm_factor = (cfg.kv_channels as f64).sqrt();
let (norm_factor, coeff) = if cfg.apply_query_key_layer_scaling {
let coeff = f64::max(1.0, layer_number as f64);
(norm_factor * coeff, Some(coeff))
} else {
(norm_factor, None)
};
Ok(Self {
coeff,
norm_factor,
dtype,
})
}
fn forward(
&self,
query_layer: &Tensor,
key_layer: &Tensor,
value_layer: &Tensor,
attention_mask: &Option<Tensor>,
) -> Result<Tensor> {
let output_size = (
query_layer.dim(1)?, // b
query_layer.dim(2)?, // np
query_layer.dim(0)?, // sq
key_layer.dim(0)?, // sk
);
let query_layer =
query_layer.reshape((output_size.2, output_size.0 * output_size.1, ()))?;
let key_layer = key_layer.reshape((output_size.3, output_size.0 * output_size.1, ()))?;
let matmul_result = Tensor::matmul(
&query_layer.transpose(0, 1)?.contiguous()?,
&key_layer.transpose(0, 1)?.transpose(1, 2)?.contiguous()?,
)?;
let matmul_result = (matmul_result / self.norm_factor)?.reshape(output_size)?;
let matmul_result = match self.coeff {
None => matmul_result,
Some(coeff) => (matmul_result * coeff)?,
};
let attention_scores = match attention_mask {
Some(mask) => masked_fill(
&matmul_result,
&mask.broadcast_left((matmul_result.dim(0)?, matmul_result.dim(1)?))?,
f32::NEG_INFINITY,
self.dtype,
)?,
None => matmul_result,
};
let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;
let output_size = (
value_layer.dim(1)?,
value_layer.dim(2)?,
query_layer.dim(0)?,
value_layer.dim(3)?,
);
let value_layer =
value_layer.reshape((value_layer.dim(0)?, output_size.0 * output_size.1, ()))?;
let attention_probs =
attention_probs.reshape((output_size.0 * output_size.1, output_size.2, ()))?;
let context_layer = Tensor::matmul(
&attention_probs.contiguous()?,
&value_layer.transpose(0, 1)?.contiguous()?,
)?;
let context_layer = context_layer.reshape(output_size)?;
let context_layer = context_layer.permute((2, 0, 1, 3))?.contiguous()?;
context_layer.flatten_from(D::Minus2)
}
}
#[derive(Debug, Clone)]
struct SelfAttention {
query_key_value: Linear,
core_attention: CoreAttention,
dense: Linear,
multi_query_attention: bool,
num_attention_heads_per_partition: usize,
num_multi_query_groups_per_partition: usize,
hidden_size_per_attention_head: usize,
kv_cache: Option<(Tensor, Tensor)>,
}
impl SelfAttention {
fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let projection_size = cfg.kv_channels * cfg.num_attention_heads;
let hidden_size_per_attention_head = projection_size / cfg.num_attention_heads;
let qkv_hidden_size = if cfg.multi_query_attention {
projection_size + 2 * hidden_size_per_attention_head * cfg.multi_query_group_num
} else {
3 * projection_size
};
let query_key_value = linear(
cfg.hidden_size,
qkv_hidden_size,
cfg.add_bias_linear || cfg.add_qkv_bias,
vb.pp("query_key_value"),
)?;
let core_attention = CoreAttention::new(layer_number, cfg, vb.dtype())?;
let dense = linear(
cfg.hidden_size,
cfg.hidden_size,
cfg.add_bias_linear,
vb.pp("dense"),
)?;
Ok(Self {
query_key_value,
core_attention,
dense,
multi_query_attention: cfg.multi_query_attention,
num_attention_heads_per_partition: cfg.num_attention_heads,
num_multi_query_groups_per_partition: cfg.multi_query_group_num,
hidden_size_per_attention_head: cfg.kv_channels,
kv_cache: None,
})
}
fn reset_kv_cache(&mut self) {
self.kv_cache = None
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: &Option<Tensor>,
rotary_emb: &RotaryEmbedding,
) -> Result<Tensor> {
let mixed_x_layer = xs.apply(&self.query_key_value)?;
if !self.multi_query_attention {
candle::bail!("only multi_query_attention=true is supported")
}
let hpa = self.hidden_size_per_attention_head;
let query_layer =
mixed_x_layer.narrow(D::Minus1, 0, self.num_attention_heads_per_partition * hpa)?;
let key_layer = mixed_x_layer.narrow(
D::Minus1,
self.num_attention_heads_per_partition * hpa,
self.num_multi_query_groups_per_partition * hpa,
)?;
let value_layer = mixed_x_layer.narrow(
D::Minus1,
self.num_attention_heads_per_partition * hpa
+ self.num_multi_query_groups_per_partition * hpa,
self.num_multi_query_groups_per_partition * hpa,
)?;
let query_layer = query_layer.reshape((
query_layer.dim(0)?,
query_layer.dim(1)?,
self.num_attention_heads_per_partition,
hpa,
))?;
let key_layer = key_layer.reshape((
key_layer.dim(0)?,
key_layer.dim(1)?,
self.num_multi_query_groups_per_partition,
hpa,
))?;
let value_layer = value_layer.reshape((
value_layer.dim(0)?,
value_layer.dim(1)?,
self.num_multi_query_groups_per_partition,
hpa,
))?;
// Rotary embeddings.
let seqlen_offset = match &self.kv_cache {
None => 0,
Some((prev_k, _)) => prev_k.dim(0)?,
};
let query_layer = rotary_emb.apply(&query_layer, seqlen_offset)?;
let key_layer = rotary_emb.apply(&key_layer, seqlen_offset)?;
// KV cache.
let (key_layer, value_layer) = match &self.kv_cache {
None => (key_layer, value_layer),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &key_layer], 0)?;
let v = Tensor::cat(&[prev_v, &value_layer], 0)?;
(k, v)
}
};
self.kv_cache = Some((key_layer.clone(), value_layer.clone()));
// Repeat KV.
let ratio =
self.num_attention_heads_per_partition / self.num_multi_query_groups_per_partition;
let key_layer = {
let (d0, d1, d2, d3) = key_layer.dims4()?;
key_layer
.unsqueeze(D::Minus2)?
.expand((d0, d1, d2, ratio, d3))?
.reshape((
d0,
d1,
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
))?
};
let value_layer = {
let (d0, d1, d2, d3) = value_layer.dims4()?;
value_layer
.unsqueeze(D::Minus2)?
.expand((d0, d1, d2, ratio, d3))?
.reshape((
d0,
d1,
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
))?
};
let context_layer =
self.core_attention
.forward(&query_layer, &key_layer, &value_layer, attention_mask)?;
let output = context_layer.apply(&self.dense)?;
Ok(output)
}
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone)]
struct MLP {
dense_h_to_4h: Linear,
dense_4h_to_h: Linear,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense_h_to_4h = linear(
cfg.hidden_size,
cfg.ffn_hidden_size * 2,
cfg.add_bias_linear,
vb.pp("dense_h_to_4h"),
)?;
let dense_4h_to_h = linear(
cfg.ffn_hidden_size,
cfg.hidden_size,
cfg.add_bias_linear,
vb.pp("dense_4h_to_h"),
)?;
Ok(Self {
dense_4h_to_h,
dense_h_to_4h,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
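// `dense_h_to_4h` produces 2 * ffn_hidden_size features; the Swiglu activation splits them
// into a gate half and a value half.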
xs.apply(&self.dense_h_to_4h)?
.apply(&candle_nn::Activation::Swiglu)?
.apply(&self.dense_4h_to_h)
}
}
#[derive(Debug, Clone)]
struct Block {
input_layernorm: candle_nn::LayerNorm,
self_attention: SelfAttention,
post_attention_layernorm: candle_nn::LayerNorm,
mlp: MLP,
apply_residual_connection_post_layernorm: bool,
}
impl Block {
fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let input_layernorm = if cfg.rmsnorm {
candle_nn::rms_norm(
cfg.hidden_size,
cfg.layernorm_epsilon,
vb.pp("input_layernorm"),
)?
.into_inner()
} else {
candle_nn::layer_norm(
cfg.hidden_size,
cfg.layernorm_epsilon,
vb.pp("input_layernorm"),
)?
};
let post_attention_layernorm = if cfg.rmsnorm {
candle_nn::rms_norm(
cfg.hidden_size,
cfg.layernorm_epsilon,
vb.pp("post_attention_layernorm"),
)?
.into_inner()
} else {
candle_nn::layer_norm(
cfg.hidden_size,
cfg.layernorm_epsilon,
vb.pp("post_attention_layernorm"),
)?
};
let self_attention = SelfAttention::new(layer_number, cfg, vb.pp("self_attention"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
Ok(Self {
input_layernorm,
self_attention,
post_attention_layernorm,
mlp,
apply_residual_connection_post_layernorm: cfg.apply_residual_connection_post_layernorm,
})
}
fn reset_kv_cache(&mut self) {
self.self_attention.reset_kv_cache()
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: &Option<Tensor>,
rotary_emb: &RotaryEmbedding,
) -> Result<Tensor> {
let layernorm_output = xs.apply(&self.input_layernorm)?;
let attention_output =
self.self_attention
.forward(&layernorm_output, attention_mask, rotary_emb)?;
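// Depending on the config, the residual branch starts either from the layernorm output
// or from the raw block input.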
let residual = if self.apply_residual_connection_post_layernorm {
&layernorm_output
} else {
xs
};
let layernorm_input = (residual + attention_output)?;
let layernorm_output = layernorm_input.apply(&self.post_attention_layernorm)?;
let mlp_output = layernorm_output.apply(&self.mlp)?;
let residual = if self.apply_residual_connection_post_layernorm {
&layernorm_output
} else {
&layernorm_input
};
mlp_output + residual
}
}
#[derive(Debug, Clone)]
struct Transformer {
layers: Vec<Block>,
final_layernorm: Option<candle_nn::LayerNorm>,
rotary_emb: RotaryEmbedding,
}
impl Transformer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_l = vb.pp("layers");
let mut layers = Vec::with_capacity(cfg.num_layers);
for layer_index in 0..cfg.num_layers {
let block = Block::new(layer_index + 1, cfg, vb_l.pp(layer_index))?;
layers.push(block)
}
let final_layernorm = if cfg.post_layer_norm {
let ln = if cfg.rmsnorm {
candle_nn::rms_norm(
cfg.hidden_size,
cfg.layernorm_epsilon,
vb.pp("final_layernorm"),
)?
.into_inner()
} else {
candle_nn::layer_norm(
cfg.hidden_size,
cfg.layernorm_epsilon,
vb.pp("final_layernorm"),
)?
};
Some(ln)
} else {
None
};
let rotary_emb = RotaryEmbedding::new(cfg, vb.dtype(), vb.device())?;
Ok(Self {
layers,
final_layernorm,
rotary_emb,
})
}
fn reset_kv_cache(&mut self) {
for block in self.layers.iter_mut() {
block.reset_kv_cache()
}
}
fn forward(&mut self, xs: &Tensor, attention_mask: &Option<Tensor>) -> Result<Tensor> {
let mut xs = xs.clone();
for block in self.layers.iter_mut() {
xs = block.forward(&xs, attention_mask, &self.rotary_emb)?
}
match self.final_layernorm.as_ref() {
None => Ok(xs),
Some(ln) => xs.apply(ln),
}
}
}
#[derive(Debug, Clone)]
struct Embedding {
word_embeddings: candle_nn::Embedding,
fp32_residual_connection: bool,
}
impl Embedding {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let word_embeddings = candle_nn::embedding(
cfg.padded_vocab_size,
cfg.hidden_size,
vb.pp("word_embeddings"),
)?;
Ok(Self {
word_embeddings,
fp32_residual_connection: cfg.fp32_residual_connection,
})
}
}
impl Module for Embedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.word_embeddings.forward(xs)?.transpose(0, 1)?; // b,s,h -> s,b,h
if self.fp32_residual_connection {
xs.to_dtype(candle::DType::F32)
} else {
xs.contiguous()
}
}
}
#[derive(Debug, Clone)]
pub struct Model {
embedding: Embedding,
encoder: Transformer,
output_layer: Linear,
}
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device)
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("transformer");
let embedding = Embedding::new(cfg, vb.pp("embedding"))?;
let encoder = Transformer::new(cfg, vb.pp("encoder"))?;
let output_layer = linear(
cfg.hidden_size,
cfg.padded_vocab_size,
false,
vb.pp("output_layer"),
)?;
Ok(Self {
embedding,
encoder,
output_layer,
})
}
pub fn reset_kv_cache(&mut self) {
self.encoder.reset_kv_cache()
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
let (_b_size, seq_len) = xs.dims2()?;
let input_embeds = xs.apply(&self.embedding)?;
let attention_mask = if seq_len <= 1 {
None
} else {
Some(get_mask(seq_len, xs.device())?)
};
let xs = self.encoder.forward(&input_embeds, &attention_mask)?;
let lm_logits = xs.i(seq_len - 1)?.apply(&self.output_layer)?;
Ok(lm_logits)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/efficientnet.rs | candle-transformers/src/models/efficientnet.rs | //! EfficientNet implementation for image classification.
//!
//! EfficientNet is a family of convolutional networks obtained by compound scaling of
//! network depth, width and input resolution.
//!
//! See:
//! - ["EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks"](https://arxiv.org/abs/1905.11946)
//!
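//! A minimal usage sketch (assumes a populated `VarBuilder` and a preprocessed image tensor
//! of shape `(batch, 3, height, width)`; weight loading and the `run` wrapper are illustrative):
//!
//! ```ignore
//! use candle::Module;
//! use candle_transformers::models::efficientnet::{EfficientNet, MBConvConfig};
//! # fn run(vb: candle_nn::VarBuilder, image: &candle::Tensor) -> candle::Result<()> {
//! let model = EfficientNet::new(vb, MBConvConfig::b0(), 1000)?;
//! let logits = model.forward(image)?; // (batch, 1000) class logits
//! # Ok(())
//! # }
//! ```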
use candle::{Context, Result, Tensor, D};
use candle_nn as nn;
use nn::{Module, VarBuilder};
// Based on the Python version from torchvision.
// https://github.com/pytorch/vision/blob/0d75d9e5516f446c9c0ef93bd4ed9fea13992d06/torchvision/models/efficientnet.py#L47
#[derive(Debug, Clone, Copy)]
pub struct MBConvConfig {
expand_ratio: f64,
kernel: usize,
stride: usize,
input_channels: usize,
out_channels: usize,
num_layers: usize,
}
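// Round a channel count to the nearest multiple of `divisor`, never dropping below 90% of the
// original value (the standard EfficientNet/MobileNet channel-rounding rule).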
fn make_divisible(v: f64, divisor: usize) -> usize {
let min_value = divisor;
let new_v = usize::max(
min_value,
(v + divisor as f64 * 0.5) as usize / divisor * divisor,
);
if (new_v as f64) < 0.9 * v {
new_v + divisor
} else {
new_v
}
}
fn bneck_confs(width_mult: f64, depth_mult: f64) -> Vec<MBConvConfig> {
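// Each tuple is (expand_ratio, kernel, stride, input_channels, out_channels, num_layers);
// channel counts and layer counts are scaled by the width/depth multipliers.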
let bneck_conf = |e, k, s, i, o, n| {
let input_channels = make_divisible(i as f64 * width_mult, 8);
let out_channels = make_divisible(o as f64 * width_mult, 8);
let num_layers = (n as f64 * depth_mult).ceil() as usize;
MBConvConfig {
expand_ratio: e,
kernel: k,
stride: s,
input_channels,
out_channels,
num_layers,
}
};
vec![
bneck_conf(1., 3, 1, 32, 16, 1),
bneck_conf(6., 3, 2, 16, 24, 2),
bneck_conf(6., 5, 2, 24, 40, 2),
bneck_conf(6., 3, 2, 40, 80, 3),
bneck_conf(6., 5, 1, 80, 112, 3),
bneck_conf(6., 5, 2, 112, 192, 4),
bneck_conf(6., 3, 1, 192, 320, 1),
]
}
impl MBConvConfig {
pub fn b0() -> Vec<Self> {
bneck_confs(1.0, 1.0)
}
pub fn b1() -> Vec<Self> {
bneck_confs(1.0, 1.1)
}
pub fn b2() -> Vec<Self> {
bneck_confs(1.1, 1.2)
}
pub fn b3() -> Vec<Self> {
bneck_confs(1.2, 1.4)
}
pub fn b4() -> Vec<Self> {
bneck_confs(1.4, 1.8)
}
pub fn b5() -> Vec<Self> {
bneck_confs(1.6, 2.2)
}
pub fn b6() -> Vec<Self> {
bneck_confs(1.8, 2.6)
}
pub fn b7() -> Vec<Self> {
bneck_confs(2.0, 3.1)
}
}
/// Conv2D with same padding.
#[derive(Debug)]
struct Conv2DSame {
conv2d: nn::Conv2d,
s: usize,
k: usize,
}
impl Conv2DSame {
fn new(
vb: VarBuilder,
i: usize,
o: usize,
k: usize,
stride: usize,
groups: usize,
bias: bool,
) -> Result<Self> {
let conv_config = nn::Conv2dConfig {
stride,
groups,
..Default::default()
};
let conv2d = if bias {
nn::conv2d(i, o, k, conv_config, vb)?
} else {
nn::conv2d_no_bias(i, o, k, conv_config, vb)?
};
Ok(Self {
conv2d,
s: stride,
k,
})
}
}
impl Module for Conv2DSame {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let s = self.s;
let k = self.k;
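// Compute asymmetric zero-padding so the output size is ceil(input / stride),
// matching TensorFlow-style "SAME" padding.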
let (_, _, ih, iw) = xs.dims4()?;
let oh = ih.div_ceil(s);
let ow = iw.div_ceil(s);
let pad_h = usize::max((oh - 1) * s + k - ih, 0);
let pad_w = usize::max((ow - 1) * s + k - iw, 0);
if pad_h > 0 || pad_w > 0 {
let xs = xs.pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)?;
let xs = xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)?;
self.conv2d.forward(&xs)
} else {
self.conv2d.forward(xs)
}
}
}
#[derive(Debug)]
struct ConvNormActivation {
conv2d: Conv2DSame,
bn2d: nn::BatchNorm,
activation: bool,
}
impl ConvNormActivation {
fn new(
vb: VarBuilder,
i: usize,
o: usize,
k: usize,
stride: usize,
groups: usize,
) -> Result<Self> {
let conv2d = Conv2DSame::new(vb.pp("0"), i, o, k, stride, groups, false)?;
let bn2d = nn::batch_norm(o, 1e-3, vb.pp("1"))?;
Ok(Self {
conv2d,
bn2d,
activation: true,
})
}
fn no_activation(self) -> Self {
Self {
activation: false,
..self
}
}
}
impl Module for ConvNormActivation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.conv2d.forward(xs)?.apply_t(&self.bn2d, false)?;
if self.activation {
swish(&xs)
} else {
Ok(xs)
}
}
}
#[derive(Debug)]
struct SqueezeExcitation {
fc1: Conv2DSame,
fc2: Conv2DSame,
}
impl SqueezeExcitation {
fn new(vb: VarBuilder, in_channels: usize, squeeze_channels: usize) -> Result<Self> {
let fc1 = Conv2DSame::new(vb.pp("fc1"), in_channels, squeeze_channels, 1, 1, 1, true)?;
let fc2 = Conv2DSame::new(vb.pp("fc2"), squeeze_channels, in_channels, 1, 1, 1, true)?;
Ok(Self { fc1, fc2 })
}
}
impl Module for SqueezeExcitation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let residual = xs;
// equivalent to adaptive_avg_pool2d([1, 1])
let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?;
let xs = self.fc1.forward(&xs)?;
let xs = swish(&xs)?;
let xs = self.fc2.forward(&xs)?;
let xs = nn::ops::sigmoid(&xs)?;
residual.broadcast_mul(&xs)
}
}
#[derive(Debug)]
struct MBConv {
expand_cna: Option<ConvNormActivation>,
depthwise_cna: ConvNormActivation,
squeeze_excitation: SqueezeExcitation,
project_cna: ConvNormActivation,
config: MBConvConfig,
}
impl MBConv {
fn new(vb: VarBuilder, c: MBConvConfig) -> Result<Self> {
let vb = vb.pp("block");
let exp = make_divisible(c.input_channels as f64 * c.expand_ratio, 8);
let expand_cna = if exp != c.input_channels {
Some(ConvNormActivation::new(
vb.pp("0"),
c.input_channels,
exp,
1,
1,
1,
)?)
} else {
None
};
let start_index = if expand_cna.is_some() { 1 } else { 0 };
let depthwise_cna =
ConvNormActivation::new(vb.pp(start_index), exp, exp, c.kernel, c.stride, exp)?;
let squeeze_channels = usize::max(1, c.input_channels / 4);
let squeeze_excitation =
SqueezeExcitation::new(vb.pp(start_index + 1), exp, squeeze_channels)?;
let project_cna =
ConvNormActivation::new(vb.pp(start_index + 2), exp, c.out_channels, 1, 1, 1)?
.no_activation();
Ok(Self {
expand_cna,
depthwise_cna,
squeeze_excitation,
project_cna,
config: c,
})
}
}
impl Module for MBConv {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let use_res_connect =
self.config.stride == 1 && self.config.input_channels == self.config.out_channels;
let ys = match &self.expand_cna {
Some(expand_cna) => expand_cna.forward(xs)?,
None => xs.clone(),
};
let ys = self.depthwise_cna.forward(&ys)?;
let ys = self.squeeze_excitation.forward(&ys)?;
let ys = self.project_cna.forward(&ys)?;
if use_res_connect {
ys + xs
} else {
Ok(ys)
}
}
}
fn swish(s: &Tensor) -> Result<Tensor> {
s * nn::ops::sigmoid(s)?
}
#[derive(Debug)]
pub struct EfficientNet {
init_cna: ConvNormActivation,
blocks: Vec<MBConv>,
final_cna: ConvNormActivation,
classifier: nn::Linear,
}
impl EfficientNet {
pub fn new(p: VarBuilder, configs: Vec<MBConvConfig>, nclasses: usize) -> Result<Self> {
let f_p = p.pp("features");
let first_in_c = configs[0].input_channels;
let last_out_c = configs.last().context("no last")?.out_channels;
let final_out_c = 4 * last_out_c;
let init_cna = ConvNormActivation::new(f_p.pp(0), 3, first_in_c, 3, 2, 1)?;
let nconfigs = configs.len();
let mut blocks = vec![];
for (index, cnf) in configs.into_iter().enumerate() {
let f_p = f_p.pp(index + 1);
for r_index in 0..cnf.num_layers {
let cnf = if r_index == 0 {
cnf
} else {
MBConvConfig {
input_channels: cnf.out_channels,
stride: 1,
..cnf
}
};
blocks.push(MBConv::new(f_p.pp(r_index), cnf)?)
}
}
let final_cna =
ConvNormActivation::new(f_p.pp(nconfigs + 1), last_out_c, final_out_c, 1, 1, 1)?;
let classifier = nn::linear(final_out_c, nclasses, p.pp("classifier.1"))?;
Ok(Self {
init_cna,
blocks,
final_cna,
classifier,
})
}
}
impl Module for EfficientNet {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = self.init_cna.forward(xs)?;
for block in self.blocks.iter() {
xs = block.forward(&xs)?
}
let xs = self.final_cna.forward(&xs)?;
// Equivalent to adaptive_avg_pool2d([1, 1]) -> squeeze(-1) -> squeeze(-1)
let xs = xs.mean(D::Minus1)?.mean(D::Minus1)?;
self.classifier.forward(&xs)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/qwen2.rs | candle-transformers/src/models/qwen2.rs | //! Qwen2 model implementation.
//!
//! Qwen2 is a family of decoder-only language models from Alibaba's Qwen team.
//! This module implements the full-precision (non-quantized) variant.
//!
//! Key characteristics:
//! - Incremental decoding with a per-layer KV cache
//! - Grouped query attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Sliding-window causal attention mask
//! - Optional weight tying between the token embedding and the LM head
//!
//! References:
//! - 🤗 [Qwen2 Model](https://huggingface.co/Qwen/Qwen2-7B)
//!
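//! A minimal usage sketch (assumes a populated `VarBuilder` and pre-tokenized input;
//! weight loading, sampling and the `run` wrapper are illustrative, not part of this module):
//!
//! ```ignore
//! use candle_transformers::models::qwen2::{Config, ModelForCausalLM};
//! # fn run(cfg: &Config, vb: candle_nn::VarBuilder, tokens: &candle::Tensor) -> candle::Result<()> {
//! let mut model = ModelForCausalLM::new(cfg, vb)?;
//! // `tokens` has shape (batch, seq_len); logits cover the last position only.
//! let logits = model.forward(tokens, 0)?;
//! # Ok(())
//! # }
//! ```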
use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub max_position_embeddings: usize,
pub sliding_window: usize,
pub max_window_layers: usize,
pub tie_word_embeddings: bool,
pub rope_theta: f64,
pub rms_norm_eps: f64,
pub use_sliding_window: bool,
pub hidden_act: Activation,
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let q_proj = linear(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
sliding_window: usize,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
Ok(Self {
embed_tokens,
layers,
norm,
sliding_window: cfg.sliding_window,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_causal_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
// Combined causal + sliding-window mask: position i attends to j only when j <= i and i - j <= sliding_window.
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + self.sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
fn prepare_attention_mask(&self, attn_mask: &Tensor) -> Result<Tensor> {
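// Turn a (batch, seq_len) 0/1 padding mask into an additive mask broadcast to
// (batch, 1, seq_len, seq_len): kept positions map to 0, masked positions to -inf.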
let (b_sz, sql_len) = attn_mask.dims2()?;
let mut mask: Vec<Tensor> = vec![];
for b in 0..b_sz {
mask.push(attn_mask.i((b, ..))?.expand((1, 1, sql_len, sql_len))?);
}
let mask = Tensor::cat(&mask, 0)?;
let on_true = mask.zeros_like()?.to_dtype(self.dtype)?;
let on_false = Tensor::new(f32::NEG_INFINITY, &self.device)?
.broadcast_as(mask.shape())?
.to_dtype(self.dtype)?;
mask.where_cond(&on_true, &on_false)
}
pub fn forward(
&mut self,
input_ids: &Tensor,
seqlen_offset: usize,
attn_mask: Option<&Tensor>,
) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask: Option<Tensor> = match attn_mask {
Some(mask) => Some(self.prepare_attention_mask(mask)?),
None => {
if seq_len <= 1 {
None
} else {
Some(self.prepare_causal_attention_mask(b_size, seq_len, seqlen_offset)?)
}
}
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.apply(&self.norm)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
#[derive(Debug, Clone)]
pub struct ModelForCausalLM {
base_model: Model,
lm_head: Linear,
}
impl ModelForCausalLM {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let base_model = Model::new(cfg, vb.clone())?;
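// Reuse the embedding matrix as the output projection when the checkpoint has no separate `lm_head.weight`.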
let lm_head = if vb.contains_tensor("lm_head.weight") {
linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
} else {
Linear::from_weights(base_model.embed_tokens.embeddings().clone(), None)
};
Ok(Self {
base_model,
lm_head,
})
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (_b_size, seq_len) = input_ids.dims2()?;
self.base_model
.forward(input_ids, seqlen_offset, None)?
.narrow(1, seq_len - 1, 1)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
self.base_model.clear_kv_cache()
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_metavoice.rs | candle-transformers/src/models/quantized_metavoice.rs | //! Quantized MetaVoice model implementation.
//!
//! MetaVoice is a conditional text-to-speech model based on a transformer architecture.
//! This implementation provides quantization for reduced memory and compute.
//!
//! Key characteristics:
//! - Transformer-based autoregressive decoder
//! - Speaker conditioning
//! - Support for 8-bit quantization
//! - Key-value caching for efficient inference
//! - RMS normalization layers
//!
//! References:
//! - [MetaVoice Code](https://github.com/metavoiceio/metavoice)
//!
use crate::quantized_nn::{linear_b, Embedding, Linear, RmsNorm};
pub use crate::quantized_var_builder::VarBuilder;
use crate::models::metavoice::repeat_interleave;
use candle::{Module, Result, Tensor, D};
pub mod transformer {
use super::*;
type Config = crate::models::metavoice::transformer::Config;
#[derive(Debug, Clone)]
struct FeedForward {
w1: Linear,
w2: Linear,
w3: Linear,
span: tracing::Span,
}
impl FeedForward {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let i_size = cfg.intermediate_size();
let w1 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w1"))?;
let w2 = linear_b(i_size, cfg.dim, false, vb.pp("w2"))?;
let w3 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w3"))?;
Ok(Self {
w1,
w2,
w3,
span: tracing::span!(tracing::Level::TRACE, "feed-forward"),
})
}
}
impl Module for FeedForward {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let swiglu = (candle_nn::ops::silu(&xs.apply(&self.w1)?)? * xs.apply(&self.w3))?;
swiglu.apply(&self.w2)
}
}
#[derive(Debug, Clone)]
struct Attention {
wqkv: Linear,
wo: Linear,
dim: usize,
kv_size: usize,
n_local_heads: usize,
head_dim: usize,
n_head: usize,
kv_cache: Option<(Tensor, Tensor)>,
span: tracing::Span,
}
impl Attention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let n_local_heads = cfg.n_local_heads();
let head_dim = cfg.head_dim();
let total_head_dim = (cfg.n_head + 2 * n_local_heads) * head_dim;
let wqkv = linear_b(cfg.dim, total_head_dim, false, vb.pp("wqkv"))?;
let wo = linear_b(cfg.dim, cfg.dim, false, vb.pp("wo"))?;
Ok(Self {
wqkv,
wo,
dim: cfg.dim,
kv_size: n_local_heads * head_dim,
n_local_heads,
head_dim,
n_head: cfg.n_head,
kv_cache: None,
span: tracing::span!(tracing::Level::TRACE, "attention"),
})
}
fn forward(&mut self, xs: &Tensor, _pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_sz, seqlen, _) = xs.dims3()?;
let qkv = xs.apply(&self.wqkv)?;
let q = qkv.narrow(D::Minus1, 0, self.dim)?;
let k = qkv.narrow(D::Minus1, self.dim, self.kv_size)?;
let v = qkv.narrow(D::Minus1, self.dim + self.kv_size, self.kv_size)?;
let q = q
.reshape((b_sz, seqlen, self.n_head, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 2)?;
let v = Tensor::cat(&[prev_v, &v], 2)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
let k = repeat_interleave(&k, self.n_head / self.n_local_heads, 1)?;
let v = repeat_interleave(&v, self.n_head / self.n_local_heads, 1)?;
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
let attn_weights = attn_weights.broadcast_add(mask)?;
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&v)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, seqlen, self.dim))?
.apply(&self.wo)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct Block {
attention: Attention,
feed_forward: FeedForward,
ffn_norm: RmsNorm,
attention_norm: RmsNorm,
span: tracing::Span,
}
impl Block {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = Attention::new(cfg, vb.pp("attention"))?;
let feed_forward = FeedForward::new(cfg, vb.pp("feed_forward"))?;
let ffn_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("ffn_norm"))?;
let attention_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("attention_norm"))?;
Ok(Self {
attention,
feed_forward,
ffn_norm,
attention_norm,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hs = xs.apply(&self.attention_norm)?;
let hs = (xs + self.attention.forward(&hs, pos, mask))?;
&hs + hs.apply(&self.ffn_norm)?.apply(&self.feed_forward)
}
fn clear_kv_cache(&mut self) {
self.attention.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
tok_embeddings: Embedding,
pos_embeddings: Embedding,
speaker_cond_pos: Linear,
layers: Vec<Block>,
norm: RmsNorm,
output: Linear,
spk_cond_mask: Tensor,
span: tracing::Span,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let tok_embeddings = Embedding::new(cfg.vocab_size, cfg.dim, vb.pp("tok_embeddings"))?;
let pos_embeddings = Embedding::new(cfg.block_size, cfg.dim, vb.pp("pos_embeddings"))?;
let speaker_cond_pos = linear_b(
cfg.speaker_emb_dim,
cfg.dim,
false,
vb.pp("speaker_cond_pos"),
)?;
let mut layers = Vec::with_capacity(cfg.n_layer);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.n_layer {
let layer = Block::new(cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("norm"))?;
let output = linear_b(cfg.dim, cfg.vocab_size, false, vb.pp("output"))?;
let spk_cond_mask = Tensor::cat(
&[
Tensor::ones((1, 1, cfg.dim), candle::DType::F32, vb.device())?,
Tensor::zeros((1, 1, cfg.dim), candle::DType::F32, vb.device())?,
],
0,
)?;
Ok(Self {
tok_embeddings,
pos_embeddings,
speaker_cond_pos,
layers,
norm,
output,
spk_cond_mask,
span: tracing::span!(tracing::Level::TRACE, "qtransformer"),
})
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
pub fn forward(&mut self, xs: &Tensor, spk_emb: &Tensor, pos: usize) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_sz, seqlen) = xs.dims2()?;
let mask: Vec<_> = (0..seqlen)
.flat_map(|i| (0..seqlen).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (1, 1, seqlen, seqlen), xs.device())?;
let input_pos = Tensor::arange(pos as u32, (pos + seqlen) as u32, xs.device())?;
let tok_embeddings = xs.apply(&self.tok_embeddings)?;
let pos_embeddings = input_pos.apply(&self.pos_embeddings)?;
let mut xs = tok_embeddings
.broadcast_add(&pos_embeddings)?
.broadcast_add(
&spk_emb
.apply(&self.speaker_cond_pos)?
.broadcast_mul(&self.spk_cond_mask)?,
)?;
let mask = mask.to_dtype(xs.dtype())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, pos, &mask)?
}
xs.narrow(1, seqlen - 1, 1)?
.contiguous()?
.apply(&self.norm)?
.apply(&self.output)
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_moondream.rs | candle-transformers/src/models/quantized_moondream.rs | //! Implementation of a quantized Moondream vision language model.
//!
//! Moondream is a lightweight vision-language model for image understanding and generation.
//! This module provides a quantized version for reduced memory usage and faster inference.
//!
//! Key features:
//! - ViT-based vision encoder
//! - Phi-2 text decoder model
//! - Memory efficient 8-bit quantization
//! - Optimized for efficient deployment
//!
//! References:
//! - [Moondream Model](https://github.com/vikhyat/moondream)
//!
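//! # Example (sketch)
//!
//! A rough construction sketch; the GGUF path is a placeholder and tokenization, image
//! preprocessing, and the decoding loop are left out.
//!
//! ```ignore
//! use candle::Device;
//! use candle_transformers::models::moondream::Config;
//! use candle_transformers::models::quantized_moondream::Model;
//! use candle_transformers::quantized_var_builder::VarBuilder;
//!
//! fn load_model() -> candle::Result<Model> {
//!     let device = Device::Cpu;
//!     // Quantized weights in GGUF format (placeholder file name).
//!     let vb = VarBuilder::from_gguf("moondream2-q4_0.gguf", &device)?;
//!     let config = Config::v2();
//!     // `model.vision_encoder()` embeds a (b, 3, 378, 378) image tensor and
//!     // `model.text_model()` generates text conditioned on that embedding.
//!     Model::new(&config, vb)
//! }
//! ```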
use crate::models::moondream::{Config, VisionConfig};
use crate::models::quantized_mixformer::MixFormerSequentialForCausalLM as PhiModel;
use crate::quantized_nn::{layer_norm, linear_b, Linear};
use crate::quantized_var_builder::VarBuilder;
use candle::{IndexOp, Module, Result, Tensor, D};
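// Plain softmax(q k^T / sqrt(d)) v attention without masking, used by the vision encoder below.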
fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let dim = q.dim(D::Minus1)?;
let scale_factor = 1.0 / (dim as f64).sqrt();
let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v)
}
#[derive(Debug, Clone)]
struct LinearPatchEmbedding {
linear: Linear,
}
impl LinearPatchEmbedding {
fn new(vb: VarBuilder) -> Result<Self> {
let linear = linear_b(588, 1152, true, vb.pp("linear"))?;
Ok(Self { linear })
}
}
impl Module for LinearPatchEmbedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.linear)
}
}
#[derive(Debug, Clone)]
struct Attention {
num_heads: usize,
head_dim: usize,
qkv: Linear,
proj: Linear,
}
impl Attention {
pub fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
let qkv = linear_b(dim, dim * 3, true, vb.pp("qkv"))?;
let proj = linear_b(dim, dim, true, vb.pp("proj"))?;
Ok(Self {
num_heads,
head_dim: dim / num_heads,
qkv,
proj,
})
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, n, c) = xs.dims3()?;
let qkv = xs
.apply(&self.qkv)?
.reshape((b, n, 3, self.num_heads, self.head_dim))?
.permute((2, 0, 3, 1, 4))?;
let (q, k, v) = (
qkv.i(0)?.contiguous()?,
qkv.i(1)?.contiguous()?,
qkv.i(2)?.contiguous()?,
);
scaled_dot_product_attention(&q, &k, &v)?
.transpose(1, 2)?
.reshape((b, n, c))?
.apply(&self.proj)
}
}
#[derive(Debug, Clone)]
struct VitBlock {
attn: Attention,
mlp: Mlp,
norm1: candle_nn::LayerNorm,
norm2: candle_nn::LayerNorm,
}
impl VitBlock {
fn new(vb: VarBuilder, dim: usize, num_heads: usize, cfg: &VisionConfig) -> Result<Self> {
let attn = Attention::new(vb.pp("attn"), dim, num_heads)?;
let mlp = Mlp::new(vb.pp("mlp"), dim, cfg.hidden_features, dim, cfg.act)?;
let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?;
let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?;
Ok(Self {
attn,
mlp,
norm1,
norm2,
})
}
}
impl Module for VitBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let ys = xs.apply(&self.norm1)?.apply(&self.attn)?;
let xs = (xs + &ys)?;
let ys = xs.apply(&self.norm2)?.apply(&self.mlp)?;
let xs = (&xs + &ys)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct VisionTransformer {
patch_embed: LinearPatchEmbedding,
pos_embed: Tensor,
blocks: Vec<VitBlock>,
norm: candle_nn::LayerNorm,
}
impl VisionTransformer {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let patch_embed = LinearPatchEmbedding::new(vb.pp("patch_embed"))?;
let pos_embed = vb
.get((1, cfg.embed_len, cfg.embed_dim), "pos_embed")?
.dequantize(vb.device())?;
let blocks = (0..cfg.num_blocks)
.map(|i| {
VitBlock::new(
vb.pp(format!("blocks.{i}")),
cfg.embed_dim,
cfg.num_heads,
cfg,
)
})
.collect::<Result<_>>()?;
let norm = layer_norm(cfg.embed_dim, 1e-5, vb.pp("norm"))?;
Ok(Self {
patch_embed,
pos_embed,
blocks,
norm,
})
}
}
impl Module for VisionTransformer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = (&xs.apply(&self.patch_embed)? + &self.pos_embed)?;
for block in self.blocks.iter() {
xs = xs.apply(block)?;
}
xs.apply(&self.norm)
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
model: VisionTransformer,
}
impl Encoder {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let model = VisionTransformer::new(cfg, vb.pp("model.visual"))?;
Ok(Self { model })
}
}
impl Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.model)
}
}
#[derive(Debug, Clone)]
struct Mlp {
fc1: Linear,
act: candle_nn::Activation,
fc2: Linear,
}
impl Mlp {
fn new(
vb: VarBuilder,
in_features: usize,
hidden_features: usize,
out_features: usize,
act: candle_nn::Activation,
) -> Result<Self> {
let fc1 = linear_b(in_features, hidden_features, true, vb.pp("fc1"))?;
let fc2 = linear_b(hidden_features, out_features, true, vb.pp("fc2"))?;
Ok(Self { fc1, act, fc2 })
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
}
}
#[derive(Debug, Clone)]
struct VisionProjection {
mlp: Mlp,
}
impl VisionProjection {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let mlp = Mlp::new(
vb.pp("mlp"),
cfg.image_embedding_dim,
cfg.hidden_dim,
cfg.model_dim,
cfg.act,
)?;
Ok(Self { mlp })
}
}
impl Module for VisionProjection {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.mlp)
}
}
#[derive(Debug, Clone)]
pub struct VisionEncoder {
encoder: Encoder,
projection: VisionProjection,
}
impl VisionEncoder {
pub fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let projection = VisionProjection::new(cfg, vb.pp("projection"))?;
Ok(Self {
encoder,
projection,
})
}
}
impl Module for VisionEncoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, c, hp1, wp2) = xs.dims4()?;
let (p1, p2) = (14, 14);
let h = hp1 / p1;
let w = wp2 / p2;
        xs.reshape((b, c, h, p1, w, p2))?
.permute((0, 2, 4, 1, 3, 5))?
.reshape((b, h * w, c * p1 * p2))?
.apply(&self.encoder)?
.apply(&self.projection)
}
}
pub struct Model {
pub text_model: PhiModel,
pub vision_encoder: VisionEncoder,
}
impl Model {
pub fn new(config: &Config, vb: VarBuilder) -> Result<Self> {
let text_model = PhiModel::new_v2(&config.phi_config, vb.pp("text_model"))?;
let vision_encoder = VisionEncoder::new(&config.vision_config, vb.pp("vision_encoder"))?;
Ok(Self {
text_model,
vision_encoder,
})
}
pub fn vision_encoder(&self) -> &VisionEncoder {
&self.vision_encoder
}
pub fn text_model(&mut self) -> &mut PhiModel {
&mut self.text_model
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/llama2_c.rs | candle-transformers/src/models/llama2_c.rs | //! Llama2 inference implementation.
//!
//! See ["LLaMA 2: Open Foundation and Fine-Tuned Chat Models"](https://arxiv.org/abs/2307.09288)
//!
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/lmz/candle-llama2)
//! - 💻 llama2.c [GH Link](https://github.com/karpathy/llama2.c)
//!
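//! # Example (sketch)
//!
//! A minimal construction with zero-initialized weights, just to show the API; real usage
//! loads a checkpoint exported by llama2.c (see the candle llama2-c example for details).
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::llama2_c::{Cache, Config, Llama};
//!
//! fn run() -> candle::Result<Tensor> {
//!     let device = Device::Cpu;
//!     let cfg = Config::tiny_15m();
//!     // Zero-initialized weights are enough to exercise the forward pass.
//!     let vb = VarBuilder::zeros(DType::F32, &device);
//!     // The prefix only matters when the checkpoint provides precomputed freq_cis tensors.
//!     let mut cache = Cache::new(true, &cfg, vb.pp("rot"))?;
//!     let model = Llama::load(vb, cfg)?;
//!     let tokens = Tensor::zeros((1, 4), DType::U32, &device)?;
//!     // Logits for every position: (1, 4, vocab_size).
//!     model.forward(&tokens, 0, &mut cache)
//! }
//! ```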
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::linear_no_bias as linear;
use candle_nn::{embedding, rms_norm, Embedding, Linear, Module, RmsNorm, VarBuilder};
use std::collections::HashMap;
#[derive(Debug, Clone)]
pub struct Config {
pub dim: usize, // transformer dimension
pub hidden_dim: usize, // for ffn layers
pub n_layers: usize, // number of layers
pub n_heads: usize, // number of query heads
pub n_kv_heads: usize, // number of key/value heads (can be < query heads because of multiquery)
pub vocab_size: usize, // vocabulary size, usually 256 (byte-level)
pub seq_len: usize, // max sequence length
pub norm_eps: f64,
}
impl Config {
pub fn tiny_260k() -> Self {
Self {
dim: 64,
hidden_dim: 768,
n_layers: 5,
n_heads: 8,
n_kv_heads: 4,
vocab_size: 32000,
seq_len: 512,
norm_eps: 1e-5,
}
}
pub fn tiny_15m() -> Self {
Self {
dim: 288,
hidden_dim: 768,
n_layers: 6,
n_heads: 6,
n_kv_heads: 6,
vocab_size: 32000,
seq_len: 256,
norm_eps: 1e-5,
}
}
pub fn tiny_42m() -> Self {
Self {
dim: 512,
hidden_dim: 768,
n_layers: 8,
n_heads: 8,
n_kv_heads: 8,
vocab_size: 32000,
seq_len: 1024,
norm_eps: 1e-5,
}
}
pub fn tiny_110m() -> Self {
Self {
dim: 768,
hidden_dim: 768,
n_layers: 12,
n_heads: 12,
n_kv_heads: 12,
vocab_size: 32000,
seq_len: 1024,
norm_eps: 1e-5,
}
}
}
#[derive(Debug, Clone)]
pub struct Cache {
masks: HashMap<usize, Tensor>,
pub use_kv_cache: bool,
pub kvs: Vec<Option<(Tensor, Tensor)>>,
pub cos: Tensor,
pub sin: Tensor,
device: Device,
}
impl Cache {
pub fn new(use_kv_cache: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let n_elem = cfg.dim / cfg.n_heads;
let theta: Vec<_> = (0..n_elem)
.step_by(2)
.map(|i| 1f32 / 10000f32.powf(i as f32 / n_elem as f32))
.collect();
let theta = Tensor::new(theta.as_slice(), vb.device())?;
let idx_theta = Tensor::arange(0, cfg.seq_len as u32, vb.device())?
.to_dtype(DType::F32)?
.reshape((cfg.seq_len, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
let precomputed_cos = idx_theta.cos()?;
let precomputed_sin = idx_theta.sin()?;
let freq_cis_real = vb
.get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_real")
.unwrap_or(precomputed_cos);
let freq_cis_imag = vb
.get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_imag")
.unwrap_or(precomputed_sin);
let cos = freq_cis_real.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?;
let sin = freq_cis_imag.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?;
Ok(Self {
masks: HashMap::new(),
use_kv_cache,
kvs: vec![None; cfg.n_layers],
cos,
sin,
device: vb.device().clone(),
})
}
pub fn mask(&mut self, t: usize) -> Result<Tensor> {
if let Some(mask) = self.masks.get(&t) {
Ok(mask.clone())
} else {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), &self.device)?;
self.masks.insert(t, mask.clone());
Ok(mask)
}
}
}
fn silu(xs: &Tensor) -> Result<Tensor> {
xs / (xs.neg()?.exp()? + 1.0)?
}
#[derive(Debug, Clone)]
struct CausalSelfAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
n_head: usize,
n_key_value_head: usize,
head_dim: usize,
}
impl CausalSelfAttention {
fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> {
let (b_sz, seq_len, h, n_embd) = x.dims4()?;
let cos = cache.cos.i(index_pos..index_pos + seq_len)?;
let sin = cache.sin.i(index_pos..index_pos + seq_len)?;
let cos = cos.unsqueeze(1)?;
let sin = sin.unsqueeze(1)?;
let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?;
let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?;
let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?;
let x0 = x.narrow(D::Minus1, 0, 1)?;
let x1 = x.narrow(D::Minus1, 1, 1)?;
let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?;
let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?;
let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?;
Ok(rope)
}
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut Cache,
) -> Result<Tensor> {
let (b_sz, seq_len, n_embd) = x.dims3()?;
let q = self.q_proj.forward(x)?;
let k = self.k_proj.forward(x)?;
let v = self.v_proj.forward(x)?;
let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?;
let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?;
let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?;
let q = self.apply_rotary_emb(&q, index_pos, cache)?;
let mut k = self.apply_rotary_emb(&k, index_pos, cache)?;
if cache.use_kv_cache {
if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] {
k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?;
v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?;
}
cache.kvs[block_idx] = Some((k.clone(), v.clone()))
}
let k = self.repeat_kv(k)?;
let v = self.repeat_kv(v)?;
let q = q.transpose(1, 2)?.contiguous()?;
let k = k.transpose(1, 2)?.contiguous()?;
let v = v.transpose(1, 2)?.contiguous()?;
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = if seq_len <= 1 {
att
} else {
let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?;
masked_fill(&att, &mask, f32::NEG_INFINITY)?
};
let att = candle_nn::ops::softmax(&att, D::Minus1)?;
// Convert to contiguous as matmul doesn't support strided vs for now.
let y = att.matmul(&v.contiguous()?)?;
let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
let y = self.o_proj.forward(&y)?;
Ok(y)
}
fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
let n_rep = self.n_head / self.n_key_value_head;
if n_rep == 1 {
Ok(x)
} else {
let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?;
let x = x
.unsqueeze(3)?
.expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))?
.reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?;
Ok(x)
}
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let size_in = cfg.dim;
let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads;
let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads;
let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?;
let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?;
let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?;
let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
n_head: cfg.n_heads,
n_key_value_head: cfg.n_kv_heads,
head_dim: cfg.dim / cfg.n_heads,
})
}
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Clone)]
struct Mlp {
c_fc1: Linear,
c_fc2: Linear,
c_proj: Linear,
}
impl Mlp {
fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self {
Self {
c_fc1,
c_fc2,
c_proj,
}
}
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?;
self.c_proj.forward(&x)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let h_size = cfg.dim;
let i_size = cfg.hidden_dim;
let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?;
let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?;
let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?;
Ok(Self::new(c_fc1, c_fc2, c_proj))
}
}
#[derive(Debug, Clone)]
struct Block {
rms_1: RmsNorm,
attn: CausalSelfAttention,
rms_2: RmsNorm,
mlp: Mlp,
}
impl Block {
fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self {
Self {
rms_1,
attn,
rms_2,
mlp,
}
}
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut Cache,
) -> Result<Tensor> {
let residual = x;
let x = self.rms_1.forward(x)?;
let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?;
let residual = &x;
let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?;
Ok(x)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?;
let mlp = Mlp::load(vb.pp("mlp"), cfg)?;
let input_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm =
rms_norm(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?;
Ok(Self::new(
input_layernorm,
attn,
post_attention_layernorm,
mlp,
))
}
}
#[derive(Debug, Clone)]
pub struct Llama {
wte: Embedding,
blocks: Vec<Block>,
ln_f: RmsNorm,
lm_head: Linear,
pub config: Config,
}
impl Llama {
pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> {
let (_b_sz, _seq_len) = x.dims2()?;
let mut x = self.wte.forward(x)?;
for (block_idx, block) in self.blocks.iter().enumerate() {
x = block.forward(&x, index_pos, block_idx, cache)?;
}
let x = self.ln_f.forward(&x)?;
let logits = self.lm_head.forward(&x)?;
logits.to_dtype(DType::F32)
}
pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> {
let wte = embedding(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?;
let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?;
let ln_f = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?;
let blocks: Vec<_> = (0..cfg.n_layers)
.map(|i| Block::load(vb.pp(format!("model.layers.{i}")), &cfg).unwrap())
.collect();
Ok(Self {
wte,
blocks,
ln_f,
lm_head,
config: cfg,
})
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/olmo2.rs | candle-transformers/src/models/olmo2.rs | //! OLMo 2 (Open Language Model) implementation
//!
//! See OLMo 2 model details at:
//! - [Hugging Face Collection](https://huggingface.co/collections/allenai/olmo-2-674117b93ab84e98afc72edc)
//! - [OLMo 2 Paper](https://arxiv.org/abs/2501.00656)
//!
//!
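//! # Example (sketch)
//!
//! A rough greedy-decoding loop, assuming `serde_json` for reading `config.json`; the
//! config/weight paths are placeholders and the tokenizer is left out. It only relies on
//! `Config` being deserializable and on the `forward(input_ids, seqlen_offset)` /
//! `clear_kv_cache` API defined below.
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::olmo2::{Config, Model};
//!
//! fn generate(config_json: &str, weights: &str, prompt: Vec<u32>) -> candle::Result<Vec<u32>> {
//!     let device = Device::Cpu;
//!     let cfg: Config = serde_json::from_str(config_json).map_err(candle::Error::wrap)?;
//!     let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &device)? };
//!     let mut model = Model::new(&cfg, vb)?;
//!     let mut tokens = prompt;
//!     let mut pos = 0;
//!     for _ in 0..16 {
//!         // Feed the whole prompt first, then only the newly sampled token.
//!         let input = Tensor::new(&tokens[pos..], &device)?.unsqueeze(0)?;
//!         let logits = model.forward(&input, pos)?; // (1, 1, vocab_size)
//!         pos = tokens.len();
//!         let next = logits.squeeze(0)?.squeeze(0)?.argmax(0)?.to_scalar::<u32>()?;
//!         tokens.push(next);
//!     }
//!     model.clear_kv_cache();
//!     Ok(tokens)
//! }
//! ```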
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{linear_b, linear_no_bias, rms_norm, Activation, Linear, RmsNorm, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub attention_bias: bool,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub hidden_act: candle_nn::Activation,
pub max_position_embeddings: usize,
pub rope_theta: f64,
pub tie_word_embeddings: bool,
pub clip_qkv: Option<f64>,
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
q_norm: RmsNorm,
k_norm: RmsNorm,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let b = cfg.attention_bias;
let q_proj = linear_b(hidden_sz, num_heads * head_dim, b, vb.pp("q_proj"))?;
let k_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("k_proj"))?;
let v_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("v_proj"))?;
let o_proj = linear_b(num_heads * head_dim, hidden_sz, b, vb.pp("o_proj"))?;
let q_norm = rms_norm(hidden_sz, cfg.rms_norm_eps, vb.pp("q_norm"))?;
let k_norm = rms_norm(num_kv_heads * head_dim, cfg.rms_norm_eps, vb.pp("k_norm"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
q_norm,
k_norm,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = self.q_norm.forward(&query_states)?;
let key_states = self.k_norm.forward(&key_states)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
post_attention_layernorm: RmsNorm,
post_feedforward_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let post_feedforward_layernorm = rms_norm(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_feedforward_layernorm"),
)?;
let post_attention_layernorm = rms_norm(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
post_attention_layernorm,
post_feedforward_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.self_attn.forward(xs, attention_mask, seqlen_offset)?;
let xs = self.post_attention_layernorm.forward(&xs)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self.mlp.forward(&xs)?;
let xs = self.post_feedforward_layernorm.forward(&xs)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = if cfg.tie_word_embeddings {
Linear::new(embed_tokens.embeddings().clone(), None)
} else {
linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
};
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
        // Standard causal mask, no sliding window.
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/moondream.rs | candle-transformers/src/models/moondream.rs | //! MoonDream Model vision-to-text
//!
//!
//! Moondream is a computer-vision model that can answer real-world questions about images.
//! It's lightweight with only 1.6B parameters, enabling it to run on mobile phones and edge devices.
//! [MoonDream Original Implementation](https://github.com/vikhyat/moondream)
//!
//! The model consists of:
//! - Vision encoder using a ViT-style architecture
//! - Text decoder based on Microsoft's Phi model
//! - Vision projection module to align vision and text embeddings
//!
//! # Examples
//!
//! <img src="https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg" width="200">
//!
//! ```bash
//! # download an example image
//! wget https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg
//!
//! # Now you can run Moondream from the `candle-examples` crate:
//! cargo run --example moondream \
//! --release -- \
//!   --prompt "What is the girl eating?" \
//! --image "./demo-1.jpg"
//!
//! > avx: false, neon: true, simd128: false, f16c: false
//! > temp: 0.00 repeat-penalty: 1.00 repeat-last-n: 64
//! > retrieved the files in 3.395583ms
//! > Running on CPU, to run on GPU(metal), build this example with `--features metal`
//! > loaded the model in 5.485493792s
//! > loaded and encoded the image Tensor[dims 3, 378, 378; f32] in 4.801396417s
//! > starting the inference loop
//! > The girl is eating a hamburger.<
//! > 9 tokens generated (0.68 token/s)
//! ```
use crate::models::mixformer::{Config as PhiConfig, MixFormerSequentialForCausalLM as PhiModel};
use crate::models::with_tracing::{layer_norm, linear_b, LayerNorm, Linear};
use candle::{IndexOp, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub phi_config: PhiConfig,
pub vision_config: VisionConfig,
}
impl Config {
pub fn v2() -> Self {
Self {
phi_config: PhiConfig::v1_5(),
vision_config: VisionConfig::v2(),
}
}
}
fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let dim = q.dim(D::Minus1)?;
let scale_factor = 1.0 / (dim as f64).sqrt();
let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v)
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct VisionConfig {
pub(crate) image_embedding_dim: usize,
pub(crate) model_dim: usize,
pub(crate) hidden_dim: usize,
pub(crate) hidden_features: usize,
pub(crate) embed_len: usize,
pub(crate) embed_dim: usize,
pub(crate) num_blocks: usize,
pub(crate) num_heads: usize,
pub(crate) act: candle_nn::Activation,
}
impl VisionConfig {
pub fn v2() -> Self {
Self {
image_embedding_dim: 1152,
model_dim: 2048,
hidden_dim: 2048 * 4,
hidden_features: 4304,
embed_len: 729,
embed_dim: 1152,
num_blocks: 27,
num_heads: 16,
act: candle_nn::Activation::GeluPytorchTanh,
}
}
}
#[derive(Debug, Clone)]
struct LinearPatchEmbedding {
linear: Linear,
}
impl LinearPatchEmbedding {
fn new(vb: VarBuilder) -> Result<Self> {
let linear = linear_b(588, 1152, true, vb.pp("linear"))?;
Ok(Self { linear })
}
}
impl Module for LinearPatchEmbedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.linear)
}
}
#[derive(Debug, Clone)]
struct Attention {
num_heads: usize,
head_dim: usize,
qkv: Linear,
proj: Linear,
span: tracing::Span,
}
impl Attention {
pub fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
let qkv = linear_b(dim, dim * 3, true, vb.pp("qkv"))?;
let proj = linear_b(dim, dim, true, vb.pp("proj"))?;
Ok(Self {
num_heads,
head_dim: dim / num_heads,
qkv,
proj,
span: tracing::span!(tracing::Level::TRACE, "vit-attn"),
})
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b, n, c) = xs.dims3()?;
let qkv = xs
.apply(&self.qkv)?
.reshape((b, n, 3, self.num_heads, self.head_dim))?
.permute((2, 0, 3, 1, 4))?;
let (q, k, v) = (
qkv.i(0)?.contiguous()?,
qkv.i(1)?.contiguous()?,
qkv.i(2)?.contiguous()?,
);
scaled_dot_product_attention(&q, &k, &v)?
.transpose(1, 2)?
.reshape((b, n, c))?
.apply(&self.proj)
}
}
#[derive(Debug, Clone)]
struct VitBlock {
attn: Attention,
mlp: Mlp,
norm1: LayerNorm,
norm2: LayerNorm,
span: tracing::Span,
}
impl VitBlock {
fn new(vb: VarBuilder, dim: usize, num_heads: usize, cfg: &VisionConfig) -> Result<Self> {
let attn = Attention::new(vb.pp("attn"), dim, num_heads)?;
let mlp = Mlp::new(vb.pp("mlp"), dim, cfg.hidden_features, dim, cfg.act)?;
let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?;
let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?;
Ok(Self {
attn,
mlp,
norm1,
norm2,
span: tracing::span!(tracing::Level::TRACE, "vit-block"),
})
}
}
impl Module for VitBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let ys = xs.apply(&self.norm1)?.apply(&self.attn)?;
let xs = (xs + &ys)?;
let ys = xs.apply(&self.norm2)?.apply(&self.mlp)?;
let xs = (&xs + &ys)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct VisionTransformer {
patch_embed: LinearPatchEmbedding,
pos_embed: Tensor,
blocks: Vec<VitBlock>,
norm: LayerNorm,
span: tracing::Span,
}
impl VisionTransformer {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let patch_embed = LinearPatchEmbedding::new(vb.pp("patch_embed"))?;
let pos_embed = vb.get((1, cfg.embed_len, cfg.embed_dim), "pos_embed")?;
let blocks = (0..cfg.num_blocks)
.map(|i| {
VitBlock::new(
vb.pp(format!("blocks.{i}")),
cfg.embed_dim,
cfg.num_heads,
cfg,
)
})
.collect::<Result<_>>()?;
let norm = layer_norm(cfg.embed_dim, 1e-5, vb.pp("norm"))?;
Ok(Self {
patch_embed,
pos_embed,
blocks,
norm,
span: tracing::span!(tracing::Level::TRACE, "vit"),
})
}
}
impl Module for VisionTransformer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = (&xs.apply(&self.patch_embed)? + &self.pos_embed)?;
for block in self.blocks.iter() {
xs = xs.apply(block)?;
}
xs.apply(&self.norm)
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
model: VisionTransformer,
}
impl Encoder {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let model = VisionTransformer::new(cfg, vb.pp("model.visual"))?;
Ok(Self { model })
}
}
impl Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.model)
}
}
#[derive(Debug, Clone)]
struct Mlp {
fc1: Linear,
act: candle_nn::Activation,
fc2: Linear,
span: tracing::Span,
}
impl Mlp {
fn new(
vb: VarBuilder,
in_features: usize,
hidden_features: usize,
out_features: usize,
act: candle_nn::Activation,
) -> Result<Self> {
let fc1 = linear_b(in_features, hidden_features, true, vb.pp("fc1"))?;
let fc2 = linear_b(hidden_features, out_features, true, vb.pp("fc2"))?;
Ok(Self {
fc1,
act,
fc2,
span: tracing::span!(tracing::Level::TRACE, "mlp"),
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
}
}
#[derive(Debug, Clone)]
struct VisionProjection {
mlp: Mlp,
}
impl VisionProjection {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let mlp = Mlp::new(
vb.pp("mlp"),
cfg.image_embedding_dim,
cfg.hidden_dim,
cfg.model_dim,
cfg.act,
)?;
Ok(Self { mlp })
}
}
impl Module for VisionProjection {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.mlp)
}
}
#[derive(Debug, Clone)]
pub struct VisionEncoder {
encoder: Encoder,
projection: VisionProjection,
}
impl VisionEncoder {
pub fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let projection = VisionProjection::new(cfg, vb.pp("projection"))?;
Ok(Self {
encoder,
projection,
})
}
}
impl Module for VisionEncoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, c, hp1, wp2) = xs.dims4()?;
let (p1, p2) = (14, 14);
let h = hp1 / p1;
let w = wp2 / p2;
        xs.reshape((b, c, h, p1, w, p2))?
.permute((0, 2, 4, 1, 3, 5))?
.reshape((b, h * w, c * p1 * p2))?
.apply(&self.encoder)?
.apply(&self.projection)
}
}
#[derive(Debug, Clone)]
pub struct Model {
pub text_model: PhiModel,
pub vision_encoder: VisionEncoder,
}
impl Model {
pub fn new(config: &Config, vb: VarBuilder) -> Result<Self> {
let text_model = PhiModel::new_v2(&config.phi_config, vb.pp("text_model"))?;
let vision_encoder = VisionEncoder::new(&config.vision_config, vb.pp("vision_encoder"))?;
Ok(Self {
text_model,
vision_encoder,
})
}
pub fn vision_encoder(&self) -> &VisionEncoder {
&self.vision_encoder
}
pub fn text_model(&mut self) -> &mut PhiModel {
&mut self.text_model
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/based.rs | candle-transformers/src/models/based.rs | //! Based from the Stanford Hazy Research group.
//!
//! See "Simple linear attention language models balance the recall-throughput tradeoff", Arora et al. 2024
//! - Simple linear attention language models balance the recall-throughput tradeoff. [Arxiv](https://arxiv.org/abs/2402.18668)
//! - [GitHub Repo](https://github.com/HazyResearch/based)
//! - [Blogpost](https://hazyresearch.stanford.edu/blog/2024-03-03-based)
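//!
//! # Example (sketch)
//!
//! A rough loading sketch, assuming `serde_json` for reading the config; the config/weight
//! paths are placeholders and only the `Config` deserialization and `Model::forward` call
//! defined below are used.
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::based::{Config, Model};
//!
//! fn last_token_logits(config_json: &str, weights: &str, prompt: &[u32]) -> candle::Result<Tensor> {
//!     let device = Device::Cpu;
//!     let cfg: Config = serde_json::from_str(config_json).map_err(candle::Error::wrap)?;
//!     let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &device)? };
//!     let mut model = Model::new(&cfg, vb)?;
//!     let input = Tensor::new(prompt, &device)?.unsqueeze(0)?;
//!     // Logits for the last position only, shape (1, 1, padded_vocab_size).
//!     model.forward(&input, 0)
//! }
//! ```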
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{
conv1d_no_bias, linear, linear_no_bias, ops::softmax_last_dim, rms_norm, Conv1d, Conv1dConfig,
Func, Linear, RmsNorm, VarBuilder,
};
use std::sync::Arc;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct LinearAttentionFeatureMapConfig {
input_dim: usize,
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct LinearAttentionConfig {
num_heads: usize,
feature_dim: usize,
feature_map: LinearAttentionFeatureMapConfig,
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct SlidingWindowAttentionConfig {
num_heads: usize,
window_size: usize,
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
vocab_size: usize,
#[serde(rename = "n_embd")]
hidden_size: usize,
#[serde(rename = "n_inner")]
intermediate_size: usize,
#[serde(rename = "n_layer")]
num_hidden_layers: usize,
#[serde(rename = "n_head")]
num_attention_heads: usize,
layer_norm_epsilon: f64,
#[serde(default = "default_rope", rename = "rotary_emb_base")]
rope_theta: f64,
alt_mixer_layers: Vec<usize>,
alt_mixer_2_layers: Vec<usize>,
#[serde(rename = "alt_mixer")]
la: LinearAttentionConfig,
#[serde(rename = "alt_mixer_2")]
swa: SlidingWindowAttentionConfig,
}
fn default_rope() -> f64 {
10_000.0
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
fc1: Linear,
fc2: Linear,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let fc1 = linear_no_bias(cfg.hidden_size, cfg.hidden_size * 4, vb.pp("fc1"))?;
let fc2 = linear_no_bias(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?;
Ok(Self { fc1, fc2 })
}
}
// Swiglu implementation.
// Not using Activation::Swiglu because this implementation has the gate and value halves
// swapped compared to the version in candle-nn/src/ops.rs.
fn swiglu(xs: &Tensor) -> Result<Tensor> {
let xs = xs.chunk(2, D::Minus1)?;
&xs[1].silu()? * &xs[0]
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.apply(&self.fc1)?;
let xs = swiglu(&xs)?;
let xs = xs.apply(&self.fc2)?;
Ok(xs)
}
}
// A gated convolutional block.
#[derive(Debug, Clone)]
struct BasedConv {
in_proj: Linear,
out_proj: Linear,
conv: Conv1d,
state: Tensor,
}
impl BasedConv {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dim = cfg.hidden_size * 2;
let conv1d_cfg = Conv1dConfig {
groups: dim,
padding: 2,
..Default::default()
};
let in_proj = linear(cfg.hidden_size, cfg.hidden_size * 4, vb.pp("in_proj"))?;
let out_proj = linear(dim, cfg.hidden_size, vb.pp("out_proj"))?;
let conv = conv1d_no_bias(dim, dim, 3, conv1d_cfg, vb.pp("conv.conv"))?;
let state = Tensor::zeros((1, dim, 3), vb.dtype(), vb.device())?;
Ok(Self {
in_proj,
out_proj,
conv,
state,
})
}
fn step(&mut self, xs: &Tensor) -> Result<Tensor> {
self.state = self.state.roll(-1, D::Minus1)?;
let (_, _, l) = self.state.dims3()?;
self.state = self.state.narrow(D::Minus1, 0, l - 1)?;
self.state = Tensor::cat(&[&self.state, &xs.transpose(1, 2)?], 2)?;
let xs = (&self.state * self.conv.weight().permute((1, 0, 2))?)?
.sum_keepdim(0)?
.sum(D::Minus1)?;
let xs = xs.unsqueeze(1)?;
Ok(xs)
}
fn forward(&mut self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let xs = xs.apply(&self.in_proj)?;
let us = xs.chunk(2, D::Minus1)?;
let (_b, l, _d) = us[0].dims3()?;
let u_conv = if seqlen_offset > 0 {
self.step(&us[0])?
} else {
let k = std::cmp::min(3, l);
self.state = self.state.narrow(D::Minus1, 0, 3 - k)?;
let xs = us[0].narrow(1, l - k, k)?.transpose(1, 2)?;
self.state = Tensor::cat(&[&self.state, &xs], 2)?;
us[0]
.transpose(1, 2)?
.apply(&self.conv)?
.narrow(D::Minus1, 0, l)?
.transpose(1, 2)?
};
let u_conv = u_conv.silu()?;
let v = u_conv.broadcast_mul(&us[1])?;
let xs = v.apply(&self.out_proj)?;
Ok(xs)
}
}
// Linear attention approximating softmax using second order Taylor polynomials.
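// The feature map below is phi(x) = [1, x / d^(1/4), vec(x ⊗ x) / (sqrt(2) * sqrt(d))] with d the
// feature-map input dim, so that phi(q) . phi(k) = 1 + q.k / sqrt(d) + (q.k)^2 / (2d), i.e. the
// second order Taylor expansion of exp(q.k / sqrt(d)), which lets attention run in linear time.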
#[derive(Debug, Clone)]
struct LinearAttention {
proj_q: Linear,
proj_k: Linear,
proj_v: Linear,
out_proj: Linear,
feature_dim: usize,
num_heads: usize,
input_dim: usize,
k_state: Tensor,
kv_state: Tensor,
}
impl LinearAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let input_dim = cfg.la.feature_map.input_dim;
let out_proj = linear_no_bias(cfg.hidden_size, cfg.hidden_size, vb.pp("out_proj"))?;
let proj_k = linear_no_bias(
cfg.hidden_size,
cfg.la.num_heads * cfg.la.feature_dim,
vb.pp("proj_k"),
)?;
let proj_q = linear_no_bias(
cfg.hidden_size,
cfg.la.num_heads * cfg.la.feature_dim,
vb.pp("proj_q"),
)?;
let proj_v = linear_no_bias(cfg.hidden_size, cfg.hidden_size, vb.pp("proj_v"))?;
let expanded_size = cfg.la.feature_dim.pow(2) + cfg.la.feature_dim + 1;
let k_state = Tensor::zeros(
(1, cfg.la.num_heads, 1, 1, expanded_size),
vb.dtype(),
vb.device(),
)?;
let kv_state = Tensor::zeros(
(1, cfg.la.num_heads, cfg.la.feature_dim, expanded_size),
vb.dtype(),
vb.device(),
)?;
Ok(Self {
proj_q,
proj_k,
proj_v,
out_proj,
feature_dim: cfg.la.feature_dim,
num_heads: cfg.la.num_heads,
input_dim,
k_state,
kv_state,
})
}
fn taylor_expansion(&self) -> Result<Func<'static>> {
let r2 = std::f64::consts::SQRT_2;
let rd = (self.input_dim as f64).sqrt();
let rrd = rd.sqrt();
Ok(Func::new(move |xs| {
let dims = xs.dims();
let mut d = dims.to_vec();
if let Some(last) = d.last_mut() {
*last = 1;
};
let x = xs
.unsqueeze(D::Minus1)?
.broadcast_mul(&xs.unsqueeze(D::Minus2)?)?;
let x = (x.flatten_from(D::Minus2)? / r2)?;
let o = Tensor::ones(d, xs.dtype(), xs.device())?;
let x = Tensor::cat(&[o, (xs / rrd)?, (&x / rd)?], D::Minus1)?;
Ok(x)
}))
}
fn forward(&mut self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let eps = 1e-12;
let feature_map = self.taylor_expansion()?;
let (b, l, d) = xs.dims3()?;
let q = xs.apply(&self.proj_q)?;
let k = xs.apply(&self.proj_k)?;
let v = xs.apply(&self.proj_v)?;
let q = q
.reshape((b, l, self.num_heads, self.feature_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b, l, self.num_heads, self.feature_dim))?
.transpose(1, 2)?
.contiguous()?;
let v = v
.reshape((b, l, self.num_heads, d / self.num_heads))?
.transpose(1, 2)?
.contiguous()?;
let q = feature_map.forward(&q)?;
let k = feature_map.forward(&k)?;
let y = if seqlen_offset > 0 {
let (_b, _h, l, _d) = k.dims4()?;
let q = q.unsqueeze(D::Minus2)?;
let k = k.unsqueeze(D::Minus2)?;
let v = v.unsqueeze(D::Minus1)?;
let kn = k.narrow(D::Minus1, l - 1, 1)?;
let vn = v.narrow(D::Minus1, l - 1, 1)?;
self.k_state = self.k_state.broadcast_add(&kn)?;
self.kv_state = self.kv_state.broadcast_add(&kn.broadcast_mul(&vn)?)?;
let num = q.broadcast_mul(&self.kv_state)?.sum(D::Minus1)?;
let den = (q.broadcast_mul(&self.k_state)?.sum(D::Minus1)? + eps)?;
num.broadcast_div(&den)?
} else {
self.k_state = k.sum(2)?.unsqueeze(2)?.unsqueeze(3)?;
self.kv_state = k
.transpose(2, 3)?
.matmul(&v)?
.transpose(2, 3)?
.unsqueeze(2)?;
let aqk = q.matmul(&k.transpose(D::Minus1, D::Minus2)?)?;
let tril = Tensor::tril2(l, aqk.dtype(), aqk.device())?;
let aqk = aqk.broadcast_mul(&tril)?.matmul(&v)?;
let z = (1f64 / (q.mul(&k.cumsum(2)?)?.sum(D::Minus1)? + eps)?)?;
aqk.broadcast_mul(&z.unsqueeze(D::Minus1)?)?
};
let (b, h, l, d) = y.dims4()?;
let y = y.permute((0, 2, 1, 3))?.reshape((b, l, h * d))?;
let y = self.out_proj.forward(&y)?;
Ok(y)
}
}
// Rotary embeddings used in local attention.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = 2048; // Hardcoded, missing from config.
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
// Local attention using a small sliding window.
#[derive(Debug, Clone)]
struct SlidingWindowAttention {
wqkv: Linear,
out_proj: Linear,
num_heads: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl SlidingWindowAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let num_heads = cfg.swa.num_heads;
let head_dim = hidden_size / num_heads;
let out_proj = linear_no_bias(hidden_size, hidden_size, vb.pp("out_proj"))?;
let wqkv = linear_no_bias(hidden_size, hidden_size * 3, vb.pp("Wqkv"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
Ok(Self {
wqkv,
out_proj,
hidden_size,
num_heads,
head_dim,
rotary_emb,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let qkv = xs.apply(&self.wqkv)?;
let qkv = qkv.reshape((b_sz, q_len, 3, (), self.head_dim))?;
let q = qkv.i((.., .., 0))?;
let k = qkv.i((.., .., 1))?;
let v = qkv.i((.., .., 2))?;
let q = q
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let k = k
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let (q, k) = self
.rotary_emb
.apply_rotary_emb_qkv(&q, &k, seqlen_offset)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 2)?;
let v = Tensor::cat(&[prev_v, &v], 2)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&v)?;
let out = attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.out_proj)?;
Ok(out)
}
}
// The model layers use three types of mixers.
#[derive(Debug, Clone)]
enum SequenceMixer {
Based(BasedConv),
Linear(LinearAttention),
Sliding(SlidingWindowAttention),
}
impl SequenceMixer {
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
match self {
Self::Based(b) => b.forward(xs, pos),
Self::Linear(b) => b.forward(xs, pos),
Self::Sliding(b) => b.forward(xs, attention_mask, pos),
}
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
mlp: MLP,
norm1: RmsNorm,
norm2: RmsNorm,
mixer: SequenceMixer,
}
impl DecoderLayer {
fn new(layer_idx: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let norm1 = rms_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("norm1"))?;
let norm2 = rms_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("norm2"))?;
let l_attn = cfg.alt_mixer_layers.contains(&layer_idx);
let sw_attn = cfg.alt_mixer_2_layers.contains(&layer_idx);
let mixer = if l_attn {
SequenceMixer::Linear(LinearAttention::new(cfg, vb.pp("mixer"))?)
} else if sw_attn {
SequenceMixer::Sliding(SlidingWindowAttention::new(cfg, vb.pp("mixer"))?)
} else {
SequenceMixer::Based(BasedConv::new(cfg, vb.pp("mixer"))?)
};
Ok(Self {
mlp,
norm1,
norm2,
mixer,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.norm1.forward(xs)?;
let xs = self.mixer.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.norm2)?.apply(&self.mlp)?;
residual + xs
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: super::with_tracing::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
sliding_window: usize,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vocab_size = cfg.vocab_size + (8 - cfg.vocab_size % 8) % 8;
let lm_head = linear_no_bias(cfg.hidden_size, vocab_size, vb.pp("lm_head"))?;
let embed_tokens = super::with_tracing::Embedding::from_weights(lm_head.weight().clone())?;
let vb_m = vb.pp("transformer");
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(layer_idx, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = rms_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb_m.pp("ln_f"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
sliding_window: cfg.swa.window_size,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let sliding_window = self.sliding_window / 2;
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mobileone.rs | candle-transformers/src/models/mobileone.rs | //! # MobileOne
//!
//! MobileOne inference implementation based on timm and candle-repvgg
//!
//! See ["MobileOne: An Improved One millisecond Mobile Backbone"](https://arxiv.org/abs/2206.04040)
use candle::{DType, Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d, conv2d_no_bias, linear, ops::sigmoid, BatchNorm, Conv2d, Conv2dConfig,
Func, VarBuilder,
};
struct StageConfig {
blocks: usize,
channels: usize,
}
// The architecture in the paper has 6 stages. The timm implementation uses an equivalent form
// by concatenating the 5th stage (starts with stride 1) to the previous one.
const STAGES: [StageConfig; 5] = [
StageConfig {
blocks: 1,
channels: 64,
},
StageConfig {
blocks: 2,
channels: 64,
},
StageConfig {
blocks: 8,
channels: 128,
},
StageConfig {
blocks: 10,
channels: 256,
},
StageConfig {
blocks: 1,
channels: 512,
},
];
#[derive(Clone)]
pub struct Config {
/// overparameterization factor
k: usize,
/// per-stage channel number multipliers
alphas: [f32; 5],
}
impl Config {
pub fn s0() -> Self {
Self {
k: 4,
alphas: [0.75, 0.75, 1.0, 1.0, 2.0],
}
}
pub fn s1() -> Self {
Self {
k: 1,
alphas: [1.5, 1.5, 1.5, 2.0, 2.5],
}
}
pub fn s2() -> Self {
Self {
k: 1,
alphas: [1.5, 1.5, 2.0, 2.5, 4.0],
}
}
pub fn s3() -> Self {
Self {
k: 1,
alphas: [2.0, 2.0, 2.5, 3.0, 4.0],
}
}
pub fn s4() -> Self {
Self {
k: 1,
alphas: [3.0, 3.0, 3.5, 3.5, 4.0],
}
}
}
// SE blocks are used in the last stages of the s4 variant.
fn squeeze_and_excitation(
in_channels: usize,
squeeze_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let fc1 = conv2d(in_channels, squeeze_channels, 1, conv2d_cfg, vb.pp("fc1"))?;
let fc2 = conv2d(squeeze_channels, in_channels, 1, conv2d_cfg, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let residual = xs;
let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?;
let xs = sigmoid(&xs.apply(&fc1)?.relu()?.apply(&fc2)?)?;
residual.broadcast_mul(&xs)
}))
}
// fuses a convolutional kernel and a batchnorm layer into a convolutional layer
// based on the _fuse_bn_tensor method in timm
// see https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L602
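// With bn(x) = gamma * (x - mu) / sqrt(var + eps) + beta applied after a bias-free convolution,
// the fused layer uses w' = w * gamma / sqrt(var + eps) and b' = beta - mu * gamma / sqrt(var + eps).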
fn fuse_conv_bn(weights: &Tensor, bn: BatchNorm) -> Result<(Tensor, Tensor)> {
let (gamma, beta) = bn.weight_and_bias().unwrap();
let mu = bn.running_mean();
let sigma = (bn.running_var() + bn.eps())?.sqrt();
let gps = (gamma / sigma)?;
let bias = (beta - mu * &gps)?;
let weights = weights.broadcast_mul(&gps.reshape(((), 1, 1, 1))?)?;
Ok((weights, bias))
}
// A mobileone block has a different training time and inference time architecture.
// The latter is a simple and efficient equivalent transformation of the former
// realized by a structural reparameterization technique, where convolutions
// along with identity branches and batchnorm layers are fused into a single convolution.
#[allow(clippy::too_many_arguments)]
fn mobileone_block(
has_identity: bool,
k: usize,
dim: usize,
stride: usize,
padding: usize,
groups: usize,
kernel: usize,
in_channels: usize,
out_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride,
padding,
groups,
..Default::default()
};
let mut w = Tensor::zeros(
(out_channels, in_channels / groups, kernel, kernel),
DType::F32,
vb.device(),
)?;
let mut b = Tensor::zeros(dim, DType::F32, vb.device())?;
// k is the training-time overparameterization factor, larger than 1 only in the s0 variant
for i in 0..k {
let conv_kxk_bn = batch_norm(dim, 1e-5, vb.pp(format!("conv_kxk.{i}.bn")))?;
let conv_kxk = conv2d_no_bias(
in_channels,
out_channels,
kernel,
conv2d_cfg,
vb.pp(format!("conv_kxk.{i}.conv")),
)?;
let (wk, bk) = fuse_conv_bn(conv_kxk.weight(), conv_kxk_bn)?;
w = (w + wk)?;
b = (b + bk)?;
}
if kernel > 1 {
let conv_scale_bn = batch_norm(dim, 1e-5, vb.pp("conv_scale.bn"))?;
let conv_scale = conv2d_no_bias(
in_channels,
out_channels,
1,
conv2d_cfg,
vb.pp("conv_scale.conv"),
)?;
let (mut ws, bs) = fuse_conv_bn(conv_scale.weight(), conv_scale_bn)?;
// resize to 3x3
ws = ws.pad_with_zeros(D::Minus1, 1, 1)?;
ws = ws.pad_with_zeros(D::Minus2, 1, 1)?;
w = (w + ws)?;
b = (b + bs)?;
}
// Use SE blocks if present (last layers of the s4 variant)
let se = squeeze_and_excitation(out_channels, out_channels / 16, vb.pp("attn"));
// read and reparameterize the identity bn into wi and bi
if has_identity {
let identity_bn = batch_norm(dim, 1e-5, vb.pp("identity"))?;
let mut weights: Vec<f32> = vec![0.0; w.elem_count()];
let id = in_channels / groups;
// See https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L809
for i in 0..in_channels {
if kernel > 1 {
weights[i * kernel * kernel + 4] = 1.0;
} else {
weights[i * (id + 1)] = 1.0;
}
}
let weights = &Tensor::from_vec(weights, w.shape(), w.device())?;
let (wi, bi) = fuse_conv_bn(weights, identity_bn)?;
w = (w + wi)?;
b = (b + bi)?;
}
let reparam_conv = Conv2d::new(w, Some(b), conv2d_cfg);
Ok(Func::new(move |xs| {
let mut xs = xs.apply(&reparam_conv)?;
if let Ok(f) = &se {
xs = xs.apply(f)?;
}
xs = xs.relu()?;
Ok(xs)
}))
}
// Get the number of output channels per stage taking into account the multipliers
fn output_channels_per_stage(cfg: &Config, stage: usize) -> usize {
let channels = STAGES[stage].channels as f32;
let alpha = cfg.alphas[stage];
match stage {
0 => std::cmp::min(64, (channels * alpha) as usize),
_ => (channels * alpha) as usize,
}
}
// Each stage is made of blocks. The first block of each stage downsamples with stride 2;
// all later blocks carry an identity branch.
fn mobileone_stage(cfg: &Config, idx: usize, vb: VarBuilder) -> Result<Func<'static>> {
let nblocks = STAGES[idx].blocks;
let mut blocks = Vec::with_capacity(nblocks);
let mut in_channels = output_channels_per_stage(cfg, idx - 1);
for block_idx in 0..nblocks {
let out_channels = output_channels_per_stage(cfg, idx);
let (has_identity, stride) = if block_idx == 0 {
(false, 2)
} else {
(true, 1)
};
// depthwise convolution layer
blocks.push(mobileone_block(
has_identity,
cfg.k,
in_channels,
stride,
1,
in_channels,
3,
in_channels,
in_channels,
vb.pp(block_idx * 2),
)?);
// pointwise convolution layer
blocks.push(mobileone_block(
has_identity,
cfg.k,
out_channels,
1, // stride
0, // padding
1, // groups
1, // kernel
in_channels,
out_channels,
vb.pp(block_idx * 2 + 1),
)?);
in_channels = out_channels;
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for block in blocks.iter() {
xs = xs.apply(block)?
}
Ok(xs)
}))
}
// Build a mobileone model for a given configuration.
fn mobileone_model(
config: &Config,
nclasses: Option<usize>,
vb: VarBuilder,
) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let outputs = output_channels_per_stage(config, 4);
let linear = linear(outputs, nclasses, vb.pp("head.fc"))?;
Some(linear)
}
};
let stem_dim = output_channels_per_stage(config, 0);
let stem = mobileone_block(false, 1, stem_dim, 2, 1, 1, 3, 3, stem_dim, vb.pp("stem"))?;
let vb = vb.pp("stages");
let stage1 = mobileone_stage(config, 1, vb.pp(0))?;
let stage2 = mobileone_stage(config, 2, vb.pp(1))?;
let stage3 = mobileone_stage(config, 3, vb.pp(2))?;
let stage4 = mobileone_stage(config, 4, vb.pp(3))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&stem)?
.apply(&stage1)?
.apply(&stage2)?
.apply(&stage3)?
.apply(&stage4)?
.mean(D::Minus2)?
.mean(D::Minus1)?;
match &cls {
None => Ok(xs),
Some(cls) => xs.apply(cls),
}
}))
}
pub fn mobileone(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
mobileone_model(cfg, Some(nclasses), vb)
}
pub fn mobileone_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
mobileone_model(cfg, None, vb)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/t5.rs | candle-transformers/src/models/t5.rs | //! T5 model implementation.
//!
//! T5 (Text-to-Text Transfer Transformer) is a unified text-to-text transformer model.
//! This implementation follows the original model architecture.
//!
//! Key characteristics:
//! - Text-to-text framework
//! - Relative positional embeddings
//! - T5-specific layer normalization
//! - Encoder-decoder architecture
//! - Support for sequence-to-sequence tasks
//!
//! References:
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm)
//! - 💻[GH Model](https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py)
//! - 🤗 [HF Link](https://huggingface.co/docs/transformers/model_doc/t5)
//! - 📝 [T5 Paper](https://arxiv.org/abs/1910.10683)
//!
//! # Encoder-decoder example:
//!
//! ```bash
//! cargo run --example t5 --release -- \
//! --model-id "t5-small" \
//! --prompt "translate to German: A beautiful candle." \
//! --decode
//! > ...
//! > Eine schöne Kerze.
//! > 9 tokens generated (2.42 token/s)
//! ```
//!
//! Variants such as [flan-t5](https://huggingface.co/google/flan-t5-small), [flan-ul2](https://huggingface.co/google/flan-ul2) (with `--revision "refs/pr/25"`), and [Co-EdIT](https://huggingface.co/grammarly/coedit-large) are also supported.
//!
//! # Translation with MADLAD
//!
//! [MADLAD-400](https://arxiv.org/abs/2309.04662) is a series of multilingual machine translation T5 models trained on 250 billion tokens covering over 450 languages using publicly available data. These models are competitive with significantly larger models.
//!
//! ```bash
//! cargo run --example t5 --release -- \
//! --model-id "jbochi/madlad400-3b-mt" \
//! --prompt "<2de> How are you, my friend?" \
//! --decode --temperature 0
//! ...
//! Wie geht es dir, mein Freund?
//! ```
//!
//! ## Sentence embedding example
//!
//! ```bash
//! cargo run --example t5 --release -- \
//! --model-id "t5-small" --prompt "A beautiful candle."
//! ...
//! [[[ 0.0515, -0.0541, -0.0761, ..., -0.0392, 0.1511, -0.0265],
//! [-0.0974, 0.0998, -0.1659, ..., -0.2450, 0.1738, -0.0164],
//! [ 0.0624, -0.1024, 0.0430, ..., -0.1388, 0.0564, -0.2962],
//! [-0.0389, -0.1173, 0.0026, ..., 0.1064, -0.1065, 0.0990],
//! [ 0.1300, 0.0027, -0.0326, ..., 0.0026, -0.0317, 0.0851]]]
//! Tensor[[1, 5, 512], f32]
//! Took 303.766583ms
//! ```
use crate::models::with_tracing::Embedding;
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use serde::Deserialize;
use std::sync::Arc;
#[derive(Debug, Clone)]
pub struct Linear {
weight: Tensor,
span: tracing::Span,
}
pub fn linear_no_bias(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> {
let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL;
let weight = vb.get_with_hints((d2, d1), "weight", init_ws)?;
let span = tracing::span!(tracing::Level::TRACE, "linear");
Ok(Linear { weight, span })
}
impl Module for Linear {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let weight = self.weight.to_dtype(xs.dtype())?;
let w = match *xs.dims() {
[b1, b2, _, _] => weight.broadcast_left((b1, b2))?.t()?,
[bsize, _, _] => weight.broadcast_left(bsize)?.t()?,
_ => weight.t()?,
};
xs.matmul(&w)
}
}
fn default_relative_attention_max_distance() -> usize {
128
}
fn default_is_decoder() -> bool {
false
}
fn default_use_cache() -> bool {
true
}
fn default_tie_word_embeddings() -> bool {
true
}
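// Builds a (size, size) causal mask where a 1 marks positions to be hidden (column > row).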
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device)
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Deserialize, Default, Clone, PartialEq)]
pub struct ActivationWithOptionalGating {
pub gated: bool,
pub activation: candle_nn::Activation,
}
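/// Parses the `feed_forward_proj` config field; HF configs encode gating in the
/// activation name, e.g. "gated-gelu" or "gated-silu".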
pub fn deserialize_feed_forward_proj_activation<'de, D>(
deserializer: D,
) -> std::result::Result<ActivationWithOptionalGating, D::Error>
where
D: serde::de::Deserializer<'de>,
{
match String::deserialize(deserializer)?.as_str() {
"gated-gelu" => Ok(ActivationWithOptionalGating {
gated: true,
activation: candle_nn::Activation::NewGelu,
}),
"gated-silu" => Ok(ActivationWithOptionalGating {
gated: true,
activation: candle_nn::Activation::Silu,
}),
buf => {
let activation = serde_plain::from_str(buf).map_err(serde::de::Error::custom)?;
Ok(ActivationWithOptionalGating {
gated: false,
activation,
})
}
}
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub d_model: usize,
pub d_kv: usize,
pub d_ff: usize,
pub num_layers: usize,
pub num_decoder_layers: Option<usize>,
pub num_heads: usize,
pub relative_attention_num_buckets: usize,
#[serde(default = "default_relative_attention_max_distance")]
pub relative_attention_max_distance: usize,
pub dropout_rate: f64,
pub layer_norm_epsilon: f64,
pub initializer_factor: f64,
#[serde(default, deserialize_with = "deserialize_feed_forward_proj_activation")]
pub feed_forward_proj: ActivationWithOptionalGating,
#[serde(default = "default_tie_word_embeddings")]
pub tie_word_embeddings: bool,
#[serde(default = "default_is_decoder")]
pub is_decoder: bool,
pub is_encoder_decoder: bool,
#[serde(default = "default_use_cache")]
pub use_cache: bool,
pub pad_token_id: usize,
pub eos_token_id: usize,
pub decoder_start_token_id: Option<usize>,
}
impl Default for Config {
fn default() -> Self {
Self {
vocab_size: 32128,
d_model: 512,
d_kv: 64,
d_ff: 2048,
num_layers: 6,
num_decoder_layers: None,
num_heads: 8,
relative_attention_num_buckets: 32,
relative_attention_max_distance: 128,
dropout_rate: 0.1,
layer_norm_epsilon: 1e-6,
initializer_factor: 1.0,
feed_forward_proj: ActivationWithOptionalGating {
gated: false,
activation: Activation::Relu,
},
tie_word_embeddings: true,
is_decoder: false,
is_encoder_decoder: true,
use_cache: true,
pad_token_id: 0,
eos_token_id: 1,
decoder_start_token_id: Some(0),
}
}
}
impl Config {
// https://huggingface.co/facebook/musicgen-small/blob/495da4ad086b3416a27c6187f9239f9fd96f3962/config.json#L184
pub fn musicgen_small() -> Self {
Self {
d_ff: 3072,
d_kv: 64,
d_model: 768,
dropout_rate: 0.1,
eos_token_id: 1,
feed_forward_proj: ActivationWithOptionalGating {
gated: false,
activation: Activation::Relu,
},
tie_word_embeddings: true,
initializer_factor: 1.0,
is_decoder: false,
is_encoder_decoder: true,
layer_norm_epsilon: 1e-6,
num_decoder_layers: Some(12),
num_heads: 12,
num_layers: 12,
pad_token_id: 0,
decoder_start_token_id: Some(0),
relative_attention_max_distance: 128,
relative_attention_num_buckets: 32,
use_cache: true,
vocab_size: 32128,
}
}
}
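// T5 uses a simplified layer norm (RMS norm): scale only, with no mean subtraction and no bias.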
#[derive(Debug, Clone)]
struct T5LayerNorm {
weight: Tensor,
variance_epsilon: f64,
span: tracing::Span,
}
impl T5LayerNorm {
fn load(h: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(h, "weight")?;
Ok(Self {
weight,
variance_epsilon: eps,
span: tracing::span!(tracing::Level::TRACE, "layer-norm"),
})
}
}
impl Module for T5LayerNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let dtype = xs.dtype();
let xs_f32 = xs.to_dtype(DType::F32)?;
// variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
let variance = xs_f32.sqr()?.mean_keepdim(D::Minus1)?;
let xs = xs_f32.broadcast_div(&(variance + self.variance_epsilon)?.sqrt()?)?;
let xs = xs.to_dtype(dtype)?;
let xs = xs.broadcast_mul(&self.weight.to_dtype(dtype)?)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct T5DenseActDense {
wi: Linear,
wo: Linear,
act: Activation,
span: tracing::Span,
}
impl T5DenseActDense {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let wi = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi"))?;
let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?;
Ok(Self {
wi,
wo,
act: Activation::Relu,
span: tracing::span!(tracing::Level::TRACE, "dense-act-dense"),
})
}
}
impl Module for T5DenseActDense {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = self.wi.forward(xs)?;
let xs = self.act.forward(&xs)?;
let xs = self.wo.forward(&xs)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct T5DenseGatedActDense {
wi_0: Linear,
wi_1: Linear,
wo: Linear,
act: Activation,
span: tracing::Span,
}
impl T5DenseGatedActDense {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let wi_0 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_0"))?;
let wi_1 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_1"))?;
let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?;
Ok(Self {
wi_0,
wi_1,
wo,
act: cfg.feed_forward_proj.activation,
span: tracing::span!(tracing::Level::TRACE, "dense-gated-act-dense"),
})
}
}
impl Module for T5DenseGatedActDense {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_gelu = self.act.forward(&self.wi_0.forward(xs)?)?;
let hidden_linear = self.wi_1.forward(xs)?;
let xs = hidden_gelu.broadcast_mul(&hidden_linear)?;
let xs = self.wo.forward(&xs)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct T5LayerFF {
dense_act: Option<T5DenseActDense>,
gated_dense_act: Option<T5DenseGatedActDense>,
layer_norm: T5LayerNorm,
span: tracing::Span,
}
impl T5LayerFF {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let layer_norm =
T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
let (dense_act, gated_dense_act) = if cfg.feed_forward_proj.gated {
(
None,
Some(T5DenseGatedActDense::load(vb.pp("DenseReluDense"), cfg)?),
)
} else {
(
Some(T5DenseActDense::load(vb.pp("DenseReluDense"), cfg)?),
None,
)
};
Ok(Self {
dense_act,
gated_dense_act,
layer_norm,
span: tracing::span!(tracing::Level::TRACE, "layer-ff"),
})
}
}
impl Module for T5LayerFF {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let ys = self.layer_norm.forward(xs)?;
let ys = match &self.dense_act {
Some(dense_act) => dense_act.forward(&ys)?,
None => self.gated_dense_act.as_ref().unwrap().forward(&ys)?,
};
let xs = (xs + ys)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct T5Attention {
q: Linear,
k: Linear,
v: Linear,
o: Linear,
n_heads: usize,
d_kv: usize,
relative_attention_bias: Option<Embedding>,
relative_attention_num_buckets: usize,
relative_attention_max_distance: usize,
inner_dim: usize,
use_cache: bool,
kv_cache: Option<(Tensor, Tensor)>,
span: tracing::Span,
span_cache: tracing::Span,
span_mm: tracing::Span,
span_sm: tracing::Span,
}
impl T5Attention {
fn load(
has_relative_attention_bias: bool,
decoder: bool,
vb: VarBuilder,
cfg: &Config,
) -> Result<Self> {
let inner_dim = cfg.num_heads * cfg.d_kv;
let q = linear_no_bias(cfg.d_model, inner_dim, vb.pp("q"))?;
let k = linear_no_bias(cfg.d_model, inner_dim, vb.pp("k"))?;
let v = linear_no_bias(cfg.d_model, inner_dim, vb.pp("v"))?;
let o = linear_no_bias(inner_dim, cfg.d_model, vb.pp("o"))?;
let relative_attention_bias = if has_relative_attention_bias {
let emb = Embedding::new(
cfg.relative_attention_num_buckets,
cfg.num_heads,
vb.pp("relative_attention_bias"),
)?;
Some(emb)
} else {
None
};
Ok(Self {
q,
k,
v,
o,
n_heads: cfg.num_heads,
d_kv: cfg.d_kv,
relative_attention_bias,
relative_attention_num_buckets: cfg.relative_attention_num_buckets,
relative_attention_max_distance: cfg.relative_attention_max_distance,
inner_dim,
use_cache: cfg.use_cache && decoder,
kv_cache: None,
span: tracing::span!(tracing::Level::TRACE, "attention"),
span_cache: tracing::span!(tracing::Level::TRACE, "attention-cache"),
span_mm: tracing::span!(tracing::Level::TRACE, "attention-mm"),
span_sm: tracing::span!(tracing::Level::TRACE, "attention-sm"),
})
}
fn forward(
&mut self,
xs: &Tensor,
position_bias: Option<&Tensor>,
key_value_states: Option<&Tensor>,
mask: Option<&Tensor>,
) -> Result<(Tensor, Option<Tensor>)> {
        // Performs self-attention (when key_value_states is None) or cross-attention
        // over the encoder states provided via key_value_states.
let _enter = self.span.enter();
let kv_input = match key_value_states {
None => xs,
Some(key_value_states) => key_value_states,
};
let (b_sz, q_len) = (xs.dim(0)?, xs.dim(1)?);
let kv_len = kv_input.dim(1)?;
let q = self.q.forward(xs)?;
let k = self.k.forward(kv_input)?;
let v = self.v.forward(kv_input)?;
let q = q
.reshape((b_sz, q_len, self.n_heads, self.d_kv))?
.transpose(1, 2)?
.contiguous()?;
let mut k = k
.reshape((b_sz, kv_len, self.n_heads, self.d_kv))?
.transpose(1, 2)?;
let mut v = v
.reshape((b_sz, kv_len, self.n_heads, self.d_kv))?
.transpose(1, 2)?;
if self.use_cache && key_value_states.is_none() {
let _enter = self.span_cache.enter();
if let Some((kv_cache_k, kv_cache_v)) = &self.kv_cache {
k = Tensor::cat(&[kv_cache_k, &k], 2)?;
v = Tensor::cat(&[kv_cache_v, &v], 2)?;
};
self.kv_cache = Some((k.clone(), v.clone()));
};
let k = k.contiguous()?;
let v = v.contiguous()?;
// TODO: Use flash_attn.
let scores = {
let _enter = self.span_mm.enter();
q.matmul(&k.t()?)?
};
let scores = match mask {
None => scores,
Some(mask) => masked_fill(
&scores,
&mask
.unsqueeze(0)?
.unsqueeze(0)?
.repeat((b_sz, self.n_heads))?,
f32::NEG_INFINITY,
)?,
};
let (scores, position_bias) = match position_bias {
Some(position_bias) => (
scores.broadcast_add(position_bias)?,
Some(position_bias.clone()),
),
None => match &self.relative_attention_bias {
None => (scores, None),
Some(relative_attention_bias) => {
// This only handles the bidirectional case.
let kv_len = k.dim(2)?;
let (q_start, q_end) = match self.use_cache {
true => ((kv_len - q_len) as u32, kv_len as u32),
false => (0_u32, kv_len as u32),
};
let num_buckets = self.relative_attention_num_buckets as u32 / 2;
let max_exact = num_buckets / 2;
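                    // Map each relative position to a bucket: within each direction's
                    // range of `num_buckets`, half the buckets cover exact small offsets
                    // and the other half are log-spaced up to
                    // `relative_attention_max_distance`; offsets with j > i use a
                    // separate range shifted by `num_buckets`.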
let relative_position = (q_start..q_end)
.map(|i| {
(0..kv_len as u32)
.map(|j| {
if i < j {
if j - i < max_exact {
j - i + num_buckets
} else {
let b = f32::log(
(j - i) as f32 / max_exact as f32,
self.relative_attention_max_distance as f32
/ max_exact as f32,
) * (num_buckets - max_exact) as f32;
u32::min(
max_exact + num_buckets + b as u32,
self.relative_attention_num_buckets as u32 - 1,
)
}
} else if i - j < max_exact {
i - j
} else {
let b = f32::log(
(i - j) as f32 / max_exact as f32,
self.relative_attention_max_distance as f32
/ max_exact as f32,
) * (num_buckets - max_exact) as f32;
u32::min(max_exact + b as u32, num_buckets - 1)
}
})
.collect::<Vec<u32>>()
})
.collect::<Vec<Vec<_>>>();
let relative_buckets = Tensor::new(relative_position, q.device())?;
let position_bias = relative_attention_bias
.forward(&relative_buckets)?
.permute((2, 0, 1))?
.unsqueeze(0)?
.to_dtype(scores.dtype())?;
(scores.broadcast_add(&position_bias)?, Some(position_bias))
// TODO: position_bias_masked?
}
},
};
let attn_weights = {
let _enter = self.span_sm.enter();
candle_nn::ops::softmax_last_dim(&scores)?
};
let attn_output = attn_weights.matmul(&v)?;
let attn_output = attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.inner_dim))?;
let attn_output = self.o.forward(&attn_output)?;
Ok((attn_output, position_bias))
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct T5LayerSelfAttention {
self_attention: T5Attention,
layer_norm: T5LayerNorm,
span: tracing::Span,
}
impl T5LayerSelfAttention {
fn load(h: bool, d: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> {
let self_attention = T5Attention::load(h, d, vb.pp("SelfAttention"), cfg)?;
let layer_norm =
T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
Ok(Self {
self_attention,
layer_norm,
span: tracing::span!(tracing::Level::TRACE, "self-attn"),
})
}
fn forward(
&mut self,
xs: &Tensor,
position_bias: Option<&Tensor>,
mask: Option<&Tensor>,
) -> Result<(Tensor, Option<Tensor>)> {
let _enter = self.span.enter();
let normed_xs = self.layer_norm.forward(xs)?;
let (ys, position_bias) =
self.self_attention
.forward(&normed_xs, position_bias, None, mask)?;
let ys = (xs + ys)?;
Ok((ys, position_bias))
}
fn clear_kv_cache(&mut self) {
self.self_attention.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
struct T5LayerCrossAttention {
cross_attention: T5Attention,
layer_norm: T5LayerNorm,
span: tracing::Span,
}
impl T5LayerCrossAttention {
fn load(decoder: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> {
let cross_attention = T5Attention::load(false, decoder, vb.pp("EncDecAttention"), cfg)?;
let layer_norm =
T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
Ok(Self {
cross_attention,
layer_norm,
span: tracing::span!(tracing::Level::TRACE, "cross-attn"),
})
}
fn forward(
&mut self,
hidden_states: &Tensor,
position_bias: Option<&Tensor>,
key_value_states: &Tensor,
) -> Result<(Tensor, Option<Tensor>)> {
let _enter = self.span.enter();
let normed_hidden_states = self.layer_norm.forward(hidden_states)?;
let (ys, position_bias) = self.cross_attention.forward(
&normed_hidden_states,
position_bias,
Some(key_value_states),
None,
)?;
let ys = (hidden_states + ys)?;
Ok((ys, position_bias))
}
fn clear_kv_cache(&mut self) {
self.cross_attention.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
struct T5Block {
self_attn: T5LayerSelfAttention,
cross_attn: Option<T5LayerCrossAttention>,
ff: T5LayerFF,
span: tracing::Span,
}
impl T5Block {
fn load(
has_relative_attention_bias: bool,
decoder: bool,
vb: VarBuilder,
cfg: &Config,
) -> Result<Self> {
let vb = vb.pp("layer");
let self_attn =
T5LayerSelfAttention::load(has_relative_attention_bias, decoder, vb.pp("0"), cfg)?;
let cross_attn = if cfg.is_decoder {
Some(T5LayerCrossAttention::load(decoder, vb.pp("1"), cfg)?)
} else {
None
};
let ff_i = if cross_attn.is_some() { 2 } else { 1 };
let ff = T5LayerFF::load(vb.pp(ff_i.to_string()), cfg)?;
Ok(Self {
self_attn,
cross_attn,
ff,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(
&mut self,
xs: &Tensor,
position_bias: Option<&Tensor>,
encoder_hidden_states: Option<&Tensor>,
) -> Result<(Tensor, Option<Tensor>)> {
let _enter = self.span.enter();
// TODO: Cache masks
let mask = match self.cross_attn.is_some() {
true => {
let mask_len = xs.dim(1)?;
// If the input seq length is 1, no need for a mask, this is also helpful to avoid shape
// issues when using the KV cache in the decoder.
if mask_len <= 1 {
None
} else {
Some(get_mask(mask_len, xs.device())?)
}
}
false => None,
};
let (mut xs, position_bias) = self.self_attn.forward(xs, position_bias, mask.as_ref())?;
// TODO: clamp for f16?
if let Some(cross_attn) = &mut self.cross_attn {
(xs, _) = cross_attn.forward(&xs, None, encoder_hidden_states.unwrap())?;
// TODO: clamp for f16?
}
let xs = self.ff.forward(&xs)?;
// TODO: clamp for f16?
Ok((xs, position_bias))
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache();
self.cross_attn.iter_mut().for_each(|c| c.clear_kv_cache());
}
}
#[derive(Debug, Clone)]
struct T5Stack {
block: Vec<T5Block>,
shared: Arc<Embedding>,
final_layer_norm: T5LayerNorm,
span: tracing::Span,
}
impl T5Stack {
fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> {
let block = (0..cfg.num_layers)
.map(|i| T5Block::load(i == 0, decoder, vb.pp(format!("block.{i}")), cfg))
.collect::<Result<Vec<_>>>()?;
let final_layer_norm = T5LayerNorm::load(
cfg.d_model,
cfg.layer_norm_epsilon,
vb.pp("final_layer_norm"),
)?;
Ok(Self {
block,
shared: shared.clone(),
final_layer_norm,
span: tracing::span!(tracing::Level::TRACE, "stack"),
})
}
fn forward(
&mut self,
input_ids: &Tensor,
encoder_hidden_states: Option<&Tensor>,
) -> Result<Tensor> {
self.forward_dt(input_ids, encoder_hidden_states, None)
}
fn forward_dt(
&mut self,
input_ids: &Tensor,
encoder_hidden_states: Option<&Tensor>,
dtype: Option<DType>,
) -> Result<Tensor> {
let _enter = self.span.enter();
let input_embeds = self.shared.as_ref().forward(input_ids)?;
let input_embeds = match dtype {
None => input_embeds,
Some(dtype) => input_embeds.to_dtype(dtype)?,
};
let mut hidden_states = input_embeds;
let mut position_bias = None;
for block in self.block.iter_mut() {
(hidden_states, position_bias) = block.forward(
&hidden_states,
position_bias.as_ref(),
encoder_hidden_states,
)?
}
self.final_layer_norm.forward(&hidden_states)
}
fn clear_kv_cache(&mut self) {
self.block.iter_mut().for_each(|b| b.clear_kv_cache())
}
}
#[derive(Debug, Clone)]
pub struct T5EncoderModel {
encoder: T5Stack,
device: Device,
span: tracing::Span,
}
impl T5EncoderModel {
pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let shared_vb = if vb.contains_tensor("shared.weight") {
vb.pp("shared")
} else if vb.contains_tensor("decoder.embed_tokens") {
vb.pp("decoder").pp("embed_tokens")
} else {
vb.pp("encoder").pp("embed_tokens")
};
let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?;
let shared = Arc::new(shared);
let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, cfg)?;
Ok(Self {
encoder,
device: vb.device().clone(),
span: tracing::span!(tracing::Level::TRACE, "encoder"),
})
}
pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.encoder.forward(input_ids, None)
}
pub fn forward_dt(&mut self, input_ids: &Tensor, dtype: Option<DType>) -> Result<Tensor> {
let _enter = self.span.enter();
self.encoder.forward_dt(input_ids, None, dtype)
}
pub fn device(&self) -> &Device {
&self.device
}
pub fn clear_kv_cache(&mut self) {
self.encoder.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct T5ForConditionalGeneration {
encoder: T5Stack,
decoder: T5Stack,
d_model: usize,
tie_word_embeddings: bool,
lm_head: Option<Linear>,
shared: Arc<Embedding>,
device: Device,
span_decode: tracing::Span,
span_decode_head: tracing::Span,
}
impl T5ForConditionalGeneration {
pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
assert!(cfg.is_encoder_decoder);
let d_model = cfg.d_model;
let shared_vb = if vb.contains_tensor("shared.weight") {
vb.pp("shared")
} else {
vb.pp("decoder").pp("embed_tokens")
};
let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?;
let shared = Arc::new(shared);
let mut encoder_cfg = cfg.clone();
encoder_cfg.is_decoder = false;
encoder_cfg.use_cache = false;
encoder_cfg.is_encoder_decoder = false;
let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, &encoder_cfg)?;
let mut decoder_cfg = cfg.clone();
decoder_cfg.is_decoder = true;
decoder_cfg.is_encoder_decoder = false;
decoder_cfg.num_layers = cfg.num_decoder_layers.unwrap_or(cfg.num_layers);
let decoder = T5Stack::load(true, vb.pp("decoder"), &shared, &decoder_cfg)?;
let tie_word_embeddings = cfg.tie_word_embeddings;
let lm_head = if tie_word_embeddings {
None
} else {
Some(linear_no_bias(
cfg.d_model,
cfg.vocab_size,
vb.pp("lm_head"),
)?)
};
Ok(Self {
encoder,
decoder,
d_model,
tie_word_embeddings,
lm_head,
shared,
device: vb.device().clone(),
span_decode: tracing::span!(tracing::Level::TRACE, "decode"),
span_decode_head: tracing::span!(tracing::Level::TRACE, "decode-head"),
})
}
pub fn encode(&mut self, input_ids: &Tensor) -> Result<Tensor> {
self.encoder.forward(input_ids, None)
}
pub fn decode(
&mut self,
decoder_input_ids: &Tensor,
encoder_output: &Tensor,
) -> Result<Tensor> {
let _enter = self.span_decode.enter();
let decoder_output = self
.decoder
.forward(decoder_input_ids, Some(encoder_output))?;
let scaling_factor = if self.tie_word_embeddings {
// Rescale output before projecting on vocab
// See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
(self.d_model as f64).sqrt()
} else {
1.0
};
let sequence_output = ((decoder_output
.narrow(1, decoder_output.dim(1)? - 1, 1)?
.squeeze(1)?)
* scaling_factor)?;
let output = {
let _enter = self.span_decode_head.enter();
match self.lm_head {
None => sequence_output.matmul(&self.shared.embeddings().t()?)?,
Some(ref lm_head) => lm_head.forward(&sequence_output)?,
}
};
Ok(output)
}
pub fn forward(&mut self, input_ids: &Tensor, decoder_input_ids: &Tensor) -> Result<Tensor> {
let encoder_output = self.encode(input_ids)?;
self.decode(decoder_input_ids, &encoder_output)
}
pub fn device(&self) -> &Device {
&self.device
}
pub fn clear_kv_cache(&mut self) {
self.encoder.clear_kv_cache();
self.decoder.clear_kv_cache();
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/encodec.rs | candle-transformers/src/models/encodec.rs | //! EnCodec neural audio codec based on the Encodec implementation.
//!
//! See ["High Fidelity Neural Audio Compression"](https://arxiv.org/abs/2210.13438)
//!
//! Based on implementation from [huggingface/transformers](https://github.com/huggingface/transformers/blob/main/src/transformers/models/encodec/modeling_encodec.py)
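//!
//! A minimal round-trip sketch, assuming `vb` is a `VarBuilder` over pretrained EnCodec
//! weights and `pcm` is a waveform tensor of shape `(batch, channels, samples)`:
//!
//! ```ignore
//! use candle_transformers::models::encodec::{Config, Model};
//!
//! let model = Model::new(&Config::default(), vb)?;
//! let codes = model.encode(&pcm)?;   // (batch, num_quantizers, frames)
//! let audio = model.decode(&codes)?; // reconstructed waveform
//! ```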
use candle::{DType, IndexOp, Layout, Module, Result, Shape, Tensor, D};
use candle_nn::{conv1d, Conv1d, ConvTranspose1d, VarBuilder};
// Encodec Model
// https://github.com/huggingface/transformers/blob/main/src/transformers/models/encodec/modeling_encodec.py
#[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Deserialize)]
pub enum NormType {
WeightNorm,
TimeGroupNorm,
None,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Deserialize)]
pub enum PadMode {
Constant,
Reflect,
Replicate,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
pub target_bandwidths: Vec<f64>,
pub sampling_rate: usize,
pub audio_channels: usize,
pub normalize: bool,
pub chunk_length_s: Option<usize>,
pub overlap: Option<usize>,
pub hidden_size: usize,
pub num_filters: usize,
pub num_residual_layers: usize,
pub upsampling_ratios: Vec<usize>,
pub norm_type: NormType,
pub kernel_size: usize,
pub last_kernel_size: usize,
pub residual_kernel_size: usize,
pub dilation_growth_rate: usize,
pub use_causal_conv: bool,
pub pad_mode: PadMode,
pub compress: usize,
pub num_lstm_layers: usize,
pub trim_right_ratio: f64,
pub codebook_size: usize,
pub codebook_dim: Option<usize>,
pub use_conv_shortcut: bool,
}
impl Default for Config {
fn default() -> Self {
Self {
target_bandwidths: vec![1.5, 3.0, 6.0, 12.0, 24.0],
sampling_rate: 24_000,
audio_channels: 1,
normalize: false,
chunk_length_s: None,
overlap: None,
hidden_size: 128,
num_filters: 32,
num_residual_layers: 1,
upsampling_ratios: vec![8, 5, 4, 2],
norm_type: NormType::WeightNorm,
kernel_size: 7,
last_kernel_size: 7,
residual_kernel_size: 3,
dilation_growth_rate: 2,
use_causal_conv: true,
            // This should be PadMode::Reflect, which is currently unsupported in candle,
            // so Replicate is used as an approximation.
pad_mode: PadMode::Replicate,
compress: 2,
num_lstm_layers: 2,
trim_right_ratio: 1.0,
codebook_size: 1024,
codebook_dim: None,
use_conv_shortcut: true,
}
}
}
impl Config {
fn codebook_dim(&self) -> usize {
self.codebook_dim.unwrap_or(self.hidden_size)
}
fn frame_rate(&self) -> usize {
let hop_length: usize = self.upsampling_ratios.iter().product();
self.sampling_rate.div_ceil(hop_length)
}
fn num_quantizers(&self) -> usize {
let num = 1000f64
* self
.target_bandwidths
.last()
.expect("empty target_bandwidths");
(num as usize) / (self.frame_rate() * 10)
}
}
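// Computes the extra padding needed at the end of the input so that the last convolution
// window is complete and no trailing samples are dropped by the strided conv1d.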
fn get_extra_padding_for_conv1d(
xs: &Tensor,
k_size: usize,
stride: usize,
padding_total: usize,
) -> Result<usize> {
let len = xs.dim(D::Minus1)?;
let n_frames = (len + padding_total).saturating_sub(k_size) as f64 / stride as f64 + 1.0;
let ideal_len =
((n_frames.ceil() as usize - 1) * stride + k_size).saturating_sub(padding_total);
Ok(ideal_len.saturating_sub(len))
}
fn pad1d(xs: &Tensor, pad_l: usize, pad_r: usize, mode: PadMode) -> Result<Tensor> {
match mode {
PadMode::Constant => xs.pad_with_zeros(D::Minus1, pad_l, pad_r),
PadMode::Reflect => candle::bail!("pad-mode 'reflect' is not supported"),
PadMode::Replicate => xs.pad_with_same(D::Minus1, pad_l, pad_r),
}
}
// Applies weight norm for inference by recomputing the weight tensor. This
// does not apply to training.
// https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html
pub fn conv1d_weight_norm(
in_c: usize,
out_c: usize,
kernel_size: usize,
config: candle_nn::Conv1dConfig,
vb: VarBuilder,
) -> Result<Conv1d> {
let weight_g = vb.get((out_c, 1, 1), "weight_g")?;
let weight_v = vb.get((out_c, in_c, kernel_size), "weight_v")?;
let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?;
let bias = vb.get(out_c, "bias")?;
Ok(Conv1d::new(weight, Some(bias), config))
}
pub fn conv1d_weight_norm_no_bias(
in_c: usize,
out_c: usize,
kernel_size: usize,
config: candle_nn::Conv1dConfig,
vb: VarBuilder,
) -> Result<Conv1d> {
let weight_g = vb.get((out_c, 1, 1), "weight_g")?;
let weight_v = vb.get((out_c, in_c, kernel_size), "weight_v")?;
let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?;
Ok(Conv1d::new(weight, None, config))
}
pub fn conv_transpose1d_weight_norm(
in_c: usize,
out_c: usize,
kernel_size: usize,
bias: bool,
config: candle_nn::ConvTranspose1dConfig,
vb: VarBuilder,
) -> Result<ConvTranspose1d> {
let weight_g = vb.get((in_c, 1, 1), "weight_g")?;
let weight_v = vb.get((in_c, out_c, kernel_size), "weight_v")?;
let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?;
let bias = if bias {
Some(vb.get(out_c, "bias")?)
} else {
None
};
Ok(ConvTranspose1d::new(weight, bias, config))
}
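// Custom CPU op: for every row of the lhs, returns the index of the rhs row with the
// smallest squared L2 distance (i.e. the nearest codebook entry), parallelized with rayon.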
struct CodebookEncode;
impl candle::CustomOp2 for CodebookEncode {
fn name(&self) -> &'static str {
"cb"
}
fn cpu_fwd(
&self,
lhs_storage: &candle::CpuStorage,
lhs_layout: &Layout,
rhs_storage: &candle::CpuStorage,
rhs_layout: &Layout,
) -> Result<(candle::CpuStorage, Shape)> {
use rayon::prelude::*;
let (lhs_dim1, lhs_dim2) = lhs_layout.shape().dims2()?;
let (rhs_dim1, rhs_dim2) = rhs_layout.shape().dims2()?;
if lhs_dim2 != rhs_dim2 {
candle::bail!("CodebookEncode, mismatch on last dim, {lhs_layout:?} {rhs_layout:?}");
}
if lhs_dim2 == 0 {
candle::bail!("CodebookEncode, empty last dim {lhs_layout:?}")
}
let lhs = match lhs_layout.contiguous_offsets() {
None => candle::bail!("CodebookEncode, lhs has to be contiguous, got {lhs_layout:?}"),
Some((o1, o2)) => {
let slice = lhs_storage.as_slice::<f32>()?;
&slice[o1..o2]
}
};
let rhs = match rhs_layout.contiguous_offsets() {
None => candle::bail!("CodebookEncode, rhs has to be contiguous, got {rhs_layout:?}"),
Some((o1, o2)) => {
let slice = rhs_storage.as_slice::<f32>()?;
&slice[o1..o2]
}
};
let dst = (0..lhs_dim1)
.into_par_iter()
.map(|idx1| {
let mut where_min = 0;
let mut min_dist = f32::INFINITY;
let lhs = &lhs[idx1 * lhs_dim2..(idx1 + 1) * lhs_dim2];
for idx2 in 0..rhs_dim1 {
let rhs = &rhs[idx2 * rhs_dim2..(idx2 + 1) * rhs_dim2];
let mut dist = 0f32;
for (a, b) in lhs.iter().zip(rhs.iter()) {
dist += (a - b) * (a - b)
}
if dist < min_dist {
min_dist = dist;
where_min = idx2;
}
}
where_min as u32
})
.collect();
let storage = candle::WithDType::to_cpu_storage_owned(dst);
Ok((storage, (lhs_dim1,).into()))
}
}
// https://github.com/huggingface/transformers/blob/abaca9f9432a84cfaa95531de4c72334f38a42f2/src/transformers/models/encodec/modeling_encodec.py#L340
#[allow(unused)]
#[derive(Clone, Debug)]
pub struct EuclideanCodebook {
inited: Tensor,
cluster_size: Tensor,
embed: candle_nn::Embedding,
embed_avg: Tensor,
c2: Tensor,
}
impl EuclideanCodebook {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let inited = vb.get(1, "inited")?;
let cluster_size = vb.get(cfg.codebook_size, "cluster_size")?;
let e_shape = (cfg.codebook_size, cfg.codebook_dim());
let embed = vb.get(e_shape, "embed")?;
let c2 = ((&embed * &embed)?.sum(D::Minus1)? / 2.0)?;
let embed_avg = vb.get(e_shape, "embed_avg")?;
Ok(Self {
inited,
cluster_size,
embed: candle_nn::Embedding::new(embed, cfg.codebook_dim()),
embed_avg,
c2,
})
}
pub fn encode_slow(&self, xs: &Tensor) -> Result<Tensor> {
let mut target_shape = xs.dims().to_vec();
target_shape.pop();
let xs = xs.flatten_to(D::Minus2)?;
let _ = xs.dims2()?;
let dot_prod = xs.matmul(&self.embed.embeddings().t()?)?;
let codes = self.c2.broadcast_sub(&dot_prod)?.argmin(D::Minus1)?;
codes.reshape(target_shape)
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let mut target_shape = xs.dims().to_vec();
target_shape.pop();
let xs = xs.flatten_to(D::Minus2)?;
let _ = xs.dims2()?;
let codes = Tensor::apply_op2(&xs, self.embed.embeddings(), CodebookEncode)?;
codes.reshape(target_shape)
}
pub fn decode(&self, embed_ind: &Tensor) -> Result<Tensor> {
let quantize = self.embed.forward(embed_ind)?;
Ok(quantize)
}
}
#[derive(Clone, Debug)]
pub struct VectorQuantization {
codebook: EuclideanCodebook,
}
impl VectorQuantization {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let codebook = EuclideanCodebook::new(cfg, vb.pp("codebook"))?;
Ok(Self { codebook })
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.transpose(1, 2)?;
self.codebook.encode_slow(&xs)
}
pub fn decode(&self, embed_ind: &Tensor) -> Result<Tensor> {
let quantize = self.codebook.decode(embed_ind)?;
let quantize = quantize.transpose(1, 2)?;
Ok(quantize)
}
}
#[derive(Clone, Debug)]
pub struct ResidualVectorQuantizer {
layers: Vec<VectorQuantization>,
dtype: DType,
}
impl ResidualVectorQuantizer {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = &vb.pp("layers");
let layers = (0..cfg.num_quantizers())
.map(|i| VectorQuantization::new(cfg, vb.pp(i)))
.collect::<Result<Vec<_>>>()?;
Ok(Self {
layers,
dtype: vb.dtype(),
})
}
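    // Residual quantization: each layer quantizes the residual left by the previous layers.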
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let mut codes = Vec::with_capacity(self.layers.len());
let mut residual = xs.clone();
for layer in self.layers.iter() {
let indices = layer.encode(&residual)?;
let quantized = layer.decode(&indices)?;
residual = (residual - quantized)?;
codes.push(indices)
}
Tensor::stack(&codes, 0)
}
pub fn decode(&self, codes: &Tensor) -> Result<Tensor> {
let mut quantized_out = Tensor::zeros((), self.dtype, codes.device())?;
let ncodes = codes.dim(0)?;
if ncodes > self.layers.len() {
candle::bail!(
"codes shape {:?} does not match the number of quantization layers {}",
codes.shape(),
self.layers.len()
)
}
for (i, layer) in self.layers.iter().take(ncodes).enumerate() {
let quantized = layer.decode(&codes.i(i)?)?;
quantized_out = quantized.broadcast_add(&quantized_out)?;
}
Ok(quantized_out)
}
}
// https://github.com/huggingface/transformers/blob/abaca9f9432a84cfaa95531de4c72334f38a42f2/src/transformers/models/encodec/modeling_encodec.py#L226
#[derive(Clone, Debug)]
pub struct EncodecLSTM {
layers: Vec<candle_nn::LSTM>,
}
impl EncodecLSTM {
pub fn new(dim: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = &vb.pp("lstm");
let mut layers = vec![];
for layer_idx in 0..cfg.num_lstm_layers {
let config = candle_nn::LSTMConfig {
layer_idx,
..Default::default()
};
let lstm = candle_nn::lstm(dim, dim, config, vb.clone())?;
layers.push(lstm)
}
Ok(Self { layers })
}
}
impl Module for EncodecLSTM {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
use candle_nn::RNN;
// This is different from the Python transformers version as candle LSTM is batch first.
let xs = xs.t()?;
let residual = &xs;
let mut xs = xs.clone();
for layer in self.layers.iter() {
let states = layer.seq(&xs)?;
xs = layer.states_to_tensor(&states)?;
}
let xs = (xs + residual)?.t()?;
Ok(xs)
}
}
#[derive(Clone, Debug)]
pub struct EncodecConvTranspose1d {
conv: ConvTranspose1d,
}
impl EncodecConvTranspose1d {
fn new(
in_c: usize,
out_c: usize,
k: usize,
stride: usize,
_cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let cfg = candle_nn::ConvTranspose1dConfig {
stride,
..Default::default()
};
let conv = conv_transpose1d_weight_norm(in_c, out_c, k, true, cfg, vb.pp("conv"))?;
Ok(Self { conv })
}
}
impl Module for EncodecConvTranspose1d {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.conv)
}
}
#[derive(Clone, Debug)]
pub struct EncodecConv1d {
causal: bool,
conv: Conv1d,
norm: Option<candle_nn::GroupNorm>,
pad_mode: PadMode,
}
impl EncodecConv1d {
pub fn new(
in_c: usize,
out_c: usize,
kernel_size: usize,
stride: usize,
dilation: usize,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let conv = match cfg.norm_type {
NormType::WeightNorm => conv1d_weight_norm(
in_c,
out_c,
kernel_size,
candle_nn::Conv1dConfig {
stride,
dilation,
..Default::default()
},
vb.pp("conv"),
)?,
NormType::None | NormType::TimeGroupNorm => conv1d(
in_c,
out_c,
kernel_size,
candle_nn::Conv1dConfig {
padding: 0,
stride,
groups: 1,
dilation: 1,
cudnn_fwd_algo: None,
},
vb.pp("conv"),
)?,
};
let norm = match cfg.norm_type {
NormType::None | NormType::WeightNorm => None,
NormType::TimeGroupNorm => {
let gn = candle_nn::group_norm(1, out_c, 1e-5, vb.pp("norm"))?;
Some(gn)
}
};
Ok(Self {
causal: cfg.use_causal_conv,
conv,
norm,
pad_mode: cfg.pad_mode,
})
}
}
impl Module for EncodecConv1d {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (_b, _t, _c) = xs.dims3()?;
let k_size = self.conv.weight().dim(D::Minus1)?;
let conv_cfg = self.conv.config();
// Effective kernel size with dilations.
let k_size = (k_size - 1) * conv_cfg.dilation + 1;
let padding_total = k_size - conv_cfg.stride;
let extra_padding =
get_extra_padding_for_conv1d(xs, k_size, conv_cfg.stride, padding_total)?;
let xs = if self.causal {
pad1d(xs, padding_total, extra_padding, self.pad_mode)?
} else {
let padding_right = padding_total / 2;
let padding_left = padding_total - padding_right;
pad1d(
xs,
padding_left,
padding_right + extra_padding,
self.pad_mode,
)?
};
let xs = self.conv.forward(&xs)?;
match &self.norm {
None => Ok(xs),
Some(norm) => xs.apply(norm),
}
}
}
#[derive(Clone, Debug)]
pub struct EncodecResnetBlock {
block_conv1: EncodecConv1d,
block_conv2: EncodecConv1d,
shortcut: Option<EncodecConv1d>,
}
impl EncodecResnetBlock {
pub fn new(
dim: usize,
(dilation1, dilation2): (usize, usize),
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let h = dim / cfg.compress;
let mut layer = Layer::new(vb.pp("block"));
// TODO: Apply dilations!
layer.inc();
let block_conv1 = EncodecConv1d::new(
dim,
h,
cfg.residual_kernel_size,
1,
dilation1,
cfg,
layer.next(),
)?;
layer.inc();
let block_conv2 = EncodecConv1d::new(h, dim, 1, 1, dilation2, cfg, layer.next())?;
let shortcut = if cfg.use_conv_shortcut {
let conv = EncodecConv1d::new(dim, dim, 1, 1, 1, cfg, vb.pp("shortcut"))?;
Some(conv)
} else {
None
};
Ok(Self {
block_conv1,
block_conv2,
shortcut,
})
}
}
impl Module for EncodecResnetBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let residual = xs.clone();
let xs = xs.elu(1.)?;
let xs = self.block_conv1.forward(&xs)?;
let xs = xs.elu(1.)?;
let xs = self.block_conv2.forward(&xs)?;
let xs = match &self.shortcut {
None => (xs + residual)?,
Some(shortcut) => xs.add(&shortcut.forward(&residual)?)?,
};
Ok(xs)
}
}
struct Layer<'a> {
vb: VarBuilder<'a>,
cnt: usize,
}
impl<'a> Layer<'a> {
fn new(vb: VarBuilder<'a>) -> Self {
Self { vb, cnt: 0 }
}
fn inc(&mut self) {
self.cnt += 1;
}
fn next(&mut self) -> VarBuilder<'_> {
let vb = self.vb.pp(self.cnt.to_string());
self.cnt += 1;
vb
}
}
#[derive(Clone, Debug)]
pub struct Encoder {
init_conv: EncodecConv1d,
sampling_layers: Vec<(Vec<EncodecResnetBlock>, EncodecConv1d)>,
final_lstm: EncodecLSTM,
final_conv: EncodecConv1d,
}
impl Encoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let mut layer = Layer::new(vb.pp("layers"));
let init_conv = EncodecConv1d::new(
cfg.audio_channels,
cfg.num_filters,
cfg.kernel_size,
1,
1,
cfg,
layer.next(),
)?;
let mut sampling_layers = vec![];
let mut scaling = 1;
for &ratio in cfg.upsampling_ratios.iter().rev() {
let current_scale = scaling * cfg.num_filters;
let mut resnets = vec![];
for j in 0..(cfg.num_residual_layers as u32) {
let resnet = EncodecResnetBlock::new(
current_scale,
(cfg.dilation_growth_rate.pow(j), 1),
cfg,
layer.next(),
)?;
resnets.push(resnet)
}
layer.inc(); // ELU
let conv1d = EncodecConv1d::new(
current_scale,
current_scale * 2,
ratio * 2,
ratio,
1,
cfg,
layer.next(),
)?;
sampling_layers.push((resnets, conv1d));
scaling *= 2;
}
let final_lstm = EncodecLSTM::new(cfg.num_filters * scaling, cfg, layer.next())?;
layer.inc(); // ELU
let final_conv = EncodecConv1d::new(
cfg.num_filters * scaling,
cfg.hidden_size,
cfg.last_kernel_size,
1,
1,
cfg,
layer.next(),
)?;
Ok(Self {
init_conv,
sampling_layers,
final_conv,
final_lstm,
})
}
}
impl Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.init_conv)?;
for (resnets, conv) in self.sampling_layers.iter() {
for resnet in resnets.iter() {
xs = xs.apply(resnet)?;
}
xs = xs.elu(1.0)?.apply(conv)?;
}
xs.apply(&self.final_lstm)?
.elu(1.0)?
.apply(&self.final_conv)
}
}
#[derive(Clone, Debug)]
pub struct Decoder {
init_conv: EncodecConv1d,
init_lstm: EncodecLSTM,
sampling_layers: Vec<(EncodecConvTranspose1d, Vec<EncodecResnetBlock>)>,
final_conv: EncodecConv1d,
}
impl Decoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let mut layer = Layer::new(vb.pp("layers"));
let mut scaling = usize::pow(2, cfg.upsampling_ratios.len() as u32);
let init_conv = EncodecConv1d::new(
cfg.hidden_size,
cfg.num_filters * scaling,
cfg.last_kernel_size,
1,
1,
cfg,
layer.next(),
)?;
let init_lstm = EncodecLSTM::new(cfg.num_filters * scaling, cfg, layer.next())?;
let mut sampling_layers = vec![];
for &ratio in cfg.upsampling_ratios.iter() {
let current_scale = scaling * cfg.num_filters;
layer.inc(); // ELU
let conv1d = EncodecConvTranspose1d::new(
current_scale,
current_scale / 2,
ratio * 2,
ratio,
cfg,
layer.next(),
)?;
let mut resnets = vec![];
for j in 0..(cfg.num_residual_layers as u32) {
let resnet = EncodecResnetBlock::new(
current_scale / 2,
(cfg.dilation_growth_rate.pow(j), 1),
cfg,
layer.next(),
)?;
resnets.push(resnet)
}
sampling_layers.push((conv1d, resnets));
scaling /= 2;
}
layer.inc(); // ELU
let final_conv = EncodecConv1d::new(
cfg.num_filters,
cfg.audio_channels,
cfg.last_kernel_size,
1,
1,
cfg,
layer.next(),
)?;
Ok(Self {
init_conv,
init_lstm,
sampling_layers,
final_conv,
})
}
}
impl Module for Decoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.init_conv)?.apply(&self.init_lstm)?;
for (conv, resnets) in self.sampling_layers.iter() {
xs = xs.elu(1.)?.apply(conv)?;
for resnet in resnets.iter() {
xs = xs.apply(resnet)?
}
}
xs.elu(1.)?.apply(&self.final_conv)
}
}
#[derive(Debug)]
pub struct Model {
encoder: Encoder,
decoder: Decoder,
quantizer: ResidualVectorQuantizer,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let decoder = Decoder::new(cfg, vb.pp("decoder"))?;
let quantizer = ResidualVectorQuantizer::new(cfg, vb.pp("quantizer"))?;
Ok(Self {
encoder,
decoder,
quantizer,
})
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.encoder.forward(xs)?;
let codes = self.quantizer.encode(&xs)?;
codes.transpose(0, 1)
}
pub fn decode(&self, codes: &Tensor) -> Result<Tensor> {
let (_b_sz, _codebooks, _seqlen) = codes.dims3()?;
let codes = codes.transpose(0, 1)?;
let embeddings = self.quantizer.decode(&codes)?;
let outputs = self.decoder.forward(&embeddings)?;
Ok(outputs)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/starcoder2.rs | candle-transformers/src/models/starcoder2.rs | //! StarCoder model implementation with quantization support.
//!
//! StarCoder2 is a large language model optimized for code generation.
//! This implementation provides quantization for reduced memory and compute.
//!
//! Key characteristics:
//! - Causal self-attention mechanism
//! - Grouped-query attention (GQA)
//! - LayerNorm for normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for 8-bit quantization
//!
//! References:
//! - 📝 [StarCoder Paper](https://arxiv.org/abs/2305.06161)
//! - 🤗 [Model Card](https://huggingface.co/bigcode/starcoder)
//!
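//! A minimal generation-step sketch, assuming `config_bytes` holds a Hugging Face style
//! `config.json`, `vb` is a `VarBuilder` over the weights, and `input_ids` has shape
//! `(batch, seq_len)`:
//!
//! ```ignore
//! use candle_transformers::models::starcoder2::{Config, Model};
//!
//! let cfg: Config = serde_json::from_slice(&config_bytes)?;
//! let mut model = Model::new(&cfg, vb)?;
//! // Logits for the last position only: shape (batch, 1, vocab_size).
//! let logits = model.forward(&input_ids, 0)?;
//! ```
//!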
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{layer_norm, linear_b, LayerNorm, Linear, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
vocab_size: usize,
hidden_size: usize,
intermediate_size: usize,
num_hidden_layers: usize,
num_attention_heads: usize,
num_key_value_heads: usize,
hidden_act: candle_nn::Activation,
max_position_embeddings: usize,
norm_epsilon: f64,
rope_theta: f64,
use_bias: bool,
sliding_window: Option<usize>,
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
let last_dim = xs.dim(D::Minus1)?;
let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
c_fc: Linear,
c_proj: Linear,
act: candle_nn::Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let (h_size, i_size) = (cfg.hidden_size, cfg.intermediate_size);
let c_fc = linear_b(h_size, i_size, cfg.use_bias, vb.pp("c_fc"))?;
let c_proj = linear_b(i_size, h_size, cfg.use_bias, vb.pp("c_proj"))?;
Ok(Self {
c_fc,
c_proj,
act: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.c_fc)?.apply(&self.act)?.apply(&self.c_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let b = cfg.use_bias;
let q_proj = linear_b(hidden_sz, num_heads * head_dim, b, vb.pp("q_proj"))?;
let k_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("k_proj"))?;
let v_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("v_proj"))?;
let o_proj = linear_b(num_heads * head_dim, hidden_sz, b, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?;
let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?;
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&value_states)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: LayerNorm,
post_attention_layernorm: LayerNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
layer_norm(cfg.hidden_size, cfg.norm_epsilon, vb.pp("input_layernorm"))?;
let post_attention_layernorm = layer_norm(
cfg.hidden_size,
cfg.norm_epsilon,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: LayerNorm,
lm_head: Linear,
sliding_window: Option<usize>,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = layer_norm(cfg.hidden_size, cfg.norm_epsilon, vb_m.pp("norm"))?;
let lm_head = candle_nn::Linear::new(embed_tokens.embeddings().clone(), None);
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
sliding_window: cfg.sliding_window,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
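        // Causal mask with an optional sliding window: position i may attend to position j
        // when j <= i and i - j <= sliding_window. When no window is configured, any value
        // larger than tgt_len (hence the arbitrary `+ 42`) makes the window check a no-op.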
let sliding_window = self.sliding_window.unwrap_or(tgt_len + 42);
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
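    // The causal mask is only materialized when more than one query position is processed;
    // during single-token decoding every cached position is visible, so no mask is needed.
    // Only the hidden state of the last position is normalized and projected to logits.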
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/falcon.rs | candle-transformers/src/models/falcon.rs | //! Falcon language model inference implementation
//!
//! See ["Falcon: a new approach to large language models"](https://huggingface.co/blog/falcon)
//!
//! Based on implementation from [Huggingface Transformers](https://github.com/huggingface/transformers/blob/main/src/transformers/models/falcon)
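//!
//! ## Example (illustrative sketch)
//!
//! A minimal loading/inference sketch, not taken from this repository's examples: the
//! checkpoint path, dtype and device below are assumptions to be adapted by the caller.
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::falcon::{Config, Falcon};
//!
//! let device = Device::Cpu;
//! let cfg = Config::falcon7b();
//! cfg.validate()?;
//! // Hypothetical local safetensors checkpoint; adjust the path to your setup.
//! let vb = unsafe {
//!     VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)?
//! };
//! let mut model = Falcon::load(vb, cfg)?;
//! // A batch of one prompt made of already-tokenized ids.
//! let input_ids = Tensor::new(&[[101u32, 102, 103]], &device)?;
//! let logits = model.forward(&input_ids)?; // logits for the last position only
//! ```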
use candle::{DType, Device, Result, Tensor, D};
use candle_nn::{embedding, linear_b as linear, Embedding, LayerNorm, Linear, Module, VarBuilder};
use serde::Deserialize;
const MAX_SEQ_LEN: usize = 5000;
fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<LayerNorm> {
let (weight, bias) = match (vb.get(size, "weight"), vb.get(size, "bias")) {
(Ok(weight), Ok(bias)) => (weight, bias),
(Err(err), _) | (_, Err(err)) => {
if let (Ok(weight), Ok(bias)) = (vb.get(size, "gamma"), vb.get(size, "beta")) {
(weight, bias)
} else {
return Err(err);
}
}
};
Ok(LayerNorm::new(weight, bias, eps))
}
// https://raw.githubusercontent.com/huggingface/transformers/030c863aaa0165e98352b61697430bf69bf33755/src/transformers/models/falcon/configuration_falcon.py
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub layer_norm_epsilon: f64,
pub initializer_range: f64,
pub use_cache: bool,
pub bos_token_id: u32,
pub eos_token_id: u32,
pub hidden_dropout: f64,
pub attention_dropout: f64,
pub n_head_kv: Option<usize>,
pub alibi: bool,
pub new_decoder_architecture: bool,
pub multi_query: bool,
pub parallel_attn: bool,
pub bias: bool,
}
impl Default for Config {
fn default() -> Self {
Self {
vocab_size: 65024,
hidden_size: 4544,
num_hidden_layers: 32,
num_attention_heads: 71,
layer_norm_epsilon: 1e-5,
initializer_range: 0.02,
use_cache: true,
bos_token_id: 11,
eos_token_id: 11,
hidden_dropout: 0.0,
attention_dropout: 0.0,
n_head_kv: None,
alibi: false,
new_decoder_architecture: false,
multi_query: true,
parallel_attn: true,
bias: false,
}
}
}
impl Config {
pub fn validate(&self) -> Result<()> {
if self.alibi {
candle::bail!("alibi is not supported");
}
if self.new_decoder_architecture {
candle::bail!("new_decoder_architecture is not supported");
}
if self.n_head_kv.is_some() {
candle::bail!("n_head_kv is not supported");
}
Ok(())
}
// https://huggingface.co/tiiuae/falcon-7b/blob/main/config.json
pub fn falcon7b() -> Self {
        // This is currently on par with the defaults: the defaults come from the Python default
        // arguments for the config initialization, whereas the values below come from the json config.
Self {
vocab_size: 65024,
hidden_size: 4544,
num_hidden_layers: 32,
num_attention_heads: 71,
layer_norm_epsilon: 1e-5,
initializer_range: 0.02,
use_cache: true,
bos_token_id: 11,
eos_token_id: 11,
hidden_dropout: 0.,
attention_dropout: 0.,
n_head_kv: None,
alibi: false,
new_decoder_architecture: false,
multi_query: true,
parallel_attn: true,
bias: false,
}
}
fn head_dim(&self) -> usize {
self.hidden_size / self.num_attention_heads
}
fn rotary(&self) -> bool {
!self.alibi
}
}
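// Splits the last dimension into two halves and maps (x1, x2) to (-x2, x1); combined with the
// elementwise cos/sin products below, this implements the rotary position embedding.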
fn rotate_half(x: &Tensor) -> Result<Tensor> {
let l = x.dim(D::Minus1)?;
let x1 = x.narrow(D::Minus1, 0, l / 2)?;
let x2 = x.narrow(D::Minus1, l / 2, l - l / 2)?;
let x21 = Tensor::cat(&[&x2.neg()?, &x1], D::Minus1)?;
Ok(x21)
}
#[derive(Debug, Clone)]
struct FalconRotaryEmbedding {
inv_freq: Tensor,
cache: Option<(usize, Tensor, Tensor)>,
}
impl FalconRotaryEmbedding {
fn load(device: &Device, cfg: &Config) -> Result<Self> {
let head_dim = cfg.head_dim();
let inv_freq: Vec<_> = (0..head_dim)
.step_by(2)
.map(|i| 1f32 / 10000f32.powf(i as f32 / head_dim as f32))
.collect();
Ok(Self {
inv_freq: Tensor::new(inv_freq.as_slice(), device)?,
cache: None,
})
}
fn cos_sin(
&mut self,
seq_len: usize,
device: &Device,
dtype: DType,
) -> Result<(Tensor, Tensor)> {
match &self.cache {
Some((s, cos, sin)) if *s == seq_len => {
return Ok((cos.clone(), sin.clone()));
}
_ => {}
}
let t = Tensor::arange(0, seq_len as u32, device)?.to_dtype(dtype)?;
let inv_freq = self.inv_freq.to_dtype(dtype)?;
let freqs = t.unsqueeze(1)?.matmul(&inv_freq.unsqueeze(0)?)?;
let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
let cos = emb.cos()?;
let sin = emb.sin()?;
self.cache = Some((seq_len, cos.clone(), sin.clone()));
Ok((cos, sin))
}
fn forward(
&mut self,
query: &Tensor,
key: &Tensor,
past_kv_len: usize,
) -> Result<(Tensor, Tensor)> {
let (_batch, seq_len, _head_dim) = query.dims3()?;
let (cos, sin) = self.cos_sin(MAX_SEQ_LEN, query.device(), query.dtype())?;
let cos = cos.narrow(0, past_kv_len, seq_len)?;
let sin = sin.narrow(0, past_kv_len, seq_len)?;
let qs = (query.broadcast_mul(&cos)? + &rotate_half(query)?.broadcast_mul(&sin)?)?;
let ks = (key.broadcast_mul(&cos)? + &rotate_half(key)?.broadcast_mul(&sin)?)?;
Ok((qs, ks))
}
}
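// Replaces the entries of `on_false` selected by `mask` with the scalar `on_true` (used to push
// masked attention scores towards -inf before the softmax).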
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?
.to_dtype(on_false.dtype())?
.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Clone)]
struct FalconAttention {
query_key_value: Linear,
dense: Linear,
maybe_rotary: Option<FalconRotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
inv_norm_factor: f64,
multi_query: bool,
use_cache: bool,
num_heads: usize,
head_dim: usize,
n_head_kv: usize,
}
impl FalconAttention {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let maybe_rotary = if cfg.rotary() {
let rotary = FalconRotaryEmbedding::load(vb.device(), cfg)?;
Some(rotary)
} else {
None
};
let head_dim = cfg.head_dim();
let hidden_size = cfg.hidden_size;
let qkv_out_dim = if cfg.multi_query {
hidden_size + 2 * head_dim
} else {
3 * hidden_size
};
let query_key_value = linear(hidden_size, qkv_out_dim, cfg.bias, vb.pp("query_key_value"))?;
let dense = linear(hidden_size, hidden_size, cfg.bias, vb.pp("dense"))?;
Ok(Self {
query_key_value,
dense,
maybe_rotary,
kv_cache: None,
inv_norm_factor: 1. / (head_dim as f64).sqrt(),
multi_query: cfg.multi_query,
use_cache: cfg.use_cache,
num_heads: cfg.num_attention_heads,
n_head_kv: cfg.n_head_kv.unwrap_or(1),
head_dim,
})
}
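    // The fused `query_key_value` projection packs the queries for all heads plus the key/value
    // heads. With multi-query attention a single key/value head is shared by every query head,
    // so the last two slots of the head dimension hold the shared K and V.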
fn split_heads(&self, fused_qkv: &Tensor) -> Result<(Tensor, Tensor, Tensor)> {
let (b_sz, seq_len, _) = fused_qkv.dims3()?;
if !self.multi_query {
let fused_qkv = fused_qkv.reshape((b_sz, seq_len, self.num_heads, 3, self.head_dim))?;
let q = fused_qkv.narrow(D::Minus2, 0, 1)?.squeeze(D::Minus2)?;
let k = fused_qkv.narrow(D::Minus2, 1, 1)?.squeeze(D::Minus2)?;
let v = fused_qkv.narrow(D::Minus2, 2, 1)?.squeeze(D::Minus2)?;
Ok((q, k, v))
} else {
let fused_qkv =
fused_qkv.reshape((b_sz, seq_len, self.num_heads + 2, self.head_dim))?;
let d = fused_qkv.dim(D::Minus2)?;
let q = fused_qkv.narrow(D::Minus2, 0, d - 2)?;
let k = fused_qkv.narrow(D::Minus2, d - 2, 1)?;
let v = fused_qkv.narrow(D::Minus2, d - 1, 1)?;
Ok((q, k, v))
}
}
fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, past_kv_len: usize) -> Result<Tensor> {
let fused_qkv = self.query_key_value.forward(x)?;
let head_dim = self.head_dim;
let (query, key, value) = self.split_heads(&fused_qkv)?;
let (b_sz, seq_len, _, _) = query.dims4()?;
let query = query
.transpose(1, 2)?
.reshape((b_sz * self.num_heads, seq_len, head_dim))?;
let key = key
.transpose(1, 2)?
.reshape((b_sz * self.n_head_kv, seq_len, head_dim))?;
let value = value
.transpose(1, 2)?
.reshape((b_sz * self.n_head_kv, seq_len, head_dim))?;
let (query, key) = if let Some(r) = &mut self.maybe_rotary {
r.forward(&query, &key, past_kv_len)?
} else {
(query, key)
};
let (mut key, mut value) = (key, value);
if self.use_cache {
if let Some((cache_k, cache_v)) = &self.kv_cache {
// TODO: we could trim the tensors to MAX_SEQ_LEN so that this would work for
// arbitrarily large sizes.
key = Tensor::cat(&[cache_k, &key], 1)?.contiguous()?;
value = Tensor::cat(&[cache_v, &value], 1)?.contiguous()?;
}
self.kv_cache = Some((key.clone(), value.clone()))
}
let query = query.reshape((b_sz * self.num_heads, seq_len, head_dim))?;
let all_len = past_kv_len + seq_len;
let key = key.reshape((b_sz * self.n_head_kv, all_len, head_dim))?;
let value = value.reshape((b_sz * self.n_head_kv, all_len, head_dim))?;
let (key, value) = if self.n_head_kv == 1 {
(
key.broadcast_as((b_sz * self.num_heads, all_len, head_dim))?,
value.broadcast_as((b_sz * self.num_heads, all_len, head_dim))?,
)
} else {
(key, value)
};
        // Only the non-alibi, non-flash-attention case is handled here.
let attention_scores = (query.matmul(&key.t()?)? * self.inv_norm_factor)?;
let attention_scores = match mask {
None => attention_scores,
Some(mask) => {
let mask = masked_fill(&mask.to_dtype(DType::F32)?, mask, -1e9)?
.to_dtype(query.dtype())?;
attention_scores.broadcast_add(&mask.squeeze(1)?)?
}
};
let attention_scores =
candle_nn::ops::softmax(&attention_scores.to_dtype(DType::F32)?, D::Minus1)?
.to_dtype(x.dtype())?;
let attn_output = attention_scores
.matmul(&value)?
.reshape((b_sz, self.num_heads, seq_len, head_dim))?
.transpose(1, 2)?
.reshape((b_sz, seq_len, self.num_heads * head_dim))?;
let attn_output = self.dense.forward(&attn_output)?;
Ok(attn_output)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct FalconMlp {
dense_h_to_4h: Linear,
dense_4h_to_h: Linear,
}
impl FalconMlp {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let h = cfg.hidden_size;
let b = cfg.bias;
let dense_h_to_4h = linear(h, 4 * h, b, vb.pp("dense_h_to_4h"))?;
let dense_4h_to_h = linear(4 * h, h, b, vb.pp("dense_4h_to_h"))?;
Ok(Self {
dense_h_to_4h,
dense_4h_to_h,
})
}
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.dense_h_to_4h.forward(x)?.gelu()?;
let x = self.dense_4h_to_h.forward(&x)?;
Ok(x)
}
}
#[derive(Debug, Clone)]
struct FalconDecoderLayer {
inp_layernorm: LayerNorm,
self_attention: FalconAttention,
post_attention_layernorm: Option<LayerNorm>,
mlp: FalconMlp,
parallel_attn: bool,
}
impl FalconDecoderLayer {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let mlp = FalconMlp::load(vb.pp("mlp"), cfg)?;
let inp_layernorm = layer_norm(
cfg.hidden_size,
cfg.layer_norm_epsilon,
vb.pp("input_layernorm"),
)?;
let self_attention = FalconAttention::load(vb.pp("self_attention"), cfg)?;
let post_attention_layernorm = if cfg.parallel_attn {
None
} else {
let ln = layer_norm(
cfg.hidden_size,
cfg.layer_norm_epsilon,
vb.pp("post_attention_layernorm"),
)?;
Some(ln)
};
Ok(Self {
inp_layernorm,
self_attention,
post_attention_layernorm,
mlp,
parallel_attn: cfg.parallel_attn,
})
}
fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, past_kv_len: usize) -> Result<Tensor> {
let residual = x.clone();
let ln_attn = self.inp_layernorm.forward(x)?;
let attn_output = self.self_attention.forward(&ln_attn, mask, past_kv_len)?;
let (residual, ln_mlp) = match &self.post_attention_layernorm {
None => (residual, ln_attn),
Some(pal) => {
// This should include some dropout.
let residual = (&attn_output + &residual)?;
let ln_mlp = pal.forward(&residual)?;
(residual, ln_mlp)
}
};
let mlp_output = self.mlp.forward(&ln_mlp)?;
let mlp_output = if self.parallel_attn {
(mlp_output + attn_output)?
} else {
mlp_output
};
let output = (mlp_output + residual)?;
Ok(output)
}
pub fn clear_kv_cache(&mut self) {
self.self_attention.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Falcon {
word_embeddings: Embedding,
blocks: Vec<FalconDecoderLayer>,
ln_f: LayerNorm,
lm_head: Linear,
config: Config,
}
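// Upper-triangular mask: entry (i, j) is 1 when j > i, i.e. for the future positions that the
// attention later fills with a large negative value.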
fn make_causal_mask(t: usize) -> Result<Tensor> {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), &Device::Cpu)?;
Ok(mask)
}
fn prepare_attn_mask(b_sz: usize, seq_len: usize) -> Result<Tensor> {
// let mask = Tensor::ones((b_sz, seq_len), DType::U32, &Device::Cpu)?;
let mask = make_causal_mask(seq_len)?;
let mask = mask.broadcast_as((b_sz, 1, seq_len, seq_len))?;
Ok(mask)
}
impl Falcon {
pub fn config(&self) -> &Config {
&self.config
}
pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> {
let word_embeddings = embedding(
cfg.vocab_size,
cfg.hidden_size,
vb.pp("transformer.word_embeddings"),
)?;
let blocks = (0..cfg.num_hidden_layers)
.map(|i| FalconDecoderLayer::load(vb.pp(format!("transformer.h.{i}")), &cfg))
.collect::<Result<Vec<_>>>()?;
let ln_f = layer_norm(
cfg.hidden_size,
cfg.layer_norm_epsilon,
vb.pp("transformer.ln_f"),
)?;
let lm_head = linear(cfg.hidden_size, cfg.vocab_size, false, vb.pp("lm_head"))?;
Ok(Self {
word_embeddings,
blocks,
ln_f,
lm_head,
config: cfg,
})
}
pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> {
let (b_sz, seq_len) = input_ids.dims2()?;
let mut hidden_state = self.word_embeddings.forward(input_ids)?;
let past_kv_len = match &self.blocks[0].self_attention.kv_cache {
Some((k, _)) => k.dim(1)?,
None => 0,
};
let causal_mask = if seq_len <= 1 {
None
} else {
Some(prepare_attn_mask(b_sz, seq_len)?.to_device(input_ids.device())?)
};
for block in self.blocks.iter_mut() {
hidden_state = block.forward(&hidden_state, causal_mask.as_ref(), past_kv_len)?;
}
let hidden_state = self.ln_f.forward(&hidden_state)?;
let hidden_state = hidden_state.narrow(1, seq_len - 1, 1)?;
let logits = self.lm_head.forward(&hidden_state)?.squeeze(1)?;
Ok(logits)
}
pub fn clear_kv_cache(&mut self) {
for block in self.blocks.iter_mut() {
block.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/bigcode.rs | candle-transformers/src/models/bigcode.rs | //! BigCode implementation in Rust based on the GPT-BigCode model.
//!
//! [StarCoder/BigCode](https://huggingface.co/bigcode/starcoderbase-1b) is an LLM
//! specialized in code generation. The initial model was trained on 80
//! programming languages. See "StarCoder: A State-of-the-Art LLM for Code", Mukherjee et al. 2023
//! - [Arxiv](https://arxiv.org/abs/2305.06161)
//! - [GitHub](https://github.com/bigcode-project/starcoder)
//!
//! ## Running some example
//!
//! ```bash
//! cargo run --example bigcode --release -- --prompt "fn fact(n: u64) -> u64"
//!
//! > fn fact(n: u64) -> u64 {
//! > if n == 0 {
//! > 1
//! > } else {
//! > n * fact(n - 1)
//! > }
//! > }
//! ```
//!
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{embedding, linear_b as linear, Embedding, LayerNorm, Linear, Module, VarBuilder};
fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<LayerNorm> {
let weight = vb.get(size, "weight")?;
let bias = vb.get(size, "bias")?;
Ok(LayerNorm::new(weight, bias, eps))
}
fn make_causal_mask(t: usize, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j <= i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), device)?;
Ok(mask)
}
#[derive(Debug)]
pub struct Config {
pub vocab_size: usize,
// max_position_embeddings aka n_positions
pub max_position_embeddings: usize,
// num_hidden_layers aka n_layer
pub num_hidden_layers: usize,
// hidden_size aka n_embd
pub hidden_size: usize,
pub layer_norm_epsilon: f64,
pub n_inner: Option<usize>,
// num_attention_heads aka n_head
pub num_attention_heads: usize,
pub multi_query: bool,
pub use_cache: bool,
}
impl Config {
#[allow(dead_code)]
pub fn starcoder_1b() -> Self {
Self {
vocab_size: 49152,
max_position_embeddings: 8192,
num_hidden_layers: 24,
hidden_size: 2048,
layer_norm_epsilon: 1e-5,
n_inner: Some(8192),
num_attention_heads: 16,
multi_query: true,
use_cache: true,
}
}
#[allow(dead_code)]
pub fn starcoder_3b() -> Self {
Self {
vocab_size: 49152,
max_position_embeddings: 8192,
num_hidden_layers: 36,
hidden_size: 2816,
layer_norm_epsilon: 1e-5,
n_inner: Some(11264),
num_attention_heads: 22,
multi_query: true,
use_cache: true,
}
}
#[allow(dead_code)]
pub fn starcoder_7b() -> Self {
Self {
vocab_size: 49152,
max_position_embeddings: 8192,
num_hidden_layers: 42,
hidden_size: 4096,
layer_norm_epsilon: 1e-5,
n_inner: Some(16384),
num_attention_heads: 32,
multi_query: true,
use_cache: true,
}
}
#[allow(dead_code)]
pub fn starcoder() -> Self {
Self {
vocab_size: 49152,
max_position_embeddings: 8192,
num_hidden_layers: 40,
hidden_size: 6144,
layer_norm_epsilon: 1e-5,
n_inner: Some(24576),
num_attention_heads: 48,
multi_query: true,
use_cache: true,
}
}
}
struct Attention {
c_attn: Linear,
c_proj: Linear,
kv_cache: Option<Tensor>,
use_cache: bool,
embed_dim: usize,
kv_dim: usize,
num_heads: usize,
head_dim: usize,
multi_query: bool,
}
impl Attention {
pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let head_dim = hidden_size / cfg.num_attention_heads;
let kv_heads = if cfg.multi_query {
1
} else {
cfg.num_attention_heads
};
let kv_dim = kv_heads * head_dim;
let c_attn = linear(hidden_size, hidden_size + 2 * kv_dim, true, vb.pp("c_attn"))?;
let c_proj = linear(hidden_size, hidden_size, true, vb.pp("c_proj"))?;
Ok(Self {
c_proj,
c_attn,
embed_dim: hidden_size,
kv_cache: None,
use_cache: cfg.use_cache,
kv_dim,
head_dim,
num_heads: cfg.num_attention_heads,
multi_query: cfg.multi_query,
})
}
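    // Attention over (optionally multi-query) heads. With multi-query attention the single
    // key/value head is shared across all query heads, so the queries are flattened to
    // (b_sz, query_len * num_heads, head_dim) and scored against the shared key/value.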
fn attn(
&self,
query: &Tensor,
key: &Tensor,
value: &Tensor,
attention_mask: &Tensor,
) -> Result<Tensor> {
if query.dtype() != DType::F32 {
// If we start supporting f16 models, we may need the upcasting scaling bits.
// https://github.com/huggingface/transformers/blob/a0042379269bea9182c1f87e6b2eee4ba4c8cce8/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py#L133
candle::bail!("upcasting is not supported {:?}", query.dtype())
}
let scale_factor = 1f64 / (self.head_dim as f64).sqrt();
let initial_query_shape = query.shape();
let key_len = key.dim(D::Minus1)?;
let (query, key, attn_shape, attn_view) = if self.multi_query {
let (b_sz, query_len, _) = query.dims3()?;
let query = query.reshape((b_sz, query_len * self.num_heads, self.head_dim))?;
let attn_shape = (b_sz, query_len, self.num_heads, key_len);
let attn_view = (b_sz, query_len * self.num_heads, key_len);
(query, key.clone(), attn_shape, attn_view)
} else {
let (b_sz, _num_heads, query_len, _head_dim) = query.dims4()?;
let query = query.reshape((b_sz, query_len * self.num_heads, self.head_dim))?;
let key = key.reshape((b_sz * self.num_heads, self.head_dim, key_len))?;
let attn_shape = (b_sz, self.num_heads, query_len, key_len);
let attn_view = (b_sz * self.num_heads, query_len, key_len);
(query, key, attn_shape, attn_view)
};
let attn_weights =
(query.matmul(&key.contiguous()?)? * scale_factor)?.reshape(attn_shape)?;
let attention_mask = attention_mask.broadcast_as(attn_shape)?;
let mask_value =
Tensor::new(f32::NEG_INFINITY, query.device())?.broadcast_as(attn_shape)?;
let attn_weights = attention_mask.where_cond(&attn_weights, &mask_value)?;
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let value = value.contiguous()?;
let attn_output = if self.multi_query {
attn_weights
.reshape(attn_view)?
.matmul(&value)?
.reshape(initial_query_shape)?
} else {
attn_weights.matmul(&value)?
};
Ok(attn_output)
}
fn forward(&mut self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let qkv = self.c_attn.forward(hidden_states)?;
let (query, key_value) = if self.multi_query {
let query = qkv.i((.., .., ..self.embed_dim))?;
let key_value = qkv.i((.., .., self.embed_dim..self.embed_dim + 2 * self.kv_dim))?;
(query, key_value)
} else {
let mut dims = qkv.dims().to_vec();
dims.pop();
dims.push(self.embed_dim);
dims.push(self.head_dim * 3);
let qkv = qkv.reshape(dims)?.transpose(1, 2)?;
let query = qkv.i((.., .., .., ..self.head_dim))?;
let key_value = qkv.i((.., .., .., self.head_dim..3 * self.head_dim))?;
(query, key_value)
};
let mut key_value = key_value;
if self.use_cache {
if let Some(kv_cache) = &self.kv_cache {
// TODO: we could trim the tensors to MAX_SEQ_LEN so that this would work for
// arbitrarily large sizes.
key_value = Tensor::cat(&[kv_cache, &key_value], D::Minus2)?.contiguous()?;
}
self.kv_cache = Some(key_value.clone())
}
let key = key_value.narrow(D::Minus1, 0, self.head_dim)?;
let value = key_value.narrow(D::Minus1, self.head_dim, self.head_dim)?;
let attn_output = self.attn(&query, &key.t()?, &value, attention_mask)?;
let attn_output = if self.multi_query {
attn_output
} else {
attn_output
.transpose(1, 2)?
.reshape(hidden_states.shape())?
};
let attn_output = self.c_proj.forward(&attn_output)?;
Ok(attn_output)
}
}
struct Mlp {
c_fc: Linear,
c_proj: Linear,
}
impl Mlp {
fn load(inner_dim: usize, vb: VarBuilder, cfg: &Config) -> Result<Self> {
let c_fc = linear(cfg.hidden_size, inner_dim, true, vb.pp("c_fc"))?;
let c_proj = linear(inner_dim, cfg.hidden_size, true, vb.pp("c_proj"))?;
Ok(Self { c_fc, c_proj })
}
fn forward(&mut self, hidden_states: &Tensor) -> Result<Tensor> {
let hidden_states = self.c_fc.forward(hidden_states)?.gelu()?;
let hidden_states = self.c_proj.forward(&hidden_states)?;
Ok(hidden_states)
}
}
// TODO: Add cross-attention?
struct Block {
ln_1: LayerNorm,
attn: Attention,
ln_2: LayerNorm,
mlp: Mlp,
}
impl Block {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let inner_dim = cfg.n_inner.unwrap_or(4 * hidden_size);
let ln_1 = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb.pp("ln_1"))?;
let attn = Attention::load(vb.pp("attn"), cfg)?;
let ln_2 = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb.pp("ln_2"))?;
let mlp = Mlp::load(inner_dim, vb.pp("mlp"), cfg)?;
Ok(Self {
ln_1,
attn,
ln_2,
mlp,
})
}
fn forward(&mut self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let residual = hidden_states;
let hidden_states = self.ln_1.forward(hidden_states)?;
let attn_outputs = self.attn.forward(&hidden_states, attention_mask)?;
let hidden_states = (&attn_outputs + residual)?;
let residual = &hidden_states;
let hidden_states = self.ln_2.forward(&hidden_states)?;
let hidden_states = self.mlp.forward(&hidden_states)?;
let hidden_states = (&hidden_states + residual)?;
Ok(hidden_states)
}
}
pub struct GPTBigCode {
wte: Embedding,
wpe: Embedding,
blocks: Vec<Block>,
ln_f: LayerNorm,
lm_head: Linear,
bias: Tensor,
config: Config,
}
impl GPTBigCode {
pub fn config(&self) -> &Config {
&self.config
}
pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let vb_t = vb.pp("transformer");
let wte = embedding(cfg.vocab_size, hidden_size, vb_t.pp("wte"))?;
let wpe = embedding(cfg.max_position_embeddings, hidden_size, vb_t.pp("wpe"))?;
let blocks = (0..cfg.num_hidden_layers)
.map(|i| Block::load(vb_t.pp(format!("h.{i}")), &cfg))
.collect::<Result<Vec<_>>>()?;
let ln_f = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb_t.pp("ln_f"))?;
let lm_head = linear(hidden_size, cfg.vocab_size, false, vb_t.pp("wte"))?;
let bias = make_causal_mask(cfg.max_position_embeddings, vb.device())?;
Ok(Self {
wte,
wpe,
blocks,
lm_head,
ln_f,
bias,
config: cfg,
})
}
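    // The precomputed causal mask `bias` is sliced to rows [past_len, key_len) and columns
    // [0, key_len) so that cached generation only scores valid past positions.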
pub fn forward(&mut self, input_ids: &Tensor, past_len: usize) -> Result<Tensor> {
let dev = input_ids.device();
let (b_sz, seq_len) = input_ids.dims2()?;
let key_len = past_len + seq_len;
let attention_mask = self.bias.i((past_len..key_len, ..key_len))?.unsqueeze(0)?;
// MQA models: (batch_size, query_length, n_heads, key_length)
// MHA models: (batch_size, n_heads, query_length, key_length)
let seq_len_dim = if self.config.multi_query { 2 } else { 1 };
let attention_mask = attention_mask.unsqueeze(seq_len_dim)?;
let position_ids = Tensor::arange(past_len as u32, (past_len + seq_len) as u32, dev)?;
let position_ids = position_ids.unsqueeze(0)?.broadcast_as((b_sz, seq_len))?;
let input_embeds = self.wte.forward(input_ids)?;
let position_embeds = self.wpe.forward(&position_ids)?;
let mut hidden_states = (&input_embeds + &position_embeds)?;
for block in self.blocks.iter_mut() {
hidden_states = block.forward(&hidden_states, &attention_mask)?;
}
let hidden_states = self.ln_f.forward(&hidden_states)?;
let hidden_states = hidden_states
.reshape((b_sz, seq_len, self.config.hidden_size))?
.narrow(1, seq_len - 1, 1)?;
let logits = self.lm_head.forward(&hidden_states)?.squeeze(1)?;
Ok(logits)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/olmo.rs | candle-transformers/src/models/olmo.rs | //! OLMo (Open Language Model) implementation
//!
//! See OLMo model details at:
//! - [Hugging Face](https://huggingface.co/allenai/OLMo)
//! - [OLMo Paper](https://allenai.org/olmo)
//!
//! The model uses:
//! - RoPE embeddings
//! - Multi-head causal attention (no sliding window in this implementation)
//! - Transformer architecture
//!
//! References:
//! - [Hugging Face Implementation](https://huggingface.co/allenai/OLMo)
//! - [OLMo Paper](https://allenai.org/olmo)
//!
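//! ## Generation loop (illustrative sketch)
//!
//! The loop below sketches how `Model::forward` and its KV cache are intended to be driven;
//! weight loading, tokenization and sampling are elided, and `cfg`, `vb`, `device`,
//! `prompt_tokens`, `max_new_tokens` and the `argmax` helper are assumptions supplied by the
//! caller.
//!
//! ```ignore
//! use candle::Tensor;
//! use candle_transformers::models::olmo::Model;
//!
//! let mut model = Model::new(&cfg, vb)?;
//! let mut tokens: Vec<u32> = prompt_tokens.clone();
//! model.clear_kv_cache();
//! for step in 0..max_new_tokens {
//!     // The whole prompt is fed on the first step, then only the latest token; the offset
//!     // passed to forward is the number of positions already present in the KV cache.
//!     let start = if step == 0 { 0 } else { tokens.len() - 1 };
//!     let input = Tensor::new(&tokens[start..], &device)?.unsqueeze(0)?;
//!     let logits = model.forward(&input, start)?; // (1, 1, vocab_size)
//!     tokens.push(argmax(&logits)?);
//! }
//! ```
//!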
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{linear_b, linear_no_bias, Activation, LayerNorm, Linear, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub attention_bias: bool,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub hidden_act: candle_nn::Activation,
pub max_position_embeddings: usize,
pub rope_theta: f64,
pub tie_word_embeddings: bool,
pub clip_qkv: Option<f64>,
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
qkv_clip: Option<f64>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let b = cfg.attention_bias;
let qkv_clip = cfg.clip_qkv;
let q_proj = linear_b(hidden_sz, num_heads * head_dim, b, vb.pp("q_proj"))?;
let k_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("k_proj"))?;
let v_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("v_proj"))?;
let o_proj = linear_b(num_heads * head_dim, hidden_sz, b, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
qkv_clip,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
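        // When `clip_qkv` is configured, the projected query/key/value activations are clamped
        // into [-clip_qkv, clip_qkv] before attention.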
let (query_states, key_states, value_states) = match &self.qkv_clip {
None => (query_states, key_states, value_states),
Some(qkv_clip) => {
let query_states = Tensor::clamp(&query_states, -qkv_clip, *qkv_clip)?;
let key_states = Tensor::clamp(&key_states, -qkv_clip, *qkv_clip)?;
let value_states = Tensor::clamp(&value_states, -qkv_clip, *qkv_clip)?;
(query_states, key_states, value_states)
}
};
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: LayerNorm,
post_attention_layernorm: LayerNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
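        // OLMo uses non-parametric layer norms: the scale is a constant all-ones vector and
        // there is no bias, so nothing is read from the checkpoint for these layers.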
let ln_weight = Tensor::ones(cfg.hidden_size, vb.dtype(), vb.device())?;
let input_layernorm = LayerNorm::new_no_bias(ln_weight.clone(), 1e-5);
let post_attention_layernorm = LayerNorm::new_no_bias(ln_weight.clone(), 1e-5);
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: LayerNorm,
lm_head: Linear,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let ln_weight = Tensor::ones(cfg.hidden_size, vb.dtype(), vb.device())?;
let norm = LayerNorm::new_no_bias(ln_weight, 1e-5);
let lm_head = if cfg.tie_word_embeddings {
Linear::new(embed_tokens.embeddings().clone(), None)
} else {
linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
};
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
// Sliding window mask?
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_rwkv_v5.rs | candle-transformers/src/models/quantized_rwkv_v5.rs | //! RWKV v5 model implementation with quantization support.
//!
//! RWKV v5 is an attention-free language model optimized for efficiency.
//! This implementation provides quantization for reduced memory and compute.
//!
//! Key characteristics:
//! - Linear attention mechanism
//! - GroupNorm layer normalization
//! - Time-mixing layers
//! - State-based sequential processing
//! - Support for 8-bit quantization
//!
//! References:
//! - [RWKV Model](https://github.com/BlinkDL/RWKV-LM)
//! - [RWKV v5 Architecture](https://www.rwkv.com/v5)
//!
use crate::{
quantized_nn::{layer_norm, linear_no_bias as linear, Embedding, Linear},
quantized_var_builder::VarBuilder,
};
use candle::{IndexOp, Result, Tensor};
use candle_nn::{GroupNorm, LayerNorm, Module};
pub use crate::models::rwkv_v5::{Config, State, Tokenizer};
#[derive(Debug, Clone)]
struct SelfAttention {
key: Linear,
receptance: Linear,
value: Linear,
gate: Linear,
output: Linear,
ln_x: candle_nn::GroupNorm,
time_mix_key: Tensor,
time_mix_value: Tensor,
time_mix_receptance: Tensor,
time_decay: Tensor,
time_faaaa: Tensor,
time_mix_gate: Tensor,
layer_id: usize,
n_attn_heads: usize,
}
impl SelfAttention {
fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let attn_hidden_size = cfg.attention_hidden_size;
let key = linear(hidden_size, attn_hidden_size, vb.pp("key"))?;
let receptance = linear(hidden_size, attn_hidden_size, vb.pp("receptance"))?;
let value = linear(hidden_size, attn_hidden_size, vb.pp("value"))?;
let gate = linear(hidden_size, attn_hidden_size, vb.pp("gate"))?;
let output = linear(attn_hidden_size, hidden_size, vb.pp("output"))?;
let vb_x = vb.pp("ln_x");
let ln_x_weight = vb_x.get(hidden_size, "weight")?.dequantize(vb.device())?;
let ln_x_bias = vb_x.get(hidden_size, "bias")?.dequantize(vb.device())?;
let ln_x = GroupNorm::new(
ln_x_weight,
ln_x_bias,
hidden_size,
hidden_size / cfg.head_size,
1e-5,
)?;
let time_mix_key = vb
.get((1, 1, cfg.hidden_size), "time_mix_key")?
.dequantize(vb.device())?;
let time_mix_value = vb
.get((1, 1, cfg.hidden_size), "time_mix_value")?
.dequantize(vb.device())?;
let time_mix_receptance = vb
.get((1, 1, cfg.hidden_size), "time_mix_receptance")?
.dequantize(vb.device())?;
let n_attn_heads = cfg.hidden_size / cfg.head_size;
let time_decay = vb
.get((n_attn_heads, cfg.head_size), "time_decay")?
.dequantize(vb.device())?;
let time_faaaa = vb
.get((n_attn_heads, cfg.head_size), "time_faaaa")?
.dequantize(vb.device())?;
let time_mix_gate = vb
.get((1, 1, cfg.hidden_size), "time_mix_gate")?
.dequantize(vb.device())?;
Ok(Self {
key,
value,
receptance,
gate,
output,
ln_x,
time_mix_key,
time_mix_value,
time_mix_receptance,
time_decay,
time_faaaa,
time_mix_gate,
layer_id,
n_attn_heads,
})
}
pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let h = self.time_decay.dim(0)?;
let (b, t, s) = xs.dims3()?;
let s = s / h;
let (receptance, key, value, gate) = {
// extract key-value
let shifted = state.per_layer[self.layer_id].extract_key_value.clone();
let shifted = if shifted.rank() == 2 {
shifted.unsqueeze(1)?
} else {
shifted
};
let key = ((xs * &self.time_mix_key)? + &shifted * (1.0 - &self.time_mix_key)?)?;
let value = ((xs * &self.time_mix_value)? + &shifted * (1.0 - &self.time_mix_value)?)?;
let receptance = ((xs * &self.time_mix_receptance)?
+ &shifted * (1.0 - &self.time_mix_receptance)?)?;
let gate = ((xs * &self.time_mix_gate)? + &shifted * (1.0 - &self.time_mix_gate)?)?;
let key = self.key.forward(&key)?;
let value = self.value.forward(&value)?;
let receptance = self.receptance.forward(&receptance)?;
let gate = candle_nn::ops::silu(&self.gate.forward(&gate)?)?;
state.per_layer[self.layer_id].extract_key_value = xs.i((.., t - 1))?;
(receptance, key, value, gate)
};
// linear attention
let mut state_ = state.per_layer[self.layer_id].linear_attention.clone();
let key = key.reshape((b, t, h, s))?.permute((0, 2, 3, 1))?;
let value = value.reshape((b, t, h, s))?.transpose(1, 2)?;
let receptance = receptance.reshape((b, t, h, s))?.transpose(1, 2)?;
let time_decay = self
.time_decay
.exp()?
.neg()?
.exp()?
.reshape(((), 1, 1))?
.reshape((self.n_attn_heads, (), 1))?;
let time_faaaa =
self.time_faaaa
.reshape(((), 1, 1))?
.reshape((self.n_attn_heads, (), 1))?;
let mut out: Vec<Tensor> = Vec::with_capacity(t);
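        // Recurrent form of the RWKV-v5 linear attention: at every step the outer product
        // k_t v_t^T is added to the running state (decayed by exp(-exp(time_decay))), and the
        // output mixes the fresh outer product, boosted by `time_faaaa`, with that state.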
for t_ in 0..t {
let rt = receptance.i((.., .., t_..t_ + 1))?.contiguous()?;
let kt = key.i((.., .., .., t_..t_ + 1))?.contiguous()?;
let vt = value.i((.., .., t_..t_ + 1))?.contiguous()?;
let at = kt.matmul(&vt)?;
let rhs = (time_faaaa.broadcast_mul(&at)? + &state_)?;
let out_ = rt.matmul(&rhs)?.squeeze(2)?;
state_ = (&at + time_decay.broadcast_mul(&state_))?;
out.push(out_)
}
let out = Tensor::cat(&out, 1)?.reshape((b * t, h * s, 1))?;
let out = out.apply(&self.ln_x)?.reshape((b, t, h * s))?;
let out = (out * gate)?.apply(&self.output)?;
state.per_layer[self.layer_id].linear_attention = state_;
Ok(out)
}
}
#[derive(Debug, Clone)]
struct FeedForward {
time_mix_key: Tensor,
time_mix_receptance: Tensor,
key: Linear,
receptance: Linear,
value: Linear,
layer_id: usize,
}
impl FeedForward {
fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let int_size = cfg
.intermediate_size
.unwrap_or(((cfg.hidden_size as f64 * 3.5) as usize) / 32 * 32);
let key = linear(cfg.hidden_size, int_size, vb.pp("key"))?;
let receptance = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("receptance"))?;
let value = linear(int_size, cfg.hidden_size, vb.pp("value"))?;
let time_mix_key = vb
.get((1, 1, cfg.hidden_size), "time_mix_key")?
.dequantize(vb.device())?;
let time_mix_receptance = vb
.get((1, 1, cfg.hidden_size), "time_mix_receptance")?
.dequantize(vb.device())?;
Ok(Self {
key,
receptance,
value,
time_mix_key,
time_mix_receptance,
layer_id,
})
}
fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let shifted = &state.per_layer[self.layer_id].feed_forward;
let key = (xs.broadcast_mul(&self.time_mix_key)?
+ shifted.broadcast_mul(&(1.0 - &self.time_mix_key)?)?)?;
let receptance = (xs.broadcast_mul(&self.time_mix_receptance)?
+ shifted.broadcast_mul(&(1.0 - &self.time_mix_receptance)?)?)?;
let key = key.apply(&self.key)?.relu()?.sqr()?;
let value = key.apply(&self.value)?;
let receptance = candle_nn::ops::sigmoid(&receptance.apply(&self.receptance)?)?;
state.per_layer[self.layer_id].feed_forward = xs.i((.., xs.dim(1)? - 1))?;
let xs = (receptance * value)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct Block {
pre_ln: Option<LayerNorm>,
ln1: LayerNorm,
ln2: LayerNorm,
attention: SelfAttention,
feed_forward: FeedForward,
}
impl Block {
fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln1 = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln1"))?;
let ln2 = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln2"))?;
let pre_ln = if layer_id == 0 {
let ln = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("pre_ln"))?;
Some(ln)
} else {
None
};
let attention = SelfAttention::new(layer_id, cfg, vb.pp("attention"))?;
let feed_forward = FeedForward::new(layer_id, cfg, vb.pp("feed_forward"))?;
Ok(Self {
pre_ln,
ln1,
ln2,
attention,
feed_forward,
})
}
fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let xs = match self.pre_ln.as_ref() {
None => xs.clone(),
Some(pre_ln) => xs.apply(pre_ln)?,
};
let attention = self.attention.forward(&xs.apply(&self.ln1)?, state)?;
let xs = (xs + attention)?;
let feed_forward = self.feed_forward.forward(&xs.apply(&self.ln2)?, state)?;
let xs = (xs + feed_forward)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
pub struct Model {
embeddings: Embedding,
blocks: Vec<Block>,
ln_out: LayerNorm,
head: Linear,
rescale_every: usize,
layers_are_rescaled: bool,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("rwkv");
let embeddings = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embeddings"))?;
let mut blocks = Vec::with_capacity(cfg.num_hidden_layers);
let vb_b = vb_m.pp("blocks");
for block_index in 0..cfg.num_hidden_layers {
let block = Block::new(block_index, cfg, vb_b.pp(block_index))?;
blocks.push(block)
}
let ln_out = layer_norm(cfg.hidden_size, 1e-5, vb_m.pp("ln_out"))?;
let head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("head"))?;
Ok(Self {
embeddings,
blocks,
ln_out,
head,
rescale_every: cfg.rescale_every,
            layers_are_rescaled: false, // This seems to only happen for the f16/bf16 dtypes.
})
}
pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let (_b_size, _seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.embeddings)?;
for (block_idx, block) in self.blocks.iter().enumerate() {
xs = block.forward(&xs, state)?;
if self.layers_are_rescaled && (block_idx + 1) % self.rescale_every == 0 {
xs = (xs / 2.)?
}
}
let xs = xs.apply(&self.ln_out)?.apply(&self.head)?;
state.pos += 1;
Ok(xs)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_rwkv_v6.rs | candle-transformers/src/models/quantized_rwkv_v6.rs | //! RWKV v6 model implementation with quantization support.
//!
//! RWKV is a linear attention model that combines the efficiency of RNNs
//! with the parallelizable training of Transformers. Version 6 builds on previous
//! versions with further optimizations.
//!
//! Key characteristics:
//! - Linear attention mechanism
//! - Time mixing layers
//! - Channel mixing layers
//! - RMSNorm for normalization
//! - Support for 8-bit quantization
//!
//! References:
//! - [RWKV Architecture](https://github.com/BlinkDL/RWKV-LM)
//! - [RWKV v6 Release](https://huggingface.co/BlinkDL/rwkv-6)
//!
use crate::{
quantized_nn::{layer_norm, linear_no_bias as linear, Embedding, Linear},
quantized_var_builder::VarBuilder,
};
use candle::{IndexOp, Result, Tensor};
use candle_nn::{GroupNorm, LayerNorm, Module};
pub use crate::models::rwkv_v5::{Config, State, Tokenizer};
#[derive(Debug, Clone)]
struct SelfAttention {
key: Linear,
receptance: Linear,
value: Linear,
gate: Linear,
output: Linear,
ln_x: candle_nn::GroupNorm,
time_mix_x: Tensor,
time_mix_w: Tensor,
time_mix_key: Tensor,
time_mix_value: Tensor,
time_mix_receptance: Tensor,
time_decay: Tensor,
time_faaaa: Tensor,
time_mix_gate: Tensor,
time_decay_w1: Tensor,
time_decay_w2: Tensor,
time_mix_w1: Tensor,
time_mix_w2: Tensor,
layer_id: usize,
n_attn_heads: usize,
}
impl SelfAttention {
fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let attn_hidden_size = cfg.attention_hidden_size;
let key = linear(hidden_size, attn_hidden_size, vb.pp("key"))?;
let receptance = linear(hidden_size, attn_hidden_size, vb.pp("receptance"))?;
let value = linear(hidden_size, attn_hidden_size, vb.pp("value"))?;
let gate = linear(hidden_size, attn_hidden_size, vb.pp("gate"))?;
let output = linear(attn_hidden_size, hidden_size, vb.pp("output"))?;
let vb_x = vb.pp("ln_x");
let ln_x_weight = vb_x.get(hidden_size, "weight")?.dequantize(vb.device())?;
let ln_x_bias = vb_x.get(hidden_size, "bias")?.dequantize(vb.device())?;
let ln_x = GroupNorm::new(
ln_x_weight,
ln_x_bias,
hidden_size,
hidden_size / cfg.head_size,
1e-5,
)?;
let time_mix_x = vb
.get((1, 1, cfg.hidden_size), "time_mix_x")?
.dequantize(vb.device())?;
let time_mix_w = vb
.get((1, 1, cfg.hidden_size), "time_mix_w")?
.dequantize(vb.device())?;
let time_mix_key = vb
.get((1, 1, cfg.hidden_size), "time_mix_key")?
.dequantize(vb.device())?;
let time_mix_value = vb
.get((1, 1, cfg.hidden_size), "time_mix_value")?
.dequantize(vb.device())?;
let time_mix_receptance = vb
.get((1, 1, cfg.hidden_size), "time_mix_receptance")?
.dequantize(vb.device())?;
let n_attn_heads = cfg.hidden_size / cfg.head_size;
let time_decay = vb
.get((1, 1, cfg.hidden_size), "time_decay")?
.dequantize(vb.device())?;
let time_faaaa = vb
.get((n_attn_heads, cfg.head_size), "time_faaaa")?
.dequantize(vb.device())?;
let time_mix_gate = vb
.get((1, 1, cfg.hidden_size), "time_mix_gate")?
.dequantize(vb.device())?;
let time_decay_w1 = vb
.get((cfg.hidden_size, n_attn_heads * 2), "time_decay_w1")?
.dequantize(vb.device())?;
let time_decay_w2 = vb
.get((n_attn_heads * 2, cfg.hidden_size), "time_decay_w2")?
.dequantize(vb.device())?;
let time_mix_w1 = vb
.get((cfg.hidden_size, n_attn_heads * 5), "time_mix_w1")?
.dequantize(vb.device())?;
let time_mix_w2 = vb
.get((5, n_attn_heads, cfg.hidden_size), "time_mix_w2")?
.dequantize(vb.device())?;
Ok(Self {
key,
value,
receptance,
gate,
output,
ln_x,
time_mix_x,
time_mix_w,
time_mix_key,
time_mix_value,
time_mix_receptance,
time_decay,
time_faaaa,
time_mix_gate,
time_decay_w1,
time_decay_w2,
time_mix_w1,
time_mix_w2,
layer_id,
n_attn_heads,
})
}
pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let h = self.n_attn_heads;
let (b, t, s) = xs.dims3()?;
let s = s / h;
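        // RWKV-v6 token shift: unlike v5's static coefficients, the five mixing coefficients
        // (w, k, v, r, g) are data-dependent, produced by the low-rank time_mix_w1/time_mix_w2
        // projection of the shifted input, and the decay `w` itself goes through a low-rank
        // time_decay_w1/time_decay_w2 path.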
let (receptance, key, value, gate, w) = {
// extract key-value
let shifted = state.per_layer[self.layer_id].extract_key_value.clone();
let shifted = if shifted.rank() == 2 {
shifted.unsqueeze(1)?
} else {
shifted
};
let sx = (&shifted - xs)?;
let xxx = (xs + &sx * &self.time_mix_x)?;
let xxx = xxx
.broadcast_matmul(&self.time_mix_w1)?
.tanh()?
.reshape((b * t, 5, ()))?
.transpose(0, 1)?;
let xxx = xxx.matmul(&self.time_mix_w2)?.reshape((5, b, t, ()))?;
let (mw, mk, mv, mr, mg) = (xxx.i(0)?, xxx.i(1)?, xxx.i(2)?, xxx.i(3)?, xxx.i(4)?);
let xw = (xs + &sx * (&self.time_mix_w + &mw)?)?;
let xk = (xs + &sx * (&self.time_mix_key + &mk)?)?;
let xv = (xs + &sx * (&self.time_mix_value + &mv)?)?;
let xr = (xs + &sx * (&self.time_mix_receptance + &mr)?)?;
let xg = (xs + &sx * (&self.time_mix_gate + &mg)?)?;
let w = (&self.time_decay
+ xw.broadcast_matmul(&self.time_decay_w1)?
.tanh()?
.broadcast_matmul(&self.time_decay_w2)?)?
.reshape(((), 1, 1))?
.reshape((self.n_attn_heads, (), 1))?;
let key = self.key.forward(&xk)?;
let value = self.value.forward(&xv)?;
let receptance = self.receptance.forward(&xr)?;
let gate = candle_nn::ops::silu(&self.gate.forward(&xg)?)?;
state.per_layer[self.layer_id].extract_key_value = xs.i((.., t - 1))?;
(receptance, key, value, gate, w)
};
// linear attention
let mut state_ = state.per_layer[self.layer_id].linear_attention.clone();
let key = key.reshape((b, t, h, s))?.permute((0, 2, 3, 1))?;
let value = value.reshape((b, t, h, s))?.transpose(1, 2)?;
let receptance = receptance.reshape((b, t, h, s))?.transpose(1, 2)?;
let w = w.exp()?.neg()?.exp()?;
let time_faaaa =
self.time_faaaa
.reshape(((), 1, 1))?
.reshape((self.n_attn_heads, (), 1))?;
let mut out: Vec<Tensor> = Vec::with_capacity(t);
for t_ in 0..t {
let rt = receptance.i((.., .., t_..t_ + 1))?.contiguous()?;
let kt = key.i((.., .., .., t_..t_ + 1))?.contiguous()?;
let vt = value.i((.., .., t_..t_ + 1))?.contiguous()?;
let at = kt.matmul(&vt)?;
let rhs = (time_faaaa.broadcast_mul(&at)? + &state_)?;
let out_ = rt.matmul(&rhs)?.squeeze(2)?;
state_ = (&at + w.broadcast_mul(&state_))?;
out.push(out_)
}
let out = Tensor::cat(&out, 1)?.reshape((b * t, h * s, 1))?;
let out = out.apply(&self.ln_x)?.reshape((b, t, h * s))?;
let out = (out * gate)?.apply(&self.output)?;
state.per_layer[self.layer_id].linear_attention = state_;
Ok(out)
}
}
#[derive(Debug, Clone)]
struct FeedForward {
time_mix_key: Tensor,
time_mix_receptance: Tensor,
key: Linear,
receptance: Linear,
value: Linear,
layer_id: usize,
}
impl FeedForward {
fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let int_size = cfg
.intermediate_size
.unwrap_or(((cfg.hidden_size as f64 * 3.5) as usize) / 32 * 32);
let key = linear(cfg.hidden_size, int_size, vb.pp("key"))?;
let receptance = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("receptance"))?;
let value = linear(int_size, cfg.hidden_size, vb.pp("value"))?;
let time_mix_key = vb
.get((1, 1, cfg.hidden_size), "time_mix_key")?
.dequantize(vb.device())?;
let time_mix_receptance = vb
.get((1, 1, cfg.hidden_size), "time_mix_receptance")?
.dequantize(vb.device())?;
Ok(Self {
key,
receptance,
value,
time_mix_key,
time_mix_receptance,
layer_id,
})
}
fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let shifted = state.per_layer[self.layer_id]
.feed_forward
.broadcast_sub(xs)?;
let key = (xs + shifted.broadcast_mul(&self.time_mix_key)?)?;
let receptance = (xs + shifted.broadcast_mul(&self.time_mix_receptance)?)?;
let key = key.apply(&self.key)?.relu()?.sqr()?;
let value = key.apply(&self.value)?;
let receptance = candle_nn::ops::sigmoid(&receptance.apply(&self.receptance)?)?;
state.per_layer[self.layer_id].feed_forward = xs.i((.., xs.dim(1)? - 1))?;
let xs = (receptance * value)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct Block {
pre_ln: Option<LayerNorm>,
ln1: LayerNorm,
ln2: LayerNorm,
attention: SelfAttention,
feed_forward: FeedForward,
}
impl Block {
fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln1 = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln1"))?;
let ln2 = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln2"))?;
let pre_ln = if layer_id == 0 {
let ln = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("pre_ln"))?;
Some(ln)
} else {
None
};
let attention = SelfAttention::new(layer_id, cfg, vb.pp("attention"))?;
let feed_forward = FeedForward::new(layer_id, cfg, vb.pp("feed_forward"))?;
Ok(Self {
pre_ln,
ln1,
ln2,
attention,
feed_forward,
})
}
fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let xs = match self.pre_ln.as_ref() {
None => xs.clone(),
Some(pre_ln) => xs.apply(pre_ln)?,
};
let attention = self.attention.forward(&xs.apply(&self.ln1)?, state)?;
let xs = (xs + attention)?;
let feed_forward = self.feed_forward.forward(&xs.apply(&self.ln2)?, state)?;
let xs = (xs + feed_forward)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
pub struct Model {
embeddings: Embedding,
blocks: Vec<Block>,
ln_out: LayerNorm,
head: Linear,
rescale_every: usize,
layers_are_rescaled: bool,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("rwkv");
let embeddings = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embeddings"))?;
let mut blocks = Vec::with_capacity(cfg.num_hidden_layers);
let vb_b = vb_m.pp("blocks");
for block_index in 0..cfg.num_hidden_layers {
let block = Block::new(block_index, cfg, vb_b.pp(block_index))?;
blocks.push(block)
}
let ln_out = layer_norm(cfg.hidden_size, 1e-5, vb_m.pp("ln_out"))?;
let head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("head"))?;
Ok(Self {
embeddings,
blocks,
ln_out,
head,
rescale_every: cfg.rescale_every,
            layers_are_rescaled: false, // This seems to only happen for the f16/bf16 dtypes.
})
}
pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let (_b_size, _seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.embeddings)?;
for (block_idx, block) in self.blocks.iter().enumerate() {
xs = block.forward(&xs, state)?;
if self.layers_are_rescaled && (block_idx + 1) % self.rescale_every == 0 {
xs = (xs / 2.)?
}
}
let xs = xs.apply(&self.ln_out)?.apply(&self.head)?;
state.pos += 1;
Ok(xs)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_recurrent_gemma.rs | candle-transformers/src/models/quantized_recurrent_gemma.rs | //! Recurrent Gemma model implementation with quantization support.
//!
//! RecurrentGemma is a Gemma variant that interleaves recurrent blocks with attention blocks for efficiency.
//! This implementation provides quantization for reduced memory and compute.
//!
//! Key characteristics:
//! - Recurrent blocks with gated recurrent units
//! - Convolution and attention blocks
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for 8-bit quantization
//!
//! References:
//! - [Gemma Paper](https://arxiv.org/abs/2401.06751)
//! - [Model Card](https://ai.google.dev/gemma)
//!
use crate::quantized_nn::{linear_b as linear, Embedding, Linear};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use std::sync::Arc;
use crate::models::recurrent_gemma::{Config, Rglru, RmsNorm, RotaryEmbedding, TemporalBlockType};
fn rms_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<RmsNorm> {
let weight = vb.get(size, "weight")?.dequantize(vb.device())?;
Ok(RmsNorm::from_weight(weight, eps))
}
#[derive(Debug, Clone)]
struct Mlp {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
}
impl Mlp {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let intermediate_size = cfg.intermediate_size / 2;
let gate_proj = linear(h, intermediate_size, true, vb.pp("gate_proj"))?;
let up_proj = linear(h, intermediate_size, true, vb.pp("up_proj"))?;
let down_proj = linear(intermediate_size, h, true, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_activation,
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let gate = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
(gate * xs.apply(&self.up_proj))?.apply(&self.down_proj)
}
}
fn rglru(cfg: &Config, vb: VarBuilder) -> Result<Rglru> {
let h = cfg.hidden_size;
let lru_width = cfg.lru_width.unwrap_or(h);
let n_heads = cfg.num_attention_heads;
let block_width = lru_width / n_heads;
let recurrent_param = vb.get((lru_width,), "recurrent_param")?;
let input_gate_weight = vb.get((n_heads, block_width, block_width), "input_gate_weight")?;
let input_gate_bias = vb.get((n_heads, block_width), "input_gate_bias")?;
let recurrent_gate_weight =
vb.get((n_heads, block_width, block_width), "recurrent_gate_weight")?;
let recurrent_gate_bias = vb.get((n_heads, block_width), "recurrent_gate_bias")?;
Ok(Rglru {
recurrent_param: recurrent_param.dequantize(vb.device())?,
input_gate_bias: input_gate_bias.dequantize(vb.device())?,
input_gate_weight: input_gate_weight.dequantize(vb.device())?,
recurrent_gate_bias: recurrent_gate_bias.dequantize(vb.device())?,
recurrent_gate_weight: recurrent_gate_weight.dequantize(vb.device())?,
block_width,
n_heads,
recurrent_states: None,
})
}
#[derive(Debug, Clone)]
struct RecurrentBlock {
linear_y: Linear,
linear_x: Linear,
linear_out: Linear,
conv_1d: candle_nn::Conv1d,
conv1d_state: Option<Tensor>,
conv1d_width: usize,
rg_lru: Rglru,
act_fn: candle_nn::Activation,
}
impl RecurrentBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let lru_width = cfg.lru_width.unwrap_or(h);
let linear_y = linear(h, lru_width, true, vb.pp("linear_y"))?;
let linear_x = linear(h, lru_width, true, vb.pp("linear_x"))?;
let linear_out = linear(lru_width, h, true, vb.pp("linear_out"))?;
let conv_1d = {
let ws = vb
.get((lru_width, 1, cfg.conv1d_width), "conv_1d.weight")?
.dequantize(vb.device())?;
let bs = vb.get(lru_width, "conv_1d.bias")?.dequantize(vb.device())?;
let config = candle_nn::Conv1dConfig {
groups: lru_width,
padding: cfg.conv1d_width - 1,
..Default::default()
};
candle_nn::Conv1d::new(ws, Some(bs), config)
};
let rg_lru = rglru(cfg, vb.pp("rg_lru"))?;
Ok(Self {
linear_y,
linear_x,
linear_out,
conv_1d,
conv1d_state: None,
conv1d_width: cfg.conv1d_width,
rg_lru,
act_fn: cfg.hidden_activation,
})
}
pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
let (_b_sz, seq_len, _) = xs.dims3()?;
let y_branch = xs.apply(&self.linear_y)?.apply(&self.act_fn)?;
let x_branch = xs.apply(&self.linear_x)?.transpose(1, 2)?;
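        // On the first call (pos == 0) the causal conv1d runs over the whole prompt
        // and the last `conv1d_width - 1` input columns are cached; on subsequent
        // single-token steps the convolution is evaluated as a per-channel dot
        // product against that cached window.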
let x_branch = if pos == 0 {
let x_len = x_branch.dim(D::Minus1)?;
let pad = self.conv1d_width as i64 - x_len as i64 - 1;
let padded = match pad.cmp(&0) {
std::cmp::Ordering::Equal => x_branch.clone(),
std::cmp::Ordering::Less => {
let rev_pad = (-pad) as usize;
x_branch.narrow(D::Minus1, rev_pad, x_len - rev_pad)?
}
std::cmp::Ordering::Greater => {
x_branch.pad_with_zeros(D::Minus1, pad as usize, 0)?
}
};
self.conv1d_state = Some(padded);
x_branch
.apply(&self.conv_1d)?
.narrow(D::Minus1, 0, seq_len)?
} else {
let conv_state = match self.conv1d_state.as_ref() {
None => candle::bail!("empty cache despite pos > 0"),
Some(s) => Tensor::cat(&[s, &x_branch], D::Minus1)?,
};
let w = self.conv_1d.weight().i((.., 0, ..))?;
let x_branch = conv_state.broadcast_mul(&w)?.sum(D::Minus1)?;
let x_branch = match self.conv_1d.bias() {
None => x_branch,
Some(b) => x_branch.broadcast_add(b)?,
};
let x_branch = x_branch.unsqueeze(D::Minus1)?;
self.conv1d_state = Some(conv_state.i((.., .., 1..))?);
x_branch
};
let x_branch = x_branch.transpose(1, 2)?;
let x_branch = self.rg_lru.forward(&x_branch, pos)?;
(x_branch * y_branch)?.apply(&self.linear_out)
}
}
#[derive(Debug, Clone)]
struct SdpaAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
n_heads: usize,
n_kv_heads: usize,
head_dim: usize,
hidden_size: usize,
kv_cache: Option<(Tensor, Tensor)>,
rotary_emb: Arc<RotaryEmbedding>,
}
impl SdpaAttention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let n_heads = cfg.num_attention_heads;
let n_kv_heads = cfg.num_key_value_heads;
let hd = cfg.head_dim;
let q_proj = linear(h, n_heads * hd, cfg.attention_bias, vb.pp("q_proj"))?;
let k_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("k_proj"))?;
let v_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("v_proj"))?;
let o_proj = linear(n_heads * hd, h, true, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
n_heads,
n_kv_heads,
head_dim: hd,
hidden_size: h,
kv_cache: None,
rotary_emb,
})
}
fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
let n_rep = self.n_heads / self.n_kv_heads;
crate::utils::repeat_kv(x, n_rep)
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
let (bsz, q_len, _) = xs.dims3()?;
let query_states = xs.apply(&self.q_proj)?;
let key_states = xs.apply(&self.k_proj)?;
let value_states = xs.apply(&self.v_proj)?;
let query_states = query_states
.reshape((bsz, q_len, self.n_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((bsz, q_len, self.n_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((bsz, q_len, self.n_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let query_states = query_states.chunk(2, D::Minus1)?;
let key_states = key_states.chunk(2, D::Minus1)?;
let (query_rot, key_rot) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states[0], &key_states[0], pos)?;
let query_states = Tensor::cat(&[&query_rot, &query_states[1]], D::Minus1)?.contiguous()?;
let key_states = Tensor::cat(&[&key_rot, &key_states[1]], D::Minus1)?.contiguous()?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = self.repeat_kv(key_states)?;
let value_states = self.repeat_kv(value_states)?;
let xs = {
let att = (query_states.matmul(&key_states.t()?)? / (self.head_dim as f64).sqrt())?;
let att = if q_len == 1 {
att
} else {
match attention_mask {
None => att,
Some(mask) => att.broadcast_add(mask)?,
}
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
att.matmul(&value_states.contiguous()?)?
};
let xs = xs
.transpose(1, 2)?
.reshape((bsz, q_len, self.hidden_size))?;
self.o_proj.forward(&xs)
}
}
#[derive(Debug, Clone)]
enum TemporalBlock {
Recurrent(RecurrentBlock),
Attention(SdpaAttention),
}
impl TemporalBlock {
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
match self {
Self::Recurrent(b) => b.forward(xs, pos),
Self::Attention(b) => b.forward(xs, attention_mask, pos),
}
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
temporal_pre_norm: RmsNorm,
channel_pre_norm: RmsNorm,
temporal_block: TemporalBlock,
mlp_block: Mlp,
}
impl DecoderLayer {
fn new(
block_idx: usize,
rotary_emb: Arc<RotaryEmbedding>,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let h = cfg.hidden_size;
let temporal_pre_norm = rms_norm(h, cfg.rms_norm_eps, vb.pp("temporal_pre_norm"))?;
let channel_pre_norm = rms_norm(h, cfg.rms_norm_eps, vb.pp("channel_pre_norm"))?;
let temporal_block = match cfg.block_types[block_idx % cfg.block_types.len()] {
TemporalBlockType::Recurrent => {
let block = RecurrentBlock::new(cfg, vb.pp("temporal_block"))?;
TemporalBlock::Recurrent(block)
}
TemporalBlockType::Attention => {
let block = SdpaAttention::new(rotary_emb, cfg, vb.pp("temporal_block"))?;
TemporalBlock::Attention(block)
}
};
let mlp_block = Mlp::new(cfg, vb.pp("mlp_block"))?;
Ok(Self {
temporal_pre_norm,
channel_pre_norm,
temporal_block,
mlp_block,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = xs.apply(&self.temporal_pre_norm)?;
let xs = self.temporal_block.forward(&xs, attention_mask, pos)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.channel_pre_norm)?.apply(&self.mlp_block)?;
xs + residual
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: Embedding,
layers: Vec<DecoderLayer>,
final_norm: RmsNorm,
lm_head: Linear,
hidden_size: usize,
logits_soft_cap: f64,
device: Device,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(DType::F32, cfg, vb.device())?);
let vb_b = vb.pp("layers");
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
for idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(idx, rotary_emb.clone(), cfg, vb_b.pp(idx))?;
layers.push(layer)
}
let final_norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("final_norm"))?;
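        // The lm_head weights are tied to the token embeddings (both are read from
        // the "embed_tokens" prefix).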
let lm_head = linear(
cfg.hidden_size,
cfg.vocab_size,
false,
vb.pp("embed_tokens"),
)?;
Ok(Self {
embed_tokens,
layers,
final_norm,
lm_head,
hidden_size: cfg.hidden_size,
logits_soft_cap: cfg.logits_soft_cap,
device: vb.device().clone(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(DType::F32)
}
pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
let (b_size, seq_len) = xs.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, pos)?;
Some(mask)
};
let xs = xs.apply(&self.embed_tokens)?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), pos)?;
}
let logits = xs
.narrow(1, seq_len - 1, 1)?
.apply(&self.final_norm)?
.apply(&self.lm_head)?;
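        // Soft-cap the logits: logits = cap * tanh(logits / cap), smoothly bounding
        // them to (-cap, cap) before sampling.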
let logits = ((logits / self.logits_soft_cap)?.tanh()? * self.logits_soft_cap)?;
Ok(logits)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_blip.rs | candle-transformers/src/models/quantized_blip.rs | //! BLIP model implementation with quantization support.
//!
//! BLIP is a vision-language model for image understanding and generation tasks.
//! This implementation provides quantization for reduced memory and compute.
//!
//! Key characteristics:
//! - Vision encoder using ViT architecture
//! - Text decoder using BERT-style transformer
//! - Cross-attention between vision and text features
//! - Support for 8-bit quantization
//!
//! References:
//! - [BLIP Paper](https://arxiv.org/abs/2201.12086)
//! - [Hugging Face Implementation](https://huggingface.co/docs/transformers/model_doc/blip)
//!
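//! # Example (sketch)
//!
//! A minimal sketch of running the quantized vision encoder. The GGUF file name,
//! the `VarBuilder::from_gguf` call and the `Config::image_captioning_large`
//! constructor are illustrative assumptions; `BlipForConditionalGeneration` and
//! `VisionModel` are defined in this module.
//!
//! ```ignore
//! use candle::{DType, Device, Module, Tensor};
//! use candle_transformers::models::blip::Config;
//! use candle_transformers::models::quantized_blip::{BlipForConditionalGeneration, VarBuilder};
//!
//! let device = Device::Cpu;
//! let cfg = Config::image_captioning_large();
//! let vb = VarBuilder::from_gguf("blip-image-captioning-large-q4k.gguf", &device)?;
//! let mut model = BlipForConditionalGeneration::new(&cfg, vb)?;
//!
//! // Pixel values come in as (batch, 3, image_size, image_size).
//! let pixel_values = Tensor::zeros((1, 3, 384, 384), DType::F32, &device)?;
//! let image_embeds = model.vision_model().forward(&pixel_values)?;
//! // `image_embeds` can then be fed to the text decoder for captioning.
//! ```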
use super::quantized_blip_text as blip_text;
use crate::quantized_nn::{layer_norm, linear, Linear};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{Module, Result, Tensor, D};
use candle_nn::{Conv2d, Conv2dConfig, LayerNorm};
pub type VisionConfig = super::blip::VisionConfig;
pub type Config = super::blip::Config;
#[derive(Debug, Clone)]
struct VisionEmbeddings {
class_embedding: Tensor,
patch_embedding: Conv2d,
position_embedding: Tensor,
}
impl VisionEmbeddings {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let class_embedding = vb
.get((1, 1, cfg.hidden_size), "class_embedding")?
.dequantize(vb.device())?;
let conv_cfg = Conv2dConfig {
stride: cfg.patch_size,
..Default::default()
};
let pe_vb = vb.pp("patch_embedding");
let pe_weight = pe_vb
.get(
(cfg.hidden_size, 3, cfg.patch_size, cfg.patch_size),
"weight",
)?
.dequantize(vb.device())?;
let pe_bias = pe_vb
.get(cfg.hidden_size, "bias")?
.dequantize(vb.device())?;
let patch_embedding = Conv2d::new(pe_weight, Some(pe_bias), conv_cfg);
let num_patches1 = cfg.image_size / cfg.patch_size;
let num_patches = num_patches1 * num_patches1;
let num_positions = num_patches + 1;
let position_embedding = vb
.get((1, num_positions, cfg.hidden_size), "position_embedding")?
.dequantize(vb.device())?;
Ok(Self {
class_embedding,
patch_embedding,
position_embedding,
})
}
}
impl Module for VisionEmbeddings {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let target_dtype = xs.dtype();
let b_size = xs.dim(0)?;
let patch_embeds = xs.apply(&self.patch_embedding)?.flatten_from(2)?.t()?;
let d = self.class_embedding.dim(D::Minus1)?;
let class_embeds = self
.class_embedding
.broadcast_as((b_size, 1, d))?
.to_dtype(target_dtype)?;
let embeddings = Tensor::cat(&[&class_embeds, &patch_embeds], 1)?;
let position_embedding = self.position_embedding.narrow(1, 0, embeddings.dim(1)?)?;
embeddings.broadcast_add(&position_embedding)
}
}
#[derive(Debug, Clone)]
struct Attention {
qkv: Linear,
projection: Linear,
scale: f64,
num_heads: usize,
}
impl Attention {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let embed_dim = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let head_dim = embed_dim / num_heads;
let scale = 1f64 / (head_dim as f64).sqrt();
let qkv = linear(embed_dim, 3 * embed_dim, vb.pp("qkv"))?;
let projection = linear(embed_dim, embed_dim, vb.pp("projection"))?;
Ok(Self {
qkv,
projection,
scale,
num_heads,
})
}
fn forward(&self, xs: &Tensor, attn_mask: Option<&Tensor>) -> Result<Tensor> {
let (b_sz, tgt_len, embed_dim) = xs.dims3()?;
let mixed_qkv = xs
.apply(&self.qkv)?
.reshape((b_sz, tgt_len, 3, self.num_heads, embed_dim / self.num_heads))?
.permute((2, 0, 3, 1, 4))?;
let query = mixed_qkv.get(0)?;
let key = mixed_qkv.get(1)?;
let value = mixed_qkv.get(2)?;
let attention_scores = query.matmul(&key.t()?)?;
let attention_scores = (attention_scores * self.scale)?;
let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;
let attention_probs = match attn_mask {
None => attention_probs,
Some(attn_mask) => (attention_probs * attn_mask)?,
};
attention_probs
.matmul(&value)?
.permute((0, 2, 1, 3))?
.flatten_from(D::Minus2)?
.apply(&self.projection)
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
activation_fn: candle_nn::Activation,
fc1: Linear,
fc2: Linear,
}
impl MLP {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?;
let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?;
Ok(Self {
activation_fn: cfg.hidden_act,
fc1,
fc2,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.fc1)?
.apply(&self.activation_fn)?
.apply(&self.fc2)
}
}
#[derive(Debug, Clone)]
struct EncoderLayer {
self_attn: Attention,
layer_norm1: LayerNorm,
mlp: MLP,
layer_norm2: LayerNorm,
}
impl EncoderLayer {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let embed_dim = cfg.hidden_size;
let self_attn = Attention::new(cfg, vb.pp("self_attn"))?;
let layer_norm1 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm1"))?;
let layer_norm2 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm2"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
Ok(Self {
self_attn,
layer_norm1,
mlp,
layer_norm2,
})
}
fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
let residual = xs;
let xs = xs.apply(&self.layer_norm1)?;
let xs = self.self_attn.forward(&xs, attention_mask)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.layer_norm2)?.apply(&self.mlp)?;
xs + residual
}
}
#[derive(Debug, Clone)]
struct Encoder {
layers: Vec<EncoderLayer>,
}
impl Encoder {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb = vb.pp("layers");
for i in 0..cfg.num_hidden_layers {
let layer = EncoderLayer::new(cfg, vb.pp(i))?;
layers.push(layer)
}
Ok(Self { layers })
}
fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs, attention_mask)?
}
Ok(xs)
}
}
#[derive(Debug, Clone)]
pub struct VisionModel {
embeddings: VisionEmbeddings,
encoder: Encoder,
post_layernorm: LayerNorm,
}
impl VisionModel {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let embeddings = VisionEmbeddings::new(cfg, vb.pp("embeddings"))?;
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let post_layernorm =
layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_layernorm"))?;
Ok(Self {
embeddings,
encoder,
post_layernorm,
})
}
}
impl Module for VisionModel {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.apply(&self.embeddings)?;
let encoder_outputs = self.encoder.forward(&xs, None)?;
// Return the last hidden state rather than pooled outputs.
encoder_outputs.apply(&self.post_layernorm)
}
}
#[derive(Debug, Clone)]
pub struct BlipForConditionalGeneration {
vision_model: VisionModel,
text_decoder: blip_text::TextLMHeadModel,
}
impl BlipForConditionalGeneration {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vision_model = VisionModel::new(&cfg.vision_config, vb.pp("vision_model"))?;
let text_decoder =
blip_text::TextLMHeadModel::new(&cfg.text_config, vb.pp("text_decoder"))?;
Ok(Self {
vision_model,
text_decoder,
})
}
pub fn vision_model(&self) -> &VisionModel {
&self.vision_model
}
pub fn text_decoder(&mut self) -> &mut blip_text::TextLMHeadModel {
&mut self.text_decoder
}
pub fn reset_kv_cache(&mut self) {
self.text_decoder.reset_kv_cache();
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mistral.rs | candle-transformers/src/models/mistral.rs | //! Mixtral Model, based on the Mistral architecture
//!
//! See Mistral and Mixtral at:
//! - [Hugging Face](https://huggingface.co/docs/transformers/model_doc/mixtral)
//! - [GitHub](https://github.com/mistralai/mistral-src)
//!
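//! # Example (sketch)
//!
//! A minimal autoregressive decoding sketch. The safetensors file names and the
//! `VarBuilder::from_mmaped_safetensors` call are illustrative assumptions;
//! `Config::config_7b_v0_1`, `Model::new` and `Model::forward` are defined in
//! this module.
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::mistral::{Config, Model};
//!
//! let device = Device::Cpu;
//! let cfg = Config::config_7b_v0_1(false);
//! let files = ["model-00001-of-00002.safetensors", "model-00002-of-00002.safetensors"];
//! let vb = unsafe { VarBuilder::from_mmaped_safetensors(&files, DType::F32, &device)? };
//! let mut model = Model::new(&cfg, vb)?;
//!
//! // Run the whole prompt with seqlen_offset = 0, then decode one token at a
//! // time, passing the number of tokens already processed as the offset.
//! let prompt = Tensor::new(&[1u32, 851, 349], &device)?.unsqueeze(0)?;
//! let logits = model.forward(&prompt, 0)?;
//! let next_token = Tensor::new(&[1234u32], &device)?.unsqueeze(0)?;
//! let logits = model.forward(&next_token, 3)?;
//! model.clear_kv_cache(); // reset before starting a new prompt
//! ```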
use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm};
/// Mistral LLM, https://github.com/mistralai/mistral-src
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
fn default_num_attention_heads() -> usize {
32
}
fn default_use_flash_attn() -> bool {
false
}
fn default_hidden_act() -> candle_nn::Activation {
candle_nn::Activation::Silu
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
#[serde(default = "default_num_attention_heads")]
pub num_attention_heads: usize,
pub head_dim: Option<usize>,
pub num_key_value_heads: usize,
#[serde(default = "default_hidden_act")]
pub hidden_act: Activation,
pub max_position_embeddings: usize,
pub rms_norm_eps: f64,
pub rope_theta: f64,
pub sliding_window: Option<usize>,
#[serde(default = "default_use_flash_attn")]
pub use_flash_attn: bool,
}
impl Config {
// https://huggingface.co/mistralai/Mistral-7B-v0.1/blob/main/config.json
pub fn config_7b_v0_1(use_flash_attn: bool) -> Self {
Self {
vocab_size: 32000,
hidden_size: 4096,
intermediate_size: 14336,
num_hidden_layers: 32,
num_attention_heads: 32,
head_dim: None,
num_key_value_heads: 8,
hidden_act: Activation::Silu,
max_position_embeddings: 32768,
rms_norm_eps: 1e-5,
rope_theta: 10_000.,
sliding_window: Some(4096),
use_flash_attn,
}
}
// https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca/blob/main/config.json
// https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B/blob/main/config.json
pub fn config_chat_ml(use_flash_attn: bool) -> Self {
Self {
vocab_size: 32002,
hidden_size: 4096,
intermediate_size: 14336,
num_hidden_layers: 32,
num_attention_heads: 32,
head_dim: None,
num_key_value_heads: 8,
hidden_act: Activation::Silu,
max_position_embeddings: 32768,
rms_norm_eps: 1e-5,
rope_theta: 10_000.,
sliding_window: Some(4096),
use_flash_attn,
}
}
// https://huggingface.co/amazon/MistralLite/blob/main/config.json
pub fn config_amazon_mistral_lite(use_flash_attn: bool) -> Self {
Self {
vocab_size: 32003,
hidden_size: 4096,
intermediate_size: 14336,
num_hidden_layers: 32,
num_attention_heads: 32,
head_dim: None,
num_key_value_heads: 8,
hidden_act: Activation::Silu,
max_position_embeddings: 32768,
rms_norm_eps: 1e-5,
rope_theta: 10_000.,
sliding_window: Some(4096),
use_flash_attn,
}
}
fn head_dim(&self) -> usize {
self.head_dim
.unwrap_or(self.hidden_size / self.num_attention_heads)
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let rope_theta = cfg.rope_theta as f32;
let dim = cfg.head_dim();
let max_seq_len = cfg.max_position_embeddings;
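        // Standard RoPE tables: inv_freq[i] = 1 / theta^(2i / head_dim); combining
        // these with the positions 0..max_seq_len yields cos/sin tables of shape
        // (max_seq_len, head_dim / 2).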
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(DType::F32)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?.to_dtype(dtype)?,
cos: freqs.cos()?.to_dtype(dtype)?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(q, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(k, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
use_flash_attn: bool,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = cfg.head_dim();
let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
rotary_emb,
kv_cache: None,
use_flash_attn: cfg.use_flash_attn,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?;
let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?;
let attn_output = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = query_states.transpose(1, 2)?;
let k = key_states.transpose(1, 2)?;
let v = value_states.transpose(1, 2)?;
let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)?
} else {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.num_heads * self.head_dim))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
sliding_window: Option<usize>,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
sliding_window: cfg.sliding_window,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let sliding_window = self.sliding_window.unwrap_or(tgt_len + 1);
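        // Position i may attend to position j only when j <= i (causal) and
        // i - j <= sliding_window; every other entry is set to -inf.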
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((1, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn embed_tokens(&self) -> &candle_nn::Embedding {
&self.embed_tokens
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (_b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn forward_embeds(
&mut self,
xs: &Tensor,
attn_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (_b_size, seq_len, _) = xs.dims3()?;
let mut xs = xs.clone();
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attn_mask, seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_phi3.rs | candle-transformers/src/models/quantized_phi3.rs | //! Phi3 model implementation with quantization support.
//!
//! Phi3 is a language model intended for research purposes.
//! This implementation provides quantization for reduced memory usage.
//!
//! Key characteristics:
//! - Multi-head attention
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for quantization
//!
//! References:
//! - [Model Card](https://huggingface.co/microsoft/phi-3)
//!
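//! # Example (sketch)
//!
//! A minimal sketch of loading GGUF weights and running a forward pass. The file
//! name is an illustrative assumption; `gguf_file::Content::read`,
//! `ModelWeights::from_gguf` and `ModelWeights::forward` are the entry points
//! used below.
//!
//! ```ignore
//! use candle::quantized::gguf_file;
//! use candle::{Device, Tensor};
//! use candle_transformers::models::quantized_phi3::ModelWeights;
//!
//! let device = Device::Cpu;
//! let mut file = std::fs::File::open("phi-3-mini-4k-instruct-q4.gguf")?;
//! let content = gguf_file::Content::read(&mut file)?;
//! let mut model = ModelWeights::from_gguf(false, content, &mut file, &device)?;
//!
//! // Prompt at index_pos 0, then one token per step with a growing offset.
//! let prompt = Tensor::new(&[1u32, 450, 1234], &device)?.unsqueeze(0)?;
//! let logits = model.forward(&prompt, 0)?;
//! ```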
use std::collections::HashMap;
use candle::quantized::gguf_file;
use candle::quantized::QTensor;
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{kv_cache::KvCache, Embedding, RmsNorm};
#[derive(Debug, Clone)]
struct QLinear {
inner: candle::quantized::QMatMul,
span: tracing::Span,
}
impl QLinear {
fn new<R: std::io::Read + std::io::Seek>(
ct: &gguf_file::Content,
r: &mut R,
name: &str,
device: &Device,
) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
let w = ct.tensor(r, &format!("{name}.weight"), device)?;
let inner = candle::quantized::QMatMul::from_qtensor(w)?;
Ok(Self { inner, span })
}
}
impl Module for QLinear {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
#[derive(Debug, Clone)]
struct Mlp {
ffn_up: QLinear,
ffn_down: QLinear,
i_size: usize,
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
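        // ffn_up produces the gate and up projections fused along the last dim: the
        // first `i_size` channels act as the SiLU gate for the second `i_size`.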
let up_states = xs.apply(&self.ffn_up)?;
let gate = up_states.narrow(D::Minus1, 0, self.i_size)?;
let up_states = up_states.narrow(D::Minus1, self.i_size, self.i_size)?;
let up_states = (up_states * gate.silu()?)?;
up_states.apply(&self.ffn_down)
}
}
fn rms_norm(w: QTensor, eps: f64) -> Result<RmsNorm> {
let w = w.dequantize(&w.device())?;
let rms = RmsNorm::new(w, eps);
Ok(rms)
}
#[derive(Debug, Clone)]
struct LayerWeights {
attn_qkv: QLinear,
attn_output: QLinear,
attn_norm: RmsNorm,
ffn_norm: RmsNorm,
mlp: Mlp,
n_head: usize,
n_kv_head: usize,
head_dim: usize,
cos: Tensor,
sin: Tensor,
neg_inf: Tensor,
kv_cache: KvCache,
use_flash_attn: bool,
span_attn: tracing::Span,
span_rot: tracing::Span,
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> {
let shape = mask.shape();
let m = mask.where_cond(&on_true.broadcast_as(shape.dims())?, on_false)?;
Ok(m)
}
impl LayerWeights {
fn apply_rotary_emb(&self, xs: &Tensor, index_pos: usize) -> Result<Tensor> {
let _enter = self.span_rot.enter();
let (_b_sz, _h, seq_len, _n_embd) = xs.dims4()?;
let cos = self.cos.narrow(0, index_pos, seq_len)?;
let sin = self.sin.narrow(0, index_pos, seq_len)?;
candle_nn::rotary_emb::rope(&xs.contiguous()?, &cos, &sin)
}
fn forward_attn(
&mut self,
x: &Tensor,
mask: Option<&Tensor>,
index_pos: usize,
) -> Result<Tensor> {
let _enter = self.span_attn.enter();
let (b_sz, seq_len, n_embd) = x.dims3()?;
let qkv = self.attn_qkv.forward(x)?;
let query_pos = self.n_head * self.head_dim;
let q = qkv.narrow(D::Minus1, 0, query_pos)?;
let k = qkv.narrow(D::Minus1, query_pos, self.n_kv_head * self.head_dim)?;
let v = qkv.narrow(
D::Minus1,
query_pos + self.n_kv_head * self.head_dim,
self.n_kv_head * self.head_dim,
)?;
let q = q
.reshape((b_sz, seq_len, self.n_head, self.head_dim))?
.transpose(1, 2)?;
let k = k
.reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
.transpose(1, 2)?;
let q = self.apply_rotary_emb(&q, index_pos)?.contiguous()?;
let k = self.apply_rotary_emb(&k, index_pos)?;
if index_pos == 0 {
self.kv_cache.reset();
}
let (k, v) = self.kv_cache.append(&k.contiguous()?, &v.contiguous()?)?;
let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?;
let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?;
let y = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = q.to_dtype(DType::BF16)?.transpose(1, 2)?;
let k = k.to_dtype(DType::BF16)?.transpose(1, 2)?;
let v = v.to_dtype(DType::BF16)?.transpose(1, 2)?;
let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)?
.to_dtype(DType::F32)?
.transpose(1, 2)?
} else {
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = match mask {
None => att,
Some(mask) => {
let mask = mask.broadcast_as(att.shape())?;
masked_fill(&att, &mask, &self.neg_inf)?
}
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
            // matmul doesn't support strided inputs for now, hence the earlier
            // contiguous calls on the k/v tensors.
att.matmul(&v)?
};
let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
let y = self.attn_output.forward(&y)?;
Ok(y)
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug, Clone)]
pub struct ModelWeights {
tok_embeddings: Embedding,
layers: Vec<LayerWeights>,
output_norm: RmsNorm,
output: QLinear,
masks: HashMap<usize, Tensor>,
span: tracing::Span,
span_output: tracing::Span,
}
fn precomput_freqs_cis(
head_dim: usize,
max_seq_len: usize,
freq_base: f32,
device: &Device,
) -> Result<(Tensor, Tensor)> {
let theta: Vec<_> = (0..head_dim)
.step_by(2)
.map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32))
.collect();
let theta = Tensor::new(theta.as_slice(), device)?;
let idx_theta = Tensor::arange(0, max_seq_len as u32, device)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
let cos = idx_theta.cos()?;
let sin = idx_theta.sin()?;
Ok((cos, sin))
}
impl ModelWeights {
pub fn from_gguf<R: std::io::Seek + std::io::Read>(
use_flash_attn: bool,
ct: gguf_file::Content,
reader: &mut R,
device: &Device,
) -> Result<Self> {
let md_get = |s: &str| match ct.metadata.get(s) {
None => candle::bail!("cannot find {s} in metadata"),
Some(v) => Ok(v),
};
// Parameter extraction from metadata.
let head_count = md_get("phi3.attention.head_count")?.to_u32()? as usize;
let head_count_kv = md_get("phi3.attention.head_count_kv")?.to_u32()? as usize;
let block_count = md_get("phi3.block_count")?.to_u32()? as usize;
let embedding_length = md_get("phi3.embedding_length")?.to_u32()? as usize;
let max_seq_len = md_get("phi3.context_length")?.to_u32()? as usize;
let head_dim = embedding_length / head_count;
let i_size = md_get("phi3.feed_forward_length")?.to_u32()? as usize;
let rope_dim = md_get("phi3.rope.dimension_count")?.to_u32()? as usize;
let rms_eps = md_get("phi3.attention.layer_norm_rms_epsilon")?.to_f32()? as f64;
let (cos, sin) = precomput_freqs_cis(rope_dim, max_seq_len, 10_000., device)?;
let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?;
let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?;
let tok_embeddings = tok_embeddings.dequantize(device)?;
let output_norm = rms_norm(ct.tensor(reader, "output_norm.weight", device)?, rms_eps)?;
let output = QLinear::new(&ct, reader, "output", device)?;
let mut layers = Vec::with_capacity(block_count);
for layer_idx in 0..block_count {
let prefix = format!("blk.{layer_idx}");
let ffn_up = QLinear::new(&ct, reader, &format!("{prefix}.ffn_up"), device)?;
let ffn_down = QLinear::new(&ct, reader, &format!("{prefix}.ffn_down"), device)?;
let mlp = Mlp {
ffn_up,
ffn_down,
i_size,
};
let attn_norm = rms_norm(
ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?,
rms_eps,
)?;
let ffn_norm = rms_norm(
ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?,
rms_eps,
)?;
let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
let kv_cache = KvCache::new(2, max_seq_len);
layers.push(LayerWeights {
attn_qkv: QLinear::new(&ct, reader, &format!("{prefix}.attn_qkv"), device)?,
attn_output: QLinear::new(&ct, reader, &format!("{prefix}.attn_output"), device)?,
attn_norm,
ffn_norm,
mlp,
n_head: head_count,
n_kv_head: head_count_kv,
head_dim,
cos: cos.clone(),
sin: sin.clone(),
neg_inf: neg_inf.clone(),
kv_cache,
use_flash_attn,
span_attn,
span_rot,
})
}
let span = tracing::span!(tracing::Level::TRACE, "model");
let span_output = tracing::span!(tracing::Level::TRACE, "output");
Ok(Self {
tok_embeddings: Embedding::new(tok_embeddings, embedding_length),
layers,
output_norm,
output,
masks: HashMap::new(),
span,
span_output,
})
}
fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> {
if let Some(mask) = self.masks.get(&t) {
Ok(mask.clone())
} else {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), device)?;
self.masks.insert(t, mask.clone());
Ok(mask)
}
}
pub fn forward(&mut self, xs: &Tensor, index_pos: usize) -> Result<Tensor> {
let (_b_sz, seq_len) = xs.dims2()?;
let mask = if seq_len == 1 {
None
} else {
Some(self.mask(seq_len, xs.device())?)
};
let _enter = self.span.enter();
let mut xs = self.tok_embeddings.forward(xs)?;
for layer in self.layers.iter_mut() {
let residual = &xs;
let ys = xs.apply(&layer.attn_norm)?;
let ys = layer.forward_attn(&ys, mask.as_ref(), index_pos)?;
let ys = (ys + residual)?;
let residual = &ys;
let ys = ys.apply(&layer.ffn_norm)?;
let ys = layer.mlp.forward(&ys)?;
xs = (ys + residual)?
}
let xs = xs.apply(&self.output_norm)?.i((.., seq_len - 1, ..))?;
let _enter = self.span_output.enter();
self.output.forward(&xs)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/persimmon.rs | candle-transformers/src/models/persimmon.rs | //! Persimmon Model
//!
//! A transformer language model for efficient inference and general-purpose tasks. The model uses a standard transformer architecture with:
//! - Layer normalization for Q/K attention
//! - RoPE embeddings with partial rotary factor
//! - ReLU activation
//! - Separate number of attention heads and KV heads
//!
//! References:
//! - 💻 [Hugging Face Implementation](https://github.com/huggingface/transformers/blob/main/src/transformers/models/persimmon/modeling_persimmon.py)
//! - 💻 [Persimmon Config](https://github.com/huggingface/transformers/blob/main/src/transformers/models/persimmon/configuration_persimmon.py)
//! - 🤗 [Hugging Face](https://huggingface.co/adept/persimmon-8b-base)
//!
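//! # Example (sketch)
//!
//! Only the configuration lives in this module. A short sketch of building the 8b
//! base configuration and deriving the rotary dimension from the partial rotary
//! factor; the head-dimension arithmetic below is illustrative rather than an API
//! of this module.
//!
//! ```ignore
//! use candle_transformers::models::persimmon::Config;
//!
//! let cfg = Config::base_8b();
//! // 4096 hidden size over 64 heads gives a head dim of 64; with a partial
//! // rotary factor of 0.5, only the first 32 dimensions receive RoPE.
//! let head_dim = cfg.hidden_size / cfg.num_attention_heads;
//! let rotary_dim = (head_dim as f64 * cfg.partial_rotary_factor) as usize;
//! assert_eq!((head_dim, rotary_dim), (64, 32));
//! ```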
use candle::DType;
use serde::Deserialize;
pub const DTYPE: DType = DType::F32;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
Absolute,
Alibi,
}
// https://github.com/huggingface/transformers/blob/main/src/transformers/models/persimmon/configuration_persimmon.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub hidden_act: candle_nn::Activation,
pub max_position_embeddings: usize,
pub initializer_range: f64,
pub layer_norm_eps: f64,
pub rms_norm_eps: f64,
pub use_cache: bool,
pub tie_word_embeddings: bool,
pub rope_theta: f64,
pub qk_layernorm: bool,
pub partial_rotary_factor: f64,
}
impl Config {
pub fn base_8b() -> Self {
// https://huggingface.co/adept/persimmon-8b-base/blob/main/config.json
Self {
hidden_act: candle_nn::Activation::Relu,
hidden_size: 4096,
initializer_range: 0.02,
intermediate_size: 16384,
layer_norm_eps: 1e-05,
max_position_embeddings: 16384,
num_attention_heads: 64,
num_hidden_layers: 36,
num_key_value_heads: 64,
qk_layernorm: true,
rms_norm_eps: 1e-06,
rope_theta: 25000.0,
tie_word_embeddings: false,
use_cache: true,
vocab_size: 262144,
partial_rotary_factor: 0.5,
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/rwkv_v6.rs | candle-transformers/src/models/rwkv_v6.rs | //! RWKV v6 model implementation.
//!
//! The [RWKV model](https://wiki.rwkv.com/) is a recurrent neural network model
//! with performance on par with transformer architectures. Several variants are
//! available; candle implements the v5 and v6 versions, which can be used with
//! Eagle 7B ([blog post](https://blog.rwkv.com/p/eagle-7b-soaring-past-transformers)).
//!
//! Key characteristics:
//! - Linear attention mechanism
//! - Time-mixing for temporal dependencies
//! - Group normalization
//! - Feed forward gating
//! - State recycling for efficient inference
//!
//! # Example
//!
//! ```bash
//! cargo run --example rwkv --release -- \
//! --prompt "The smallest prime is "
//!
//! > avx: true, neon: false, simd128: false, f16c: true
//! > temp: 0.00 repeat-penalty: 1.10 repeat-last-n: 64
//! > The smallest prime is ϕ(2) = 2.
//! > The smallest composite is ϕ(3) = 3.
//! > The smallest perfect number is ϕ(5) = 5.
//! > The smallest perfect square is ϕ(4) = 4.
//! > The smallest perfect cube is ϕ(6) = 6.
//! ```
use super::with_tracing::{layer_norm, linear_no_bias as linear, LayerNorm, Linear};
use candle::{IndexOp, Result, Tensor};
use candle_nn::{embedding, Embedding, Module, VarBuilder};
pub use crate::models::rwkv_v5::{Config, State, Tokenizer};
#[derive(Debug, Clone)]
struct SelfAttention {
key: Linear,
receptance: Linear,
value: Linear,
gate: Linear,
output: Linear,
ln_x: candle_nn::GroupNorm,
time_mix_x: Tensor,
time_mix_w: Tensor,
time_mix_key: Tensor,
time_mix_value: Tensor,
time_mix_receptance: Tensor,
time_decay: Tensor,
time_faaaa: Tensor,
time_mix_gate: Tensor,
time_decay_w1: Tensor,
time_decay_w2: Tensor,
time_mix_w1: Tensor,
time_mix_w2: Tensor,
layer_id: usize,
n_attn_heads: usize,
}
impl SelfAttention {
fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let attn_hidden_size = cfg.attention_hidden_size;
let key = linear(hidden_size, attn_hidden_size, vb.pp("key"))?;
let receptance = linear(hidden_size, attn_hidden_size, vb.pp("receptance"))?;
let value = linear(hidden_size, attn_hidden_size, vb.pp("value"))?;
let gate = linear(hidden_size, attn_hidden_size, vb.pp("gate"))?;
let output = linear(attn_hidden_size, hidden_size, vb.pp("output"))?;
let ln_x = candle_nn::group_norm(
hidden_size / cfg.head_size,
hidden_size,
1e-5,
vb.pp("ln_x"),
)?;
let time_mix_x = vb.get((1, 1, cfg.hidden_size), "time_mix_x")?;
let time_mix_w = vb.get((1, 1, cfg.hidden_size), "time_mix_w")?;
let time_mix_key = vb.get((1, 1, cfg.hidden_size), "time_mix_key")?;
let time_mix_value = vb.get((1, 1, cfg.hidden_size), "time_mix_value")?;
let time_mix_receptance = vb.get((1, 1, cfg.hidden_size), "time_mix_receptance")?;
let n_attn_heads = cfg.hidden_size / cfg.head_size;
let time_decay = vb.get((1, 1, cfg.hidden_size), "time_decay")?;
let time_faaaa = vb.get((n_attn_heads, cfg.head_size), "time_faaaa")?;
let time_mix_gate = vb.get((1, 1, cfg.hidden_size), "time_mix_gate")?;
let time_decay_w1 = vb.get((cfg.hidden_size, n_attn_heads * 2), "time_decay_w1")?;
let time_decay_w2 = vb.get((n_attn_heads * 2, cfg.hidden_size), "time_decay_w2")?;
let time_mix_w1 = vb.get((cfg.hidden_size, n_attn_heads * 5), "time_mix_w1")?;
let time_mix_w2 = vb.get((5, n_attn_heads, cfg.hidden_size), "time_mix_w2")?;
Ok(Self {
key,
value,
receptance,
gate,
output,
ln_x,
time_mix_x,
time_mix_w,
time_mix_key,
time_mix_value,
time_mix_receptance,
time_decay,
time_faaaa,
time_mix_gate,
time_decay_w1,
time_decay_w2,
time_mix_w1,
time_mix_w2,
layer_id,
n_attn_heads,
})
}
pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let h = self.n_attn_heads;
let (b, t, s) = xs.dims3()?;
let s = s / h;
let (receptance, key, value, gate, w) = {
// extract key-value
let shifted = state.per_layer[self.layer_id].extract_key_value.clone();
let shifted = if shifted.rank() == 2 {
shifted.unsqueeze(1)?
} else {
shifted
};
let sx = (&shifted - xs)?;
let xxx = (xs + &sx * &self.time_mix_x)?;
let xxx = xxx
.broadcast_matmul(&self.time_mix_w1)?
.tanh()?
.reshape((b * t, 5, ()))?
.transpose(0, 1)?;
let xxx = xxx.matmul(&self.time_mix_w2)?.reshape((5, b, t, ()))?;
let (mw, mk, mv, mr, mg) = (xxx.i(0)?, xxx.i(1)?, xxx.i(2)?, xxx.i(3)?, xxx.i(4)?);
let xw = (xs + &sx * (&self.time_mix_w + &mw)?)?;
let xk = (xs + &sx * (&self.time_mix_key + &mk)?)?;
let xv = (xs + &sx * (&self.time_mix_value + &mv)?)?;
let xr = (xs + &sx * (&self.time_mix_receptance + &mr)?)?;
let xg = (xs + &sx * (&self.time_mix_gate + &mg)?)?;
let w = (&self.time_decay
+ xw.broadcast_matmul(&self.time_decay_w1)?
.tanh()?
.broadcast_matmul(&self.time_decay_w2)?)?
.reshape(((), 1, 1))?
.reshape((self.n_attn_heads, (), 1))?;
let key = self.key.forward(&xk)?;
let value = self.value.forward(&xv)?;
let receptance = self.receptance.forward(&xr)?;
let gate = candle_nn::ops::silu(&self.gate.forward(&xg)?)?;
state.per_layer[self.layer_id].extract_key_value = xs.i((.., t - 1))?;
(receptance, key, value, gate, w)
};
// linear attention
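        // Recurrent form of the wkv computation; for each step t:
        //   out_t = r_t * (u * (k_t v_t^T) + S_{t-1})
        //   S_t   = k_t v_t^T + w * S_{t-1}
        // where u is `time_faaaa` and w is the per-channel decay.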
let mut state_ = state.per_layer[self.layer_id].linear_attention.clone();
let key = key.reshape((b, t, h, s))?.permute((0, 2, 3, 1))?;
let value = value.reshape((b, t, h, s))?.transpose(1, 2)?;
let receptance = receptance.reshape((b, t, h, s))?.transpose(1, 2)?;
let w = w.exp()?.neg()?.exp()?;
let time_faaaa =
self.time_faaaa
.reshape(((), 1, 1))?
.reshape((self.n_attn_heads, (), 1))?;
let mut out: Vec<Tensor> = Vec::with_capacity(t);
for t_ in 0..t {
let rt = receptance.i((.., .., t_..t_ + 1))?.contiguous()?;
let kt = key.i((.., .., .., t_..t_ + 1))?.contiguous()?;
let vt = value.i((.., .., t_..t_ + 1))?.contiguous()?;
let at = kt.matmul(&vt)?;
let rhs = (time_faaaa.broadcast_mul(&at)? + &state_)?;
let out_ = rt.matmul(&rhs)?.squeeze(2)?;
state_ = (&at + w.broadcast_mul(&state_))?;
out.push(out_)
}
let out = Tensor::cat(&out, 1)?.reshape((b * t, h * s, 1))?;
let out = out.apply(&self.ln_x)?.reshape((b, t, h * s))?;
let out = (out * gate)?.apply(&self.output)?;
state.per_layer[self.layer_id].linear_attention = state_;
Ok(out)
}
}
#[derive(Debug, Clone)]
struct FeedForward {
time_mix_key: Tensor,
time_mix_receptance: Tensor,
key: Linear,
receptance: Linear,
value: Linear,
layer_id: usize,
}
impl FeedForward {
fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let int_size = cfg
.intermediate_size
.unwrap_or(((cfg.hidden_size as f64 * 3.5) as usize) / 32 * 32);
let key = linear(cfg.hidden_size, int_size, vb.pp("key"))?;
let receptance = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("receptance"))?;
let value = linear(int_size, cfg.hidden_size, vb.pp("value"))?;
let time_mix_key = vb.get((1, 1, cfg.hidden_size), "time_mix_key")?;
let time_mix_receptance = vb.get((1, 1, cfg.hidden_size), "time_mix_receptance")?;
Ok(Self {
key,
receptance,
value,
time_mix_key,
time_mix_receptance,
layer_id,
})
}
fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let shifted = state.per_layer[self.layer_id]
.feed_forward
.broadcast_sub(xs)?;
let key = (xs + shifted.broadcast_mul(&self.time_mix_key)?)?;
let receptance = (xs + shifted.broadcast_mul(&self.time_mix_receptance)?)?;
let key = key.apply(&self.key)?.relu()?.sqr()?;
let value = key.apply(&self.value)?;
let receptance = candle_nn::ops::sigmoid(&receptance.apply(&self.receptance)?)?;
state.per_layer[self.layer_id].feed_forward = xs.i((.., xs.dim(1)? - 1))?;
let xs = (receptance * value)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct Block {
pre_ln: Option<LayerNorm>,
ln1: LayerNorm,
ln2: LayerNorm,
attention: SelfAttention,
feed_forward: FeedForward,
}
impl Block {
fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln1 = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln1"))?;
let ln2 = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln2"))?;
let pre_ln = if layer_id == 0 {
let ln = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("pre_ln"))?;
Some(ln)
} else {
None
};
let attention = SelfAttention::new(layer_id, cfg, vb.pp("attention"))?;
let feed_forward = FeedForward::new(layer_id, cfg, vb.pp("feed_forward"))?;
Ok(Self {
pre_ln,
ln1,
ln2,
attention,
feed_forward,
})
}
fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let xs = match self.pre_ln.as_ref() {
None => xs.clone(),
Some(pre_ln) => xs.apply(pre_ln)?,
};
let attention = self.attention.forward(&xs.apply(&self.ln1)?, state)?;
let xs = (xs + attention)?;
let feed_forward = self.feed_forward.forward(&xs.apply(&self.ln2)?, state)?;
let xs = (xs + feed_forward)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
pub struct Model {
embeddings: Embedding,
blocks: Vec<Block>,
ln_out: LayerNorm,
head: Linear,
rescale_every: usize,
layers_are_rescaled: bool,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("rwkv");
let embeddings = embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embeddings"))?;
let mut blocks = Vec::with_capacity(cfg.num_hidden_layers);
let vb_b = vb_m.pp("blocks");
for block_index in 0..cfg.num_hidden_layers {
let block = Block::new(block_index, cfg, vb_b.pp(block_index))?;
blocks.push(block)
}
let ln_out = layer_norm(cfg.hidden_size, 1e-5, vb_m.pp("ln_out"))?;
let head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("head"))?;
Ok(Self {
embeddings,
blocks,
ln_out,
head,
rescale_every: cfg.rescale_every,
            layers_are_rescaled: false, // This seems to only happen for the f16/bf16 dtypes.
})
}
pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let (_b_size, _seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.embeddings)?;
for (block_idx, block) in self.blocks.iter().enumerate() {
xs = block.forward(&xs, state)?;
if self.layers_are_rescaled && (block_idx + 1) % self.rescale_every == 0 {
xs = (xs / 2.)?
}
}
let xs = xs.apply(&self.ln_out)?.apply(&self.head)?;
state.pos += 1;
Ok(xs)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_mixformer.rs | candle-transformers/src/models/quantized_mixformer.rs | //! Module containing quantized MixFormer model implementation.
//!
//! MixFormer is an efficient transformer variant for text generation that uses
//! mixture-of-experts and parallel attention/feed-forward blocks.
//! This implementation provides quantization for reduced memory usage.
//!
//! Key features:
//! - Parallel attention and feed-forward computation
//! - Rotary positional embeddings
//! - Optional key-value caching
//! - Support for 8-bit quantization
//!
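//! # Parallel block (schematic)
//!
//! A rough sketch of the parallel attention / feed-forward pattern listed above:
//! both branches read the same layer-normalized input and their outputs are added
//! back to the residual. This is illustrative pseudo-code, not the exact block
//! implemented in this file.
//!
//! ```ignore
//! // xs: (batch, seq_len, n_embd); ln, mha and mlp as defined further down.
//! let residual = xs.clone();
//! let normed = xs.apply(&ln)?;
//! let attn = mha.forward(&normed, mask)?;
//! let ff = mlp.forward(&normed)?;
//! let xs = ((attn + ff)? + residual)?;
//! ```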
use crate::quantized_nn::{layer_norm, linear, Linear};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::Activation;
pub use crate::models::mixformer::Config;
const MAX_SEQ_LEN: usize = 4096;
#[derive(Debug, Clone)]
struct Embedding {
wte: crate::quantized_nn::Embedding,
}
impl Embedding {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let wte = crate::quantized_nn::Embedding::new(cfg.vocab_size, cfg.n_embd, vb.pp("wte"))?;
Ok(Self { wte })
}
}
impl Module for Embedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.wte.forward(xs)
}
}
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device)
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dim: usize, max_seq_len: usize, dev: &Device) -> Result<Self> {
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
qkv: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor, Tensor)> {
let (_b_size, seqlen, three, _, _headdim) = qkv.dims5()?;
if three != 3 {
candle::bail!("unexpected shape for qkv {:?}", qkv.shape())
}
let (_rotary_seqlen, rotary_dim) = self.cos.dims2()?;
let rotary_dim = rotary_dim * 2;
let q_rot = qkv.i((.., .., 0, .., ..rotary_dim))?;
let q_pass = qkv.i((.., .., 0, .., rotary_dim..))?;
let k_rot = qkv.i((.., .., 1, .., ..rotary_dim))?;
let k_pass = qkv.i((.., .., 1, .., rotary_dim..))?;
let q12 = q_rot.chunk(2, D::Minus1)?;
let k12 = k_rot.chunk(2, D::Minus1)?;
let (q1, q2) = (&q12[0], &q12[1]);
let (k1, k2) = (&k12[0], &k12[1]);
let c = self.cos.narrow(0, seqlen_offset, seqlen)?.unsqueeze(1)?;
let s = self.sin.narrow(0, seqlen_offset, seqlen)?.unsqueeze(1)?;
let q_rot = Tensor::cat(
&[
(q1.broadcast_mul(&c)? - q2.broadcast_mul(&s)?)?,
(q1.broadcast_mul(&s)? + q2.broadcast_mul(&c)?)?,
],
D::Minus1,
)?;
let k_rot = Tensor::cat(
&[
(k1.broadcast_mul(&c)? - k2.broadcast_mul(&s)?)?,
(k1.broadcast_mul(&s)? + k2.broadcast_mul(&c)?)?,
],
D::Minus1,
)?;
let q = Tensor::cat(&[&q_rot, &q_pass], D::Minus1)?;
let k = Tensor::cat(&[&k_rot, &k_pass], D::Minus1)?;
let v = qkv.i((.., .., 2))?;
Ok((q, k, v))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
fc1: Linear,
fc2: Linear,
act: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let n_inner = cfg.n_inner.unwrap_or(4 * cfg.n_embd);
let fc1 = linear(cfg.n_embd, n_inner, vb.pp("fc1"))?;
let fc2 = linear(n_inner, cfg.n_embd, vb.pp("fc2"))?;
Ok(Self {
fc1,
fc2,
act: cfg.activation_function,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
}
}
#[derive(Debug, Clone)]
struct CausalLMHead {
ln: candle_nn::LayerNorm,
linear: Linear,
}
impl CausalLMHead {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln = layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?;
let linear = linear(cfg.n_embd, cfg.vocab_size, vb.pp("linear"))?;
Ok(Self { ln, linear })
}
}
impl Module for CausalLMHead {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.ln)?
.apply(&self.linear)?
.to_dtype(DType::F32)
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MHA {
wqkv: Linear,
out_proj: Linear,
rotary_emb: RotaryEmbedding,
kv_cache: Option<(Tensor, Tensor)>,
head_dim: usize,
n_head: usize,
softmax_scale: f64,
span: tracing::Span,
}
impl MHA {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let head_dim = cfg.n_embd / cfg.n_head;
let op_size = cfg.n_embd;
let wqkv = linear(cfg.n_embd, 3 * op_size, vb.pp("Wqkv"))?;
let out_proj = linear(op_size, cfg.n_embd, vb.pp("out_proj"))?;
let rotary_emb = RotaryEmbedding::new(cfg.rotary_dim, MAX_SEQ_LEN, vb.device())?;
let softmax_scale = 1f64 / (head_dim as f64).sqrt();
Ok(Self {
wqkv,
out_proj,
head_dim,
n_head: cfg.n_head,
kv_cache: None,
rotary_emb,
softmax_scale,
span: tracing::span!(tracing::Level::TRACE, "mha"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_size, seq_len, _n_embd) = xs.dims3()?;
let qkv = self
.wqkv
.forward(xs)?
.reshape((b_size, seq_len, 3, (), self.head_dim))?;
let seqlen_offset = match &self.kv_cache {
None => 0,
Some((prev_k, _)) => prev_k.dim(1)?,
};
        // In the Python implementation, a single tensor is returned with the third axis of size 3.
let (q, k, v) = self.rotary_emb.apply_rotary_emb_qkv(&qkv, seqlen_offset)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 1)?;
let v = Tensor::cat(&[prev_v, &v], 1)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
// scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
let q = q.transpose(1, 2)?.flatten_to(1)?; // b*h, t, d
let k = k.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d
let v = v.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d
let attn_weights = (q.matmul(&k.t()?)? * self.softmax_scale)?; // b*h, t, s
// causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=scores.device), 1)
// scores = scores + causal_mask.to(dtype=scores.dtype)
let attn_weights = match mask {
None => attn_weights,
Some(mask) => masked_fill(
&attn_weights,
&mask.broadcast_left(b_size * self.n_head)?,
f32::NEG_INFINITY,
)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
// output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
// attn_weights: b*h,t,s, v: b*h,s,d
let attn_output = attn_weights.matmul(&v)?;
// b*h,t,d
let attn_output = attn_output
.reshape((b_size, (), seq_len, self.head_dim))?
.transpose(1, 2)?
.flatten_from(D::Minus2)?;
attn_output.apply(&self.out_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct ParallelBlock {
ln: candle_nn::LayerNorm,
mixer: MHA,
mlp: MLP,
span: tracing::Span,
}
impl ParallelBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln = layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?;
let mixer = MHA::new(cfg, vb.pp("mixer"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
Ok(Self {
ln,
mixer,
mlp,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let residual = xs;
let xs = xs.apply(&self.ln)?;
let attn_outputs = self.mixer.forward(&xs, mask)?;
let feed_forward_hidden_states = self.mlp.forward(&xs)?;
attn_outputs + feed_forward_hidden_states + residual
}
fn clear_kv_cache(&mut self) {
self.mixer.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct MixFormerSequentialForCausalLM {
embedding: Embedding,
blocks: Vec<ParallelBlock>,
head: CausalLMHead,
span: tracing::Span,
}
impl MixFormerSequentialForCausalLM {
pub fn new_v2(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_head = vb.pp("lm_head");
let vb = vb.pp("transformer");
let embedding = Embedding::new(cfg, vb.pp("embd"))?;
let mut blocks = Vec::new();
for i in 0..cfg.n_layer {
let block = ParallelBlock::new(cfg, vb.pp("h").pp(i))?;
blocks.push(block)
}
let head = CausalLMHead::new(cfg, vb_head)?;
Ok(Self {
embedding,
blocks,
head,
span: tracing::span!(tracing::Level::TRACE, "mixformer"),
})
}
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("layers");
let embedding = Embedding::new(cfg, vb.pp(0))?;
let mut blocks = Vec::new();
for i in 0..cfg.n_layer {
let block = ParallelBlock::new(cfg, vb.pp(i + 1))?;
blocks.push(block);
}
let head = CausalLMHead::new(cfg, vb.pp(cfg.n_layer + 1))?;
Ok(Self {
embedding,
blocks,
head,
span: tracing::span!(tracing::Level::TRACE, "mixformer"),
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_size, seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.embedding)?;
let mask = if seq_len <= 1 {
None
} else {
Some(get_mask(seq_len, xs.device())?)
};
for block in self.blocks.iter_mut() {
xs = block.forward(&xs, mask.as_ref())?;
}
xs.narrow(1, seq_len - 1, 1)?.apply(&self.head)?.squeeze(1)
}
pub fn forward_with_img(
&mut self,
bos_token: &Tensor,
xs: &Tensor,
img_embeds: &Tensor,
) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = xs.apply(&self.embedding)?;
let bos_token = bos_token.apply(&self.embedding)?;
// Python implementation sequence order is <bos token embedding><img embedding><rest of text embedding>
// https://github.com/vikhyat/moondream/blob/a9d788a20d1543fb1479edc54106e88cff7759d3/moondream/moondream.py#L43-L56
let mut xs = Tensor::cat(&[bos_token, img_embeds.clone(), xs], 1)?;
let (_b_size, seq_len, _embds) = xs.dims3()?;
let mask = Some(get_mask(seq_len, xs.device())?);
for block in self.blocks.iter_mut() {
xs = block.forward(&xs, mask.as_ref())?
}
let xs = xs
.narrow(1, seq_len - 1, 1)?
.apply(&self.head)?
.squeeze(1)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
self.blocks.iter_mut().for_each(|b| b.clear_kv_cache())
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/stable_lm.rs | candle-transformers/src/models/stable_lm.rs | //! StableLM model implementation.
//!
//! StableLM is a family of language models trained by Stability AI.
//! This implementation covers the base architecture as well as StableLM-2 variants
//! (which add a bias to the QKV projections).
//!
//! Key characteristics:
//! - Grouped query attention (GQA)
//! - Layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for different model sizes (3B, 7B)
//!
//! References:
//! - 🤗 [Model Card](https://huggingface.co/stabilityai/stablelm-3b-4e1t)
//!
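//! A minimal usage sketch (not part of the upstream sources), assuming the weights have
//! already been loaded into a `VarBuilder` elsewhere:
//!
//! ```ignore
//! use candle::{Device, Result, Tensor};
//! use candle_nn::VarBuilder;
//!
//! fn prompt_logits(vb: VarBuilder, tokens: &[u32], device: &Device) -> Result<Tensor> {
//!     let cfg = Config::stablelm_3b_4e1t(false); // no flash-attn
//!     let mut model = Model::new(&cfg, vb)?;
//!     let input = Tensor::new(tokens, device)?.unsqueeze(0)?;
//!     // The first call processes the whole prompt, so the sequence offset is 0.
//!     model.forward(&input, 0)
//! }
//! ```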
use crate::models::with_tracing::{linear, linear_no_bias, Linear};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, LayerNorm, VarBuilder};
use serde::Deserialize;
use std::sync::Arc;
// https://huggingface.co/stabilityai/stablelm-3b-4e1t/blob/main/configuration_stablelm.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub(crate) vocab_size: usize,
pub(crate) intermediate_size: usize,
pub(crate) hidden_size: usize,
pub(crate) num_hidden_layers: usize,
pub(crate) num_attention_heads: usize,
pub(crate) num_key_value_heads: usize,
pub(crate) hidden_act: Activation,
pub(crate) partial_rotary_factor: f64,
pub(crate) rope_theta: f64,
pub(crate) max_position_embeddings: usize,
pub(crate) layer_norm_eps: f64,
pub(crate) use_cache: bool,
#[serde(default)]
pub(crate) use_qkv_bias: bool, // Used in StableLM-2
#[serde(default)]
pub(crate) use_flash_attn: bool, // Not in config.json
}
impl Config {
pub fn stablelm_3b_4e1t(use_flash_attn: bool) -> Self {
Self {
vocab_size: 50304,
intermediate_size: 6912,
hidden_size: 2560,
num_hidden_layers: 32,
num_attention_heads: 32,
num_key_value_heads: 32,
hidden_act: Activation::Silu,
partial_rotary_factor: 0.25,
rope_theta: 10_000.,
max_position_embeddings: 4096,
layer_norm_eps: 1e-5,
use_qkv_bias: false,
use_cache: true,
use_flash_attn,
}
}
pub fn head_dim(&self) -> usize {
self.hidden_size / self.num_attention_heads
}
pub fn rotary_ndims(&self) -> usize {
(self.head_dim() as f64 * self.partial_rotary_factor) as usize
}
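    // For example, with `stablelm_3b_4e1t`: head_dim() = 2560 / 32 = 80 and
    // rotary_ndims() = 80 * 0.25 = 20, i.e. only a quarter of each head is rotated.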
pub fn num_kv_groups(&self) -> usize {
self.num_attention_heads / self.num_key_value_heads
}
pub fn set_use_flash_attn(&mut self, use_flash_attn: bool) {
self.use_flash_attn = use_flash_attn
}
}
#[derive(Debug)]
pub(crate) struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
let xs = xs.chunk(2, D::Minus1)?;
Tensor::cat(&[&xs[1].neg()?, &xs[0]], D::Minus1)
}
impl RotaryEmbedding {
pub(crate) fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.rotary_ndims();
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
pub(crate) fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
span: tracing::Span,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
span: tracing::span!(tracing::Level::TRACE, "mlp"),
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
use_cache: bool,
rotary_ndims: usize,
use_flash_attn: bool,
span: tracing::Span,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let head_dim = cfg.head_dim();
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let linear_layer = if cfg.use_qkv_bias {
linear
} else {
linear_no_bias
};
let q_proj = linear_layer(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups: cfg.num_kv_groups(),
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
use_cache: cfg.use_cache,
rotary_ndims: cfg.rotary_ndims(),
use_flash_attn: cfg.use_flash_attn,
span: tracing::span!(tracing::Level::TRACE, "attn"),
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (rot_ndims, pass_ndims) = (self.rotary_ndims, self.head_dim - self.rotary_ndims);
let query_rot = query_states.narrow(D::Minus1, 0, rot_ndims)?;
let query_pass = query_states.narrow(D::Minus1, rot_ndims, pass_ndims)?;
let key_rot = key_states.narrow(D::Minus1, 0, rot_ndims)?;
let key_pass = key_states.narrow(D::Minus1, rot_ndims, pass_ndims)?;
let (query_rot, key_rot) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_rot, &key_rot, seqlen_offset)?;
let query_states = Tensor::cat(&[query_rot, query_pass], D::Minus1)?.contiguous()?;
let key_states = Tensor::cat(&[key_rot, key_pass], D::Minus1)?.contiguous()?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
if self.use_cache {
self.kv_cache = Some((key_states.clone(), value_states.clone()));
}
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = query_states.transpose(1, 2)?;
let k = key_states.transpose(1, 2)?;
let v = value_states.transpose(1, 2)?;
let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)?
} else {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
}
#[derive(Debug)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: LayerNorm,
post_attention_layernorm: LayerNorm,
span: tracing::Span,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm = candle_nn::layer_norm(
cfg.hidden_size,
cfg.layer_norm_eps,
vb.pp("input_layernorm"),
)?;
let post_attention_layernorm = candle_nn::layer_norm(
cfg.hidden_size,
cfg.layer_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
span: tracing::span!(tracing::Level::TRACE, "layer"),
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let _enter = self.span.enter();
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
}
#[derive(Debug)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: LayerNorm,
lm_head: Linear,
device: Device,
dtype: DType,
span: tracing::Span,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = candle_nn::layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_m.pp("norm"))?;
let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
device: vb.device().clone(),
dtype: vb.dtype(),
span: tracing::span!(tracing::Level::TRACE, "model"),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
// Sliding window mask?
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/gemma2.rs | candle-transformers/src/models/gemma2.rs | //! Gemma LLM architecture (Google) inference implementation.
//!
//! See ["Gemma: Open Models Based on Gemini Technology"](https://blog.google/technology/developers/gemma-open-models/)
//!
//! Based on implementations from Google and OpenLLM
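//!
//! A minimal usage sketch (not part of the upstream sources), assuming the configuration
//! has been deserialized from `config.json` and the weights loaded into a `VarBuilder`
//! elsewhere:
//!
//! ```ignore
//! use candle::{Device, Result, Tensor};
//! use candle_nn::VarBuilder;
//!
//! fn prompt_logits(cfg: &Config, vb: VarBuilder, tokens: &[u32], dev: &Device) -> Result<Tensor> {
//!     let mut model = Model::new(false, cfg, vb)?; // use_flash_attn = false
//!     let input = Tensor::new(tokens, dev)?.unsqueeze(0)?;
//!     // Returns the (softcapped) logits for the last prompt position.
//!     model.forward(&input, 0)
//! }
//! ```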
use std::sync::Arc;
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{linear_b as linear, Activation, Linear, VarBuilder};
fn default_max_position_embeddings() -> usize {
4096
}
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub attention_bias: bool,
pub head_dim: usize,
pub hidden_activation: Activation,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_theta: f64,
pub vocab_size: usize,
pub final_logit_softcapping: Option<f64>,
pub attn_logit_softcapping: Option<f64>,
pub query_pre_attn_scalar: usize,
// TODO: Handle the sliding window in the attention mask.
pub sliding_window: Option<usize>,
#[serde(default = "default_max_position_embeddings")]
pub max_position_embeddings: usize,
}
#[derive(Debug, Clone)]
struct RmsNorm {
weight: Tensor,
eps: f64,
}
impl RmsNorm {
fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(dim, "weight")?;
Ok(Self { weight, eps })
}
}
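// The forward pass computes y = x / sqrt(mean(x^2) + eps) * (1 + weight), with the mean
// taken over the last dimension; the `1 + weight` parametrization follows the Gemma
// reference implementation.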
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
x_normed
.to_dtype(x_dtype)?
.broadcast_mul(&(&self.weight + 1.0)?)
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.head_dim;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("gate_proj"))?;
let up_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("up_proj"))?;
let down_proj = linear(intermediate_sz, hidden_sz, false, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_activation,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
attn_logit_softcapping: Option<f64>,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
use_flash_attn: bool,
}
impl Attention {
fn new(
rotary_emb: Arc<RotaryEmbedding>,
use_flash_attn: bool,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = cfg.head_dim;
let bias = cfg.attention_bias;
let q_proj = linear(hidden_sz, num_heads * head_dim, bias, vb.pp("q_proj"))?;
let k_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("k_proj"))?;
let v_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("v_proj"))?;
let o_proj = linear(num_heads * head_dim, hidden_sz, bias, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
attn_logit_softcapping: cfg.attn_logit_softcapping,
rotary_emb,
kv_cache: None,
use_flash_attn,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = query_states.transpose(1, 2)?;
let k = key_states.transpose(1, 2)?;
let v = value_states.transpose(1, 2)?;
let scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, scale, attention_mask.is_some())?.transpose(1, 2)?
} else {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match self.attn_logit_softcapping {
None => attn_weights,
Some(sc) => ((attn_weights / sc)?.tanh()? * sc)?,
};
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, ()))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: RmsNorm,
pre_feedforward_layernorm: RmsNorm,
post_feedforward_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(
rotary_emb: Arc<RotaryEmbedding>,
use_flash_attn: bool,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, use_flash_attn, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let pre_feedforward_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("pre_feedforward_layernorm"),
)?;
let post_feedforward_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_feedforward_layernorm"),
)?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
pre_feedforward_layernorm,
post_feedforward_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = xs.apply(&self.post_attention_layernorm)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.pre_feedforward_layernorm)?;
let xs = xs.apply(&self.mlp)?;
let xs = xs.apply(&self.post_feedforward_layernorm)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
final_logit_softcapping: Option<f64>,
device: Device,
dtype: DType,
hidden_size: usize,
sliding_window: Option<usize>,
}
impl Model {
pub fn new(use_flash_attn: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer =
DecoderLayer::new(rotary_emb.clone(), use_flash_attn, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = Linear::new(embed_tokens.embeddings().clone(), None);
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
final_logit_softcapping: cfg.final_logit_softcapping,
device: vb.device().clone(),
dtype: vb.dtype(),
hidden_size: cfg.hidden_size,
sliding_window: cfg.sliding_window,
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let mask: Vec<_> = match self.sliding_window {
None => (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect(),
Some(sliding_window) => (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect(),
};
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let xs = self.embed_tokens.forward(input_ids)?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
let logits = xs
.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)?;
let logits = match self.final_logit_softcapping {
None => logits,
Some(sc) => ((logits / sc)?.tanh()? * sc)?,
};
Ok(logits)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/vgg.rs | candle-transformers/src/models/vgg.rs | //! VGG-16 model implementation.
//!
//! VGG is a family of convolutional neural network architectures; this module provides
//! the VGG-13, VGG-16 and VGG-19 variants. VGG-16, for instance, consists of 13
//! convolutional layers followed by 3 fully connected layers.
//!
//! Key characteristics:
//! - Conv layers with 3x3 filters
//! - Max pooling after every 2-3 conv layers
//! - Three fully connected layers of 4096, 4096, 1000 units
//! - ReLU activation and dropout
//!
//! References:
//! - [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
//!
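//! A minimal usage sketch (not part of the upstream sources), assuming the pretrained
//! weights have already been loaded into a `VarBuilder` elsewhere:
//!
//! ```ignore
//! use candle::{ModuleT, Result, Tensor};
//! use candle_nn::VarBuilder;
//!
//! fn classify(vb: VarBuilder, image: &Tensor) -> Result<Tensor> {
//!     // `image` is expected as (3, 224, 224); the forward pass adds the batch dimension.
//!     let model = Vgg::new(vb, Models::Vgg16)?;
//!     // Returns (1, 1000) class scores, with dropout disabled (eval mode).
//!     model.forward_t(image, false)
//! }
//! ```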
use candle::{ModuleT, Result, Tensor};
use candle_nn::{FuncT, VarBuilder};
// Enum representing the different VGG models
pub enum Models {
Vgg13,
Vgg16,
Vgg19,
}
// Struct representing a VGG model
#[derive(Debug)]
pub struct Vgg<'a> {
blocks: Vec<FuncT<'a>>,
}
// Struct representing the configuration for the pre-logit layer
struct PreLogitConfig {
in_dim: (usize, usize, usize, usize),
target_in: usize,
target_out: usize,
}
// Implementation of the VGG model
impl<'a> Vgg<'a> {
// Function to create a new VGG model
pub fn new(vb: VarBuilder<'a>, model: Models) -> Result<Self> {
let blocks = match model {
Models::Vgg13 => vgg13_blocks(vb)?,
Models::Vgg16 => vgg16_blocks(vb)?,
Models::Vgg19 => vgg19_blocks(vb)?,
};
Ok(Self { blocks })
}
}
// Implementation of the forward pass for the VGG model
impl ModuleT for Vgg<'_> {
fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> {
let mut xs = xs.unsqueeze(0)?;
for block in self.blocks.iter() {
xs = xs.apply_t(block, train)?;
}
Ok(xs)
}
}
// Function to create a conv2d block
// The block is composed of a sequence of conv2d + ReLU layers (two to four, depending on the variant) followed by a 2x2 max pool layer
fn conv2d_block(convs: &[(usize, usize, &str)], vb: &VarBuilder) -> Result<FuncT<'static>> {
let layers = convs
.iter()
.map(|&(in_c, out_c, name)| {
candle_nn::conv2d(
in_c,
out_c,
3,
candle_nn::Conv2dConfig {
stride: 1,
padding: 1,
..Default::default()
},
vb.pp(name),
)
})
.collect::<Result<Vec<_>>>()?;
Ok(FuncT::new(move |xs, _train| {
let mut xs = xs.clone();
for layer in layers.iter() {
xs = xs.apply(layer)?.relu()?
}
xs = xs.max_pool2d_with_stride(2, 2)?;
Ok(xs)
}))
}
// Function to create the fully connected classifier head
// The head is composed of three linear layers, each preceded by dropout and followed by ReLU
fn fully_connected(
num_classes: usize,
pre_logit_1: PreLogitConfig,
pre_logit_2: PreLogitConfig,
vb: VarBuilder,
) -> Result<FuncT> {
let lin = get_weights_and_biases(
&vb.pp("pre_logits.fc1"),
pre_logit_1.in_dim,
pre_logit_1.target_in,
pre_logit_1.target_out,
)?;
let lin2 = get_weights_and_biases(
&vb.pp("pre_logits.fc2"),
pre_logit_2.in_dim,
pre_logit_2.target_in,
pre_logit_2.target_out,
)?;
let dropout1 = candle_nn::Dropout::new(0.5);
let dropout2 = candle_nn::Dropout::new(0.5);
let dropout3 = candle_nn::Dropout::new(0.5);
Ok(FuncT::new(move |xs, train| {
let xs = xs.reshape((1, pre_logit_1.target_out))?;
let xs = xs.apply_t(&dropout1, train)?.apply(&lin)?.relu()?;
let xs = xs.apply_t(&dropout2, train)?.apply(&lin2)?.relu()?;
let lin3 = candle_nn::linear(4096, num_classes, vb.pp("head.fc"))?;
let xs = xs.apply_t(&dropout3, train)?.apply(&lin3)?.relu()?;
Ok(xs)
}))
}
// Function to get the weights and biases for a layer
// This is required because the weights and biases are stored in a different format than our linear layer expects
fn get_weights_and_biases(
vs: &VarBuilder,
in_dim: (usize, usize, usize, usize),
target_in: usize,
target_out: usize,
) -> Result<candle_nn::Linear> {
let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL;
let ws = vs.get_with_hints(in_dim, "weight", init_ws)?;
let ws = ws.reshape((target_in, target_out))?;
let bound = 1. / (target_out as f64).sqrt();
let init_bs = candle_nn::Init::Uniform {
lo: -bound,
up: bound,
};
let bs = vs.get_with_hints(target_in, "bias", init_bs)?;
Ok(candle_nn::Linear::new(ws, Some(bs)))
}
fn vgg13_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> {
let num_classes = 1000;
let blocks = vec![
conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?,
conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?,
conv2d_block(&[(128, 256, "features.10"), (256, 256, "features.12")], &vb)?,
conv2d_block(&[(256, 512, "features.15"), (512, 512, "features.17")], &vb)?,
conv2d_block(&[(512, 512, "features.20"), (512, 512, "features.22")], &vb)?,
fully_connected(
num_classes,
PreLogitConfig {
in_dim: (4096, 512, 7, 7),
target_in: 4096,
target_out: 512 * 7 * 7,
},
PreLogitConfig {
in_dim: (4096, 4096, 1, 1),
target_in: 4096,
target_out: 4096,
},
vb.clone(),
)?,
];
Ok(blocks)
}
fn vgg16_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> {
let num_classes = 1000;
let blocks = vec![
conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?,
conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?,
conv2d_block(
&[
(128, 256, "features.10"),
(256, 256, "features.12"),
(256, 256, "features.14"),
],
&vb,
)?,
conv2d_block(
&[
(256, 512, "features.17"),
(512, 512, "features.19"),
(512, 512, "features.21"),
],
&vb,
)?,
conv2d_block(
&[
(512, 512, "features.24"),
(512, 512, "features.26"),
(512, 512, "features.28"),
],
&vb,
)?,
fully_connected(
num_classes,
PreLogitConfig {
in_dim: (4096, 512, 7, 7),
target_in: 4096,
target_out: 512 * 7 * 7,
},
PreLogitConfig {
in_dim: (4096, 4096, 1, 1),
target_in: 4096,
target_out: 4096,
},
vb.clone(),
)?,
];
Ok(blocks)
}
fn vgg19_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> {
let num_classes = 1000;
let blocks = vec![
conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?,
conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?,
conv2d_block(
&[
(128, 256, "features.10"),
(256, 256, "features.12"),
(256, 256, "features.14"),
(256, 256, "features.16"),
],
&vb,
)?,
conv2d_block(
&[
(256, 512, "features.19"),
(512, 512, "features.21"),
(512, 512, "features.23"),
(512, 512, "features.25"),
],
&vb,
)?,
conv2d_block(
&[
(512, 512, "features.28"),
(512, 512, "features.30"),
(512, 512, "features.32"),
(512, 512, "features.34"),
],
&vb,
)?,
fully_connected(
num_classes,
PreLogitConfig {
in_dim: (4096, 512, 7, 7),
target_in: 4096,
target_out: 512 * 7 * 7,
},
PreLogitConfig {
in_dim: (4096, 4096, 1, 1),
target_in: 4096,
target_out: 4096,
},
vb.clone(),
)?,
];
Ok(blocks)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/phi3.rs | candle-transformers/src/models/phi3.rs | //! Microsoft Phi-3 model implementation
//!
//! See Phi model details at:
//! - [Phi-3 Model](https://huggingface.co/microsoft/phi-3)
//!
//! The Phi series are decoder-only transformers designed for code and language tasks.
//! Key characteristics:
//! - Decoder-only transformer architecture
//! - RoPE embeddings
//! - Layer normalization
//! - QK normalization
//! - Mixed activation functions
//! - Improved context window handling
//!
//! References:
//! - [Hugging Face Implementation](https://huggingface.co/microsoft/phi-3)
//! - [Alternative Implementation](https://huggingface.co/microsoft/phi-3/tree/main)
//!
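//! A minimal usage sketch (not part of the upstream sources) showing prompt processing
//! followed by one incremental decoding step, assuming `cfg` and a loaded `VarBuilder`
//! are prepared elsewhere:
//!
//! ```ignore
//! use candle::{Device, Result, Tensor};
//! use candle_nn::VarBuilder;
//!
//! fn two_steps(cfg: &Config, vb: VarBuilder, prompt: &[u32], next: u32, dev: &Device) -> Result<Tensor> {
//!     let mut model = Model::new(cfg, vb)?;
//!     // Process the whole prompt at offset 0; the KV caches are filled as a side effect.
//!     let input = Tensor::new(prompt, dev)?.unsqueeze(0)?;
//!     let _prompt_logits = model.forward(&input, 0)?;
//!     // Feed a single new token, offset by the number of positions already cached.
//!     let input = Tensor::new(&[next], dev)?.unsqueeze(0)?;
//!     model.forward(&input, prompt.len())
//! }
//! ```
//!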
// This implementation is based on:
// https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/modeling_phi3.py
use crate::models::with_tracing::{linear_no_bias as linear, Linear, RmsNorm};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
use std::sync::Arc;
#[derive(Debug, Clone, serde::Deserialize)]
pub enum RopeScalingType {
#[serde(rename = "longrope")]
LongRope,
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct RopeScaling {
pub short_factor: Vec<f32>,
pub long_factor: Vec<f32>,
#[serde(rename = "type")]
pub type_: RopeScalingType,
}
// https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/config.json
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_act: candle_nn::Activation,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_theta: f64,
pub bos_token_id: Option<u32>,
pub eos_token_id: Option<u32>,
pub rope_scaling: Option<RopeScaling>,
pub max_position_embeddings: usize,
pub original_max_position_embeddings: Option<usize>,
pub partial_rotary_factor: Option<f64>,
#[serde(default)]
pub tie_word_embeddings: bool,
}
impl Config {
pub fn head_dim(&self) -> usize {
self.hidden_size / self.num_attention_heads
}
}
#[derive(Debug, Clone)]
pub struct RotaryEmbedding {
partial_dim: Option<usize>,
sin: Tensor,
cos: Tensor,
}
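// When `rope_scaling` (LongRope) is set, positions below `original_max_position_embeddings`
// use the short factors while later positions use the long factors, and the two frequency
// tables are concatenated along the position axis. When `partial_rotary_factor` is set,
// only the first `partial_dim` channels of each head are rotated; the rest pass through.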
impl RotaryEmbedding {
pub fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let partial_dim = cfg
.partial_rotary_factor
.as_ref()
.map(|v| (v * cfg.head_dim() as f64) as usize);
let dim = partial_dim.unwrap_or(cfg.head_dim());
let freqs = match cfg.rope_scaling.as_ref() {
None => {
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq = Tensor::from_vec(inv_freq, (1, ()), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
t.matmul(&inv_freq)?
}
Some(rope_scaling) => {
let inv_freq_s: Vec<_> = (0..dim)
.step_by(2)
.zip(rope_scaling.short_factor.iter())
.map(|(i, &f)| f / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_s = Tensor::from_vec(inv_freq_s, (1, ()), dev)?.to_dtype(dtype)?;
let max_seq_len = cfg.max_position_embeddings;
match cfg.original_max_position_embeddings {
None => {
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
t.matmul(&inv_freq_s)?
}
Some(original_max_seq_len) => {
let t_s = Tensor::arange(0u32, original_max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((original_max_seq_len, 1))?;
let freq_s = t_s.matmul(&inv_freq_s)?;
let inv_freq_l: Vec<_> = (0..dim)
.step_by(2)
.zip(rope_scaling.long_factor.iter())
.map(|(i, &f)| f / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_l =
Tensor::from_vec(inv_freq_l, (1, ()), dev)?.to_dtype(dtype)?;
let t_l =
Tensor::arange(original_max_seq_len as u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape(((), 1))?;
let freq_l = t_l.matmul(&inv_freq_l)?;
Tensor::cat(&[&freq_s, &freq_l], 0)?
}
}
}
};
Ok(Self {
partial_dim,
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn rope(&self, xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> {
let x = match self.partial_dim {
None => candle_nn::rotary_emb::rope(&xs.contiguous()?, cos, sin)?,
Some(dim) => {
let xs_rot = xs.i((.., .., .., ..dim))?.contiguous()?;
let xs_pass = xs.i((.., .., .., dim..))?;
let xs_rot = candle_nn::rotary_emb::rope(&xs_rot, cos, sin)?;
Tensor::cat(&[&xs_rot, &xs_pass], D::Minus1)?.contiguous()?
}
};
Ok(x)
}
pub fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = self.rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = self.rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
struct Attention {
qkv_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let head_dim = cfg.head_dim();
let op_size = num_heads * head_dim + 2 * num_kv_heads * head_dim;
let qkv_proj = linear(cfg.hidden_size, op_size, vb.pp("qkv_proj"))?;
let o_proj = linear(num_heads * head_dim, cfg.hidden_size, vb.pp("o_proj"))?;
Ok(Self {
qkv_proj,
o_proj,
rotary_emb,
kv_cache: None,
num_heads,
num_kv_heads,
num_kv_groups: num_heads / num_kv_heads,
head_dim,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let qkv = self.qkv_proj.forward(xs)?;
let query_pos = self.num_heads * self.head_dim;
let query_states = qkv.narrow(D::Minus1, 0, query_pos)?;
let key_states = qkv.narrow(D::Minus1, query_pos, self.num_kv_heads * self.head_dim)?;
let value_states = qkv.narrow(
D::Minus1,
query_pos + self.num_kv_heads * self.head_dim,
self.num_kv_heads * self.head_dim,
)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, ()))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct Mlp {
gate_up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
i_size: usize,
}
impl Mlp {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let i_size = cfg.intermediate_size;
let gate_up_proj = linear(hidden_size, 2 * i_size, vb.pp("gate_up_proj"))?;
let down_proj = linear(i_size, hidden_size, vb.pp("down_proj"))?;
Ok(Self {
gate_up_proj,
down_proj,
act_fn: cfg.hidden_act,
i_size,
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let up_states = xs.apply(&self.gate_up_proj)?;
let gate = up_states.narrow(D::Minus1, 0, self.i_size)?;
let up_states = up_states.narrow(D::Minus1, self.i_size, self.i_size)?;
let up_states = (up_states * gate.apply(&self.act_fn))?;
up_states.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: Mlp,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = Mlp::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = if cfg.tie_word_embeddings {
Linear::from_weights(embed_tokens.embeddings().clone(), None)
} else {
linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
};
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/qwen3_vl/config.rs | candle-transformers/src/models/qwen3_vl/config.rs | use candle_nn::Activation;
use crate::serde_default_fn;
serde_default_fn!(Activation, default_vision_hidden_act, Activation::Gelu);
serde_default_fn!(usize, default_in_channels, 3);
serde_default_fn!(usize, default_depth, 32);
serde_default_fn!(usize, default_hidden_size, 3584);
serde_default_fn!(usize, default_out_hidden_size, 3584);
serde_default_fn!(usize, default_intermediate_size, 3420);
serde_default_fn!(usize, default_num_heads, 16);
serde_default_fn!(usize, default_patch_size, 14);
serde_default_fn!(usize, default_spatial_merge_size, 2);
serde_default_fn!(usize, default_temporal_patch_size, 2);
serde_default_fn!(usize, default_num_position_embeddings, 576);
serde_default_fn!(Vec<usize>, default_deepstack_visual_indexes, Vec::new());
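// Vision tower configuration. Every field has a serde default (declared above), so the
// struct can be deserialized from checkpoints whose `config.json` omits some of these
// entries.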
#[derive(Debug, Clone, serde::Deserialize)]
pub struct VisionConfig {
#[serde(default = "default_depth")]
pub depth: usize,
#[serde(default = "default_hidden_size")]
pub hidden_size: usize,
#[serde(default = "default_out_hidden_size")]
pub out_hidden_size: usize,
#[serde(default = "default_vision_hidden_act")]
pub hidden_act: Activation,
#[serde(default = "default_intermediate_size")]
pub intermediate_size: usize,
#[serde(default = "default_num_heads")]
pub num_heads: usize,
#[serde(default = "default_in_channels")]
pub in_chans: usize,
#[serde(default = "default_patch_size")]
pub patch_size: usize,
#[serde(default = "default_spatial_merge_size")]
pub spatial_merge_size: usize,
#[serde(default = "default_temporal_patch_size")]
pub temporal_patch_size: usize,
#[serde(default = "default_num_position_embeddings")]
pub num_position_embeddings: usize,
#[serde(default = "default_deepstack_visual_indexes")]
pub deepstack_visual_indexes: Vec<usize>,
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct TextConfig {
pub head_dim: usize,
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub hidden_act: Activation,
pub max_position_embeddings: usize,
pub rms_norm_eps: f64,
pub tie_word_embeddings: bool,
pub rope_theta: f64,
pub sliding_window: Option<usize>,
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub text_config: TextConfig,
pub vision_config: VisionConfig,
pub image_token_id: u32,
pub video_token_id: u32,
pub vision_start_token_id: u32,
pub vision_end_token_id: u32,
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/qwen3_vl/vision.rs | candle-transformers/src/models/qwen3_vl/vision.rs | use std::f64;
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{
embedding, layer_norm, linear, Activation, Embedding, LayerNorm, LayerNormConfig, Linear,
Module, VarBuilder,
};
use crate::models::qwen3_vl::conv3d_temporal_2::{Conv3dConfig, Conv3dNoBias};
use super::config::VisionConfig;
struct PatchEmbed {
proj: Conv3dNoBias,
bias: Tensor,
in_channels: usize,
patch_size: usize,
temporal_patch_size: usize,
hidden_size: usize,
}
impl PatchEmbed {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let proj_vb = vb.pp("proj");
let proj = Conv3dNoBias::new(
cfg.in_chans,
cfg.hidden_size,
[cfg.temporal_patch_size, cfg.patch_size, cfg.patch_size],
Conv3dConfig {
stride: cfg.patch_size,
..Default::default()
},
proj_vb.clone(),
)?;
let bias = proj_vb.get(cfg.hidden_size, "bias")?;
Ok(Self {
proj,
bias,
in_channels: cfg.in_chans,
patch_size: cfg.patch_size,
temporal_patch_size: cfg.temporal_patch_size,
hidden_size: cfg.hidden_size,
})
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.reshape((
(),
self.in_channels,
self.temporal_patch_size,
self.patch_size,
self.patch_size,
))?;
let xs = self.proj.forward(&xs)?;
let xs = xs.reshape(((), self.hidden_size))?;
let bias = self.bias.unsqueeze(0)?;
xs.broadcast_add(&bias)
}
}
struct VisionMlp {
fc1: Linear,
fc2: Linear,
act: Activation,
}
impl VisionMlp {
fn new(dim: usize, hidden_dim: usize, act: Activation, vb: VarBuilder) -> Result<Self> {
Ok(Self {
fc1: linear(dim, hidden_dim, vb.pp("linear_fc1"))?,
fc2: linear(hidden_dim, dim, vb.pp("linear_fc2"))?,
act,
})
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.fc1.forward(xs)?;
let xs = xs.apply(&self.act)?;
self.fc2.forward(&xs)
}
}
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
let last_dim = xs.dim(D::Minus1)?;
let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}
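// With the last dimension split into halves [x1, x2], `rotate_half` returns [-x2, x1];
// combined with the cos/sin tables this yields the standard RoPE rotation used below.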
fn apply_rotary_pos_emb_vision(
q: &Tensor,
k: &Tensor,
cos: &Tensor,
sin: &Tensor,
) -> Result<(Tensor, Tensor)> {
let cos = cos.unsqueeze(D::Minus2)?;
let sin = sin.unsqueeze(D::Minus2)?;
let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin)?)?;
let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin)?)?;
Ok((q_embed, k_embed))
}
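// Attention over the flattened patch sequence is computed window by window: `cu_seqlens`
// holds cumulative patch counts, and each `[start, end)` chunk in the forward pass below
// is attended to in isolation so patches from different images/videos never mix.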
struct VisionAttention {
qkv: Linear,
proj: Linear,
num_heads: usize,
head_dim: usize,
}
impl VisionAttention {
fn new(dim: usize, num_heads: usize, vb: VarBuilder) -> Result<Self> {
Ok(Self {
qkv: linear(dim, dim * 3, vb.pp("qkv"))?,
proj: linear(dim, dim, vb.pp("proj"))?,
num_heads,
head_dim: dim / num_heads,
})
}
fn forward(
&self,
xs: &Tensor,
cu_seqlens: &[usize],
cos: &Tensor,
sin: &Tensor,
) -> Result<Tensor> {
let seq_len = xs.dim(0)?;
let hidden_states = self.qkv.forward(xs)?;
let qkv = hidden_states
.reshape((seq_len, 3, self.num_heads, self.head_dim))?
.permute((1, 0, 2, 3))?;
let mut q = qkv.i(0)?.squeeze(0)?;
let mut k = qkv.i(1)?.squeeze(0)?;
let mut v = qkv.i(2)?.squeeze(0)?;
let cos = cos.to_dtype(DType::F32)?;
let sin = sin.to_dtype(DType::F32)?;
q = q.to_dtype(DType::F32)?;
k = k.to_dtype(DType::F32)?;
v = v.to_dtype(DType::F32)?;
(q, k) = apply_rotary_pos_emb_vision(&q, &k, &cos, &sin)?;
let mut outputs = Vec::new();
for window in cu_seqlens.windows(2) {
let start = window[0];
let end = window[1];
if end <= start {
continue;
}
let len = end - start;
let q_chunk = q.narrow(0, start, len)?.transpose(0, 1)?.contiguous()?;
let k_chunk = k.narrow(0, start, len)?.transpose(0, 1)?.contiguous()?;
let v_chunk = v.narrow(0, start, len)?.transpose(0, 1)?.contiguous()?;
let mut chunk_out = {
let q = q_chunk.unsqueeze(0)?;
let k = k_chunk.unsqueeze(0)?;
let v = v_chunk.unsqueeze(0)?;
let attn_weights =
(q.matmul(&k.transpose(2, 3)?)? / (self.head_dim as f64).sqrt())?;
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&v)?
};
chunk_out = chunk_out.squeeze(0)?.transpose(0, 1)?;
chunk_out.device().synchronize()?;
chunk_out = chunk_out.reshape((len, self.num_heads * self.head_dim))?;
outputs.push(chunk_out.to_dtype(xs.dtype())?);
}
let attn_output = Tensor::cat(&outputs, 0)?;
self.proj.forward(&attn_output)
}
}
struct VisionBlock {
norm1: LayerNorm,
norm2: LayerNorm,
attn: VisionAttention,
mlp: VisionMlp,
}
impl VisionBlock {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let norm_cfg = LayerNormConfig {
eps: 1e-6,
..Default::default()
};
let norm1 = layer_norm(cfg.hidden_size, norm_cfg, vb.pp("norm1"))?;
let norm2 = layer_norm(cfg.hidden_size, norm_cfg, vb.pp("norm2"))?;
let attn = VisionAttention::new(cfg.hidden_size, cfg.num_heads, vb.pp("attn"))?;
let mlp = VisionMlp::new(
cfg.hidden_size,
cfg.intermediate_size,
cfg.hidden_act,
vb.pp("mlp"),
)?;
Ok(Self {
norm1,
norm2,
attn,
mlp,
})
}
fn forward(
&self,
xs: &Tensor,
cu_seqlens: &[usize],
cos: &Tensor,
sin: &Tensor,
) -> Result<Tensor> {
let normed = self.norm1.forward(xs)?;
let attn_out = self.attn.forward(&normed, cu_seqlens, cos, sin)?;
let xs_att = xs.add(&attn_out)?;
let mlp_out = self.mlp.forward(&self.norm2.forward(&xs_att)?)?;
xs_att.add(&mlp_out)
}
}
struct PatchMerger {
norm: LayerNorm,
use_postshuffle_norm: bool,
spatial_merge_unit: usize,
merged_hidden_size: usize,
fc1: Linear,
fc2: Linear,
}
impl PatchMerger {
fn new(cfg: &VisionConfig, use_postshuffle_norm: bool, vb: VarBuilder) -> Result<Self> {
let merged_hidden_size = cfg.hidden_size * cfg.spatial_merge_size.pow(2);
let norm_dim = if use_postshuffle_norm {
merged_hidden_size
} else {
cfg.hidden_size
};
let norm_cfg = LayerNormConfig {
eps: 1e-6,
..Default::default()
};
Ok(Self {
norm: layer_norm(norm_dim, norm_cfg, vb.pp("norm"))?,
use_postshuffle_norm,
spatial_merge_unit: cfg.spatial_merge_size.pow(2),
merged_hidden_size,
fc1: linear(merged_hidden_size, merged_hidden_size, vb.pp("linear_fc1"))?,
fc2: linear(merged_hidden_size, cfg.out_hidden_size, vb.pp("linear_fc2"))?,
})
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let seq_len = xs.dim(0)?;
if seq_len % self.spatial_merge_unit != 0 {
candle::bail!(
"Sequence length {} is not divisible by spatial merge unit {}",
seq_len,
self.spatial_merge_unit
);
}
let grouped = seq_len / self.spatial_merge_unit;
let norm_input = if self.use_postshuffle_norm {
xs.reshape((grouped, self.merged_hidden_size))?
} else {
xs.clone()
};
let normed = self.norm.forward(&norm_input)?;
let reshaped = if self.use_postshuffle_norm {
normed
} else {
normed.reshape((grouped, self.merged_hidden_size))?
};
let xs = self.fc1.forward(&reshaped)?;
let xs = xs.gelu()?;
self.fc2.forward(&xs)
}
}
struct VisionRotaryEmbedding {
inv_freq: Tensor,
}
impl VisionRotaryEmbedding {
const THETA: f32 = 10000.;
fn new(dim: usize, device: &Device) -> Result<Self> {
let inv_freq = (0..dim)
.step_by(2)
.map(|i| 1f32 / Self::THETA.powf(i as f32 / dim as f32))
.collect::<Vec<_>>();
let inv_freq_len = inv_freq.len();
Ok(Self {
inv_freq: Tensor::from_vec(inv_freq, (1, inv_freq_len), device)?,
})
}
fn make_embeds(&self, seqlen: usize) -> Result<Tensor> {
let seq =
Tensor::arange(0f32, seqlen as f32, self.inv_freq.device())?.unsqueeze(D::Minus1)?;
seq.broadcast_matmul(&self.inv_freq)
}
}
pub struct Qwen3VLVisionModel {
patch_embed: PatchEmbed,
pos_embed: Embedding,
blocks: Vec<VisionBlock>,
merger: PatchMerger,
deepstack_mergers: Vec<PatchMerger>,
deepstack_lookup: Vec<Option<usize>>,
rotary_pos_emb: VisionRotaryEmbedding,
spatial_merge_size: usize,
num_grid_per_side: usize,
hidden_size: usize,
}
impl Qwen3VLVisionModel {
pub fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let patch_embed = PatchEmbed::new(cfg, vb.pp("patch_embed"))?;
let pos_embed = embedding(
cfg.num_position_embeddings,
cfg.hidden_size,
vb.pp("pos_embed"),
)?;
let mut blocks = Vec::with_capacity(cfg.depth);
for i in 0..cfg.depth {
blocks.push(VisionBlock::new(cfg, vb.pp(format!("blocks.{i}")))?);
}
let merger = PatchMerger::new(cfg, false, vb.pp("merger"))?;
let deepstack_mergers = cfg
.deepstack_visual_indexes
.iter()
.enumerate()
.map(|(i, _)| PatchMerger::new(cfg, true, vb.pp(format!("deepstack_merger_list.{i}"))))
.collect::<Result<Vec<_>>>()?;
let mut deepstack_lookup = vec![None; cfg.depth];
for (idx, &layer_idx) in cfg.deepstack_visual_indexes.iter().enumerate() {
if layer_idx < cfg.depth {
deepstack_lookup[layer_idx] = Some(idx);
}
}
let head_dim = cfg.hidden_size / cfg.num_heads;
let rotary_pos_emb = VisionRotaryEmbedding::new(head_dim / 2, vb.device())?;
let num_grid_per_side = (cfg.num_position_embeddings as f64).sqrt().round() as usize;
if num_grid_per_side * num_grid_per_side != cfg.num_position_embeddings {
candle::bail!(
"num_position_embeddings {} is not a perfect square",
cfg.num_position_embeddings
);
}
Ok(Self {
patch_embed,
pos_embed,
blocks,
merger,
deepstack_mergers,
deepstack_lookup,
rotary_pos_emb,
spatial_merge_size: cfg.spatial_merge_size,
num_grid_per_side,
hidden_size: cfg.hidden_size,
})
}
fn linspace_points(&self, steps: usize) -> Vec<f32> {
if steps == 1 {
return vec![0.0];
}
let max_val = (self.num_grid_per_side - 1) as f32;
let step = max_val / (steps.saturating_sub(1)) as f32;
(0..steps).map(|i| i as f32 * step).collect()
}
fn fast_pos_embed_interpolate(&self, grid_thw: &Tensor) -> Result<Tensor> {
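        // The learned position table covers a fixed num_grid_per_side x num_grid_per_side
        // grid. For each image it is bilinearly resized to the actual (h, w) patch grid:
        // gather the four surrounding table entries per target cell, blend them with
        // (1 - dh)(1 - dw)-style weights, then repeat across the t frames and reorder to
        // match the spatial-merge block layout.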
let device = self.pos_embed.embeddings().device();
let dtype = self.pos_embed.embeddings().dtype();
let grid = grid_thw.to_vec2::<u32>()?;
let mut idx_lists: [Vec<i64>; 4] = Default::default();
let mut weight_lists: [Vec<f32>; 4] = Default::default();
let mut hw_lengths = Vec::with_capacity(grid.len());
for g in &grid {
let h = g[1] as usize;
let w = g[2] as usize;
hw_lengths.push(h * w);
let h_vals = self.linspace_points(h);
let w_vals = self.linspace_points(w);
let h_floor: Vec<usize> = h_vals.iter().map(|v| v.floor() as usize).collect();
let w_floor: Vec<usize> = w_vals.iter().map(|v| v.floor() as usize).collect();
let h_ceil: Vec<usize> = h_vals
.iter()
.map(|v| (v.ceil() as usize).min(self.num_grid_per_side - 1))
.collect();
let w_ceil: Vec<usize> = w_vals
.iter()
.map(|v| (v.ceil() as usize).min(self.num_grid_per_side - 1))
.collect();
let dh: Vec<f32> = h_vals
.iter()
.zip(&h_floor)
.map(|(v, f)| v - *f as f32)
.collect();
let dw: Vec<f32> = w_vals
.iter()
.zip(&w_floor)
.map(|(v, f)| v - *f as f32)
.collect();
for ((&hf, &hc), &dh_val) in h_floor.iter().zip(&h_ceil).zip(&dh) {
for ((&wf, &wc), &dw_val) in w_floor.iter().zip(&w_ceil).zip(&dw) {
let base00 = (hf * self.num_grid_per_side + wf) as i64;
let base01 = (hf * self.num_grid_per_side + wc) as i64;
let base10 = (hc * self.num_grid_per_side + wf) as i64;
let base11 = (hc * self.num_grid_per_side + wc) as i64;
let w00 = (1.0 - dh_val) * (1.0 - dw_val);
let w01 = (1.0 - dh_val) * dw_val;
let w10 = dh_val * (1.0 - dw_val);
let w11 = dh_val * dw_val;
idx_lists[0].push(base00);
idx_lists[1].push(base01);
idx_lists[2].push(base10);
idx_lists[3].push(base11);
weight_lists[0].push(w00);
weight_lists[1].push(w01);
weight_lists[2].push(w10);
weight_lists[3].push(w11);
}
}
}
let idx_tensors = idx_lists
.iter()
.map(|idxs| Tensor::from_vec(idxs.clone(), (idxs.len(),), device))
.collect::<Result<Vec<_>>>()?;
let idx_tensor = Tensor::stack(&idx_tensors, 0)?;
let weight_tensors = weight_lists
.iter()
.map(|weights| Tensor::from_vec(weights.clone(), (weights.len(),), device))
.collect::<Result<Vec<_>>>()?;
let weight_tensor = Tensor::stack(&weight_tensors, 0)?.to_dtype(dtype)?;
let pos_embeds = self.pos_embed.forward(&idx_tensor)?;
let pos_embeds = pos_embeds.broadcast_mul(&weight_tensor.unsqueeze(D::Minus1)?)?;
let pos_embeds = pos_embeds.sum(0)?;
let mut splits = Vec::with_capacity(hw_lengths.len());
let mut start = 0;
for len in hw_lengths {
splits.push(pos_embeds.narrow(0, start, len)?);
start += len;
}
let mut permuted = Vec::with_capacity(grid.len());
for (pos_embed, g) in splits.into_iter().zip(&grid) {
let t = g[0] as usize;
let h = g[1] as usize;
let w = g[2] as usize;
let pos_embed = pos_embed.repeat((t, 1))?;
let pos_embed = pos_embed.reshape((
t,
h / self.spatial_merge_size,
self.spatial_merge_size,
w / self.spatial_merge_size,
self.spatial_merge_size,
self.hidden_size,
))?;
let pos_embed = pos_embed
.permute((0, 1, 3, 2, 4, 5))?
.reshape((t * h * w, self.hidden_size))?;
permuted.push(pos_embed);
}
Tensor::cat(&permuted, 0)
}
fn rot_pos_emb(&self, grid_thw: &Tensor) -> Result<Tensor> {
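        // Build (row, col) coordinates for every patch, emitted block by block in
        // spatial_merge_size x spatial_merge_size tiles and repeated per temporal frame,
        // then look up the per-axis rotary frequencies and concatenate them.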
let device = self.rotary_pos_emb.inv_freq.device();
let grid = grid_thw.to_vec2::<u32>()?;
let max_hw = grid
.iter()
.flat_map(|v| v[1..3].iter())
.copied()
.max()
.unwrap_or(0) as usize;
let freq_table = self.rotary_pos_emb.make_embeds(max_hw)?;
let mut coords: Vec<(i64, i64)> = Vec::new();
for g in &grid {
let h = g[1] as usize;
let w = g[2] as usize;
let merged_h = h / self.spatial_merge_size;
let merged_w = w / self.spatial_merge_size;
let mut base_coords: Vec<(i64, i64)> = Vec::with_capacity(h * w);
for br in 0..merged_h {
for bc in 0..merged_w {
for ir in 0..self.spatial_merge_size {
for ic in 0..self.spatial_merge_size {
base_coords.push((
(br * self.spatial_merge_size + ir) as i64,
(bc * self.spatial_merge_size + ic) as i64,
));
}
}
}
}
for _ in 0..(g[0] as usize) {
coords.extend(base_coords.iter().cloned());
}
}
let total_tokens = coords.len();
let mut rows = Vec::with_capacity(total_tokens);
let mut cols = Vec::with_capacity(total_tokens);
for &(r, c) in &coords {
rows.push(r);
cols.push(c);
}
let rows = Tensor::from_vec(rows, (total_tokens,), device)?;
let cols = Tensor::from_vec(cols, (total_tokens,), device)?;
let row_embeds = freq_table.index_select(&rows, 0)?;
let col_embeds = freq_table.index_select(&cols, 0)?;
Tensor::stack(&[row_embeds, col_embeds], D::Minus2)?
.reshape((total_tokens, freq_table.dim(D::Minus1)? * 2))
}
fn build_cu_seqlens(&self, grid_thw: &Tensor) -> Result<Vec<usize>> {
let grid = grid_thw.to_vec2::<u32>()?;
let mut cu = Vec::with_capacity(grid.iter().map(|v| v[0] as usize).sum::<usize>() + 1);
cu.push(0usize);
let mut acc = 0usize;
for g in &grid {
let area = (g[1] * g[2]) as usize;
for _ in 0..(g[0] as usize) {
acc += area;
cu.push(acc);
}
}
Ok(cu)
}
pub fn forward(&self, xs: &Tensor, grid_thw: &Tensor) -> Result<(Tensor, Vec<Tensor>)> {
let dtype = self.pos_embed.embeddings().dtype();
let xs = self.patch_embed.forward(&xs.to_dtype(dtype)?)?;
let pos_embeds = self.fast_pos_embed_interpolate(grid_thw)?;
let mut hidden_states = xs.add(&pos_embeds)?;
let rotary_pos_emb = self.rot_pos_emb(grid_thw)?;
let seq_len = hidden_states.dim(0)?;
let rotary_pos_emb = rotary_pos_emb.reshape((seq_len, ()))?;
let emb = Tensor::cat(&[&rotary_pos_emb, &rotary_pos_emb], D::Minus1)?;
let cos = emb.cos()?.to_dtype(DType::F32)?;
let sin = emb.sin()?.to_dtype(DType::F32)?;
let cu_seqlens = self.build_cu_seqlens(grid_thw)?;
let mut deepstack_features = Vec::new();
for (layer_idx, block) in self.blocks.iter().enumerate() {
hidden_states = block.forward(&hidden_states, &cu_seqlens, &cos, &sin)?;
if let Some(merger_idx) = self.deepstack_lookup[layer_idx] {
let feat = self.deepstack_mergers[merger_idx].forward(&hidden_states)?;
deepstack_features.push(feat);
}
}
let hidden_states = self.merger.forward(&hidden_states)?;
Ok((hidden_states, deepstack_features))
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/qwen3_vl/conv3d_temporal_2.rs | candle-transformers/src/models/qwen3_vl/conv3d_temporal_2.rs | //! Conv3dConfig assuming a temporal patch size of 2
use candle::{IndexOp, Module, Result, Tensor};
use candle_nn::{Conv2d, Conv2dConfig, VarBuilder};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Conv3dConfig {
pub padding: usize,
pub stride: usize,
pub dilation: usize,
pub groups: usize,
}
impl Default for Conv3dConfig {
fn default() -> Self {
Self {
padding: 0,
stride: 1,
dilation: 1,
groups: 1,
}
}
}
pub struct Conv3dNoBias {
conv2d_1: Conv2d,
conv2d_2: Conv2d,
}
impl Conv3dNoBias {
pub fn new(
in_channels: usize,
out_channels: usize,
kernel_sizes: [usize; 3],
cfg: Conv3dConfig,
vb: VarBuilder,
) -> Result<Self> {
let ws = vb.get(
(
out_channels,
in_channels / cfg.groups,
kernel_sizes[0],
kernel_sizes[1],
kernel_sizes[2],
),
"weight",
)?;
// Split on temporal dimension
// https://github.com/pytorch/pytorch/issues/139066
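        // With both the temporal kernel and the temporal patch size fixed at 2, the 3D
        // convolution reduces to conv2d(frame_0, w[:, :, 0]) + conv2d(frame_1, w[:, :, 1]),
        // which is exactly what `forward` computes with the two Conv2d layers below.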
let w1 = ws.i((.., .., 0, .., ..))?;
let w2 = ws.i((.., .., 1, .., ..))?;
let cfg = Conv2dConfig {
padding: cfg.padding,
stride: cfg.stride,
dilation: cfg.dilation,
groups: cfg.groups,
cudnn_fwd_algo: None,
};
Ok(Self {
conv2d_1: Conv2d::new(w1.contiguous()?, None, cfg),
conv2d_2: Conv2d::new(w2.contiguous()?, None, cfg),
})
}
}
impl Module for Conv3dNoBias {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs1 = xs.i((.., .., 0, .., ..))?;
let xs2 = xs.i((.., .., 1, .., ..))?;
(self.conv2d_1.forward(&xs1)? + self.conv2d_2.forward(&xs2)?)?.unsqueeze(2)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/qwen3_vl/text.rs | candle-transformers/src/models/qwen3_vl/text.rs | use std::sync::{Arc, Mutex};
use candle::{DType, Device, IndexOp, Result, Tensor};
use candle_nn::{
embedding, kv_cache::KvCache, linear, linear_b, rms_norm, Activation, Embedding, Linear,
Module, RmsNorm, VarBuilder,
};
use super::config::TextConfig;
#[derive(Debug, Clone)]
pub struct RotaryEmbedding {
cos: Tensor,
sin: Tensor,
}
impl RotaryEmbedding {
pub fn new(
base: f32,
head_dim: usize,
max_position_embeddings: usize,
device: &Device,
dtype: DType,
) -> Result<Self> {
let inv_freq: Vec<_> = (0..head_dim)
.step_by(2)
.map(|i| 1f32 / base.powf(i as f32 / head_dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), device)?;
let t = Tensor::arange(0u32, max_position_embeddings as u32, device)?
.to_dtype(DType::F32)?
.reshape((max_position_embeddings, 1))?;
let freqs = t.matmul(&inv_freq)?;
let sin = freqs.sin()?.to_dtype(dtype)?;
let cos = freqs.cos()?.to_dtype(dtype)?;
Ok(Self { cos, sin })
}
pub fn forward(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offsets: &[usize],
) -> Result<(Tensor, Tensor)> {
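        // Each batch element may sit at a different decode offset, so RoPE is applied per
        // sample with its own slice of the cached cos/sin tables before re-batching.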
let (_b_sz, _qh, seq_len, _n_embd) = q.dims4()?;
let rope = candle_nn::rotary_emb::rope;
let mut q_embeds = Vec::new();
let mut k_embeds = Vec::new();
for (i, offset) in seqlen_offsets.iter().enumerate() {
let cos = self.cos.narrow(0, *offset, seq_len)?;
let sin = self.sin.narrow(0, *offset, seq_len)?;
let q_embed = rope(&q.i(i)?.unsqueeze(0)?.contiguous()?, &cos, &sin)?;
let k_embed = rope(&k.i(i)?.unsqueeze(0)?.contiguous()?, &cos, &sin)?;
q_embeds.push(q_embed);
k_embeds.push(k_embed);
}
Ok((Tensor::cat(&q_embeds, 0)?, Tensor::cat(&k_embeds, 0)?))
}
}
struct Mlp {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl Mlp {
fn new(cfg: &TextConfig, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_b(hidden_sz, intermediate_sz, false, vb.pp("gate_proj"))?;
let up_proj = linear_b(hidden_sz, intermediate_sz, false, vb.pp("up_proj"))?;
let down_proj = linear_b(intermediate_sz, hidden_sz, false, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = self.gate_proj.forward(xs)?.apply(&self.act_fn)?;
let rhs = self.up_proj.forward(xs)?;
self.down_proj.forward(&(lhs * rhs)?)
}
}
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
q_norm: RmsNorm,
k_norm: RmsNorm,
num_heads: usize,
num_kv_heads: usize,
head_dim: usize,
rotary_emb: Arc<RotaryEmbedding>,
n_kv_groups: usize,
softmax_scale: f64,
kv_cache: Arc<Mutex<KvCache>>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &TextConfig, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let q_proj = linear_b(hidden_sz, num_heads * cfg.head_dim, false, vb.pp("q_proj"))?;
let k_proj = linear_b(
hidden_sz,
num_kv_heads * cfg.head_dim,
false,
vb.pp("k_proj"),
)?;
let v_proj = linear_b(
hidden_sz,
num_kv_heads * cfg.head_dim,
false,
vb.pp("v_proj"),
)?;
let o_proj = linear_b(num_heads * cfg.head_dim, hidden_sz, false, vb.pp("o_proj"))?;
let q_norm = rms_norm(cfg.head_dim, cfg.rms_norm_eps, vb.pp("q_norm"))?;
let k_norm = rms_norm(cfg.head_dim, cfg.rms_norm_eps, vb.pp("k_norm"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
q_norm,
k_norm,
num_heads,
num_kv_heads,
head_dim: cfg.head_dim,
rotary_emb,
n_kv_groups: cfg.num_attention_heads / cfg.num_key_value_heads,
softmax_scale: 1.0 / (cfg.head_dim as f64).sqrt(),
kv_cache: Arc::new(Mutex::new(KvCache::new(2, cfg.max_position_embeddings))),
})
}
#[allow(clippy::too_many_arguments)]
fn forward(
&self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offsets: &[usize],
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let mut q = self.q_proj.forward(xs)?;
let mut k = self.k_proj.forward(xs)?;
let mut v = self.v_proj.forward(xs)?;
q = q
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
k = k
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
v = v
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
q = q.apply(&self.q_norm)?;
k = k.apply(&self.k_norm)?;
(q, k) = self.rotary_emb.forward(&q, &k, seqlen_offsets)?;
let q = q.contiguous()?;
let k = k.contiguous()?;
let v = v.contiguous()?;
let (k, v) = self
.kv_cache
.lock()
.expect("Need a lock because of the deepstack injection")
.append(&k, &v)?;
let k = crate::utils::repeat_kv(k, self.n_kv_groups)?.contiguous()?;
let v = crate::utils::repeat_kv(v, self.n_kv_groups)?.contiguous()?;
let mut attn_output = {
let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * self.softmax_scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&v)?
};
attn_output = attn_output.transpose(1, 2)?.reshape((b_sz, q_len, ()))?;
self.o_proj.forward(&attn_output)
}
}
pub struct DecoderLayer {
self_attn: Attention,
mlp: Mlp,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &TextConfig, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = Mlp::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = rms_norm(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
#[allow(clippy::too_many_arguments)]
fn forward(
&self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offsets: &[usize],
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self
.self_attn
.forward(&xs, attention_mask, seqlen_offsets)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self
.mlp
.forward(&xs.apply(&self.post_attention_layernorm)?)?;
residual + xs
}
}
pub struct Qwen3VLTextModel {
embed_tokens: Embedding,
pub(super) norm: RmsNorm,
layers: Vec<DecoderLayer>,
lm_head: Linear,
pub(super) dtype: DType,
pub(super) num_attn_heads: usize,
}
impl Qwen3VLTextModel {
pub fn new(cfg: &TextConfig, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model").pp("language_model");
let embed_tokens = embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(
cfg.rope_theta as f32,
cfg.head_dim,
cfg.max_position_embeddings,
vb.device(),
vb_m.dtype(),
)?);
let vb_l = vb_m.pp("layers");
let mut layers = Vec::new();
for layer_idx in 0..cfg.num_hidden_layers {
layers.push(DecoderLayer::new(
rotary_emb.clone(),
cfg,
vb_l.pp(layer_idx),
)?);
}
let norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = if !cfg.tie_word_embeddings {
linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
} else {
candle_nn::Linear::new(embed_tokens.embeddings().clone(), None)
};
Ok(Self {
embed_tokens,
norm,
layers,
lm_head,
dtype: vb.dtype(),
num_attn_heads: cfg.num_attention_heads,
})
}
pub fn embed_tokens(&self, input_ids: &Tensor) -> Result<Tensor> {
self.embed_tokens.forward(input_ids)
}
pub fn forward_embeds(
&self,
mut xs: Tensor,
attention_mask: Option<&Tensor>,
seqlen_offsets: &[usize],
visual_pos_masks: Option<&Tensor>,
deepstack_visual_embeds: Option<&[Tensor]>,
) -> Result<Tensor> {
let (_, seq_len, _) = xs.dims3()?;
for (i, layer) in self.layers.iter().enumerate() {
xs = layer.forward(
&xs,
attention_mask
.as_ref()
.map(|m| m.to_device(xs.device()).unwrap())
.as_ref(),
seqlen_offsets,
)?;
// Integrate DeepStack visual features when provided.
if let (Some(visual_pos_masks), Some(deepstack)) =
(visual_pos_masks, deepstack_visual_embeds)
{
if i < deepstack.len() {
xs = self.deepstack_process(xs, visual_pos_masks, &deepstack[i])?;
}
}
}
xs = xs.apply(&self.norm)?;
self.lm_head
.forward(&xs)?
.i((.., seq_len - 1, ..))?
.contiguous()
}
fn deepstack_process(
&self,
hidden_states: Tensor,
visual_pos_masks: &Tensor,
visual_embeds: &Tensor,
) -> Result<Tensor> {
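        // Visual features are injected additively: flatten the (batch, seq) positions,
        // compute each masked position's rank among masked positions via a cumulative sum
        // of the mask, recover the flat index of the r-th masked token with scatter_add,
        // and finally scatter_add the visual embeddings onto those flat rows.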
let device = hidden_states.device();
let dtype = hidden_states.dtype();
let mask = visual_pos_masks.to_device(device)?.to_dtype(DType::F32)?;
let mask_flat = mask.flatten_all()?;
let masked_count = mask_flat.sum_all()?.to_scalar::<f32>()? as usize;
let visual_embeds = visual_embeds.to_device(device)?.to_dtype(dtype)?;
if masked_count == 0 {
if visual_embeds.dim(0)? != 0 {
candle::bail!(
"DeepStack visual embeds ({}) provided but mask is empty",
visual_embeds.dim(0)?
);
}
return Ok(hidden_states);
}
if visual_embeds.dim(0)? != masked_count {
candle::bail!(
"Mismatch between DeepStack visual embeds ({}) and mask positions ({})",
visual_embeds.dim(0)?,
masked_count
);
}
let (batch, seq, hidden) = hidden_states.dims3()?;
let total_positions = batch * seq;
let mut hidden_flat = hidden_states.reshape((total_positions, hidden))?;
let prefix = mask_flat.cumsum(0)?;
let rank = (prefix - &mask_flat)?.mul(&mask_flat)?;
let rank_u32 = rank.to_dtype(DType::U32)?;
let positions = Tensor::arange(0u32, total_positions as u32, device)?;
let positions_f32 = positions.to_dtype(DType::F32)?;
let masked_positions = positions_f32.mul(&mask_flat)?;
let mut position_per_rank = Tensor::zeros((masked_count,), DType::F32, device)?;
position_per_rank = position_per_rank.scatter_add(&rank_u32, &masked_positions, 0)?;
let position_per_rank = position_per_rank.to_dtype(DType::U32)?;
let linear_index = position_per_rank.unsqueeze(1)?.repeat((1, hidden))?;
hidden_flat = hidden_flat.scatter_add(&linear_index, &visual_embeds, 0)?;
hidden_flat.reshape((batch, seq, hidden))
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/qwen3_vl/mod.rs | candle-transformers/src/models/qwen3_vl/mod.rs | #![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)]
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::VarBuilder;
use text::Qwen3VLTextModel;
use vision::Qwen3VLVisionModel;
pub mod config;
mod conv3d_temporal_2;
mod text;
mod vision;
pub use config::Config;
use crate::models::deepseek2::NonZeroOp;
pub struct Qwen3VLModel {
text: Qwen3VLTextModel,
vision: Qwen3VLVisionModel,
}
impl Qwen3VLModel {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vision = Qwen3VLVisionModel::new(&cfg.vision_config, vb.pp("model").pp("visual"))?;
let text = Qwen3VLTextModel::new(&cfg.text_config, vb.clone())?;
Ok(Self { text, vision })
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
dtype: DType,
device: &Device,
) -> Result<Tensor> {
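        // Standard causal mask: query position i may attend to key positions j <= i. When
        // decoding with a KV cache (seqlen_offset > 0) the cached prefix stays fully
        // visible, hence the zero block concatenated on the left.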
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0f32 }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((
b_size,
self.text.num_attn_heads,
tgt_len,
tgt_len + seqlen_offset,
))?
.to_dtype(dtype)
}
#[allow(clippy::too_many_arguments)]
pub fn forward(
&self,
input_ids: &Tensor,
pixel_values: Option<Tensor>,
pixel_values_videos: Option<Tensor>,
image_grid_thw: Option<Tensor>,
video_grid_thw: Option<Tensor>,
seqlens: Vec<usize>,
continuous_img_pad: Vec<Vec<(usize, usize)>>,
continuous_vid_pad: Vec<Vec<(usize, usize)>>,
seqlen_offsets: &[usize],
) -> Result<Tensor> {
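        // Overall flow: embed the text tokens, run the vision tower on any image/video
        // patches, splice the resulting embeddings into the placeholder spans given by
        // continuous_img_pad / continuous_vid_pad, and collect per-layer DeepStack
        // features plus a mask of visual positions for the text model to re-inject.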
let (bs, seqlen) = input_ids.dims2()?;
        // A causal mask is only needed for multi-token (prefill) steps; a single-token
        // decode step can attend to the whole cached prefix anyway.
        let attention_mask = if seqlen <= 1 {
            None
        } else {
            Some(self.prepare_decoder_attention_mask(
                bs,
                seqlen,
                seqlen_offsets[0],
                self.text.dtype,
                input_ids.device(),
            )?)
        };
let mut input_embeds = self.text.embed_tokens(input_ids)?;
let (batch_size, seq_len, hidden_dim) = input_embeds.dims3()?;
let device = input_embeds.device().clone();
let mut image_mask_opt: Option<Tensor> = None;
let mut video_mask_opt: Option<Tensor> = None;
let mut deepstack_image_opt: Option<Vec<Tensor>> = None;
let mut deepstack_video_opt: Option<Vec<Tensor>> = None;
if let Some(pixel_values) = &pixel_values {
let Some(image_grid_thw_ref) = image_grid_thw.as_ref() else {
candle::bail!("pixel_values require image_grid_thw");
};
let mut pixel_values = pixel_values.clone();
let dims = pixel_values.dims();
if dims.len() == 3 {
pixel_values = pixel_values.reshape((dims[0] * dims[1], dims[2]))?;
}
let (image_embeds, deepstack_image_embeds) =
self.vision.forward(&pixel_values, image_grid_thw_ref)?;
let image_embeds = image_embeds.to_device(&device)?.to_dtype(self.text.dtype)?;
let mut deepstack_image_embeds = deepstack_image_embeds
.into_iter()
.map(|t| t.to_device(&device)?.to_dtype(self.text.dtype))
.collect::<Result<Vec<_>>>()?;
let mut offset = 0usize;
let mut image_mask =
Tensor::zeros((batch_size, seq_len), DType::F32, input_ids.device())?;
let total_expected: usize = continuous_img_pad
.iter()
.flat_map(|spans| spans.iter().map(|(s, e)| e - s))
.sum();
if image_embeds.dim(0)? != total_expected {
candle::bail!(
"Image embedding length {} does not match placeholder tokens {}",
image_embeds.dim(0)?,
total_expected
);
}
for (batch, spans) in continuous_img_pad.iter().enumerate() {
for &(start, end) in spans {
let len = end - start;
let chunk = image_embeds.narrow(0, offset, len)?;
offset += len;
input_embeds = input_embeds.slice_assign(
&[batch..batch + 1, start..end, 0..hidden_dim],
&chunk.unsqueeze(0)?,
)?;
let ones = Tensor::ones((1, len), DType::F32, input_ids.device())?;
image_mask = image_mask.slice_assign(&[batch..batch + 1, start..end], &ones)?;
}
}
image_mask_opt = Some(image_mask.to_dtype(DType::U8)?);
deepstack_image_opt = Some(std::mem::take(&mut deepstack_image_embeds));
}
if let Some(pixel_values_videos) = &pixel_values_videos {
let Some(video_grid_thw_ref) = video_grid_thw.as_ref() else {
candle::bail!("pixel_values_videos require video_grid_thw");
};
let mut pixel_values = pixel_values_videos.clone();
let dims = pixel_values.dims();
if dims.len() == 3 {
pixel_values = pixel_values.reshape((dims[0] * dims[1], dims[2]))?;
}
let (video_embeds, deepstack_video_embeds) =
self.vision.forward(&pixel_values, video_grid_thw_ref)?;
let video_embeds = video_embeds.to_device(&device)?.to_dtype(self.text.dtype)?;
let mut deepstack_video_embeds = deepstack_video_embeds
.into_iter()
.map(|t| t.to_device(&device)?.to_dtype(self.text.dtype))
.collect::<Result<Vec<_>>>()?;
let mut offset = 0usize;
let mut video_mask =
Tensor::zeros((batch_size, seq_len), DType::F32, input_ids.device())?;
let total_expected: usize = continuous_vid_pad
.iter()
.flat_map(|spans| spans.iter().map(|(s, e)| e - s))
.sum();
if video_embeds.dim(0)? != total_expected {
candle::bail!(
"Video embedding length {} does not match placeholder tokens {}",
video_embeds.dim(0)?,
total_expected
);
}
for (batch, spans) in continuous_vid_pad.iter().enumerate() {
for &(start, end) in spans {
let len = end - start;
let chunk = video_embeds.narrow(0, offset, len)?;
offset += len;
input_embeds = input_embeds.slice_assign(
&[batch..batch + 1, start..end, 0..hidden_dim],
&chunk.unsqueeze(0)?,
)?;
let ones = Tensor::ones((1, len), DType::F32, input_ids.device())?;
video_mask = video_mask.slice_assign(&[batch..batch + 1, start..end], &ones)?;
}
}
video_mask_opt = Some(video_mask.to_dtype(DType::U8)?);
deepstack_video_opt = Some(std::mem::take(&mut deepstack_video_embeds));
}
let (visual_pos_masks, deepstack_visual_embeds) = match (
image_mask_opt,
deepstack_image_opt,
video_mask_opt,
deepstack_video_opt,
) {
(Some(image_mask), Some(image_deepstack), Some(video_mask), Some(video_deepstack)) => {
let combined =
(image_mask.to_dtype(DType::F32)? + video_mask.to_dtype(DType::F32)?)?;
let visual_mask = combined.gt(0f32)?.to_dtype(DType::U8)?;
let visual_indices = visual_mask.flatten_all()?.nonzero()?.squeeze(1)?;
let visual_indices_vec = visual_indices.to_vec1::<i64>()?;
let image_flat = image_mask
.flatten_all()?
.to_dtype(DType::U8)?
.to_vec1::<u8>()?;
let num_visual = visual_indices_vec.len();
if image_deepstack.len() != video_deepstack.len() {
candle::bail!(
"DeepStack image layers ({}) do not match video layers ({})",
image_deepstack.len(),
video_deepstack.len()
);
}
let mut combined_layers = Vec::with_capacity(image_deepstack.len());
for (img_layer, vid_layer) in image_deepstack.iter().zip(video_deepstack.iter()) {
let mut rows = Vec::with_capacity(num_visual);
let mut img_offset = 0usize;
let mut vid_offset = 0usize;
for &idx in &visual_indices_vec {
let idx = idx as usize;
if image_flat[idx] != 0 {
rows.push(img_layer.i(img_offset)?);
img_offset += 1;
} else {
rows.push(vid_layer.i(vid_offset)?);
vid_offset += 1;
}
}
if img_offset != img_layer.dim(0)? || vid_offset != vid_layer.dim(0)? {
candle::bail!(
"DeepStack feature alignment failed for images ({}/{}) or videos ({}/{})",
img_offset,
img_layer.dim(0)?,
vid_offset,
vid_layer.dim(0)?
);
}
let row_refs: Vec<&Tensor> = rows.iter().collect();
combined_layers.push(Tensor::stack(&row_refs, 0)?);
}
(Some(visual_mask), Some(combined_layers))
}
(Some(image_mask), Some(image_deepstack), _, _) => {
(Some(image_mask), Some(image_deepstack))
}
(_, _, Some(video_mask), Some(video_deepstack)) => {
(Some(video_mask), Some(video_deepstack))
}
_ => (None, None),
};
let mut ropeidx_attn_mask_bs = Vec::new();
let max_seqlens = *seqlens.iter().max().unwrap();
for len in &seqlens {
ropeidx_attn_mask_bs.push(Tensor::new(
[vec![1f32; *len], vec![0f32; max_seqlens - len]].concat(),
input_ids.device(),
)?);
}
let out = self.text.forward_embeds(
input_embeds,
attention_mask.as_ref(),
seqlen_offsets,
visual_pos_masks.as_ref(),
deepstack_visual_embeds.as_deref(),
)?;
Ok(out)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/z_image/text_encoder.rs | candle-transformers/src/models/z_image/text_encoder.rs | //! Z-Image Text Encoder (Qwen3 Adapter)
//!
//! This module provides a Qwen3-based text encoder for Z-Image.
//! Key differences from the standard Qwen3 model:
//! - Returns the **second-to-last layer** hidden states (hidden_states[-2])
//! - Does NOT apply the final RMSNorm
use crate::models::with_tracing::{linear_b, Linear, RmsNorm};
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
/// Text Encoder configuration (Qwen3-based)
#[derive(Debug, Clone, serde::Deserialize)]
pub struct TextEncoderConfig {
#[serde(default = "default_vocab_size")]
pub vocab_size: usize,
#[serde(default = "default_hidden_size")]
pub hidden_size: usize,
#[serde(default = "default_intermediate_size")]
pub intermediate_size: usize,
#[serde(default = "default_num_hidden_layers")]
pub num_hidden_layers: usize,
#[serde(default = "default_num_attention_heads")]
pub num_attention_heads: usize,
#[serde(default = "default_num_key_value_heads")]
pub num_key_value_heads: usize,
#[serde(default = "default_head_dim")]
pub head_dim: usize,
#[serde(default = "default_rms_norm_eps")]
pub rms_norm_eps: f64,
#[serde(default = "default_rope_theta")]
pub rope_theta: f64,
#[serde(default = "default_attention_bias")]
pub attention_bias: bool,
#[serde(default = "default_hidden_act")]
pub hidden_act: Activation,
#[serde(default = "default_max_position_embeddings")]
pub max_position_embeddings: usize,
}
fn default_vocab_size() -> usize {
151936
}
fn default_hidden_size() -> usize {
2560
}
fn default_intermediate_size() -> usize {
9728
}
fn default_num_hidden_layers() -> usize {
36
}
fn default_num_attention_heads() -> usize {
32
}
fn default_num_key_value_heads() -> usize {
8
}
fn default_head_dim() -> usize {
128
}
fn default_rms_norm_eps() -> f64 {
1e-6
}
fn default_rope_theta() -> f64 {
1_000_000.0
}
fn default_attention_bias() -> bool {
false
}
fn default_hidden_act() -> Activation {
Activation::Silu
}
fn default_max_position_embeddings() -> usize {
40960
}
impl Default for TextEncoderConfig {
fn default() -> Self {
Self::z_image()
}
}
impl TextEncoderConfig {
/// Create configuration for Z-Image Text Encoder
pub fn z_image() -> Self {
Self {
vocab_size: 151936,
hidden_size: 2560,
intermediate_size: 9728,
num_hidden_layers: 36,
num_attention_heads: 32,
num_key_value_heads: 8,
head_dim: 128,
rms_norm_eps: 1e-6,
rope_theta: 1_000_000.0,
attention_bias: false,
hidden_act: Activation::Silu,
max_position_embeddings: 40960,
}
}
}
// ==================== Rotary Embedding ====================
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &TextEncoderConfig, dev: &Device) -> Result<Self> {
let dim = cfg.head_dim;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(DType::F32)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?.to_dtype(dtype)?,
cos: freqs.cos()?.to_dtype(dtype)?,
})
}
/// Apply RoPE (q, k shape: B x H x L x D)
fn apply(&self, q: &Tensor, k: &Tensor, offset: usize) -> Result<(Tensor, Tensor)> {
let (_, _, seq_len, _) = q.dims4()?;
let cos = self.cos.narrow(0, offset, seq_len)?;
let sin = self.sin.narrow(0, offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
// ==================== MLP ====================
#[derive(Debug, Clone)]
struct Mlp {
gate_proj: candle_nn::Linear,
up_proj: candle_nn::Linear,
down_proj: candle_nn::Linear,
act_fn: Activation,
}
impl Mlp {
fn new(cfg: &TextEncoderConfig, vb: VarBuilder) -> Result<Self> {
Ok(Self {
gate_proj: candle_nn::linear_no_bias(
cfg.hidden_size,
cfg.intermediate_size,
vb.pp("gate_proj"),
)?,
up_proj: candle_nn::linear_no_bias(
cfg.hidden_size,
cfg.intermediate_size,
vb.pp("up_proj"),
)?,
down_proj: candle_nn::linear_no_bias(
cfg.intermediate_size,
cfg.hidden_size,
vb.pp("down_proj"),
)?,
act_fn: cfg.hidden_act,
})
}
}
impl Module for Mlp {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let lhs = x.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = x.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
// ==================== Attention ====================
fn repeat_kv(x: Tensor, n_rep: usize) -> Result<Tensor> {
if n_rep == 1 {
Ok(x)
} else {
let (b_sz, n_kv_head, seq_len, head_dim) = x.dims4()?;
x.unsqueeze(2)?
.broadcast_as((b_sz, n_kv_head, n_rep, seq_len, head_dim))?
.reshape((b_sz, n_kv_head * n_rep, seq_len, head_dim))
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
q_norm: RmsNorm,
k_norm: RmsNorm,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
}
impl Attention {
fn new(
cfg: &TextEncoderConfig,
rotary_emb: Arc<RotaryEmbedding>,
vb: VarBuilder,
) -> Result<Self> {
let head_dim = cfg.head_dim;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let q_proj = linear_b(
cfg.hidden_size,
num_heads * head_dim,
cfg.attention_bias,
vb.pp("q_proj"),
)?;
let k_proj = linear_b(
cfg.hidden_size,
num_kv_heads * head_dim,
cfg.attention_bias,
vb.pp("k_proj"),
)?;
let v_proj = linear_b(
cfg.hidden_size,
num_kv_heads * head_dim,
cfg.attention_bias,
vb.pp("v_proj"),
)?;
let o_proj = linear_b(
num_heads * head_dim,
cfg.hidden_size,
cfg.attention_bias,
vb.pp("o_proj"),
)?;
let q_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("q_norm"))?;
let k_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("k_norm"))?;
let hidden_size = head_dim * cfg.num_attention_heads;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
q_norm,
k_norm,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size,
rotary_emb,
})
}
fn forward(&self, x: &Tensor, attn_mask: Option<&Tensor>, offset: usize) -> Result<Tensor> {
let (b, l, _) = x.dims3()?;
// 1. Proj
let q = self.q_proj.forward(x)?;
let k = self.k_proj.forward(x)?;
let v = self.v_proj.forward(x)?;
// 2. Reshape: (B, L, H, D) -> (B, H, L, D)
let q = q
.reshape((b, l, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let k = k
.reshape((b, l, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b, l, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
// 3. Per-head RMSNorm
let q_flat = q.flatten(0, 2)?;
let k_flat = k.flatten(0, 2)?;
let q_flat = self.q_norm.forward(&q_flat)?;
let k_flat = self.k_norm.forward(&k_flat)?;
let q = q_flat.reshape((b, self.num_heads, l, self.head_dim))?;
let k = k_flat.reshape((b, self.num_kv_heads, l, self.head_dim))?;
// 4. RoPE
let (q, k) = self.rotary_emb.apply(&q, &k, offset)?;
// 5. GQA repeat_kv
let k = repeat_kv(k, self.num_kv_groups)?.contiguous()?;
let v = repeat_kv(v, self.num_kv_groups)?.contiguous()?;
// 6. Attention score
let scale = 1.0 / (self.head_dim as f64).sqrt();
let mut scores = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
if let Some(m) = attn_mask {
scores = scores.broadcast_add(m)?;
}
let probs = candle_nn::ops::softmax_last_dim(&scores)?;
let ctx = probs.matmul(&v)?; // (B, H, L, D)
// 7. Output proj
ctx.transpose(1, 2)?
.reshape((b, l, self.hidden_size))?
.apply(&self.o_proj)
}
}
// ==================== Decoder Layer ====================
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: Mlp,
ln1: RmsNorm,
ln2: RmsNorm,
}
impl DecoderLayer {
fn new(cfg: &TextEncoderConfig, rotary: Arc<RotaryEmbedding>, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(cfg, rotary, vb.pp("self_attn"))?;
let mlp = Mlp::new(cfg, vb.pp("mlp"))?;
let ln1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let ln2 = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
ln1,
ln2,
})
}
fn forward(&self, x: &Tensor, mask: Option<&Tensor>, offset: usize) -> Result<Tensor> {
let h = self.ln1.forward(x)?;
let h = self.self_attn.forward(&h, mask, offset)?;
let x = (x + h)?;
let h2 = self.ln2.forward(&x)?;
let h2 = h2.apply(&self.mlp)?;
x + h2
}
}
// ==================== ZImageTextEncoder ====================
/// Z-Image Text Encoder (Qwen3-based)
///
/// Returns the second-to-last layer hidden states (hidden_states[-2])
/// without applying the final RMSNorm.
#[derive(Debug, Clone)]
pub struct ZImageTextEncoder {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
num_hidden_layers: usize,
device: Device,
dtype: DType,
}
impl ZImageTextEncoder {
pub fn new(cfg: &TextEncoderConfig, vb: VarBuilder) -> Result<Self> {
// Note: weights have "model." prefix
let vb_model = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_model.pp("embed_tokens"))?;
let rotary = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_layers = vb_model.pp("layers");
for i in 0..cfg.num_hidden_layers {
layers.push(DecoderLayer::new(cfg, rotary.clone(), vb_layers.pp(i))?);
}
// NOTE: We do NOT load the final norm (model.norm.weight)
// because we return the second-to-last layer output without final norm
Ok(Self {
embed_tokens,
layers,
num_hidden_layers: cfg.num_hidden_layers,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
/// Create causal attention mask
fn causal_mask(&self, b: usize, tgt: usize, offset: usize) -> Result<Tensor> {
let minf = f32::NEG_INFINITY;
let mask: Vec<_> = (0..tgt)
.flat_map(|i| {
(0..(tgt + offset)).map(move |j| if j <= i + offset { 0.0 } else { minf })
})
.collect();
Tensor::from_slice(&mask, (b, 1, tgt, tgt + offset), &self.device)?.to_dtype(self.dtype)
}
/// Encode text, returning second-to-last layer hidden states
///
/// # Arguments
/// * `input_ids` - Token IDs (B, seq_len)
///
/// # Returns
/// Hidden states (B, seq_len, hidden_size) from layer[-2]
///
/// **Important**: Returns raw output from layer[-2] WITHOUT final RMSNorm
pub fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let (b, l) = input_ids.dims2()?;
let mut hidden_states = self.embed_tokens.forward(input_ids)?;
let causal = if l == 1 {
None
} else {
Some(self.causal_mask(b, l, 0)?)
};
// num_hidden_layers = 36, second-to-last layer index = 34
let target_layer = self.num_hidden_layers - 2;
for (i, layer) in self.layers.iter().enumerate() {
hidden_states = layer.forward(&hidden_states, causal.as_ref(), 0)?;
// Return after second-to-last layer, do NOT apply final norm
if i == target_layer {
return Ok(hidden_states);
}
}
// Should not reach here
candle::bail!("Layer index out of bounds")
}
/// Get the output dimension (hidden_size)
pub fn hidden_size(&self) -> usize {
// This is derived from embed_tokens weight shape
self.embed_tokens.embeddings().dim(1).unwrap_or(2560)
}
}
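// Usage sketch (host-side code; file paths and variable names are illustrative
// assumptions, not part of this module):
//
//     let vb = unsafe { VarBuilder::from_mmaped_safetensors(&weight_files, dtype, &device)? };
//     let encoder = ZImageTextEncoder::new(&TextEncoderConfig::z_image(), vb)?;
//     let ctx = encoder.forward(&token_ids)?; // (B, L, 2560): layer[-2], no final RMSNorm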
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/z_image/sampling.rs | candle-transformers/src/models/z_image/sampling.rs | //! Sampling utilities for Z-Image model.
use candle::{DType, Device, Result, Tensor};
/// Generate initial Gaussian noise
///
/// # Arguments
/// * `batch_size` - Batch size
/// * `channels` - Number of channels (typically 16, VAE latent channels)
/// * `height` - Height in latent space (image_height / 8 for the f8 VAE used here)
/// * `width` - Width in latent space (image_width / 8)
/// * `device` - Compute device
///
/// # Returns
/// Noise tensor of shape (batch_size, channels, height, width)
pub fn get_noise(
batch_size: usize,
channels: usize,
height: usize,
width: usize,
device: &Device,
) -> Result<Tensor> {
Tensor::randn(0f32, 1.0, (batch_size, channels, height, width), device)
}
/// Get linear time schedule with shift
///
/// # Arguments
/// * `num_steps` - Number of inference steps
/// * `mu` - Time shift parameter (from calculate_shift)
///
/// # Returns
/// Time points from 1.0 to 0.0 (num_steps+1 points)
pub fn get_schedule(num_steps: usize, mu: f64) -> Vec<f64> {
let timesteps: Vec<f64> = (0..=num_steps)
.map(|v| v as f64 / num_steps as f64)
.rev()
.collect();
// Apply time shift (for Flow Matching)
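    // e.g. with mu = ln(3), t = 0.5 maps to 3 / (3 + (1/0.5 - 1)) = 0.75, so the shifted
    // schedule spends more of its steps at high noise levels.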
timesteps
.into_iter()
.map(|t| {
if t <= 0.0 || t >= 1.0 {
t // boundary case
} else {
let e = mu.exp();
e / (e + (1.0 / t - 1.0))
}
})
.collect()
}
/// Post-process image from VAE output
/// Converts from [-1, 1] to [0, 255] u8 image
pub fn postprocess_image(image: &Tensor) -> Result<Tensor> {
let image = image.clamp(-1.0, 1.0)?;
let image = ((image + 1.0)? * 127.5)?;
image.to_dtype(DType::U8)
}
/// CFG configuration
#[derive(Debug, Clone)]
pub struct CfgConfig {
/// Guidance scale (typically 5.0)
pub guidance_scale: f64,
/// CFG truncation threshold (1.0 = full CFG, 0.0 = no CFG)
pub cfg_truncation: f64,
/// Whether to normalize CFG output
pub cfg_normalization: bool,
}
impl Default for CfgConfig {
fn default() -> Self {
Self {
guidance_scale: 5.0,
cfg_truncation: 1.0,
cfg_normalization: false,
}
}
}
/// Apply Classifier-Free Guidance
///
/// # Arguments
/// * `pos_pred` - Positive (conditional) prediction
/// * `neg_pred` - Negative (unconditional) prediction
/// * `cfg` - CFG configuration
/// * `t_norm` - Normalized time [0, 1]
pub fn apply_cfg(
pos_pred: &Tensor,
neg_pred: &Tensor,
cfg: &CfgConfig,
t_norm: f64,
) -> Result<Tensor> {
// CFG truncation: disable CFG in late sampling
let current_scale = if t_norm > cfg.cfg_truncation {
0.0
} else {
cfg.guidance_scale
};
if current_scale <= 0.0 {
return Ok(pos_pred.clone());
}
// CFG formula: pred = pos + scale * (pos - neg)
let diff = (pos_pred - neg_pred)?;
let pred = (pos_pred + (diff * current_scale)?)?;
// Optional: CFG normalization (limit output norm)
if cfg.cfg_normalization {
let ori_norm = pos_pred.sqr()?.sum_all()?.sqrt()?;
let new_norm = pred.sqr()?.sum_all()?.sqrt()?;
let ori_norm_val = ori_norm.to_scalar::<f32>()?;
let new_norm_val = new_norm.to_scalar::<f32>()?;
if new_norm_val > ori_norm_val {
let scale = ori_norm_val / new_norm_val;
return pred * scale as f64;
}
}
Ok(pred)
}
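#[cfg(test)]
mod apply_cfg_sketch {
    // A minimal check of `apply_cfg` with illustrative values: guidance scale 0 returns
    // the positive branch unchanged, and scale 1 with a zero negative branch doubles it.
    use super::*;
    use candle::{DType, Device, Result, Tensor};

    #[test]
    fn matches_the_cfg_formula() -> Result<()> {
        let dev = Device::Cpu;
        let pos = Tensor::from_vec(vec![1f32, 2., 3., 4.], (1, 4), &dev)?;
        let neg = Tensor::zeros((1, 4), DType::F32, &dev)?;
        let off = CfgConfig {
            guidance_scale: 0.0,
            ..Default::default()
        };
        assert_eq!(
            apply_cfg(&pos, &neg, &off, 0.5)?.to_vec2::<f32>()?,
            pos.to_vec2::<f32>()?
        );
        let on = CfgConfig {
            guidance_scale: 1.0,
            ..Default::default()
        };
        // pred = pos + 1.0 * (pos - 0) = 2 * pos
        assert_eq!(
            apply_cfg(&pos, &neg, &on, 0.5)?.to_vec2::<f32>()?,
            vec![vec![2f32, 4., 6., 8.]]
        );
        Ok(())
    }
}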
/// Scale latents to initial noise level
///
/// For flow matching, the initial sample should be pure noise.
/// This function scales the noise by the initial sigma.
pub fn scale_noise(noise: &Tensor, sigma: f64) -> Result<Tensor> {
noise * sigma
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/z_image/vae.rs | candle-transformers/src/models/z_image/vae.rs | //! Z-Image VAE (AutoEncoderKL) - Diffusers Format
//!
//! This VAE implementation uses the diffusers weight naming format,
//! which is different from the Flux autoencoder original format.
//!
//! Key differences from Flux autoencoder:
//! 1. Weight paths: `encoder.down_blocks.{i}.resnets.{j}.*` vs `encoder.down.{i}.block.{j}.*`
//! 2. Attention naming: `to_q/to_k/to_v/to_out.0.*` vs `q/k/v/proj_out.*`
//! 3. Shortcut naming: `conv_shortcut.*` vs `nin_shortcut.*`
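//!
//! For example, the first encoder resnet's first conv is read from
//! `encoder.down_blocks.0.resnets.0.conv1.weight` here, whereas a Flux-format checkpoint
//! stores it under `encoder.down.0.block.0.conv1.weight`.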
use candle::{Module, Result, Tensor, D};
use candle_nn::{conv2d, group_norm, Conv2d, Conv2dConfig, GroupNorm, VarBuilder};
// ==================== Config ====================
/// VAE configuration
#[derive(Debug, Clone, serde::Deserialize)]
pub struct VaeConfig {
#[serde(default = "default_in_channels")]
pub in_channels: usize,
#[serde(default = "default_out_channels")]
pub out_channels: usize,
#[serde(default = "default_latent_channels")]
pub latent_channels: usize,
#[serde(default = "default_block_out_channels")]
pub block_out_channels: Vec<usize>,
#[serde(default = "default_layers_per_block")]
pub layers_per_block: usize,
#[serde(default = "default_scaling_factor")]
pub scaling_factor: f64,
#[serde(default = "default_shift_factor")]
pub shift_factor: f64,
#[serde(default = "default_norm_num_groups")]
pub norm_num_groups: usize,
}
fn default_in_channels() -> usize {
3
}
fn default_out_channels() -> usize {
3
}
fn default_latent_channels() -> usize {
16
}
fn default_block_out_channels() -> Vec<usize> {
vec![128, 256, 512, 512]
}
fn default_layers_per_block() -> usize {
2
}
fn default_scaling_factor() -> f64 {
0.3611
}
fn default_shift_factor() -> f64 {
0.1159
}
fn default_norm_num_groups() -> usize {
32
}
impl Default for VaeConfig {
fn default() -> Self {
Self::z_image()
}
}
impl VaeConfig {
/// Create configuration for Z-Image VAE
pub fn z_image() -> Self {
Self {
in_channels: 3,
out_channels: 3,
latent_channels: 16,
block_out_channels: vec![128, 256, 512, 512],
layers_per_block: 2,
scaling_factor: 0.3611,
shift_factor: 0.1159,
norm_num_groups: 32,
}
}
}
// ==================== Attention ====================
fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let dim = q.dim(D::Minus1)?;
let scale_factor = 1.0 / (dim as f64).sqrt();
let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v)
}
/// VAE Attention block (diffusers format)
///
/// Note: VAE attention uses Linear with bias (2D weight shape),
/// unlike the Transformer attention blocks, which use linear_no_bias.
#[derive(Debug, Clone)]
struct Attention {
group_norm: GroupNorm,
to_q: candle_nn::Linear,
to_k: candle_nn::Linear,
to_v: candle_nn::Linear,
to_out: candle_nn::Linear,
}
impl Attention {
fn new(channels: usize, num_groups: usize, vb: VarBuilder) -> Result<Self> {
let group_norm = group_norm(num_groups, channels, 1e-6, vb.pp("group_norm"))?;
// VAE attention uses Linear with bias
let to_q = candle_nn::linear(channels, channels, vb.pp("to_q"))?;
let to_k = candle_nn::linear(channels, channels, vb.pp("to_k"))?;
let to_v = candle_nn::linear(channels, channels, vb.pp("to_v"))?;
let to_out = candle_nn::linear(channels, channels, vb.pp("to_out").pp("0"))?;
Ok(Self {
group_norm,
to_q,
to_k,
to_v,
to_out,
})
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let residual = xs;
let (b, c, h, w) = xs.dims4()?;
// GroupNorm
let xs = xs.apply(&self.group_norm)?;
// (B, C, H, W) -> (B, H, W, C) -> (B*H*W, C)
let xs = xs.permute((0, 2, 3, 1))?.reshape((b * h * w, c))?;
// Linear projections
let q = xs.apply(&self.to_q)?; // (B*H*W, C)
let k = xs.apply(&self.to_k)?;
let v = xs.apply(&self.to_v)?;
// Reshape for attention: (B*H*W, C) -> (B, H*W, C) -> (B, 1, H*W, C)
let q = q.reshape((b, h * w, c))?.unsqueeze(1)?;
let k = k.reshape((b, h * w, c))?.unsqueeze(1)?;
let v = v.reshape((b, h * w, c))?.unsqueeze(1)?;
// Scaled dot-product attention
let xs = scaled_dot_product_attention(&q, &k, &v)?;
// (B, 1, H*W, C) -> (B*H*W, C)
let xs = xs.squeeze(1)?.reshape((b * h * w, c))?;
// Output projection
let xs = xs.apply(&self.to_out)?;
// (B*H*W, C) -> (B, H, W, C) -> (B, C, H, W)
let xs = xs.reshape((b, h, w, c))?.permute((0, 3, 1, 2))?;
// Residual connection
xs + residual
}
}
// ==================== ResnetBlock2D ====================
/// ResNet block (diffusers format)
#[derive(Debug, Clone)]
struct ResnetBlock2D {
norm1: GroupNorm,
conv1: Conv2d,
norm2: GroupNorm,
conv2: Conv2d,
conv_shortcut: Option<Conv2d>,
}
impl ResnetBlock2D {
fn new(
in_channels: usize,
out_channels: usize,
num_groups: usize,
vb: VarBuilder,
) -> Result<Self> {
let conv_cfg = Conv2dConfig {
padding: 1,
..Default::default()
};
let norm1 = group_norm(num_groups, in_channels, 1e-6, vb.pp("norm1"))?;
let conv1 = conv2d(in_channels, out_channels, 3, conv_cfg, vb.pp("conv1"))?;
let norm2 = group_norm(num_groups, out_channels, 1e-6, vb.pp("norm2"))?;
let conv2 = conv2d(out_channels, out_channels, 3, conv_cfg, vb.pp("conv2"))?;
let conv_shortcut = if in_channels != out_channels {
Some(conv2d(
in_channels,
out_channels,
1,
Default::default(),
vb.pp("conv_shortcut"),
)?)
} else {
None
};
Ok(Self {
norm1,
conv1,
norm2,
conv2,
conv_shortcut,
})
}
}
impl Module for ResnetBlock2D {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let h = xs
.apply(&self.norm1)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv1)?
.apply(&self.norm2)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv2)?;
match &self.conv_shortcut {
Some(conv) => xs.apply(conv)? + h,
None => xs + h,
}
}
}
// ==================== DownEncoderBlock2D ====================
#[derive(Debug, Clone)]
struct Downsample2D {
conv: Conv2d,
}
impl Downsample2D {
fn new(channels: usize, vb: VarBuilder) -> Result<Self> {
let conv_cfg = Conv2dConfig {
stride: 2,
padding: 0,
..Default::default()
};
let conv = conv2d(channels, channels, 3, conv_cfg, vb.pp("conv"))?;
Ok(Self { conv })
}
}
impl Module for Downsample2D {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
// Manual padding: (0, 1, 0, 1) for right=1, bottom=1
let xs = xs.pad_with_zeros(D::Minus1, 0, 1)?; // width: right
let xs = xs.pad_with_zeros(D::Minus2, 0, 1)?; // height: bottom
xs.apply(&self.conv)
}
}
#[derive(Debug, Clone)]
struct DownEncoderBlock2D {
resnets: Vec<ResnetBlock2D>,
downsampler: Option<Downsample2D>,
}
impl DownEncoderBlock2D {
fn new(
in_channels: usize,
out_channels: usize,
num_layers: usize,
num_groups: usize,
add_downsample: bool,
vb: VarBuilder,
) -> Result<Self> {
let mut resnets = Vec::with_capacity(num_layers);
let vb_resnets = vb.pp("resnets");
for i in 0..num_layers {
let in_c = if i == 0 { in_channels } else { out_channels };
resnets.push(ResnetBlock2D::new(
in_c,
out_channels,
num_groups,
vb_resnets.pp(i),
)?);
}
let downsampler = if add_downsample {
Some(Downsample2D::new(
out_channels,
vb.pp("downsamplers").pp("0"),
)?)
} else {
None
};
Ok(Self {
resnets,
downsampler,
})
}
}
impl Module for DownEncoderBlock2D {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut h = xs.clone();
for resnet in &self.resnets {
h = h.apply(resnet)?;
}
if let Some(ds) = &self.downsampler {
h = h.apply(ds)?;
}
Ok(h)
}
}
// ==================== UpDecoderBlock2D ====================
#[derive(Debug, Clone)]
struct Upsample2D {
conv: Conv2d,
}
impl Upsample2D {
fn new(channels: usize, vb: VarBuilder) -> Result<Self> {
let conv_cfg = Conv2dConfig {
padding: 1,
..Default::default()
};
let conv = conv2d(channels, channels, 3, conv_cfg, vb.pp("conv"))?;
Ok(Self { conv })
}
}
impl Module for Upsample2D {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (_, _, h, w) = xs.dims4()?;
xs.upsample_nearest2d(h * 2, w * 2)?.apply(&self.conv)
}
}
#[derive(Debug, Clone)]
struct UpDecoderBlock2D {
resnets: Vec<ResnetBlock2D>,
upsampler: Option<Upsample2D>,
}
impl UpDecoderBlock2D {
fn new(
in_channels: usize,
out_channels: usize,
num_layers: usize, // decoder has num_layers + 1 resnets per block
num_groups: usize,
add_upsample: bool,
vb: VarBuilder,
) -> Result<Self> {
let mut resnets = Vec::with_capacity(num_layers + 1);
let vb_resnets = vb.pp("resnets");
for i in 0..=num_layers {
let in_c = if i == 0 { in_channels } else { out_channels };
resnets.push(ResnetBlock2D::new(
in_c,
out_channels,
num_groups,
vb_resnets.pp(i),
)?);
}
let upsampler = if add_upsample {
Some(Upsample2D::new(out_channels, vb.pp("upsamplers").pp("0"))?)
} else {
None
};
Ok(Self { resnets, upsampler })
}
}
impl Module for UpDecoderBlock2D {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut h = xs.clone();
for resnet in &self.resnets {
h = h.apply(resnet)?;
}
if let Some(us) = &self.upsampler {
h = h.apply(us)?;
}
Ok(h)
}
}
// ==================== UNetMidBlock2D ====================
#[derive(Debug, Clone)]
struct UNetMidBlock2D {
resnet_0: ResnetBlock2D,
attention: Attention,
resnet_1: ResnetBlock2D,
}
impl UNetMidBlock2D {
fn new(channels: usize, num_groups: usize, vb: VarBuilder) -> Result<Self> {
let resnet_0 =
ResnetBlock2D::new(channels, channels, num_groups, vb.pp("resnets").pp("0"))?;
let attention = Attention::new(channels, num_groups, vb.pp("attentions").pp("0"))?;
let resnet_1 =
ResnetBlock2D::new(channels, channels, num_groups, vb.pp("resnets").pp("1"))?;
Ok(Self {
resnet_0,
attention,
resnet_1,
})
}
}
impl Module for UNetMidBlock2D {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.resnet_0)?
.apply(&self.attention)?
.apply(&self.resnet_1)
}
}
// ==================== Encoder ====================
/// VAE Encoder
#[derive(Debug, Clone)]
pub struct Encoder {
conv_in: Conv2d,
down_blocks: Vec<DownEncoderBlock2D>,
mid_block: UNetMidBlock2D,
conv_norm_out: GroupNorm,
conv_out: Conv2d,
}
impl Encoder {
pub fn new(cfg: &VaeConfig, vb: VarBuilder) -> Result<Self> {
let conv_cfg = Conv2dConfig {
padding: 1,
..Default::default()
};
let conv_in = conv2d(
cfg.in_channels,
cfg.block_out_channels[0],
3,
conv_cfg,
vb.pp("conv_in"),
)?;
let mut down_blocks = Vec::with_capacity(cfg.block_out_channels.len());
let vb_down = vb.pp("down_blocks");
for (i, &out_channels) in cfg.block_out_channels.iter().enumerate() {
let in_channels = if i == 0 {
cfg.block_out_channels[0]
} else {
cfg.block_out_channels[i - 1]
};
let add_downsample = i < cfg.block_out_channels.len() - 1;
down_blocks.push(DownEncoderBlock2D::new(
in_channels,
out_channels,
cfg.layers_per_block,
cfg.norm_num_groups,
add_downsample,
vb_down.pp(i),
)?);
}
let mid_channels = *cfg.block_out_channels.last().unwrap();
let mid_block = UNetMidBlock2D::new(mid_channels, cfg.norm_num_groups, vb.pp("mid_block"))?;
let conv_norm_out = group_norm(
cfg.norm_num_groups,
mid_channels,
1e-6,
vb.pp("conv_norm_out"),
)?;
let conv_out = conv2d(
mid_channels,
2 * cfg.latent_channels,
3,
conv_cfg,
vb.pp("conv_out"),
)?;
Ok(Self {
conv_in,
down_blocks,
mid_block,
conv_norm_out,
conv_out,
})
}
}
impl Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut h = xs.apply(&self.conv_in)?;
for block in &self.down_blocks {
h = h.apply(block)?;
}
h.apply(&self.mid_block)?
.apply(&self.conv_norm_out)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv_out)
}
}
// ==================== Decoder ====================
/// VAE Decoder
#[derive(Debug, Clone)]
pub struct Decoder {
conv_in: Conv2d,
mid_block: UNetMidBlock2D,
up_blocks: Vec<UpDecoderBlock2D>,
conv_norm_out: GroupNorm,
conv_out: Conv2d,
}
impl Decoder {
pub fn new(cfg: &VaeConfig, vb: VarBuilder) -> Result<Self> {
let conv_cfg = Conv2dConfig {
padding: 1,
..Default::default()
};
let mid_channels = *cfg.block_out_channels.last().unwrap();
let conv_in = conv2d(
cfg.latent_channels,
mid_channels,
3,
conv_cfg,
vb.pp("conv_in"),
)?;
let mid_block = UNetMidBlock2D::new(mid_channels, cfg.norm_num_groups, vb.pp("mid_block"))?;
// Decoder up_blocks order is reversed from encoder down_blocks
let reversed_channels: Vec<usize> = cfg.block_out_channels.iter().rev().cloned().collect();
let mut up_blocks = Vec::with_capacity(reversed_channels.len());
let vb_up = vb.pp("up_blocks");
for (i, &out_channels) in reversed_channels.iter().enumerate() {
let in_channels = if i == 0 {
mid_channels
} else {
reversed_channels[i - 1]
};
let add_upsample = i < reversed_channels.len() - 1;
up_blocks.push(UpDecoderBlock2D::new(
in_channels,
out_channels,
cfg.layers_per_block,
cfg.norm_num_groups,
add_upsample,
vb_up.pp(i),
)?);
}
let final_channels = *reversed_channels.last().unwrap();
let conv_norm_out = group_norm(
cfg.norm_num_groups,
final_channels,
1e-6,
vb.pp("conv_norm_out"),
)?;
let conv_out = conv2d(
final_channels,
cfg.out_channels,
3,
conv_cfg,
vb.pp("conv_out"),
)?;
Ok(Self {
conv_in,
mid_block,
up_blocks,
conv_norm_out,
conv_out,
})
}
}
impl Module for Decoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut h = xs.apply(&self.conv_in)?.apply(&self.mid_block)?;
for block in &self.up_blocks {
h = h.apply(block)?;
}
h.apply(&self.conv_norm_out)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv_out)
}
}
// ==================== DiagonalGaussian ====================
/// Diagonal Gaussian distribution sampling (VAE reparameterization trick)
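///
/// The input is expected to carry 2 * latent_channels channels: the first half is the mean,
/// the second half the log-variance. With `sample = true` the forward pass returns
/// `mean + exp(0.5 * logvar) * eps` with `eps ~ N(0, 1)`; otherwise it returns the mean.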
#[derive(Debug, Clone)]
pub struct DiagonalGaussian {
sample: bool,
}
impl DiagonalGaussian {
pub fn new(sample: bool) -> Self {
Self { sample }
}
}
impl Module for DiagonalGaussian {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let chunks = xs.chunk(2, 1)?; // Split along channel dimension
let mean = &chunks[0];
let logvar = &chunks[1];
if self.sample {
let std = (logvar * 0.5)?.exp()?;
mean + (std * mean.randn_like(0., 1.)?)?
} else {
Ok(mean.clone())
}
}
}
// ==================== AutoEncoderKL ====================
/// Z-Image VAE (AutoEncoderKL) - Diffusers Format
#[derive(Debug, Clone)]
pub struct AutoEncoderKL {
encoder: Encoder,
decoder: Decoder,
reg: DiagonalGaussian,
scale_factor: f64,
shift_factor: f64,
}
impl AutoEncoderKL {
pub fn new(cfg: &VaeConfig, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let decoder = Decoder::new(cfg, vb.pp("decoder"))?;
let reg = DiagonalGaussian::new(true);
Ok(Self {
encoder,
decoder,
reg,
scale_factor: cfg.scaling_factor,
shift_factor: cfg.shift_factor,
})
}
/// Encode image to latent space
/// xs: (B, 3, H, W) RGB image, range [-1, 1]
/// Returns: (B, latent_channels, H/8, W/8)
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let z = xs.apply(&self.encoder)?.apply(&self.reg)?;
(z - self.shift_factor)? * self.scale_factor
}
/// Decode latent to image
/// xs: (B, latent_channels, H/8, W/8)
/// Returns: (B, 3, H, W) RGB image, range [-1, 1]
pub fn decode(&self, xs: &Tensor) -> Result<Tensor> {
let xs = ((xs / self.scale_factor)? + self.shift_factor)?;
xs.apply(&self.decoder)
}
/// Get scaling factor
pub fn scale_factor(&self) -> f64 {
self.scale_factor
}
/// Get shift factor
pub fn shift_factor(&self) -> f64 {
self.shift_factor
}
}
impl Module for AutoEncoderKL {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.decode(&self.encode(xs)?)
}
}
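// Usage sketch (hypothetical file name and variables, assuming diffusers-format weights and a
// parsed `VaeConfig`):
//
//   let vb = unsafe {
//       VarBuilder::from_mmaped_safetensors(&["vae/diffusion_pytorch_model.safetensors"], DType::F32, &device)?
//   };
//   let vae = AutoEncoderKL::new(&cfg, vb)?;
//   let latents = vae.encode(&image)?;   // (B, 3, H, W) in [-1, 1] -> (B, latent_channels, H/8, W/8)
//   let decoded = vae.decode(&latents)?; // back to (B, 3, H, W) in [-1, 1]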
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/z_image/mod.rs | candle-transformers/src/models/z_image/mod.rs | //! Z-Image Model
//!
//! Z-Image is a text-to-image generation model from Alibaba using Flow Matching.
//!
//! - 🤗 [Hugging Face Model](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo)
//! - [Official Website](https://z-image-turbo.org/)
//!
//! # Example
//!
//! ```bash
//! cargo run --features metal --example z_image --release -- \
//! --prompt "A beautiful landscape" --height 1024 --width 1024
//! ```
//!
//! # Architecture
//!
//! - Transformer: ~6B parameters, 30 main layers + 2 noise_refiner + 2 context_refiner
//! - Text Encoder: Qwen3 (hidden_size=2560, 36 layers)
//! - VAE: AutoencoderKL (diffusers format)
//! - Scheduler: FlowMatchEulerDiscreteScheduler (shift=3.0)
pub mod preprocess;
pub mod sampling;
pub mod scheduler;
pub mod text_encoder;
pub mod transformer;
pub mod vae;
// Re-export main types
pub use preprocess::{prepare_inputs, PreparedInputs};
pub use sampling::{get_noise, get_schedule, postprocess_image};
pub use scheduler::{calculate_shift, FlowMatchEulerDiscreteScheduler, SchedulerConfig};
pub use text_encoder::{TextEncoderConfig, ZImageTextEncoder};
pub use transformer::{Config, ZImageTransformer2DModel};
pub use vae::{AutoEncoderKL, VaeConfig};
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/z_image/preprocess.rs | candle-transformers/src/models/z_image/preprocess.rs | //! Input preprocessing utilities for Z-Image
//!
//! Provides padding and mask construction to convert variable-length inputs
//! into fixed-shape batch tensors.
use candle::{DType, Device, Result, Tensor};
use super::transformer::SEQ_MULTI_OF;
/// Preprocessed inputs structure
#[derive(Debug, Clone)]
pub struct PreparedInputs {
/// Latent tensor (B, C, 1, H, W)
pub latents: Tensor,
/// Padded caption features (B, max_text_len, dim)
pub cap_feats: Tensor,
/// Caption attention mask (B, max_text_len), 1=valid, 0=padding
pub cap_mask: Tensor,
/// Original text lengths for each sample
pub text_lengths: Vec<usize>,
}
/// Compute padding length to align to SEQ_MULTI_OF
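///
/// With `SEQ_MULTI_OF = 32`: `compute_padding_len(100) == 28`, `compute_padding_len(64) == 0`.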
#[inline]
pub fn compute_padding_len(ori_len: usize) -> usize {
(SEQ_MULTI_OF - (ori_len % SEQ_MULTI_OF)) % SEQ_MULTI_OF
}
/// Pad variable-length text embeddings to uniform length
///
/// # Arguments
/// * `text_embeddings` - Variable-length text embeddings, each of shape (seq_len, dim)
/// * `pad_value` - Padding value (typically 0.0)
/// * `device` - Device
///
/// # Returns
/// * Padded tensor (B, max_len, dim)
/// * Attention mask (B, max_len), 1=valid, 0=padding
/// * Original lengths
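///
/// For example, two prompts of 77 and 130 tokens pad to length 160 (130 rounded up to the next
/// multiple of `SEQ_MULTI_OF`), giving a `(2, 160, dim)` feature tensor and a `(2, 160)` mask.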
pub fn pad_text_embeddings(
text_embeddings: &[Tensor],
pad_value: f32,
device: &Device,
) -> Result<(Tensor, Tensor, Vec<usize>)> {
if text_embeddings.is_empty() {
candle::bail!("text_embeddings cannot be empty");
}
let batch_size = text_embeddings.len();
let dim = text_embeddings[0].dim(1)?;
let dtype = text_embeddings[0].dtype();
// Compute max length and align to SEQ_MULTI_OF
let lengths: Vec<usize> = text_embeddings
.iter()
.map(|t| t.dim(0))
.collect::<Result<Vec<_>>>()?;
let max_len = *lengths.iter().max().unwrap();
let padded_len = max_len + compute_padding_len(max_len);
// Build padded tensor and mask
let mut padded_list = Vec::with_capacity(batch_size);
let mut mask_list = Vec::with_capacity(batch_size);
for (i, emb) in text_embeddings.iter().enumerate() {
let seq_len = lengths[i];
let pad_len = padded_len - seq_len;
// Pad embedding
let padded = if pad_len > 0 {
let padding = Tensor::full(pad_value, (pad_len, dim), device)?.to_dtype(dtype)?;
Tensor::cat(&[emb, &padding], 0)?
} else {
emb.clone()
};
padded_list.push(padded);
// Create mask: 1 for valid, 0 for padding
let valid = Tensor::ones((seq_len,), DType::U8, device)?;
let mask = if pad_len > 0 {
let invalid = Tensor::zeros((pad_len,), DType::U8, device)?;
Tensor::cat(&[&valid, &invalid], 0)?
} else {
valid
};
mask_list.push(mask);
}
// Stack into batch
let cap_feats = Tensor::stack(&padded_list, 0)?;
let cap_mask = Tensor::stack(&mask_list, 0)?;
Ok((cap_feats, cap_mask, lengths))
}
/// Prepare all inputs, converting variable-length inputs to fixed-shape batch tensors
///
/// # Arguments
/// * `latents` - Latent tensor (B, C, H, W)
/// * `text_embeddings` - Variable-length text embeddings, each of shape (seq_len, cap_feat_dim)
/// * `device` - Device
///
/// # Returns
/// PreparedInputs containing all preprocessed tensors
pub fn prepare_inputs(
latents: &Tensor,
text_embeddings: &[Tensor],
device: &Device,
) -> Result<PreparedInputs> {
// Latents: (B, C, H, W) -> (B, C, 1, H, W) add frame dimension
let latents = latents.unsqueeze(2)?;
// Pad text embeddings
let (cap_feats, cap_mask, text_lengths) = pad_text_embeddings(text_embeddings, 0.0, device)?;
Ok(PreparedInputs {
latents,
cap_feats,
cap_mask,
text_lengths,
})
}
/// Create attention mask for a single sample
/// Useful for testing or simplified scenarios
pub fn create_attention_mask(
valid_len: usize,
total_len: usize,
device: &Device,
) -> Result<Tensor> {
let valid = Tensor::ones((valid_len,), DType::U8, device)?;
if valid_len < total_len {
let invalid = Tensor::zeros((total_len - valid_len,), DType::U8, device)?;
Tensor::cat(&[&valid, &invalid], 0)
} else {
Ok(valid)
}
}
/// Create a batch of uniform text embeddings
///
/// # Arguments
/// * `text_embedding` - Single text embedding (seq_len, dim)
/// * `batch_size` - Number of copies to create
///
/// # Returns
/// Batched text embeddings (batch_size, seq_len, dim)
pub fn batch_text_embedding(text_embedding: &Tensor, batch_size: usize) -> Result<Tensor> {
let (seq_len, dim) = text_embedding.dims2()?;
text_embedding
.unsqueeze(0)?
.broadcast_as((batch_size, seq_len, dim))?
.contiguous()
}
/// Create a batch of uniform masks
///
/// # Arguments
/// * `mask` - Single mask (seq_len,)
/// * `batch_size` - Number of copies to create
///
/// # Returns
/// Batched masks (batch_size, seq_len)
pub fn batch_mask(mask: &Tensor, batch_size: usize) -> Result<Tensor> {
let seq_len = mask.dim(0)?;
mask.unsqueeze(0)?
.broadcast_as((batch_size, seq_len))?
.contiguous()
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/z_image/transformer.rs | candle-transformers/src/models/z_image/transformer.rs | //! Z-Image Transformer (ZImageTransformer2DModel)
//!
//! Core transformer implementation for Z-Image text-to-image generation.
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{linear, linear_no_bias, VarBuilder};
use crate::models::with_tracing::RmsNorm;
// ==================== Flash Attention Wrapper ====================
/// Flash Attention wrapper for CUDA platform
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
#[allow(dead_code)]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
candle::bail!("flash-attn feature not enabled, compile with '--features flash-attn'")
}
// ==================== Constants ====================
/// AdaLN embedding dimension (256)
pub const ADALN_EMBED_DIM: usize = 256;
/// Sequence padding alignment (32)
pub const SEQ_MULTI_OF: usize = 32;
/// Frequency embedding size for timestep encoding
pub const FREQUENCY_EMBEDDING_SIZE: usize = 256;
/// Max period for sinusoidal encoding
pub const MAX_PERIOD: f64 = 10000.0;
// ==================== Config ====================
/// Z-Image Transformer configuration
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
#[serde(default = "default_patch_size")]
pub all_patch_size: Vec<usize>,
#[serde(default = "default_f_patch_size")]
pub all_f_patch_size: Vec<usize>,
#[serde(default = "default_in_channels")]
pub in_channels: usize,
#[serde(default = "default_dim")]
pub dim: usize,
#[serde(default = "default_n_layers")]
pub n_layers: usize,
#[serde(default = "default_n_refiner_layers")]
pub n_refiner_layers: usize,
#[serde(default = "default_n_heads")]
pub n_heads: usize,
#[serde(default = "default_n_kv_heads")]
pub n_kv_heads: usize,
#[serde(default = "default_norm_eps")]
pub norm_eps: f64,
#[serde(default = "default_qk_norm")]
pub qk_norm: bool,
#[serde(default = "default_cap_feat_dim")]
pub cap_feat_dim: usize,
#[serde(default = "default_rope_theta")]
pub rope_theta: f64,
#[serde(default = "default_t_scale")]
pub t_scale: f64,
#[serde(default = "default_axes_dims")]
pub axes_dims: Vec<usize>,
#[serde(default = "default_axes_lens")]
pub axes_lens: Vec<usize>,
/// Whether to use accelerated attention (CUDA flash-attn / Metal SDPA)
/// Default is true, automatically selects optimal implementation per platform
#[serde(default = "default_use_accelerated_attn")]
pub use_accelerated_attn: bool,
}
fn default_use_accelerated_attn() -> bool {
true
}
fn default_patch_size() -> Vec<usize> {
vec![2]
}
fn default_f_patch_size() -> Vec<usize> {
vec![1]
}
fn default_in_channels() -> usize {
16
}
fn default_dim() -> usize {
3840
}
fn default_n_layers() -> usize {
30
}
fn default_n_refiner_layers() -> usize {
2
}
fn default_n_heads() -> usize {
30
}
fn default_n_kv_heads() -> usize {
30
}
fn default_norm_eps() -> f64 {
1e-5
}
fn default_qk_norm() -> bool {
true
}
fn default_cap_feat_dim() -> usize {
2560
}
fn default_rope_theta() -> f64 {
256.0
}
fn default_t_scale() -> f64 {
1000.0
}
fn default_axes_dims() -> Vec<usize> {
vec![32, 48, 48]
}
fn default_axes_lens() -> Vec<usize> {
vec![1536, 512, 512]
}
impl Config {
/// Create configuration for Z-Image Turbo model
pub fn z_image_turbo() -> Self {
Self {
all_patch_size: vec![2],
all_f_patch_size: vec![1],
in_channels: 16,
dim: 3840,
n_layers: 30,
n_refiner_layers: 2,
n_heads: 30,
n_kv_heads: 30,
norm_eps: 1e-5,
qk_norm: true,
cap_feat_dim: 2560,
rope_theta: 256.0,
t_scale: 1000.0,
axes_dims: vec![32, 48, 48],
axes_lens: vec![1536, 512, 512],
use_accelerated_attn: true,
}
}
/// Set whether to use accelerated attention (for debugging)
pub fn set_use_accelerated_attn(&mut self, enabled: bool) {
self.use_accelerated_attn = enabled;
}
/// Get head dimension
pub fn head_dim(&self) -> usize {
self.dim / self.n_heads
}
/// Get hidden dimension for FFN
/// Matches Python: int(dim / 3 * 8) = 10240 for dim=3840
pub fn hidden_dim(&self) -> usize {
(self.dim / 3) * 8
}
}
// ==================== TimestepEmbedder ====================
/// Timestep embedding using sinusoidal encoding + MLP
#[derive(Debug, Clone)]
pub struct TimestepEmbedder {
linear1: candle_nn::Linear,
linear2: candle_nn::Linear,
frequency_embedding_size: usize,
}
impl TimestepEmbedder {
pub fn new(out_size: usize, mid_size: usize, vb: VarBuilder) -> Result<Self> {
let linear1 = linear(FREQUENCY_EMBEDDING_SIZE, mid_size, vb.pp("mlp").pp("0"))?;
let linear2 = linear(mid_size, out_size, vb.pp("mlp").pp("2"))?;
Ok(Self {
linear1,
linear2,
frequency_embedding_size: FREQUENCY_EMBEDDING_SIZE,
})
}
fn timestep_embedding(&self, t: &Tensor, device: &Device, dtype: DType) -> Result<Tensor> {
let half = self.frequency_embedding_size / 2;
let freqs = Tensor::arange(0u32, half as u32, device)?.to_dtype(DType::F32)?;
let freqs = (freqs * (-MAX_PERIOD.ln() / half as f64))?.exp()?;
let args = t
.unsqueeze(1)?
.to_dtype(DType::F32)?
.broadcast_mul(&freqs.unsqueeze(0)?)?;
let embedding = Tensor::cat(&[args.cos()?, args.sin()?], D::Minus1)?;
embedding.to_dtype(dtype)
}
pub fn forward(&self, t: &Tensor) -> Result<Tensor> {
let device = t.device();
let dtype = self.linear1.weight().dtype();
let t_freq = self.timestep_embedding(t, device, dtype)?;
t_freq.apply(&self.linear1)?.silu()?.apply(&self.linear2)
}
}
// ==================== FeedForward (SwiGLU) ====================
/// SwiGLU feedforward network
#[derive(Debug, Clone)]
pub struct FeedForward {
w1: candle_nn::Linear,
w2: candle_nn::Linear,
w3: candle_nn::Linear,
}
impl FeedForward {
pub fn new(dim: usize, hidden_dim: usize, vb: VarBuilder) -> Result<Self> {
let w1 = linear_no_bias(dim, hidden_dim, vb.pp("w1"))?;
let w2 = linear_no_bias(hidden_dim, dim, vb.pp("w2"))?;
let w3 = linear_no_bias(dim, hidden_dim, vb.pp("w3"))?;
Ok(Self { w1, w2, w3 })
}
}
impl Module for FeedForward {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x1 = x.apply(&self.w1)?.silu()?;
let x3 = x.apply(&self.w3)?;
(x1 * x3)?.apply(&self.w2)
}
}
// ==================== QkNorm ====================
/// QK normalization using RMSNorm
#[derive(Debug, Clone)]
pub struct QkNorm {
norm_q: RmsNorm,
norm_k: RmsNorm,
}
impl QkNorm {
pub fn new(head_dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let norm_q = RmsNorm::new(head_dim, eps, vb.pp("norm_q"))?;
let norm_k = RmsNorm::new(head_dim, eps, vb.pp("norm_k"))?;
Ok(Self { norm_q, norm_k })
}
pub fn forward(&self, q: &Tensor, k: &Tensor) -> Result<(Tensor, Tensor)> {
// q, k shape: (B, seq_len, n_heads, head_dim)
let q = self.norm_q.forward(q)?;
let k = self.norm_k.forward(k)?;
Ok((q, k))
}
}
// ==================== RopeEmbedder (3D) ====================
/// 3D Rotary Position Embedding for video/image generation
#[derive(Debug, Clone)]
pub struct RopeEmbedder {
#[allow(dead_code)]
theta: f64,
axes_dims: Vec<usize>,
#[allow(dead_code)]
axes_lens: Vec<usize>,
/// Pre-computed cos cache per axis
cos_cached: Vec<Tensor>,
/// Pre-computed sin cache per axis
sin_cached: Vec<Tensor>,
}
impl RopeEmbedder {
pub fn new(
theta: f64,
axes_dims: Vec<usize>,
axes_lens: Vec<usize>,
device: &Device,
dtype: DType,
) -> Result<Self> {
assert_eq!(axes_dims.len(), axes_lens.len());
let mut cos_cached = Vec::with_capacity(axes_dims.len());
let mut sin_cached = Vec::with_capacity(axes_dims.len());
for (d, e) in axes_dims.iter().zip(axes_lens.iter()) {
let half_d = d / 2;
let inv_freq: Vec<f32> = (0..half_d)
.map(|i| 1.0 / (theta as f32).powf((2 * i) as f32 / *d as f32))
.collect();
let inv_freq = Tensor::from_vec(inv_freq, half_d, device)?;
let positions = Tensor::arange(0u32, *e as u32, device)?.to_dtype(DType::F32)?;
let freqs = positions
.unsqueeze(1)?
.broadcast_mul(&inv_freq.unsqueeze(0)?)?;
cos_cached.push(freqs.cos()?.to_dtype(dtype)?);
sin_cached.push(freqs.sin()?.to_dtype(dtype)?);
}
Ok(Self {
theta,
axes_dims,
axes_lens,
cos_cached,
sin_cached,
})
}
/// Get RoPE cos/sin from position IDs
/// ids: (seq_len, 3) - [frame_id, height_id, width_id]
pub fn forward(&self, ids: &Tensor) -> Result<(Tensor, Tensor)> {
let mut cos_parts = Vec::with_capacity(self.axes_dims.len());
let mut sin_parts = Vec::with_capacity(self.axes_dims.len());
for (i, _) in self.axes_dims.iter().enumerate() {
let axis_ids = ids.i((.., i))?.contiguous()?; // (seq_len,) - must be contiguous for Metal
let cos_i = self.cos_cached[i].index_select(&axis_ids, 0)?;
let sin_i = self.sin_cached[i].index_select(&axis_ids, 0)?;
cos_parts.push(cos_i);
sin_parts.push(sin_i);
}
let cos = Tensor::cat(&cos_parts, D::Minus1)?; // (seq_len, head_dim/2)
let sin = Tensor::cat(&sin_parts, D::Minus1)?;
Ok((cos, sin))
}
}
/// Apply RoPE (real-number form, equivalent to PyTorch complex multiplication)
///
/// x: (B, seq_len, n_heads, head_dim)
/// cos, sin: (seq_len, head_dim/2)
pub fn apply_rotary_emb(x: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> {
let (b, seq_len, n_heads, head_dim) = x.dims4()?;
let half_dim = head_dim / 2;
// Reshape x to interleaved real/imag form: (B, seq_len, n_heads, half_dim, 2)
let x = x.reshape((b, seq_len, n_heads, half_dim, 2))?;
// Extract real and imag parts
let x_real = x.i((.., .., .., .., 0))?; // (B, seq_len, n_heads, half_dim)
let x_imag = x.i((.., .., .., .., 1))?;
// Expand cos/sin for broadcasting: (seq_len, half_dim) -> (1, seq_len, 1, half_dim)
let cos = cos.unsqueeze(0)?.unsqueeze(2)?;
let sin = sin.unsqueeze(0)?.unsqueeze(2)?;
// Complex multiplication: (a + bi)(c + di) = (ac - bd) + (ad + bc)i
let y_real = (x_real.broadcast_mul(&cos)? - x_imag.broadcast_mul(&sin)?)?;
let y_imag = (x_real.broadcast_mul(&sin)? + x_imag.broadcast_mul(&cos)?)?;
// Interleave back
Tensor::stack(&[y_real, y_imag], D::Minus1)?.reshape((b, seq_len, n_heads, head_dim))
}
// ==================== ZImageAttention ====================
/// Z-Image attention with QK normalization and 3D RoPE
#[derive(Debug, Clone)]
pub struct ZImageAttention {
to_q: candle_nn::Linear,
to_k: candle_nn::Linear,
to_v: candle_nn::Linear,
to_out: candle_nn::Linear,
qk_norm: Option<QkNorm>,
n_heads: usize,
head_dim: usize,
use_accelerated_attn: bool,
}
impl ZImageAttention {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dim = cfg.dim;
let n_heads = cfg.n_heads;
let head_dim = cfg.head_dim();
let to_q = linear_no_bias(dim, n_heads * head_dim, vb.pp("to_q"))?;
let to_k = linear_no_bias(dim, cfg.n_kv_heads * head_dim, vb.pp("to_k"))?;
let to_v = linear_no_bias(dim, cfg.n_kv_heads * head_dim, vb.pp("to_v"))?;
let to_out = linear_no_bias(n_heads * head_dim, dim, vb.pp("to_out").pp("0"))?;
let qk_norm = if cfg.qk_norm {
Some(QkNorm::new(head_dim, 1e-5, vb.clone())?)
} else {
None
};
Ok(Self {
to_q,
to_k,
to_v,
to_out,
qk_norm,
n_heads,
head_dim,
use_accelerated_attn: cfg.use_accelerated_attn,
})
}
pub fn forward(
&self,
hidden_states: &Tensor,
attention_mask: Option<&Tensor>,
cos: &Tensor,
sin: &Tensor,
) -> Result<Tensor> {
let (b, seq_len, _) = hidden_states.dims3()?;
// Project to Q, K, V
let q = hidden_states.apply(&self.to_q)?;
let k = hidden_states.apply(&self.to_k)?;
let v = hidden_states.apply(&self.to_v)?;
// Reshape: (B, seq_len, n_heads * head_dim) -> (B, seq_len, n_heads, head_dim)
let q = q.reshape((b, seq_len, self.n_heads, self.head_dim))?;
let k = k.reshape((b, seq_len, self.n_heads, self.head_dim))?;
let v = v.reshape((b, seq_len, self.n_heads, self.head_dim))?;
// Apply QK norm
let (q, k) = if let Some(ref norm) = self.qk_norm {
norm.forward(&q, &k)?
} else {
(q, k)
};
// Apply RoPE
let q = apply_rotary_emb(&q, cos, sin)?;
let k = apply_rotary_emb(&k, cos, sin)?;
// Transpose for attention: (B, n_heads, seq_len, head_dim)
let q = q.transpose(1, 2)?.contiguous()?;
let k = k.transpose(1, 2)?.contiguous()?;
let v = v.transpose(1, 2)?.contiguous()?;
let scale = 1.0 / (self.head_dim as f64).sqrt();
let device = hidden_states.device();
// Cross-platform attention dispatch
let context = self.attention_dispatch(&q, &k, &v, attention_mask, scale, device)?;
// Reshape back: (B, n_heads, seq_len, head_dim) -> (B, seq_len, dim)
let context = context.transpose(1, 2)?.reshape((b, seq_len, ()))?;
context.apply(&self.to_out)
}
/// Cross-platform attention dispatch
fn attention_dispatch(
&self,
q: &Tensor,
k: &Tensor,
v: &Tensor,
mask: Option<&Tensor>,
scale: f64,
device: &Device,
) -> Result<Tensor> {
// If acceleration disabled, use basic implementation
if !self.use_accelerated_attn {
return self.attention_basic(q, k, v, mask, scale);
}
// Platform dispatch: prefer optimal implementation per platform
if device.is_cuda() {
self.attention_cuda(q, k, v, mask, scale)
} else if device.is_metal() {
self.attention_metal(q, k, v, mask, scale)
} else {
// CPU fallback
self.attention_basic(q, k, v, mask, scale)
}
}
/// CUDA: Use Flash Attention
#[allow(unused_variables)]
fn attention_cuda(
&self,
q: &Tensor,
k: &Tensor,
v: &Tensor,
mask: Option<&Tensor>,
scale: f64,
) -> Result<Tensor> {
#[cfg(feature = "flash-attn")]
{
// flash_attn does not directly support custom mask
// Fallback to basic implementation when mask is present
if mask.is_some() {
return self.attention_basic(q, k, v, mask, scale);
}
// flash_attn input format: (batch, seq_len, num_heads, head_size)
// Current format: (batch, num_heads, seq_len, head_size)
let q = q.transpose(1, 2)?;
let k = k.transpose(1, 2)?;
let v = v.transpose(1, 2)?;
let result = flash_attn(&q, &k, &v, scale as f32, false)?;
result.transpose(1, 2)
}
#[cfg(not(feature = "flash-attn"))]
{
// flash-attn not compiled, fallback to basic
self.attention_basic(q, k, v, mask, scale)
}
}
/// Metal: Use fused SDPA kernel
fn attention_metal(
&self,
q: &Tensor,
k: &Tensor,
v: &Tensor,
mask: Option<&Tensor>,
scale: f64,
) -> Result<Tensor> {
// Prepare SDPA format mask
let sdpa_mask = self.prepare_sdpa_mask(mask, q)?;
// candle_nn::ops::sdpa
// Input format: (bs, qhead, seq, hidden) - matches current format
// Supports: BF16/F16/F32, head_dim=128
candle_nn::ops::sdpa(q, k, v, sdpa_mask.as_ref(), false, scale as f32, 1.0)
}
/// Fallback implementation
fn attention_basic(
&self,
q: &Tensor,
k: &Tensor,
v: &Tensor,
mask: Option<&Tensor>,
scale: f64,
) -> Result<Tensor> {
let mut attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
if let Some(m) = mask {
// mask: (B, seq_len) -> (B, 1, 1, seq_len)
let m = m.unsqueeze(1)?.unsqueeze(2)?;
let m = m.to_dtype(attn_weights.dtype())?;
// 1=valid, 0=padding -> 0=valid, -inf=padding
let m = ((m - 1.0)? * 1e9)?;
attn_weights = attn_weights.broadcast_add(&m)?;
}
let attn_probs = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_probs.matmul(v)
}
/// Prepare SDPA format mask
fn prepare_sdpa_mask(&self, mask: Option<&Tensor>, q: &Tensor) -> Result<Option<Tensor>> {
match mask {
Some(m) => {
// mask: (B, seq_len) -> (B, n_heads, seq_len, seq_len)
let (b, _, seq_len, _) = q.dims4()?;
let m = m.unsqueeze(1)?.unsqueeze(2)?;
let m = m.to_dtype(q.dtype())?;
// SDPA uses additive mask: 0=valid, -inf=masked
let m = ((m - 1.0)? * 1e9)?;
// broadcast to (B, n_heads, seq_len, seq_len)
let m = m.broadcast_as((b, self.n_heads, seq_len, seq_len))?;
Ok(Some(m))
}
None => Ok(None),
}
}
}
// ==================== ZImageTransformerBlock ====================
/// Z-Image transformer block with optional AdaLN modulation
#[derive(Debug, Clone)]
pub struct ZImageTransformerBlock {
attention: ZImageAttention,
feed_forward: FeedForward,
attention_norm1: RmsNorm,
attention_norm2: RmsNorm,
ffn_norm1: RmsNorm,
ffn_norm2: RmsNorm,
adaln_modulation: Option<candle_nn::Linear>,
}
impl ZImageTransformerBlock {
pub fn new(cfg: &Config, modulation: bool, vb: VarBuilder) -> Result<Self> {
let dim = cfg.dim;
let hidden_dim = cfg.hidden_dim();
let attention = ZImageAttention::new(cfg, vb.pp("attention"))?;
let feed_forward = FeedForward::new(dim, hidden_dim, vb.pp("feed_forward"))?;
let attention_norm1 = RmsNorm::new(dim, cfg.norm_eps, vb.pp("attention_norm1"))?;
let attention_norm2 = RmsNorm::new(dim, cfg.norm_eps, vb.pp("attention_norm2"))?;
let ffn_norm1 = RmsNorm::new(dim, cfg.norm_eps, vb.pp("ffn_norm1"))?;
let ffn_norm2 = RmsNorm::new(dim, cfg.norm_eps, vb.pp("ffn_norm2"))?;
let adaln_modulation = if modulation {
let adaln_dim = dim.min(ADALN_EMBED_DIM);
Some(linear(
adaln_dim,
4 * dim,
vb.pp("adaLN_modulation").pp("0"),
)?)
} else {
None
};
Ok(Self {
attention,
feed_forward,
attention_norm1,
attention_norm2,
ffn_norm1,
ffn_norm2,
adaln_modulation,
})
}
pub fn forward(
&self,
x: &Tensor,
attn_mask: Option<&Tensor>,
cos: &Tensor,
sin: &Tensor,
adaln_input: Option<&Tensor>,
) -> Result<Tensor> {
if let Some(ref adaln) = self.adaln_modulation {
let adaln_input = adaln_input.expect("adaln_input required when modulation=true");
// (B, 256) -> (B, 4*dim) -> (B, 1, 4*dim) -> chunk into 4
let modulation = adaln_input.apply(adaln)?.unsqueeze(1)?;
let chunks = modulation.chunk(4, D::Minus1)?;
let (scale_msa, gate_msa, scale_mlp, gate_mlp) =
(&chunks[0], &chunks[1], &chunks[2], &chunks[3]);
// Apply tanh gate
let gate_msa = gate_msa.tanh()?;
let gate_mlp = gate_mlp.tanh()?;
let scale_msa = (scale_msa + 1.0)?;
let scale_mlp = (scale_mlp + 1.0)?;
// Attention block
let normed = self.attention_norm1.forward(x)?;
let scaled = normed.broadcast_mul(&scale_msa)?;
let attn_out = self.attention.forward(&scaled, attn_mask, cos, sin)?;
let attn_out = self.attention_norm2.forward(&attn_out)?;
let x = (x + gate_msa.broadcast_mul(&attn_out)?)?;
// FFN block
let normed = self.ffn_norm1.forward(&x)?;
let scaled = normed.broadcast_mul(&scale_mlp)?;
let ffn_out = self.feed_forward.forward(&scaled)?;
let ffn_out = self.ffn_norm2.forward(&ffn_out)?;
x + gate_mlp.broadcast_mul(&ffn_out)?
} else {
// Without modulation
let normed = self.attention_norm1.forward(x)?;
let attn_out = self.attention.forward(&normed, attn_mask, cos, sin)?;
let attn_out = self.attention_norm2.forward(&attn_out)?;
let x = (x + attn_out)?;
let normed = self.ffn_norm1.forward(&x)?;
let ffn_out = self.feed_forward.forward(&normed)?;
let ffn_out = self.ffn_norm2.forward(&ffn_out)?;
x + ffn_out
}
}
}
// ==================== FinalLayer ====================
/// LayerNorm without learnable parameters (elementwise_affine=False)
#[derive(Debug, Clone)]
pub struct LayerNormNoParams {
eps: f64,
}
impl LayerNormNoParams {
pub fn new(eps: f64) -> Self {
Self { eps }
}
}
impl Module for LayerNormNoParams {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
// Subtract mean
let mean_x = (x.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x = x.broadcast_sub(&mean_x)?;
// Divide by std
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
x_normed.to_dtype(x_dtype)
}
}
/// Final layer for output projection
#[derive(Debug, Clone)]
pub struct FinalLayer {
norm_final: LayerNormNoParams,
linear: candle_nn::Linear,
adaln_silu: candle_nn::Linear,
}
impl FinalLayer {
pub fn new(hidden_size: usize, out_channels: usize, vb: VarBuilder) -> Result<Self> {
let norm_final = LayerNormNoParams::new(1e-6);
let linear = candle_nn::linear(hidden_size, out_channels, vb.pp("linear"))?;
let adaln_dim = hidden_size.min(ADALN_EMBED_DIM);
let adaln_silu =
candle_nn::linear(adaln_dim, hidden_size, vb.pp("adaLN_modulation").pp("1"))?;
Ok(Self {
norm_final,
linear,
adaln_silu,
})
}
pub fn forward(&self, x: &Tensor, c: &Tensor) -> Result<Tensor> {
let scale = c.silu()?.apply(&self.adaln_silu)?;
let scale = (scale + 1.0)?.unsqueeze(1)?;
let x = self.norm_final.forward(x)?.broadcast_mul(&scale)?;
x.apply(&self.linear)
}
}
// ==================== Patchify / Unpatchify ====================
/// Convert image to patch sequence
/// Matches Python: image.view(C, F_t, pF, H_t, pH, W_t, pW).permute(1,3,5,2,4,6,0)
///
/// For Z-Image with F=1, pF=1, we optimize to use 6D operations.
/// input: (B, C, 1, H, W)
/// output: (B, num_patches, patch_dim), (F, H, W) original size
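///
/// Shape sketch (patch_size = 2, f_patch_size = 1): a (1, 16, 1, 128, 128) latent becomes
/// (1, 4096, 64), since 64 * 64 = 4096 patches and 2 * 2 * 16 = 64 features per patch.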
pub fn patchify(
x: &Tensor,
patch_size: usize,
f_patch_size: usize,
) -> Result<(Tensor, (usize, usize, usize))> {
let (b, c, f, h, w) = x.dims5()?;
let ph = patch_size;
let pw = patch_size;
let pf = f_patch_size;
let f_tokens = f / pf;
let h_tokens = h / ph;
let w_tokens = w / pw;
let num_patches = f_tokens * h_tokens * w_tokens;
let patch_dim = pf * ph * pw * c;
// For F=1, pF=1 case (image generation), use optimized 6D path
if f == 1 && pf == 1 {
// Step 1: Squeeze F dimension: (B, C, 1, H, W) -> (B, C, H, W)
let x = x.squeeze(2)?;
// Step 2: Reshape H into (H_tokens, pH): (B, C, H, W) -> (B, C, H_t, pH, W)
let x = x.reshape((b, c, h_tokens, ph, w))?;
// Step 3: Reshape W into (W_tokens, pW): (B, C, H_t, pH, W) -> (B, C, H_t, pH, W_t, pW)
let x = x.reshape((b, c, h_tokens, ph, w_tokens, pw))?;
// Step 4: Permute to match Python: (C, H_t, pH, W_t, pW) -> (H_t, W_t, pH, pW, C)
// For batch: (B, C, H_t, pH, W_t, pW) -> (B, H_t, W_t, pH, pW, C)
// Permutation: (0, 2, 4, 3, 5, 1)
let x = x.permute((0, 2, 4, 3, 5, 1))?;
// Step 5: Reshape to patches: (B, H_t, W_t, pH, pW, C) -> (B, H_t*W_t, pH*pW*C)
let x = x.reshape((b, num_patches, patch_dim))?;
Ok((x, (f, h, w)))
} else {
// General case: use contiguous + reshape approach
// This is less common for Z-Image image generation
let x = x.permute((0, 2, 3, 4, 1))?.contiguous()?; // (B, F, H, W, C)
let x = x.reshape((b, f_tokens, pf, h_tokens, ph, w_tokens * pw * c))?;
let x = x.permute((0, 1, 3, 5, 2, 4))?.contiguous()?;
let x = x.reshape((b, num_patches, patch_dim))?;
Ok((x, (f, h, w)))
}
}
/// Convert patch sequence back to image
/// Matches Python: x.view(F_t, H_t, W_t, pF, pH, pW, C).permute(6,0,3,1,4,2,5)
///
/// For Z-Image with F=1, pF=1, we optimize to use 6D operations.
/// input: (B, seq_len, patch_dim)
/// output: (B, C, F, H, W)
pub fn unpatchify(
x: &Tensor,
size: (usize, usize, usize),
patch_size: usize,
f_patch_size: usize,
out_channels: usize,
) -> Result<Tensor> {
let (f, h, w) = size;
let ph = patch_size;
let pw = patch_size;
let pf = f_patch_size;
let f_tokens = f / pf;
let h_tokens = h / ph;
let w_tokens = w / pw;
let ori_len = f_tokens * h_tokens * w_tokens;
let (b, _, _) = x.dims3()?;
let x = x.narrow(1, 0, ori_len)?; // Remove padding
// For F=1, pF=1 case (image generation), use optimized 6D path
if f == 1 && pf == 1 {
// Step 1: Reshape to (B, H_t, W_t, pH, pW, C)
let x = x.reshape((b, h_tokens, w_tokens, ph, pw, out_channels))?;
// Step 2: Permute to match Python: (H_t, W_t, pH, pW, C) -> (C, H_t, pH, W_t, pW)
// For batch: (B, H_t, W_t, pH, pW, C) -> (B, C, H_t, pH, W_t, pW)
// Permutation: (0, 5, 1, 3, 2, 4)
let x = x.permute((0, 5, 1, 3, 2, 4))?;
// Step 3: Reshape to combine H and W: (B, C, H_t, pH, W_t, pW) -> (B, C, H, W)
let x = x.reshape((b, out_channels, h, w))?;
// Step 4: Add back F dimension: (B, C, H, W) -> (B, C, 1, H, W)
let x = x.unsqueeze(2)?;
Ok(x)
} else {
// General case
let x = x.reshape((b, f_tokens, h_tokens, w_tokens, pf * ph * pw * out_channels))?;
let x = x.reshape((b, f_tokens, h_tokens, w_tokens * pf, ph, pw * out_channels))?;
let x = x.permute((0, 5, 1, 3, 2, 4))?.contiguous()?;
let x = x.reshape((b, out_channels, f, h, w))?;
Ok(x)
}
}
/// Create 3D coordinate grid for RoPE position IDs
/// size: (F, H, W)
/// start: (f0, h0, w0)
/// output: (F*H*W, 3)
pub fn create_coordinate_grid(
size: (usize, usize, usize),
start: (usize, usize, usize),
device: &Device,
) -> Result<Tensor> {
let (f, h, w) = size;
let (f0, h0, w0) = start;
let mut coords = Vec::with_capacity(f * h * w * 3);
for fi in 0..f {
for hi in 0..h {
for wi in 0..w {
coords.push((f0 + fi) as u32);
coords.push((h0 + hi) as u32);
coords.push((w0 + wi) as u32);
}
}
}
Tensor::from_vec(coords, (f * h * w, 3), device)
}
// ==================== ZImageTransformer2DModel ====================
/// Z-Image Transformer 2D Model
#[derive(Debug, Clone)]
pub struct ZImageTransformer2DModel {
t_embedder: TimestepEmbedder,
cap_embedder_norm: RmsNorm,
cap_embedder_linear: candle_nn::Linear,
x_embedder: candle_nn::Linear,
final_layer: FinalLayer,
#[allow(dead_code)]
x_pad_token: Tensor,
#[allow(dead_code)]
cap_pad_token: Tensor,
noise_refiner: Vec<ZImageTransformerBlock>,
context_refiner: Vec<ZImageTransformerBlock>,
layers: Vec<ZImageTransformerBlock>,
rope_embedder: RopeEmbedder,
cfg: Config,
}
impl ZImageTransformer2DModel {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let device = vb.device();
let dtype = vb.dtype();
// TimestepEmbedder
let adaln_dim = cfg.dim.min(ADALN_EMBED_DIM);
let t_embedder = TimestepEmbedder::new(adaln_dim, 1024, vb.pp("t_embedder"))?;
// Caption embedder
let cap_embedder_norm = RmsNorm::new(
cfg.cap_feat_dim,
cfg.norm_eps,
vb.pp("cap_embedder").pp("0"),
)?;
let cap_embedder_linear = linear(cfg.cap_feat_dim, cfg.dim, vb.pp("cap_embedder").pp("1"))?;
// Patch embedder (assuming patch_size=2, f_patch_size=1)
let patch_dim = cfg.all_f_patch_size[0]
* cfg.all_patch_size[0]
* cfg.all_patch_size[0]
* cfg.in_channels;
let x_embedder = linear(patch_dim, cfg.dim, vb.pp("all_x_embedder").pp("2-1"))?;
// Final layer
let out_channels = cfg.all_patch_size[0]
* cfg.all_patch_size[0]
* cfg.all_f_patch_size[0]
* cfg.in_channels;
let final_layer =
FinalLayer::new(cfg.dim, out_channels, vb.pp("all_final_layer").pp("2-1"))?;
// Pad tokens
let x_pad_token = vb.get((1, cfg.dim), "x_pad_token")?;
let cap_pad_token = vb.get((1, cfg.dim), "cap_pad_token")?;
// Noise refiner (with modulation)
let mut noise_refiner = Vec::with_capacity(cfg.n_refiner_layers);
for i in 0..cfg.n_refiner_layers {
noise_refiner.push(ZImageTransformerBlock::new(
cfg,
true,
vb.pp("noise_refiner").pp(i),
)?);
}
// Context refiner (without modulation)
let mut context_refiner = Vec::with_capacity(cfg.n_refiner_layers);
for i in 0..cfg.n_refiner_layers {
context_refiner.push(ZImageTransformerBlock::new(
cfg,
false,
vb.pp("context_refiner").pp(i),
)?);
}
// Main layers (with modulation)
let mut layers = Vec::with_capacity(cfg.n_layers);
for i in 0..cfg.n_layers {
layers.push(ZImageTransformerBlock::new(
cfg,
true,
vb.pp("layers").pp(i),
)?);
}
// RoPE embedder
let rope_embedder = RopeEmbedder::new(
cfg.rope_theta,
cfg.axes_dims.clone(),
cfg.axes_lens.clone(),
device,
dtype,
)?;
Ok(Self {
t_embedder,
cap_embedder_norm,
cap_embedder_linear,
x_embedder,
final_layer,
x_pad_token,
cap_pad_token,
noise_refiner,
context_refiner,
layers,
rope_embedder,
cfg: cfg.clone(),
})
}
/// Forward pass
///
/// # Arguments
/// * `x` - Latent tensor (B, C, F, H, W)
/// * `t` - Timesteps [0, 1] (B,)
/// * `cap_feats` - Caption features (B, text_len, cap_feat_dim)
/// * `cap_mask` - Caption attention mask (B, text_len), 1=valid, 0=padding
pub fn forward(
&self,
x: &Tensor,
t: &Tensor,
cap_feats: &Tensor,
cap_mask: &Tensor,
) -> Result<Tensor> {
let device = x.device();
let (b, _c, f, h, w) = x.dims5()?;
let patch_size = self.cfg.all_patch_size[0];
let f_patch_size = self.cfg.all_f_patch_size[0];
// 1. Timestep embedding
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/z_image/scheduler.rs | candle-transformers/src/models/z_image/scheduler.rs | //! FlowMatch Euler Discrete Scheduler for Z-Image
//!
//! Implements the flow matching scheduler used in Z-Image generation.
use candle::{Result, Tensor};
/// FlowMatchEulerDiscreteScheduler configuration
#[derive(Debug, Clone, serde::Deserialize)]
pub struct SchedulerConfig {
#[serde(default = "default_num_train_timesteps")]
pub num_train_timesteps: usize,
#[serde(default = "default_shift")]
pub shift: f64,
#[serde(default)]
pub use_dynamic_shifting: bool,
}
fn default_num_train_timesteps() -> usize {
1000
}
fn default_shift() -> f64 {
3.0
}
impl Default for SchedulerConfig {
fn default() -> Self {
Self {
num_train_timesteps: default_num_train_timesteps(),
shift: default_shift(),
use_dynamic_shifting: false,
}
}
}
impl SchedulerConfig {
/// Create configuration for Z-Image Turbo
pub fn z_image_turbo() -> Self {
Self {
num_train_timesteps: 1000,
shift: 3.0,
use_dynamic_shifting: false,
}
}
}
/// FlowMatch Euler Discrete Scheduler
#[derive(Debug, Clone)]
pub struct FlowMatchEulerDiscreteScheduler {
/// Configuration
pub config: SchedulerConfig,
/// Timesteps for inference
pub timesteps: Vec<f64>,
/// Sigma values
pub sigmas: Vec<f64>,
/// Minimum sigma
pub sigma_min: f64,
/// Maximum sigma
pub sigma_max: f64,
/// Current step index
step_index: usize,
}
impl FlowMatchEulerDiscreteScheduler {
pub fn new(config: SchedulerConfig) -> Self {
let num_train_timesteps = config.num_train_timesteps;
let shift = config.shift;
// Generate initial sigmas
let timesteps: Vec<f64> = (1..=num_train_timesteps).rev().map(|t| t as f64).collect();
let sigmas: Vec<f64> = timesteps
.iter()
.map(|&t| t / num_train_timesteps as f64)
.collect();
// Apply shift
let sigmas: Vec<f64> = if !config.use_dynamic_shifting {
sigmas
.iter()
.map(|&s| shift * s / (1.0 + (shift - 1.0) * s))
.collect()
} else {
sigmas
};
let timesteps: Vec<f64> = sigmas
.iter()
.map(|&s| s * num_train_timesteps as f64)
.collect();
let sigma_max = sigmas[0];
let sigma_min = *sigmas.last().unwrap_or(&0.0);
Self {
config,
timesteps,
sigmas,
sigma_min,
sigma_max,
step_index: 0,
}
}
/// Set timesteps for inference
///
/// # Arguments
/// * `num_inference_steps` - Number of denoising steps
/// * `mu` - Optional time shift parameter (from calculate_shift)
pub fn set_timesteps(&mut self, num_inference_steps: usize, mu: Option<f64>) {
let sigma_max = self.sigmas[0];
let sigma_min = *self.sigmas.last().unwrap_or(&0.0);
// Linear interpolation to generate timesteps
let timesteps: Vec<f64> = (0..num_inference_steps)
.map(|i| {
let t = i as f64 / num_inference_steps as f64;
sigma_max * (1.0 - t) + sigma_min * t
})
.map(|s| s * self.config.num_train_timesteps as f64)
.collect();
let mut sigmas: Vec<f64> = timesteps
.iter()
.map(|&t| t / self.config.num_train_timesteps as f64)
.collect();
// Apply shift
if let Some(mu) = mu {
if self.config.use_dynamic_shifting {
// time_shift: exp(mu) / (exp(mu) + (1/t - 1))
sigmas = sigmas
.iter()
.map(|&t| {
if t <= 0.0 {
0.0
} else {
let e_mu = mu.exp();
e_mu / (e_mu + (1.0 / t - 1.0))
}
})
.collect();
}
} else if !self.config.use_dynamic_shifting {
let shift = self.config.shift;
sigmas = sigmas
.iter()
.map(|&s| shift * s / (1.0 + (shift - 1.0) * s))
.collect();
}
// Add terminal sigma = 0
sigmas.push(0.0);
self.timesteps = timesteps;
self.sigmas = sigmas;
self.step_index = 0;
}
/// Get current sigma value
pub fn current_sigma(&self) -> f64 {
self.sigmas[self.step_index]
}
/// Get current timestep (for model input)
/// Converts scheduler timestep to model input format: (1000 - t) / 1000
pub fn current_timestep_normalized(&self) -> f64 {
let t = self.timesteps.get(self.step_index).copied().unwrap_or(0.0);
(1000.0 - t) / 1000.0
}
/// Euler step
///
/// # Arguments
/// * `model_output` - Model predicted velocity field
/// * `sample` - Current sample x_t
///
/// # Returns
/// Next sample x_{t-1}
pub fn step(&mut self, model_output: &Tensor, sample: &Tensor) -> Result<Tensor> {
let sigma = self.sigmas[self.step_index];
let sigma_next = self.sigmas[self.step_index + 1];
let dt = sigma_next - sigma;
// prev_sample = sample + dt * model_output
let prev_sample = (sample + (model_output * dt)?)?;
self.step_index += 1;
Ok(prev_sample)
}
/// Reset scheduler state
pub fn reset(&mut self) {
self.step_index = 0;
}
/// Get number of inference steps
pub fn num_inference_steps(&self) -> usize {
self.timesteps.len()
}
/// Get current step index
pub fn step_index(&self) -> usize {
self.step_index
}
/// Check if denoising is complete
pub fn is_complete(&self) -> bool {
self.step_index >= self.timesteps.len()
}
}
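// Denoising-loop sketch (illustrative; `model_forward` and `latents` are placeholders, not part
// of this module):
//
//   let mut scheduler = FlowMatchEulerDiscreteScheduler::new(SchedulerConfig::z_image_turbo());
//   scheduler.set_timesteps(8, None);
//   while !scheduler.is_complete() {
//       let t = scheduler.current_timestep_normalized();
//       let velocity = model_forward(&latents, t)?;
//       latents = scheduler.step(&velocity, &latents)?; // Euler update towards sigma = 0
//   }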
/// Calculate timestep shift parameter mu
///
/// # Arguments
/// * `image_seq_len` - Image sequence length (after patchify)
/// * `base_seq_len` - Base sequence length (typically 256)
/// * `max_seq_len` - Maximum sequence length (typically 4096)
/// * `base_shift` - Base shift value (typically 0.5)
/// * `max_shift` - Maximum shift value (typically 1.15)
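///
/// Worked example with the constants below: for `image_seq_len = 1024`,
/// `m = (1.15 - 0.5) / (4096 - 256) ≈ 1.69e-4`, `b ≈ 0.4567`, so `mu ≈ 0.63`.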
pub fn calculate_shift(
image_seq_len: usize,
base_seq_len: usize,
max_seq_len: usize,
base_shift: f64,
max_shift: f64,
) -> f64 {
let m = (max_shift - base_shift) / (max_seq_len - base_seq_len) as f64;
let b = base_shift - m * base_seq_len as f64;
image_seq_len as f64 * m + b
}
/// Constants for shift calculation
pub const BASE_IMAGE_SEQ_LEN: usize = 256;
pub const MAX_IMAGE_SEQ_LEN: usize = 4096;
pub const BASE_SHIFT: f64 = 0.5;
pub const MAX_SHIFT: f64 = 1.15;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/openclip/text_model.rs | candle-transformers/src/models/openclip/text_model.rs | //! Text encoder as used in most OpenCLIP pretrained models
//! https://github.com/mlfoundations/open_clip
use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::{
embedding, layer_norm, linear, ops::softmax_last_dim, Embedding, LayerNorm, Linear, Module,
VarBuilder,
};
#[derive(Debug, Clone)]
pub struct Config {
pub vocab_size: usize,
pub embed_dim: usize,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub pad_with: Option<String>,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub projection_dim: usize,
}
impl Config {
pub fn vit_base_patch32() -> Self {
Self {
vocab_size: 49408,
embed_dim: 512,
intermediate_size: 2048,
max_position_embeddings: 77,
pad_with: None,
num_hidden_layers: 12,
num_attention_heads: 8,
projection_dim: 512,
}
}
}
#[derive(Clone, Debug)]
struct TextEmbeddings {
token_embedding: Embedding,
position_embedding: Tensor,
}
impl TextEmbeddings {
fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let token_embedding = embedding(c.vocab_size, c.embed_dim, vs.pp("token_embedding"))?;
let position_embedding = vs.get(
(c.max_position_embeddings, c.embed_dim),
"positional_embedding",
)?;
Ok(TextEmbeddings {
token_embedding,
position_embedding,
})
}
}
impl Module for TextEmbeddings {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let seq_length = input_ids.dim(D::Minus1)?;
let inputs_embeds = self.token_embedding.forward(input_ids)?;
let position_embedding = self.position_embedding.narrow(0, 0, seq_length)?;
inputs_embeds.broadcast_add(&position_embedding)
}
}
#[derive(Clone, Debug)]
struct Attention {
k_proj: candle_nn::Linear,
v_proj: candle_nn::Linear,
q_proj: candle_nn::Linear,
out_proj: Linear,
head_dim: usize,
scale: f64,
num_attention_heads: usize,
}
impl Attention {
fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> {
let embed_dim = c.embed_dim;
let num_attention_heads = c.num_attention_heads;
let in_proj_weights = vs
.get((embed_dim * 3, embed_dim), "in_proj_weight")?
.chunk(3, 0)?;
let (q_w, k_w, v_w) = (
&in_proj_weights[0],
&in_proj_weights[1],
&in_proj_weights[2],
);
let in_proj_biases = vs.get(embed_dim * 3, "in_proj_bias")?.chunk(3, 0)?;
let (q_b, k_b, v_b) = (&in_proj_biases[0], &in_proj_biases[1], &in_proj_biases[2]);
let q_proj = Linear::new(q_w.clone(), Some(q_b.clone()));
let k_proj = Linear::new(k_w.clone(), Some(k_b.clone()));
let v_proj = Linear::new(v_w.clone(), Some(v_b.clone()));
let out_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("out_proj"))?;
let head_dim = embed_dim / num_attention_heads;
let scale = (head_dim as f64).powf(-0.5);
Ok(Attention {
k_proj,
v_proj,
q_proj,
out_proj,
head_dim,
scale,
num_attention_heads,
})
}
fn shape_multihead(&self, xs: &Tensor, bsz: usize, seq_len: usize) -> Result<Tensor> {
xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?
.to_dtype(DType::F32)
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let in_dtype = xs.dtype();
let (bsz, seq_len, embed_dim) = xs.dims3()?;
let q = self.shape_multihead(&self.q_proj.forward(xs)?, bsz, seq_len)?;
let k = self.shape_multihead(&self.k_proj.forward(xs)?, bsz, seq_len)?;
let v = self.shape_multihead(&self.v_proj.forward(xs)?, bsz, seq_len)?;
let q = (q * self.scale)?;
let attn_weights = q.matmul(&k.transpose(D::Minus1, D::Minus2)?)?;
let attn_weights = softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&v)?.to_dtype(in_dtype)?;
let attn_output = attn_output
.transpose(1, 2)?
.contiguous()?
.reshape((bsz, seq_len, embed_dim))?;
let out = self.out_proj.forward(&attn_output)?;
Ok(out)
}
}
#[derive(Clone, Debug)]
struct Mlp {
fc1: Linear,
fc2: Linear,
}
impl Mlp {
fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let fc1 = linear(c.embed_dim, c.intermediate_size, vs.pp("c_fc"))?;
let fc2 = linear(c.intermediate_size, c.embed_dim, vs.pp("c_proj"))?;
Ok(Mlp { fc1, fc2 })
}
}
impl Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.fc1.forward(xs)?;
self.fc2.forward(&xs.gelu_erf()?)
}
}
#[derive(Clone, Debug)]
struct EncoderLayer {
self_attn: Attention,
layer_norm1: LayerNorm,
mlp: Mlp,
layer_norm2: LayerNorm,
}
impl EncoderLayer {
fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let self_attn = Attention::new(vs.pp("attn"), c)?;
let layer_norm1 = layer_norm(c.embed_dim, 1e-5, vs.pp("ln_1"))?;
let mlp = Mlp::new(vs.pp("mlp"), c)?;
let layer_norm2 = layer_norm(c.embed_dim, 1e-5, vs.pp("ln_2"))?;
Ok(EncoderLayer {
self_attn,
layer_norm1,
mlp,
layer_norm2,
})
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let residual = xs;
let xs = self.layer_norm1.forward(xs)?;
let xs = self.self_attn.forward(&xs)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self.layer_norm2.forward(&xs)?;
let xs = self.mlp.forward(&xs)?;
let out = (xs + residual)?;
Ok(out)
}
}
#[derive(Clone, Debug)]
pub struct Encoder {
layers: Vec<EncoderLayer>,
}
impl Encoder {
pub fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let vs = vs.pp("resblocks");
let mut layers: Vec<EncoderLayer> = Vec::new();
for index in 0..c.num_hidden_layers {
let layer = EncoderLayer::new(vs.pp(index.to_string()), c)?;
layers.push(layer)
}
Ok(Encoder { layers })
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs)?;
}
Ok(xs)
}
}
/// A text transformer as used in CLIP variants.
#[derive(Clone, Debug)]
pub struct OpenClipTextTransformer {
embeddings: TextEmbeddings,
encoder: Encoder,
final_layer_norm: LayerNorm,
}
impl OpenClipTextTransformer {
pub fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let embeddings = TextEmbeddings::new(vs.clone(), c)?;
let final_layer_norm = layer_norm(c.embed_dim, 1e-5, vs.pp("ln_final"))?;
let encoder = Encoder::new(vs.pp("transformer"), c)?;
Ok(OpenClipTextTransformer {
embeddings,
encoder,
final_layer_norm,
})
}
pub fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let input_ids = self.embeddings.forward(input_ids)?;
let input_ids = self.encoder.forward(&input_ids)?;
self.final_layer_norm.forward(&input_ids)
}
}
impl Module for OpenClipTextTransformer {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let output = self.forward(input_ids)?;
let sequence_max_indices = input_ids.argmax(D::Minus1)?.to_dtype(DType::I64)?;
let mut indices = Vec::new();
for (batch_idx, &seq_idx) in sequence_max_indices.to_vec1::<i64>()?.iter().enumerate() {
let index = output.i((batch_idx, seq_idx as usize))?.unsqueeze(0)?;
indices.push(index);
}
Tensor::cat(&indices, 0)
}
}
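// Usage sketch (assuming a VarBuilder scoped to the text tower of an OpenCLIP checkpoint;
// tokenization is out of scope here):
//
//   let cfg = Config::vit_base_patch32();
//   let model = OpenClipTextTransformer::new(vb, &cfg)?;
//   let hidden = model.forward(&token_ids)?;            // (B, seq_len, embed_dim)
//   let pooled = Module::forward(&model, &token_ids)?;  // embedding at the EOT token, (B, embed_dim)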
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/openclip/mod.rs | candle-transformers/src/models/openclip/mod.rs | //! Open Contrastive Language-Image Pre-Training
//!
//! Open Contrastive Language-Image Pre-Training (OpenCLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - 💻 [GH Link](https://github.com/mlfoundations/open_clip)
//! - 📝 [Paper](https://arxiv.org/abs/2212.07143)
//!
//! ## Overview
//!
//! 
pub mod text_model;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/wuerstchen/prior.rs | candle-transformers/src/models/wuerstchen/prior.rs | use super::common::{AttnBlock, ResBlock, TimestepBlock};
use candle::{DType, Result, Tensor, D};
use candle_nn::VarBuilder;
#[derive(Debug)]
struct Block {
res_block: ResBlock,
ts_block: TimestepBlock,
attn_block: AttnBlock,
}
#[derive(Debug)]
pub struct WPrior {
projection: candle_nn::Conv2d,
cond_mapper_lin1: candle_nn::Linear,
cond_mapper_lin2: candle_nn::Linear,
blocks: Vec<Block>,
out_ln: super::common::WLayerNorm,
out_conv: candle_nn::Conv2d,
c_r: usize,
}
impl WPrior {
#[allow(clippy::too_many_arguments)]
pub fn new(
c_in: usize,
c: usize,
c_cond: usize,
c_r: usize,
depth: usize,
nhead: usize,
use_flash_attn: bool,
vb: VarBuilder,
) -> Result<Self> {
let projection = candle_nn::conv2d(c_in, c, 1, Default::default(), vb.pp("projection"))?;
let cond_mapper_lin1 = candle_nn::linear(c_cond, c, vb.pp("cond_mapper.0"))?;
let cond_mapper_lin2 = candle_nn::linear(c, c, vb.pp("cond_mapper.2"))?;
let out_ln = super::common::WLayerNorm::new(c)?;
let out_conv = candle_nn::conv2d(c, c_in * 2, 1, Default::default(), vb.pp("out.1"))?;
let mut blocks = Vec::with_capacity(depth);
for index in 0..depth {
let res_block = ResBlock::new(c, 0, 3, vb.pp(format!("blocks.{}", 3 * index)))?;
let ts_block = TimestepBlock::new(c, c_r, vb.pp(format!("blocks.{}", 3 * index + 1)))?;
let attn_block = AttnBlock::new(
c,
c,
nhead,
true,
use_flash_attn,
vb.pp(format!("blocks.{}", 3 * index + 2)),
)?;
blocks.push(Block {
res_block,
ts_block,
attn_block,
})
}
Ok(Self {
projection,
cond_mapper_lin1,
cond_mapper_lin2,
blocks,
out_ln,
out_conv,
c_r,
})
}
pub fn gen_r_embedding(&self, r: &Tensor) -> Result<Tensor> {
const MAX_POSITIONS: usize = 10000;
let r = (r * MAX_POSITIONS as f64)?;
let half_dim = self.c_r / 2;
let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64;
let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)?
* -emb)?
.exp()?;
let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?;
let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], 1)?;
let emb = if self.c_r % 2 == 1 {
emb.pad_with_zeros(D::Minus1, 0, 1)?
} else {
emb
};
emb.to_dtype(r.dtype())
}
pub fn forward(&self, xs: &Tensor, r: &Tensor, c: &Tensor) -> Result<Tensor> {
let x_in = xs;
let mut xs = xs.apply(&self.projection)?;
let c_embed = c
.apply(&self.cond_mapper_lin1)?
.apply(&|xs: &_| candle_nn::ops::leaky_relu(xs, 0.2))?
.apply(&self.cond_mapper_lin2)?;
let r_embed = self.gen_r_embedding(r)?;
for block in self.blocks.iter() {
xs = block.res_block.forward(&xs, None)?;
xs = block.ts_block.forward(&xs, &r_embed)?;
xs = block.attn_block.forward(&xs, &c_embed)?;
}
let ab = xs.apply(&self.out_ln)?.apply(&self.out_conv)?.chunk(2, 1)?;
(x_in - &ab[0])? / ((&ab[1] - 1.)?.abs()? + 1e-5)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/wuerstchen/attention_processor.rs | candle-transformers/src/models/wuerstchen/attention_processor.rs | use candle::{Module, Result, Tensor};
use candle_nn::{linear, Linear, VarBuilder};
// A simplified version of:
// https://github.com/huggingface/diffusers/blob/119ad2c3dc8a8fb8446a83f4bf6f20929487b47f/src/diffusers/models/attention_processor.py#L38
#[derive(Debug)]
pub struct Attention {
to_q: Linear,
to_k: Linear,
to_v: Linear,
to_out: Linear,
heads: usize,
scale: f64,
use_flash_attn: bool,
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
impl Attention {
pub fn new(
query_dim: usize,
heads: usize,
dim_head: usize,
use_flash_attn: bool,
vb: VarBuilder,
) -> Result<Self> {
let inner_dim = dim_head * heads;
let scale = 1.0 / f64::sqrt(dim_head as f64);
let to_q = linear(query_dim, inner_dim, vb.pp("to_q"))?;
let to_k = linear(query_dim, inner_dim, vb.pp("to_k"))?;
let to_v = linear(query_dim, inner_dim, vb.pp("to_v"))?;
let to_out = linear(inner_dim, query_dim, vb.pp("to_out.0"))?;
Ok(Self {
to_q,
to_k,
to_v,
to_out,
scale,
heads,
use_flash_attn,
})
}
fn batch_to_head_dim(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, dim) = xs.dims3()?;
xs.reshape((b_size / self.heads, self.heads, seq_len, dim))?
.permute((0, 2, 1, 3))?
.reshape((b_size / self.heads, seq_len, dim * self.heads))
}
fn head_to_batch_dim(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, dim) = xs.dims3()?;
xs.reshape((b_size, seq_len, self.heads, dim / self.heads))?
.permute((0, 2, 1, 3))?
.reshape((b_size * self.heads, seq_len, dim / self.heads))
}
fn get_attention_scores(&self, query: &Tensor, key: &Tensor) -> Result<Tensor> {
let attn_probs = (query.matmul(&key.t()?)? * self.scale)?;
candle_nn::ops::softmax_last_dim(&attn_probs)
}
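    /// Cross-attention over spatial positions: the (b, c, h, w) input is flattened into a
    /// (b, h*w, c) query sequence while keys and values come from `encoder_hidden_states`.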
pub fn forward(&self, xs: &Tensor, encoder_hidden_states: &Tensor) -> Result<Tensor> {
let (b_size, channel, h, w) = xs.dims4()?;
let xs = xs.reshape((b_size, channel, h * w))?.t()?;
let query = self.to_q.forward(&xs)?;
let key = self.to_k.forward(encoder_hidden_states)?;
let value = self.to_v.forward(encoder_hidden_states)?;
let query = self.head_to_batch_dim(&query)?;
let key = self.head_to_batch_dim(&key)?;
let value = self.head_to_batch_dim(&value)?;
let xs = if self.use_flash_attn {
let init_dtype = query.dtype();
let q = query
.to_dtype(candle::DType::F16)?
.unsqueeze(0)?
.transpose(1, 2)?;
let k = key
.to_dtype(candle::DType::F16)?
.unsqueeze(0)?
.transpose(1, 2)?;
let v = value
.to_dtype(candle::DType::F16)?
.unsqueeze(0)?
.transpose(1, 2)?;
flash_attn(&q, &k, &v, self.scale as f32, false)?
.transpose(1, 2)?
.squeeze(0)?
.to_dtype(init_dtype)?
} else {
            let attn_probs = self.get_attention_scores(&query, &key)?;
            attn_probs.matmul(&value)?
};
let xs = self.batch_to_head_dim(&xs)?;
self.to_out
.forward(&xs)?
.t()?
.reshape((b_size, channel, h, w))
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/wuerstchen/diffnext.rs | candle-transformers/src/models/wuerstchen/diffnext.rs | use super::common::{AttnBlock, GlobalResponseNorm, LayerNormNoWeights, TimestepBlock, WLayerNorm};
use candle::{DType, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
#[derive(Debug)]
pub struct ResBlockStageB {
depthwise: candle_nn::Conv2d,
norm: WLayerNorm,
channelwise_lin1: candle_nn::Linear,
channelwise_grn: GlobalResponseNorm,
channelwise_lin2: candle_nn::Linear,
}
impl ResBlockStageB {
pub fn new(c: usize, c_skip: usize, ksize: usize, vb: VarBuilder) -> Result<Self> {
let cfg = candle_nn::Conv2dConfig {
groups: c,
padding: ksize / 2,
..Default::default()
};
let depthwise = candle_nn::conv2d(c, c, ksize, cfg, vb.pp("depthwise"))?;
let norm = WLayerNorm::new(c)?;
let channelwise_lin1 = candle_nn::linear(c + c_skip, c * 4, vb.pp("channelwise.0"))?;
let channelwise_grn = GlobalResponseNorm::new(4 * c, vb.pp("channelwise.2"))?;
let channelwise_lin2 = candle_nn::linear(c * 4, c, vb.pp("channelwise.4"))?;
Ok(Self {
depthwise,
norm,
channelwise_lin1,
channelwise_grn,
channelwise_lin2,
})
}
pub fn forward(&self, xs: &Tensor, x_skip: Option<&Tensor>) -> Result<Tensor> {
let x_res = xs;
let xs = xs.apply(&self.depthwise)?.apply(&self.norm)?;
let xs = match x_skip {
None => xs.clone(),
Some(x_skip) => Tensor::cat(&[&xs, x_skip], 1)?,
};
let xs = xs
.permute((0, 2, 3, 1))?
.contiguous()?
.apply(&self.channelwise_lin1)?
.gelu()?
.apply(&self.channelwise_grn)?
.apply(&self.channelwise_lin2)?
.permute((0, 3, 1, 2))?;
xs + x_res
}
}
#[derive(Debug)]
struct SubBlock {
res_block: ResBlockStageB,
ts_block: TimestepBlock,
attn_block: Option<AttnBlock>,
}
#[derive(Debug)]
struct DownBlock {
layer_norm: Option<WLayerNorm>,
conv: Option<candle_nn::Conv2d>,
sub_blocks: Vec<SubBlock>,
}
#[derive(Debug)]
struct UpBlock {
sub_blocks: Vec<SubBlock>,
layer_norm: Option<WLayerNorm>,
conv: Option<candle_nn::ConvTranspose2d>,
}
#[derive(Debug)]
pub struct WDiffNeXt {
clip_mapper: candle_nn::Linear,
effnet_mappers: Vec<Option<candle_nn::Conv2d>>,
seq_norm: LayerNormNoWeights,
embedding_conv: candle_nn::Conv2d,
embedding_ln: WLayerNorm,
down_blocks: Vec<DownBlock>,
up_blocks: Vec<UpBlock>,
clf_ln: WLayerNorm,
clf_conv: candle_nn::Conv2d,
c_r: usize,
patch_size: usize,
}
impl WDiffNeXt {
#[allow(clippy::too_many_arguments)]
pub fn new(
c_in: usize,
c_out: usize,
c_r: usize,
c_cond: usize,
clip_embd: usize,
patch_size: usize,
use_flash_attn: bool,
vb: VarBuilder,
) -> Result<Self> {
const C_HIDDEN: [usize; 4] = [320, 640, 1280, 1280];
const BLOCKS: [usize; 4] = [4, 4, 14, 4];
const NHEAD: [usize; 4] = [1, 10, 20, 20];
const INJECT_EFFNET: [bool; 4] = [false, true, true, true];
const EFFNET_EMBD: usize = 16;
let clip_mapper = candle_nn::linear(clip_embd, c_cond, vb.pp("clip_mapper"))?;
let mut effnet_mappers = Vec::with_capacity(2 * INJECT_EFFNET.len());
let vb_e = vb.pp("effnet_mappers");
for (i, &inject) in INJECT_EFFNET.iter().enumerate() {
let c = if inject {
Some(candle_nn::conv2d(
EFFNET_EMBD,
c_cond,
1,
Default::default(),
vb_e.pp(i),
)?)
} else {
None
};
effnet_mappers.push(c)
}
for (i, &inject) in INJECT_EFFNET.iter().rev().enumerate() {
let c = if inject {
Some(candle_nn::conv2d(
EFFNET_EMBD,
c_cond,
1,
Default::default(),
vb_e.pp(i + INJECT_EFFNET.len()),
)?)
} else {
None
};
effnet_mappers.push(c)
}
let seq_norm = LayerNormNoWeights::new(c_cond)?;
let embedding_ln = WLayerNorm::new(C_HIDDEN[0])?;
let embedding_conv = candle_nn::conv2d(
c_in * patch_size * patch_size,
C_HIDDEN[0],
1,
Default::default(),
vb.pp("embedding.1"),
)?;
let mut down_blocks = Vec::with_capacity(C_HIDDEN.len());
for (i, &c_hidden) in C_HIDDEN.iter().enumerate() {
let vb = vb.pp("down_blocks").pp(i);
let (layer_norm, conv, start_layer_i) = if i > 0 {
let layer_norm = WLayerNorm::new(C_HIDDEN[i - 1])?;
let cfg = candle_nn::Conv2dConfig {
stride: 2,
..Default::default()
};
let conv = candle_nn::conv2d(C_HIDDEN[i - 1], c_hidden, 2, cfg, vb.pp("0.1"))?;
(Some(layer_norm), Some(conv), 1)
} else {
(None, None, 0)
};
let mut sub_blocks = Vec::with_capacity(BLOCKS[i]);
let mut layer_i = start_layer_i;
for _j in 0..BLOCKS[i] {
let c_skip = if INJECT_EFFNET[i] { c_cond } else { 0 };
let res_block = ResBlockStageB::new(c_hidden, c_skip, 3, vb.pp(layer_i))?;
layer_i += 1;
let ts_block = TimestepBlock::new(c_hidden, c_r, vb.pp(layer_i))?;
layer_i += 1;
let attn_block = if i == 0 {
None
} else {
let attn_block = AttnBlock::new(
c_hidden,
c_cond,
NHEAD[i],
true,
use_flash_attn,
vb.pp(layer_i),
)?;
layer_i += 1;
Some(attn_block)
};
let sub_block = SubBlock {
res_block,
ts_block,
attn_block,
};
sub_blocks.push(sub_block)
}
let down_block = DownBlock {
layer_norm,
conv,
sub_blocks,
};
down_blocks.push(down_block)
}
let mut up_blocks = Vec::with_capacity(C_HIDDEN.len());
for (i, &c_hidden) in C_HIDDEN.iter().enumerate().rev() {
let vb = vb.pp("up_blocks").pp(C_HIDDEN.len() - 1 - i);
let mut sub_blocks = Vec::with_capacity(BLOCKS[i]);
let mut layer_i = 0;
for j in 0..BLOCKS[i] {
let c_skip = if INJECT_EFFNET[i] { c_cond } else { 0 };
let c_skip_res = if i < BLOCKS.len() - 1 && j == 0 {
c_hidden + c_skip
} else {
c_skip
};
let res_block = ResBlockStageB::new(c_hidden, c_skip_res, 3, vb.pp(layer_i))?;
layer_i += 1;
let ts_block = TimestepBlock::new(c_hidden, c_r, vb.pp(layer_i))?;
layer_i += 1;
let attn_block = if i == 0 {
None
} else {
let attn_block = AttnBlock::new(
c_hidden,
c_cond,
NHEAD[i],
true,
use_flash_attn,
vb.pp(layer_i),
)?;
layer_i += 1;
Some(attn_block)
};
let sub_block = SubBlock {
res_block,
ts_block,
attn_block,
};
sub_blocks.push(sub_block)
}
let (layer_norm, conv) = if i > 0 {
let layer_norm = WLayerNorm::new(C_HIDDEN[i - 1])?;
let cfg = candle_nn::ConvTranspose2dConfig {
stride: 2,
..Default::default()
};
let conv = candle_nn::conv_transpose2d(
c_hidden,
C_HIDDEN[i - 1],
2,
cfg,
vb.pp(layer_i).pp(1),
)?;
(Some(layer_norm), Some(conv))
} else {
(None, None)
};
let up_block = UpBlock {
layer_norm,
conv,
sub_blocks,
};
up_blocks.push(up_block)
}
let clf_ln = WLayerNorm::new(C_HIDDEN[0])?;
let clf_conv = candle_nn::conv2d(
C_HIDDEN[0],
2 * c_out * patch_size * patch_size,
1,
Default::default(),
vb.pp("clf.1"),
)?;
Ok(Self {
clip_mapper,
effnet_mappers,
seq_norm,
embedding_conv,
embedding_ln,
down_blocks,
up_blocks,
clf_ln,
clf_conv,
c_r,
patch_size,
})
}
fn gen_r_embedding(&self, r: &Tensor) -> Result<Tensor> {
const MAX_POSITIONS: usize = 10000;
let r = (r * MAX_POSITIONS as f64)?;
let half_dim = self.c_r / 2;
let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64;
let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)?
* -emb)?
.exp()?;
let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?;
let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], 1)?;
let emb = if self.c_r % 2 == 1 {
emb.pad_with_zeros(D::Minus1, 0, 1)?
} else {
emb
};
emb.to_dtype(r.dtype())
}
fn gen_c_embeddings(&self, clip: &Tensor) -> Result<Tensor> {
clip.apply(&self.clip_mapper)?.apply(&self.seq_norm)
}
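    /// UNet-style pass: the pixel-unshuffled input runs through the down blocks while the
    /// per-level outputs are collected, the up blocks then consume those outputs together
    /// with the optional EfficientNet/CLIP conditioning, and the two output channels are
    /// combined as `(x_in - a) / b` with `b` derived from a sigmoid.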
pub fn forward(
&self,
xs: &Tensor,
r: &Tensor,
effnet: &Tensor,
clip: Option<&Tensor>,
) -> Result<Tensor> {
const EPS: f64 = 1e-3;
let r_embed = self.gen_r_embedding(r)?;
let clip = match clip {
None => None,
Some(clip) => Some(self.gen_c_embeddings(clip)?),
};
let x_in = xs;
let mut xs = xs
.apply(&|xs: &_| candle_nn::ops::pixel_unshuffle(xs, self.patch_size))?
.apply(&self.embedding_conv)?
.apply(&self.embedding_ln)?;
let mut level_outputs = Vec::new();
for (i, down_block) in self.down_blocks.iter().enumerate() {
if let Some(ln) = &down_block.layer_norm {
xs = xs.apply(ln)?
}
if let Some(conv) = &down_block.conv {
xs = xs.apply(conv)?
}
let skip = match &self.effnet_mappers[i] {
None => None,
Some(m) => {
let effnet = effnet.interpolate2d(xs.dim(D::Minus2)?, xs.dim(D::Minus1)?)?;
Some(m.forward(&effnet)?)
}
};
for block in down_block.sub_blocks.iter() {
xs = block.res_block.forward(&xs, skip.as_ref())?;
xs = block.ts_block.forward(&xs, &r_embed)?;
if let Some(attn_block) = &block.attn_block {
xs = attn_block.forward(&xs, clip.as_ref().unwrap())?;
}
}
level_outputs.push(xs.clone())
}
level_outputs.reverse();
let mut xs = level_outputs[0].clone();
for (i, up_block) in self.up_blocks.iter().enumerate() {
let effnet_c = match &self.effnet_mappers[self.down_blocks.len() + i] {
None => None,
Some(m) => {
let effnet = effnet.interpolate2d(xs.dim(D::Minus2)?, xs.dim(D::Minus1)?)?;
Some(m.forward(&effnet)?)
}
};
for (j, block) in up_block.sub_blocks.iter().enumerate() {
let skip = if j == 0 && i > 0 {
Some(&level_outputs[i])
} else {
None
};
let skip = match (skip, effnet_c.as_ref()) {
(Some(skip), Some(effnet_c)) => Some(Tensor::cat(&[skip, effnet_c], 1)?),
(None, Some(skip)) | (Some(skip), None) => Some(skip.clone()),
(None, None) => None,
};
xs = block.res_block.forward(&xs, skip.as_ref())?;
xs = block.ts_block.forward(&xs, &r_embed)?;
if let Some(attn_block) = &block.attn_block {
xs = attn_block.forward(&xs, clip.as_ref().unwrap())?;
}
}
if let Some(ln) = &up_block.layer_norm {
xs = xs.apply(ln)?
}
if let Some(conv) = &up_block.conv {
xs = xs.apply(conv)?
}
}
let ab = xs
.apply(&self.clf_ln)?
.apply(&self.clf_conv)?
.apply(&|xs: &_| candle_nn::ops::pixel_shuffle(xs, self.patch_size))?
.chunk(2, 1)?;
let b = ((candle_nn::ops::sigmoid(&ab[1])? * (1. - EPS * 2.))? + EPS)?;
(x_in - &ab[0])? / b
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/wuerstchen/mod.rs | candle-transformers/src/models/wuerstchen/mod.rs | //! Würstchen Efficient Diffusion Model
//!
//! Würstchen is an efficient diffusion model architecture for generating images using
//! a two-stage approach with a small decoder and prior network.
//!
//! - 💻 [GH Link](https://github.com/dome272/Wuerstchen)
//! - 🤗 [HF Link](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py)
//! - 📝 [Paper](https://openreview.net/pdf?id=gU58AyJlYz)
//!
//! ## Example
//!
//! <div align=center>
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/wuerstchen/assets/cat.jpg" alt="" width=320>
//! <p>"Anthropomorphic cat dressed as a fire fighter"</p>
//! </div>
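//!
//! ## Usage sketch
//!
//! The snippet below is a minimal, hedged sketch of constructing the VQGAN decoder and the
//! DDPM scheduler from this module; the weight path and the latent shape are illustrative
//! assumptions rather than values shipped with the crate (see the `wuerstchen` example for
//! the full text-to-image pipeline).
//!
//! ```no_run
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::wuerstchen::{ddpm, paella_vq};
//!
//! fn main() -> candle::Result<()> {
//!     let device = Device::Cpu;
//!     // Hypothetical weight file; real decoder weights come from the hub.
//!     let vb = unsafe {
//!         VarBuilder::from_mmaped_safetensors(&["vqgan.safetensors"], DType::F32, &device)?
//!     };
//!     let vqgan = paella_vq::PaellaVQ::new(vb)?;
//!     // The scheduler walks `t` from 1.0 down to 0.0 over the requested number of steps.
//!     let scheduler = ddpm::DDPMWScheduler::new(12, Default::default())?;
//!     println!("timesteps: {:?}", scheduler.timesteps());
//!     // Decode an assumed (1, 4, 64, 64) latent back into pixel space.
//!     let latents = Tensor::zeros((1, 4, 64, 64), DType::F32, &device)?;
//!     let image = vqgan.decode(&latents)?;
//!     println!("decoded image: {:?}", image.dims());
//!     Ok(())
//! }
//! ```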
pub mod attention_processor;
pub mod common;
pub mod ddpm;
pub mod diffnext;
pub mod paella_vq;
pub mod prior;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/wuerstchen/common.rs | candle-transformers/src/models/wuerstchen/common.rs | use candle::{DType, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
// https://github.com/huggingface/diffusers/blob/19edca82f1ff194c07317369a92b470dbae97f34/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py#L22
#[derive(Debug)]
pub struct WLayerNorm {
eps: f64,
}
impl WLayerNorm {
pub fn new(_size: usize) -> Result<Self> {
Ok(Self { eps: 1e-6 })
}
}
impl Module for WLayerNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.permute((0, 2, 3, 1))?;
let x_dtype = xs.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = xs.dim(D::Minus1)?;
let xs = xs.to_dtype(internal_dtype)?;
let mean_x = (xs.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let xs = xs.broadcast_sub(&mean_x)?;
let norm_x = (xs.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
xs.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?
.to_dtype(x_dtype)?
.permute((0, 3, 1, 2))
}
}
#[derive(Debug)]
pub struct LayerNormNoWeights {
eps: f64,
}
impl LayerNormNoWeights {
pub fn new(_size: usize) -> Result<Self> {
Ok(Self { eps: 1e-6 })
}
}
impl Module for LayerNormNoWeights {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let x_dtype = xs.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = xs.dim(D::Minus1)?;
let xs = xs.to_dtype(internal_dtype)?;
let mean_x = (xs.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let xs = xs.broadcast_sub(&mean_x)?;
let norm_x = (xs.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
xs.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?
.to_dtype(x_dtype)
}
}
#[derive(Debug)]
pub struct TimestepBlock {
mapper: candle_nn::Linear,
}
impl TimestepBlock {
pub fn new(c: usize, c_timestep: usize, vb: VarBuilder) -> Result<Self> {
let mapper = candle_nn::linear(c_timestep, c * 2, vb.pp("mapper"))?;
Ok(Self { mapper })
}
pub fn forward(&self, xs: &Tensor, t: &Tensor) -> Result<Tensor> {
let ab = self
.mapper
.forward(t)?
.unsqueeze(2)?
.unsqueeze(3)?
.chunk(2, 1)?;
xs.broadcast_mul(&(&ab[0] + 1.)?)?.broadcast_add(&ab[1])
}
}
#[derive(Debug)]
pub struct GlobalResponseNorm {
gamma: Tensor,
beta: Tensor,
}
impl GlobalResponseNorm {
pub fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let gamma = vb.get((1, 1, 1, dim), "gamma")?;
let beta = vb.get((1, 1, 1, dim), "beta")?;
Ok(Self { gamma, beta })
}
}
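// Global Response Normalization (ConvNeXt V2 style): each channel's spatial L2 norm is
// divided by the mean norm across channels, and the result rescales the input with the
// learned `gamma`/`beta` before a residual addition.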
impl Module for GlobalResponseNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let agg_norm = xs.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let stand_div_norm =
agg_norm.broadcast_div(&(agg_norm.mean_keepdim(D::Minus1)? + 1e-6)?)?;
xs.broadcast_mul(&stand_div_norm)?
.broadcast_mul(&self.gamma)?
.broadcast_add(&self.beta)?
+ xs
}
}
#[derive(Debug)]
pub struct ResBlock {
depthwise: candle_nn::Conv2d,
norm: WLayerNorm,
channelwise_lin1: candle_nn::Linear,
channelwise_grn: GlobalResponseNorm,
channelwise_lin2: candle_nn::Linear,
}
impl ResBlock {
pub fn new(c: usize, c_skip: usize, ksize: usize, vb: VarBuilder) -> Result<Self> {
let cfg = candle_nn::Conv2dConfig {
padding: ksize / 2,
groups: c,
..Default::default()
};
let depthwise = candle_nn::conv2d(c + c_skip, c, ksize, cfg, vb.pp("depthwise"))?;
let norm = WLayerNorm::new(c)?;
let channelwise_lin1 = candle_nn::linear(c, c * 4, vb.pp("channelwise.0"))?;
let channelwise_grn = GlobalResponseNorm::new(c * 4, vb.pp("channelwise.2"))?;
let channelwise_lin2 = candle_nn::linear(c * 4, c, vb.pp("channelwise.4"))?;
Ok(Self {
depthwise,
norm,
channelwise_lin1,
channelwise_grn,
channelwise_lin2,
})
}
pub fn forward(&self, xs: &Tensor, x_skip: Option<&Tensor>) -> Result<Tensor> {
let x_res = xs;
let xs = match x_skip {
None => xs.clone(),
Some(x_skip) => Tensor::cat(&[xs, x_skip], 1)?,
};
let xs = xs
.apply(&self.depthwise)?
.apply(&self.norm)?
.permute((0, 2, 3, 1))?;
let xs = xs
.apply(&self.channelwise_lin1)?
.gelu_erf()?
.apply(&self.channelwise_grn)?
.apply(&self.channelwise_lin2)?
.permute((0, 3, 1, 2))?;
xs + x_res
}
}
use super::attention_processor::Attention;
#[derive(Debug)]
pub struct AttnBlock {
self_attn: bool,
norm: WLayerNorm,
attention: Attention,
kv_mapper_lin: candle_nn::Linear,
}
impl AttnBlock {
pub fn new(
c: usize,
c_cond: usize,
nhead: usize,
self_attn: bool,
use_flash_attn: bool,
vb: VarBuilder,
) -> Result<Self> {
let norm = WLayerNorm::new(c)?;
let attention = Attention::new(c, nhead, c / nhead, use_flash_attn, vb.pp("attention"))?;
let kv_mapper_lin = candle_nn::linear(c_cond, c, vb.pp("kv_mapper.1"))?;
Ok(Self {
self_attn,
norm,
attention,
kv_mapper_lin,
})
}
pub fn forward(&self, xs: &Tensor, kv: &Tensor) -> Result<Tensor> {
let kv = candle_nn::ops::silu(kv)?.apply(&self.kv_mapper_lin)?;
let norm_xs = self.norm.forward(xs)?;
let kv = if self.self_attn {
let (b_size, channel, _, _) = xs.dims4()?;
let norm_xs = norm_xs.reshape((b_size, channel, ()))?.transpose(1, 2)?;
Tensor::cat(&[&norm_xs, &kv], 1)?.contiguous()?
} else {
kv
};
xs + self.attention.forward(&norm_xs, &kv)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/wuerstchen/paella_vq.rs | candle-transformers/src/models/wuerstchen/paella_vq.rs | use super::common::LayerNormNoWeights;
use candle::{Module, Result, Tensor};
use candle_nn::VarBuilder;
#[derive(Debug)]
pub struct MixingResidualBlock {
norm1: LayerNormNoWeights,
depthwise_conv: candle_nn::Conv2d,
norm2: LayerNormNoWeights,
channelwise_lin1: candle_nn::Linear,
channelwise_lin2: candle_nn::Linear,
gammas: Vec<f32>,
}
impl MixingResidualBlock {
pub fn new(inp: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> {
let norm1 = LayerNormNoWeights::new(inp)?;
let norm2 = LayerNormNoWeights::new(inp)?;
let cfg = candle_nn::Conv2dConfig {
groups: inp,
..Default::default()
};
let depthwise_conv = candle_nn::conv2d(inp, inp, 3, cfg, vb.pp("depthwise.1"))?;
let channelwise_lin1 = candle_nn::linear(inp, embed_dim, vb.pp("channelwise.0"))?;
let channelwise_lin2 = candle_nn::linear(embed_dim, inp, vb.pp("channelwise.2"))?;
let gammas = vb.get(6, "gammas")?.to_vec1::<f32>()?;
Ok(Self {
norm1,
depthwise_conv,
norm2,
channelwise_lin1,
channelwise_lin2,
gammas,
})
}
}
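// The six learned `gammas` modulate the block: (1 + g0, g1) scale/shift the input of the
// depthwise branch, g2 scales its residual contribution, (1 + g3, g4) scale/shift the input
// of the channelwise MLP, and g5 scales that branch's residual contribution.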
impl Module for MixingResidualBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mods = &self.gammas;
let x_temp = xs
.permute((0, 2, 3, 1))?
.apply(&self.norm1)?
.permute((0, 3, 1, 2))?
.affine(1. + mods[0] as f64, mods[1] as f64)?;
let x_temp = candle_nn::ops::replication_pad2d(&x_temp, 1)?;
let xs = (xs + x_temp.apply(&self.depthwise_conv)? * mods[2] as f64)?;
let x_temp = xs
.permute((0, 2, 3, 1))?
.apply(&self.norm2)?
.permute((0, 3, 1, 2))?
.affine(1. + mods[3] as f64, mods[4] as f64)?;
let x_temp = x_temp
.permute((0, 2, 3, 1))?
.contiguous()?
.apply(&self.channelwise_lin1)?
.gelu()?
.apply(&self.channelwise_lin2)?
.permute((0, 3, 1, 2))?;
xs + x_temp * mods[5] as f64
}
}
#[derive(Debug)]
pub struct PaellaVQ {
in_block_conv: candle_nn::Conv2d,
out_block_conv: candle_nn::Conv2d,
down_blocks: Vec<(Option<candle_nn::Conv2d>, MixingResidualBlock)>,
down_blocks_conv: candle_nn::Conv2d,
down_blocks_bn: candle_nn::BatchNorm,
up_blocks_conv: candle_nn::Conv2d,
up_blocks: Vec<(Vec<MixingResidualBlock>, Option<candle_nn::ConvTranspose2d>)>,
}
impl PaellaVQ {
pub fn new(vb: VarBuilder) -> Result<Self> {
const IN_CHANNELS: usize = 3;
const OUT_CHANNELS: usize = 3;
const LATENT_CHANNELS: usize = 4;
const EMBED_DIM: usize = 384;
const BOTTLENECK_BLOCKS: usize = 12;
const C_LEVELS: [usize; 2] = [EMBED_DIM / 2, EMBED_DIM];
let in_block_conv = candle_nn::conv2d(
IN_CHANNELS * 4,
C_LEVELS[0],
1,
Default::default(),
vb.pp("in_block.1"),
)?;
let out_block_conv = candle_nn::conv2d(
C_LEVELS[0],
OUT_CHANNELS * 4,
1,
Default::default(),
vb.pp("out_block.0"),
)?;
let mut down_blocks = Vec::new();
let vb_d = vb.pp("down_blocks");
let mut d_idx = 0;
for (i, &c_level) in C_LEVELS.iter().enumerate() {
let conv_block = if i > 0 {
let cfg = candle_nn::Conv2dConfig {
padding: 1,
stride: 2,
..Default::default()
};
let block = candle_nn::conv2d(C_LEVELS[i - 1], c_level, 4, cfg, vb_d.pp(d_idx))?;
d_idx += 1;
Some(block)
} else {
None
};
let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_d.pp(d_idx))?;
d_idx += 1;
down_blocks.push((conv_block, res_block))
}
let vb_d = vb_d.pp(d_idx);
let down_blocks_conv = candle_nn::conv2d_no_bias(
C_LEVELS[1],
LATENT_CHANNELS,
1,
Default::default(),
vb_d.pp(0),
)?;
let down_blocks_bn = candle_nn::batch_norm(LATENT_CHANNELS, 1e-5, vb_d.pp(1))?;
let mut up_blocks = Vec::new();
let vb_u = vb.pp("up_blocks");
let mut u_idx = 0;
let up_blocks_conv = candle_nn::conv2d(
LATENT_CHANNELS,
C_LEVELS[1],
1,
Default::default(),
vb_u.pp(u_idx).pp(0),
)?;
u_idx += 1;
for (i, &c_level) in C_LEVELS.iter().rev().enumerate() {
let mut res_blocks = Vec::new();
let n_bottleneck_blocks = if i == 0 { BOTTLENECK_BLOCKS } else { 1 };
for _j in 0..n_bottleneck_blocks {
let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_u.pp(u_idx))?;
u_idx += 1;
res_blocks.push(res_block)
}
let conv_block = if i < C_LEVELS.len() - 1 {
let cfg = candle_nn::ConvTranspose2dConfig {
padding: 1,
stride: 2,
..Default::default()
};
let block = candle_nn::conv_transpose2d(
c_level,
C_LEVELS[C_LEVELS.len() - i - 2],
4,
cfg,
vb_u.pp(u_idx),
)?;
u_idx += 1;
Some(block)
} else {
None
};
up_blocks.push((res_blocks, conv_block))
}
Ok(Self {
in_block_conv,
down_blocks,
down_blocks_conv,
down_blocks_bn,
up_blocks,
up_blocks_conv,
out_block_conv,
})
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = candle_nn::ops::pixel_unshuffle(xs, 2)?.apply(&self.in_block_conv)?;
for down_block in self.down_blocks.iter() {
if let Some(conv) = &down_block.0 {
xs = xs.apply(conv)?
}
xs = xs.apply(&down_block.1)?
}
xs.apply(&self.down_blocks_conv)?
.apply_t(&self.down_blocks_bn, false)
}
pub fn decode(&self, xs: &Tensor) -> Result<Tensor> {
// TODO: quantizer if we want to support `force_not_quantize=False`.
let mut xs = xs.apply(&self.up_blocks_conv)?;
for up_block in self.up_blocks.iter() {
for b in up_block.0.iter() {
xs = xs.apply(b)?;
}
if let Some(conv) = &up_block.1 {
xs = xs.apply(conv)?
}
}
xs.apply(&self.out_block_conv)?
.apply(&|xs: &_| candle_nn::ops::pixel_shuffle(xs, 2))
}
}
impl Module for PaellaVQ {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.decode(&self.encode(xs)?)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/wuerstchen/ddpm.rs | candle-transformers/src/models/wuerstchen/ddpm.rs | use candle::{Result, Tensor};
#[derive(Debug, Clone)]
pub struct DDPMWSchedulerConfig {
scaler: f64,
s: f64,
}
impl Default for DDPMWSchedulerConfig {
fn default() -> Self {
Self {
scaler: 1f64,
s: 0.008f64,
}
}
}
pub struct DDPMWScheduler {
init_alpha_cumprod: f64,
init_noise_sigma: f64,
timesteps: Vec<f64>,
pub config: DDPMWSchedulerConfig,
}
impl DDPMWScheduler {
pub fn new(inference_steps: usize, config: DDPMWSchedulerConfig) -> Result<Self> {
        // Matches the pi/2 scaling used in `alpha_cumprod` so that alpha_cumprod(0) ~= 1.
        let init_alpha_cumprod = (config.s / (1. + config.s) * std::f64::consts::PI * 0.5)
.cos()
.powi(2);
let timesteps = (0..=inference_steps)
.map(|i| 1. - i as f64 / inference_steps as f64)
.collect::<Vec<_>>();
Ok(Self {
init_alpha_cumprod,
init_noise_sigma: 1.0,
timesteps,
config,
})
}
pub fn timesteps(&self) -> &[f64] {
&self.timesteps
}
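    /// Cosine cumulative-alpha schedule: `t` is optionally reshaped by `scaler`, mapped
    /// through `cos^2((t + s) / (1 + s) * pi / 2)` relative to its value at `t = 0`, and
    /// clamped away from 0 and 1 for numerical stability.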
fn alpha_cumprod(&self, t: f64) -> f64 {
let scaler = self.config.scaler;
let s = self.config.s;
let t = if scaler > 1. {
1. - (1. - t).powf(scaler)
} else if scaler < 1. {
t.powf(scaler)
} else {
t
};
let alpha_cumprod = ((t + s) / (1. + s) * std::f64::consts::PI * 0.5)
.cos()
.powi(2)
/ self.init_alpha_cumprod;
alpha_cumprod.clamp(0.0001, 0.9999)
}
fn previous_timestep(&self, ts: f64) -> f64 {
let index = self
.timesteps
.iter()
.enumerate()
.map(|(idx, v)| (idx, (v - ts).abs()))
.min_by(|x, y| x.1.total_cmp(&y.1))
.unwrap()
.0;
self.timesteps[index + 1]
}
/// Ensures interchangeability with schedulers that need to scale the denoising model input
/// depending on the current timestep.
pub fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Tensor {
sample
}
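    /// One reverse-diffusion step: the posterior mean `mu` is recovered from the predicted
    /// noise, and (except at the final `t = 0` step) Gaussian noise scaled by the posterior
    /// standard deviation is added back.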
pub fn step(&self, model_output: &Tensor, ts: f64, sample: &Tensor) -> Result<Tensor> {
let prev_t = self.previous_timestep(ts);
let alpha_cumprod = self.alpha_cumprod(ts);
let alpha_cumprod_prev = self.alpha_cumprod(prev_t);
let alpha = alpha_cumprod / alpha_cumprod_prev;
let mu = (sample - model_output * ((1. - alpha) / (1. - alpha_cumprod).sqrt()))?;
let mu = (mu * (1. / alpha).sqrt())?;
let std_noise = mu.randn_like(0., 1.)?;
let std =
std_noise * ((1. - alpha) * (1. - alpha_cumprod_prev) / (1. - alpha_cumprod)).sqrt();
if prev_t == 0. {
Ok(mu)
} else {
mu + std
}
}
pub fn init_noise_sigma(&self) -> f64 {
self.init_noise_sigma
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/pixtral/llava.rs | candle-transformers/src/models/pixtral/llava.rs | use candle::{Module, Result, Tensor};
use candle_nn::{linear, Linear, VarBuilder};
use super::vision_model;
use crate::models::mistral;
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub projector_hidden_act: candle_nn::Activation,
pub text_config: mistral::Config,
pub vision_config: vision_model::Config,
pub image_token_index: usize,
pub image_seq_length: usize,
}
#[derive(Debug, Clone)]
pub struct MultiModalProjector {
linear_1: Linear,
act: candle_nn::Activation,
linear_2: Linear,
}
impl MultiModalProjector {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let (hidden_v, hidden_t) = (cfg.vision_config.hidden_size, cfg.text_config.hidden_size);
let linear_1 = linear(hidden_v, hidden_t, vb.pp("linear_1"))?;
let linear_2 = linear(hidden_t, hidden_t, vb.pp("linear_2"))?;
Ok(Self {
linear_1,
act: cfg.projector_hidden_act,
linear_2,
})
}
}
impl Module for MultiModalProjector {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.linear_1)?
.apply(&self.act)?
.apply(&self.linear_2)
}
}
#[derive(Debug, Clone)]
pub struct Model {
pub multi_modal_projector: MultiModalProjector,
pub language_model: mistral::Model,
pub vision_tower: vision_model::Model,
pub patch_size: usize,
pub dtype: candle::DType,
pub pos: usize,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let language_model = mistral::Model::new(&cfg.text_config, vb.pp("language_model"))?;
let vision_tower = vision_model::Model::new(
&cfg.vision_config,
vb.pp("vision_tower").to_dtype(candle::DType::F32),
)?;
let multi_modal_projector = MultiModalProjector::new(
cfg,
vb.pp("multi_modal_projector").to_dtype(candle::DType::F32),
)?;
Ok(Self {
multi_modal_projector,
language_model,
vision_tower,
patch_size: cfg.vision_config.patch_size,
dtype: vb.dtype(),
pos: 0,
})
}
pub fn clear_kv_cache(&mut self) {
self.language_model.clear_kv_cache();
self.pos = 0;
}
pub fn encode_image(&self, image: &Tensor) -> Result<Tensor> {
let image_embeds = self.vision_tower.forward(image)?;
self.multi_modal_projector.forward(&image_embeds)
}
pub fn lm_forward(&mut self, input_ids: &Tensor) -> Result<Tensor> {
let (_, seq_len) = input_ids.dims2()?;
let logits = self.language_model.forward(input_ids, self.pos)?;
self.pos += seq_len;
Ok(logits)
}
pub fn lm_forward_embeds(&mut self, xs: &Tensor) -> Result<Tensor> {
let (_, seq_len, _) = xs.dims3()?;
let logits = self.language_model.forward_embeds(xs, None, self.pos)?;
self.pos += seq_len;
Ok(logits)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/pixtral/vision_model.rs | candle-transformers/src/models/pixtral/vision_model.rs | use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{linear_b, rms_norm, Linear, RmsNorm, VarBuilder};
fn default_act() -> candle_nn::Activation {
candle_nn::Activation::Silu
}
fn default_hidden_size() -> usize {
1024
}
fn default_intermediate_size() -> usize {
4096
}
fn default_num_channels() -> usize {
3
}
fn default_num_hidden_layers() -> usize {
24
}
fn default_num_attention_heads() -> usize {
16
}
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
#[serde(default = "default_hidden_size")]
pub hidden_size: usize,
#[serde(default = "default_num_channels")]
pub num_channels: usize,
pub image_size: usize,
pub patch_size: usize,
pub rope_theta: f64,
#[serde(default = "default_intermediate_size")]
pub intermediate_size: usize,
#[serde(default = "default_num_hidden_layers")]
pub num_hidden_layers: usize,
pub head_dim: Option<usize>,
#[serde(default = "default_num_attention_heads")]
pub num_attention_heads: usize,
#[serde(default = "default_act")]
pub hidden_act: candle_nn::Activation,
}
impl Config {
pub fn pixtral_12b_2409() -> Self {
Self {
hidden_size: 1024,
num_channels: 3,
image_size: 1024,
patch_size: 16,
rope_theta: 10000.0,
intermediate_size: 4096,
num_hidden_layers: 24,
num_attention_heads: 16,
head_dim: None,
// Default
hidden_act: candle_nn::Activation::Silu,
}
}
fn head_dim(&self) -> usize {
self.head_dim
.unwrap_or(self.hidden_size / self.num_attention_heads)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
scale: f64,
num_heads: usize,
head_dim: usize,
}
impl Attention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let head_dim = cfg.head_dim();
let q_proj = linear_b(h, h, false, vb.pp("q_proj"))?;
let k_proj = linear_b(h, h, false, vb.pp("k_proj"))?;
let v_proj = linear_b(h, h, false, vb.pp("v_proj"))?;
let o_proj = linear_b(h, h, false, vb.pp("o_proj"))?;
let scale = (head_dim as f64).powf(-0.5);
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
scale,
num_heads,
head_dim,
})
}
fn forward(
&self,
xs: &Tensor,
emb: &RotaryEmbedding,
subsampled_positions: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let (b, patches, _) = xs.dims3()?;
let query_states = xs.apply(&self.q_proj)?;
let key_states = xs.apply(&self.k_proj)?;
let value_states = xs.apply(&self.v_proj)?;
let shape = (b, patches, self.num_heads, self.head_dim);
let query_states = query_states.reshape(shape)?.transpose(1, 2)?.contiguous()?;
let key_states = key_states.reshape(shape)?.transpose(1, 2)?.contiguous()?;
let value_states = value_states.reshape(shape)?.transpose(1, 2)?.contiguous()?;
let (query_states, key_states) =
emb.apply_rotary_emb_qkv(&query_states, &key_states, subsampled_positions)?;
let attn_weights = (query_states.matmul(&key_states.t()?)? * self.scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights
.matmul(&value_states)?
.transpose(1, 2)?
.reshape((b, patches, ()))?
.apply(&self.o_proj)
}
}
#[derive(Debug, Clone)]
struct Mlp {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
}
impl Mlp {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let (h, i) = (cfg.hidden_size, cfg.intermediate_size);
let gate_proj = linear_b(h, i, false, vb.pp("gate_proj"))?;
let up_proj = linear_b(h, i, false, vb.pp("up_proj"))?;
let down_proj = linear_b(i, h, false, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
(xs.apply(&self.gate_proj)?.apply(&self.act_fn)? * xs.apply(&self.up_proj))?
.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct AttentionLayer {
attention_norm: RmsNorm,
feed_forward: Mlp,
attention: Attention,
ffn_norm: RmsNorm,
}
impl AttentionLayer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention_norm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("attention_norm"))?;
let feed_forward = Mlp::new(cfg, vb.pp("feed_forward"))?;
let attention = Attention::new(cfg, vb.pp("attention"))?;
let ffn_norm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("ffn_norm"))?;
Ok(Self {
attention_norm,
feed_forward,
attention,
ffn_norm,
})
}
fn forward(
&self,
xs: &Tensor,
emb: &RotaryEmbedding,
subsampled_positions: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let residual = xs;
let xs = self.attention.forward(
&xs.apply(&self.attention_norm)?,
emb,
subsampled_positions,
attention_mask,
)?;
let xs = (residual + xs)?;
let residual = &xs;
let xs = xs.apply(&self.ffn_norm)?.apply(&self.feed_forward)?;
xs + residual
}
}
#[derive(Debug, Clone)]
struct Transformer {
layers: Vec<AttentionLayer>,
}
impl Transformer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb = vb.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = AttentionLayer::new(cfg, vb.pp(layer_idx))?;
layers.push(layer)
}
Ok(Self { layers })
}
fn forward(
&self,
xs: &Tensor,
emb: &RotaryEmbedding,
subsampled_positions: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs, emb, subsampled_positions, attention_mask)?
}
Ok(xs)
}
}
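// 2D rotary embedding over the patch grid: half of the head-dimension frequencies rotate
// with the patch row index and the other half with the column index, flattened into a
// single (rows * cols, head_dim / 2) table indexed by `position_ids_in_meshgrid`.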
#[derive(Debug, Clone)]
struct RotaryEmbedding {
cos: Tensor,
sin: Tensor,
}
impl RotaryEmbedding {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dtype = vb.dtype();
let dev = vb.device();
let dim = cfg.head_dim();
let rope_theta = cfg.rope_theta as f32;
let max_patches_per_side = cfg.image_size / cfg.patch_size;
let freqs: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32))
.collect();
let freqs_h = freqs.iter().step_by(2).copied().collect::<Vec<_>>();
let freqs_h = Tensor::new(freqs_h, dev)?;
let freqs_w = freqs.iter().skip(1).step_by(2).copied().collect::<Vec<_>>();
let freqs_w = Tensor::new(freqs_w, dev)?;
let h = Tensor::arange(0u32, max_patches_per_side as u32, dev)?.to_dtype(DType::F32)?;
let w = Tensor::arange(0u32, max_patches_per_side as u32, dev)?.to_dtype(DType::F32)?;
let freqs_h = h.unsqueeze(1)?.matmul(&freqs_h.unsqueeze(0)?)?;
let freqs_w = w.unsqueeze(1)?.matmul(&freqs_w.unsqueeze(0)?)?;
let inv_freq = Tensor::cat(
&[
freqs_h.unsqueeze(1)?.repeat((1, max_patches_per_side, 1))?,
freqs_w.unsqueeze(0)?.repeat((max_patches_per_side, 1, 1))?,
],
D::Minus1,
)?
.reshape(((), dim / 2))?;
let cos = inv_freq.cos()?.to_dtype(dtype)?;
let sin = inv_freq.sin()?.to_dtype(dtype)?;
Ok(Self { cos, sin })
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
subsampled_positions: Option<&Tensor>,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, _seq_len, _n_embd) = q.dims4()?;
let (cos, sin) = match subsampled_positions {
None => (&self.cos, &self.sin),
Some(pos) => (
&self.cos.index_select(pos, 0)?,
&self.sin.index_select(pos, 0)?,
),
};
let q_embed = candle_nn::rotary_emb::rope(q, cos, sin)?;
let k_embed = candle_nn::rotary_emb::rope(k, cos, sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
pub struct Model {
patch_conv: candle_nn::Conv2d,
ln_pre: RmsNorm,
transformer: Transformer,
patch_positional_embedding: RotaryEmbedding,
max_image_width: u32,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let conv2d_cfg = candle_nn::Conv2dConfig {
stride: cfg.patch_size,
..Default::default()
};
let patch_conv = candle_nn::conv2d_no_bias(
cfg.num_channels,
cfg.hidden_size,
cfg.patch_size,
conv2d_cfg,
vb.pp("patch_conv"),
)?;
let ln_pre = candle_nn::rms_norm(cfg.hidden_size, 1e-5, vb.pp("ln_pre"))?;
let transformer = Transformer::new(cfg, vb.pp("transformer"))?;
let patch_positional_embedding =
RotaryEmbedding::new(cfg, vb.pp("patch_positional_embedding"))?;
let max_image_width = (cfg.image_size / cfg.patch_size) as u32;
Ok(Self {
patch_conv,
ln_pre,
transformer,
patch_positional_embedding,
max_image_width,
})
}
pub fn position_ids_in_meshgrid(
&self,
num_patches_h: usize,
num_patches_w: usize,
device: &Device,
) -> Result<Tensor> {
let idx = Tensor::arange(0, num_patches_h as u32, device)?;
let idy = Tensor::arange(0, num_patches_w as u32, device)?;
let mesh = Tensor::meshgrid(&[idx, idy], false)?;
let ids = (&mesh[0] * (self.max_image_width as f64) + &mesh[1])?.flatten_all()?;
Ok(ids)
}
}
impl Module for Model {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let patch_embeds = xs.apply(&self.patch_conv)?;
let subsampled_positions = Some(self.position_ids_in_meshgrid(
patch_embeds.dim(2)?,
patch_embeds.dim(3)?,
patch_embeds.device(),
)?);
let patch_embeds = patch_embeds.flatten_from(2)?.t()?.apply(&self.ln_pre)?;
self.transformer.forward(
&patch_embeds,
&self.patch_positional_embedding,
subsampled_positions.as_ref(),
None,
)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/pixtral/mod.rs | candle-transformers/src/models/pixtral/mod.rs | //! Pixtral Language-Image Pre-Training
//!
//! Pixtral is an architecture trained for multimodal learning
//! using images paired with text descriptions.
//!
//! - 💻 Transformers Python [reference implementation](https://github.com/huggingface/transformers/tree/main/src/transformers/models/pixtral)
//! - 📝 [Blog Post](https://mistral.ai/news/pixtral-12b/)
//! - 🤗 [HF Model Card](https://huggingface.co/mistralai/Pixtral-12B-2409)
//! - 🤗 [HF Community Model Card](https://huggingface.co/mistral-community/pixtral-12b)
//!
//! # Example
//!
//! <div align=center>
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/flux/assets/flux-robot.jpg" alt="" width=320>
//! </div>
//!
//! ```bash
//! cargo run --profile=release-with-debug \
//! --features cuda \
//! --example pixtral -- \
//! --image candle-examples/examples/flux/assets/flux-robot.jpg
//! ```
//!
//! ```txt
//! Describe the image.
//!
//! The image depicts a charming, rustic robot standing on a sandy beach at sunset.
//! The robot has a vintage, steampunk aesthetic with visible gears and mechanical
//! parts. It is holding a small lantern in one hand, which emits a warm glow, and
//! its other arm is extended forward as if reaching out or guiding the way. The
//! robot's body is adorned with the word "RUST" in bright orange letters, adding to
//! its rustic theme.
//!
//! The background features a dramatic sky filled with clouds, illuminated by the
//! setting sun, casting a golden hue over the scene. Gentle waves lap against the
//! shore, creating a serene and picturesque atmosphere. The overall mood of the
//! image is whimsical and nostalgic, evoking a sense of adventure and tranquility.
//! ```
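//!
//! # Programmatic usage (sketch)
//!
//! A minimal, hedged sketch of running just the vision tower from Rust; the weight path is
//! an assumption for illustration and the zero-filled image stands in for a properly
//! resized and normalized input.
//!
//! ```no_run
//! use candle::{DType, Device, Module, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::pixtral::vision_model;
//!
//! fn main() -> candle::Result<()> {
//!     let device = Device::Cpu;
//!     let cfg = vision_model::Config::pixtral_12b_2409();
//!     // Hypothetical weight file; real weights come from the hub snapshot.
//!     let vb = unsafe {
//!         VarBuilder::from_mmaped_safetensors(&["vision_tower.safetensors"], DType::F32, &device)?
//!     };
//!     let vision_tower = vision_model::Model::new(&cfg, vb)?;
//!     // Dummy 1024x1024 RGB image: a 64x64 grid of size-16 patches after the patch conv.
//!     let image = Tensor::zeros((1, 3, 1024, 1024), DType::F32, &device)?;
//!     let patch_embeds = vision_tower.forward(&image)?;
//!     println!("patch embeddings: {:?}", patch_embeds.dims());
//!     Ok(())
//! }
//! ```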
pub mod llava;
pub mod vision_model;
pub use llava::{Config, Model};
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/clip/text_model.rs | candle-transformers/src/models/clip/text_model.rs | //! Contrastive Language-Image Pre-Training
//!
//! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - [GH](https://github.com/openai/CLIP)
//! - [Code](https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip)
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn as nn;
use candle_nn::Module;
use super::EncoderConfig;
#[derive(Debug, Clone, Copy)]
pub enum Activation {
QuickGelu,
}
impl Module for Activation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Activation::QuickGelu => xs * nn::ops::sigmoid(&(xs * 1.702f64)?)?,
}
}
}
#[derive(Debug, Clone)]
pub struct ClipTextConfig {
pub vocab_size: usize,
pub embed_dim: usize,
pub activation: Activation,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub pad_with: Option<String>,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
#[allow(dead_code)]
pub projection_dim: usize,
}
impl ClipTextConfig {
// The config details can be found in the "text_config" section of this json file:
// https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json
pub fn vit_base_patch32() -> Self {
Self {
vocab_size: 49408,
embed_dim: 512,
intermediate_size: 2048,
max_position_embeddings: 77,
pad_with: None,
num_hidden_layers: 12,
num_attention_heads: 8,
projection_dim: 512,
activation: Activation::QuickGelu,
}
}
}
// ClipTextEmbeddings mostly based on the existing implementation in the stable diffusion model.
// TODO rewrite to be more similar to https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L142
#[derive(Clone, Debug)]
struct ClipTextEmbeddings {
token_embedding: candle_nn::Embedding,
position_embedding: candle_nn::Embedding,
position_ids: Tensor,
}
impl ClipTextEmbeddings {
fn new(vs: candle_nn::VarBuilder, c: &ClipTextConfig) -> Result<Self> {
let token_embedding =
candle_nn::embedding(c.vocab_size, c.embed_dim, vs.pp("token_embedding"))?;
let position_embedding: nn::Embedding = candle_nn::embedding(
c.max_position_embeddings,
c.embed_dim,
vs.pp("position_embedding"),
)?;
let position_ids =
Tensor::arange(0u32, c.max_position_embeddings as u32, vs.device())?.unsqueeze(0)?;
Ok(Self {
token_embedding,
position_embedding,
position_ids,
})
}
}
impl Module for ClipTextEmbeddings {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let seq_length = input_ids.dim(D::Minus1)?;
let inputs_embeds = self.token_embedding.forward(input_ids)?;
let position_ids = self.position_ids.narrow(1, 0, seq_length)?;
let position_embedding = self.position_embedding.forward(&position_ids)?;
inputs_embeds.broadcast_add(&position_embedding)
}
}
#[derive(Clone, Debug)]
struct ClipAttention {
k_proj: candle_nn::Linear,
v_proj: candle_nn::Linear,
q_proj: candle_nn::Linear,
out_proj: candle_nn::Linear,
head_dim: usize,
scale: f64,
num_attention_heads: usize,
}
impl ClipAttention {
fn new(vs: candle_nn::VarBuilder, c: &EncoderConfig) -> Result<Self> {
let embed_dim = c.embed_dim();
let num_attention_heads = c.num_attention_heads();
let k_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("k_proj"))?;
let v_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("v_proj"))?;
let q_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("q_proj"))?;
let out_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("out_proj"))?;
let head_dim = embed_dim / num_attention_heads;
let scale = (head_dim as f64).powf(-0.5);
Ok(ClipAttention {
k_proj,
v_proj,
q_proj,
out_proj,
head_dim,
scale,
num_attention_heads,
})
}
fn shape(&self, xs: &Tensor, seq_len: usize, bsz: usize) -> Result<Tensor> {
xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()
}
fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> {
let in_dtype = xs.dtype();
let (bsz, seq_len, embed_dim) = xs.dims3()?;
let query_states = (self.q_proj.forward(xs)? * self.scale)?;
let proj_shape = (bsz * self.num_attention_heads, seq_len, self.head_dim);
let query_states = self
.shape(&query_states, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let key_states = self
.shape(&self.k_proj.forward(xs)?, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let value_states = self
.shape(&self.v_proj.forward(xs)?, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?;
let src_len = key_states.dim(1)?;
let attn_weights = if let Some(causal_attention_mask) = causal_attention_mask {
attn_weights
.reshape((bsz, self.num_attention_heads, seq_len, src_len))?
.broadcast_add(causal_attention_mask)?
.reshape((bsz * self.num_attention_heads, seq_len, src_len))?
} else {
attn_weights
};
let attn_weights = candle_nn::ops::softmax(&attn_weights, D::Minus1)?;
let attn_output = attn_weights.matmul(&value_states)?.to_dtype(in_dtype)?;
let attn_output = attn_output
.reshape((bsz, self.num_attention_heads, seq_len, self.head_dim))?
.transpose(1, 2)?
.reshape((bsz, seq_len, embed_dim))?;
self.out_proj.forward(&attn_output)
}
}
#[derive(Clone, Debug)]
struct ClipMlp {
fc1: candle_nn::Linear,
fc2: candle_nn::Linear,
activation: Activation,
}
impl ClipMlp {
fn new(vs: candle_nn::VarBuilder, c: &EncoderConfig) -> Result<Self> {
let fc1 = candle_nn::linear(c.embed_dim(), c.intermediate_size(), vs.pp("fc1"))?;
let fc2 = candle_nn::linear(c.intermediate_size(), c.embed_dim(), vs.pp("fc2"))?;
Ok(ClipMlp {
fc1,
fc2,
activation: c.activation(),
})
}
}
impl ClipMlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.fc1.forward(xs)?;
self.fc2.forward(&self.activation.forward(&xs)?)
}
}
#[derive(Clone, Debug)]
struct ClipEncoderLayer {
self_attn: ClipAttention,
layer_norm1: candle_nn::LayerNorm,
mlp: ClipMlp,
layer_norm2: candle_nn::LayerNorm,
}
impl ClipEncoderLayer {
fn new(vs: candle_nn::VarBuilder, c: &EncoderConfig) -> Result<Self> {
let self_attn = ClipAttention::new(vs.pp("self_attn"), c)?;
let layer_norm1 = candle_nn::layer_norm(c.embed_dim(), 1e-5, vs.pp("layer_norm1"))?;
let mlp = ClipMlp::new(vs.pp("mlp"), c)?;
let layer_norm2 = candle_nn::layer_norm(c.embed_dim(), 1e-5, vs.pp("layer_norm2"))?;
Ok(ClipEncoderLayer {
self_attn,
layer_norm1,
mlp,
layer_norm2,
})
}
fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> {
let residual = xs;
let xs = self.layer_norm1.forward(xs)?;
let xs = self.self_attn.forward(&xs, causal_attention_mask)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self.layer_norm2.forward(&xs)?;
let xs = self.mlp.forward(&xs)?;
xs + residual
}
}
#[derive(Clone, Debug)]
pub struct ClipEncoder {
layers: Vec<ClipEncoderLayer>,
}
impl ClipEncoder {
pub fn new(vs: candle_nn::VarBuilder, c: &EncoderConfig) -> Result<Self> {
let vs = vs.pp("layers");
let mut layers: Vec<ClipEncoderLayer> = Vec::new();
for index in 0..c.num_hidden_layers() {
let layer = ClipEncoderLayer::new(vs.pp(index.to_string()), c)?;
layers.push(layer)
}
Ok(ClipEncoder { layers })
}
pub fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs, causal_attention_mask)?;
}
Ok(xs)
}
// required by LLaVA
pub fn output_hidden_states(
&self,
xs: &Tensor,
causal_attention_mask: Option<&Tensor>,
) -> Result<Vec<Tensor>> {
let mut xs = xs.clone();
let mut hidden_states = Vec::new();
for layer in self.layers.iter() {
xs = layer.forward(&xs, causal_attention_mask)?;
hidden_states.push(xs.clone());
}
Ok(hidden_states)
}
}
/// A CLIP transformer-based model.
#[derive(Clone, Debug)]
pub struct ClipTextTransformer {
embeddings: ClipTextEmbeddings,
encoder: ClipEncoder,
final_layer_norm: candle_nn::LayerNorm,
}
impl ClipTextTransformer {
pub fn new(vs: candle_nn::VarBuilder, c: &ClipTextConfig) -> Result<Self> {
let embeddings = ClipTextEmbeddings::new(vs.pp("embeddings"), c)?;
let encoder = ClipEncoder::new(vs.pp("encoder"), &EncoderConfig::Text(c.clone()))?;
let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("final_layer_norm"))?;
Ok(ClipTextTransformer {
embeddings,
encoder,
final_layer_norm,
})
}
// TODO: rewrite to newer version
fn build_causal_attention_mask(
bsz: usize,
seq_len: usize,
mask_after: usize,
device: &Device,
) -> Result<Tensor> {
let mask: Vec<_> = (0..seq_len)
.flat_map(|i| {
(0..seq_len).map(move |j| {
if j > i || j > mask_after {
f32::MIN
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (seq_len, seq_len), device)?;
mask.broadcast_as((bsz, 1, seq_len, seq_len))
}
pub fn forward_with_mask(&self, input_ids: &Tensor, mask_after: usize) -> Result<Tensor> {
let (bsz, seq_len) = input_ids.dims2()?;
let input_ids = self.embeddings.forward(input_ids)?;
let causal_attention_mask =
Self::build_causal_attention_mask(bsz, seq_len, mask_after, input_ids.device())?;
let input_ids = self
.encoder
.forward(&input_ids, Some(&causal_attention_mask))?;
self.final_layer_norm.forward(&input_ids)
}
}
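// The pooled text embedding is taken at the position of the largest token id in each
// sequence, i.e. the end-of-text token in the CLIP vocabulary.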
impl Module for ClipTextTransformer {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let output = self.forward_with_mask(input_ids, usize::MAX)?;
let sequence_max_indices = input_ids.argmax(D::Minus1)?.to_dtype(DType::I64)?;
let mut indices = Vec::new();
for (batch_idx, &seq_idx) in sequence_max_indices.to_vec1::<i64>()?.iter().enumerate() {
let index = output.i((batch_idx, seq_idx as usize))?.unsqueeze(0)?;
indices.push(index);
}
Tensor::cat(&indices, 0)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/clip/vision_model.rs | candle-transformers/src/models/clip/vision_model.rs | //! Contrastive Language-Image Pre-Training
//!
//! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - [GH](https://github.com/openai/CLIP)
//! - [Code](https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip)
use candle::{Context, IndexOp, Result, Shape, Tensor, D};
use candle_nn as nn;
use candle_nn::Module;
use nn::Conv2dConfig;
use super::{
text_model::{Activation, ClipEncoder},
EncoderConfig,
};
#[derive(Debug, Clone)]
pub struct ClipVisionConfig {
pub embed_dim: usize,
pub activation: Activation,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
#[allow(dead_code)]
pub projection_dim: usize,
pub num_channels: usize,
pub image_size: usize,
pub patch_size: usize,
}
impl ClipVisionConfig {
// The config details can be found in the "vision_config" section of this json file:
// https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json
pub fn vit_base_patch32() -> Self {
Self {
embed_dim: 768,
activation: Activation::QuickGelu,
intermediate_size: 3072,
num_hidden_layers: 12,
num_attention_heads: 12,
projection_dim: 512,
num_channels: 3,
image_size: 224,
patch_size: 32,
}
}
pub fn clip_vit_large_patch14_336() -> Self {
Self {
embed_dim: 1024,
activation: Activation::QuickGelu,
intermediate_size: 4096,
num_hidden_layers: 24,
num_attention_heads: 16,
projection_dim: 768,
num_channels: 3,
image_size: 336,
patch_size: 14,
}
}
}
// https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L112
#[derive(Clone, Debug)]
struct ClipVisionEmbeddings {
patch_embedding: candle_nn::Conv2d,
position_ids: Tensor,
class_embedding: Tensor,
position_embedding: candle_nn::Embedding,
}
impl ClipVisionEmbeddings {
fn new(vs: candle_nn::VarBuilder, c: &ClipVisionConfig) -> Result<Self> {
// originally nn.Parameter
let class_embedding = if vs.contains_tensor("class_embedding") {
vs.get(c.embed_dim, "class_embedding")?
} else {
Tensor::randn(0f32, 1f32, c.embed_dim, vs.device())?
};
let num_patches = (c.image_size / c.patch_size).pow(2);
let num_positions = num_patches + 1;
let position_ids = Tensor::arange(0, num_positions as i64, vs.device())?;
let conv2dconfig = Conv2dConfig {
stride: c.patch_size,
..Default::default()
};
let position_embedding =
candle_nn::embedding(num_positions, c.embed_dim, vs.pp("position_embedding"))?;
let patch_embedding = candle_nn::conv2d_no_bias(
c.num_channels,
c.embed_dim,
c.patch_size,
conv2dconfig,
vs.pp("patch_embedding"),
)?;
Ok(Self {
patch_embedding,
position_ids,
class_embedding,
position_embedding,
})
}
}
impl Module for ClipVisionEmbeddings {
fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> {
let batch_size = pixel_values.shape().dims();
let patch_embeds = self
.patch_embedding
.forward(pixel_values)?
.flatten_from(2)?
.transpose(1, 2)?;
let shape = Shape::from((batch_size[0], 1, self.class_embedding.dim(D::Minus1)?));
let class_embeds = self.class_embedding.expand(shape)?;
let embeddings = Tensor::cat(&[class_embeds, patch_embeds], 1)?;
let position_embedding = self.position_embedding.forward(&self.position_ids)?;
embeddings.broadcast_add(&position_embedding)
}
}
// https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L743
#[derive(Clone, Debug)]
pub struct ClipVisionTransformer {
embeddings: ClipVisionEmbeddings,
encoder: ClipEncoder,
pre_layer_norm: candle_nn::LayerNorm,
final_layer_norm: candle_nn::LayerNorm,
}
impl ClipVisionTransformer {
pub fn new(vs: candle_nn::VarBuilder, c: &ClipVisionConfig) -> Result<Self> {
let embeddings = ClipVisionEmbeddings::new(vs.pp("embeddings"), c)?;
let pre_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("pre_layrnorm"))?;
let encoder = ClipEncoder::new(vs.pp("encoder"), &EncoderConfig::Vision(c.clone()))?;
let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("post_layernorm"))?;
Ok(Self {
embeddings,
encoder,
final_layer_norm,
pre_layer_norm,
})
}
// required by LLaVA
pub fn output_hidden_states(&self, pixel_values: &Tensor) -> Result<Vec<Tensor>> {
let hidden_states = pixel_values
.apply(&self.embeddings)?
.apply(&self.pre_layer_norm)?;
let mut result = self.encoder.output_hidden_states(&hidden_states, None)?;
let encoder_outputs = result.last().context("no last")?;
let pooled_output = encoder_outputs.i((.., 0, ..))?;
result.push(self.final_layer_norm.forward(&pooled_output)?.clone());
Ok(result)
}
}
impl Module for ClipVisionTransformer {
fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> {
let hidden_states = pixel_values
.apply(&self.embeddings)?
.apply(&self.pre_layer_norm)?;
let encoder_outputs = self.encoder.forward(&hidden_states, None)?;
// https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L787
// pooled_output = encoder_outputs[:, 0, :]
let pooled_output = encoder_outputs.i((.., 0, ..))?;
self.final_layer_norm.forward(&pooled_output)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/clip/mod.rs | candle-transformers/src/models/clip/mod.rs | //! Contrastive Language-Image Pre-Training
//!
//! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - 💻 [GH Link](https://github.com/openai/CLIP)
//! - 💻 Transformers Python [reference implementation](https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip)
//! - 🤗 [HF Model](https://huggingface.co/openai/clip-vit-large-patch14-336)
//!
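//! ## Usage sketch
//!
//! A minimal, hedged sketch of scoring one image against two token sequences; the weight
//! path and the zero-filled tensors are illustrative assumptions (real inputs come from the
//! CLIP tokenizer and image preprocessing, as in the `clip` example).
//!
//! ```no_run
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::clip::{ClipConfig, ClipModel};
//!
//! fn main() -> candle::Result<()> {
//!     let device = Device::Cpu;
//!     let cfg = ClipConfig::vit_base_patch32();
//!     // Hypothetical weight file; real weights come from the hub.
//!     let vb = unsafe {
//!         VarBuilder::from_mmaped_safetensors(&["clip.safetensors"], DType::F32, &device)?
//!     };
//!     let model = ClipModel::new(vb, &cfg)?;
//!     // One 224x224 RGB image and two sequences of 77 token ids.
//!     let pixel_values = Tensor::zeros((1, 3, 224, 224), DType::F32, &device)?;
//!     let input_ids = Tensor::zeros((2, 77), DType::U32, &device)?;
//!     let (logits_per_text, logits_per_image) = model.forward(&pixel_values, &input_ids)?;
//!     println!("{:?} {:?}", logits_per_text.dims(), logits_per_image.dims());
//!     Ok(())
//! }
//! ```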
use self::{
text_model::{Activation, ClipTextTransformer},
vision_model::ClipVisionTransformer,
};
use candle::{Result, Tensor, D};
pub mod text_model;
pub mod vision_model;
#[derive(Clone, Debug)]
pub struct ClipModel {
text_model: ClipTextTransformer,
vision_model: ClipVisionTransformer,
visual_projection: candle_nn::Linear,
text_projection: candle_nn::Linear,
logit_scale: Tensor,
}
#[derive(Clone, Debug)]
pub enum EncoderConfig {
Text(text_model::ClipTextConfig),
Vision(vision_model::ClipVisionConfig),
}
impl EncoderConfig {
pub fn embed_dim(&self) -> usize {
match self {
Self::Text(c) => c.embed_dim,
Self::Vision(c) => c.embed_dim,
}
}
pub fn num_attention_heads(&self) -> usize {
match self {
Self::Text(c) => c.num_attention_heads,
Self::Vision(c) => c.num_attention_heads,
}
}
pub fn intermediate_size(&self) -> usize {
match self {
Self::Text(c) => c.intermediate_size,
Self::Vision(c) => c.intermediate_size,
}
}
pub fn num_hidden_layers(&self) -> usize {
match self {
Self::Text(c) => c.num_hidden_layers,
Self::Vision(c) => c.num_hidden_layers,
}
}
pub fn activation(&self) -> Activation {
match self {
Self::Text(_c) => Activation::QuickGelu,
Self::Vision(c) => c.activation,
}
}
}
#[derive(Clone, Debug)]
pub struct ClipConfig {
pub text_config: text_model::ClipTextConfig,
pub vision_config: vision_model::ClipVisionConfig,
pub logit_scale_init_value: f32,
pub image_size: usize,
}
impl ClipConfig {
    // base image size is 224, model size is about 600MB
pub fn vit_base_patch32() -> Self {
let text_config = text_model::ClipTextConfig::vit_base_patch32();
let vision_config = vision_model::ClipVisionConfig::vit_base_patch32();
Self {
text_config,
vision_config,
logit_scale_init_value: 2.6592,
image_size: 224,
}
}
}
impl ClipModel {
pub fn new(vs: candle_nn::VarBuilder, c: &ClipConfig) -> Result<Self> {
let text_model = ClipTextTransformer::new(vs.pp("text_model"), &c.text_config)?;
let vision_model = ClipVisionTransformer::new(vs.pp("vision_model"), &c.vision_config)?;
let visual_projection = candle_nn::linear_no_bias(
c.vision_config.embed_dim,
c.vision_config.projection_dim,
vs.pp("visual_projection"),
)?;
let text_projection = candle_nn::linear_no_bias(
c.text_config.embed_dim,
c.text_config.projection_dim,
vs.pp("text_projection"),
)?;
// originally nn.Parameter
let logit_scale = if vs.contains_tensor("logit_scale") {
vs.get(&[], "logit_scale")?
} else {
Tensor::new(&[c.logit_scale_init_value], vs.device())?
};
Ok(Self {
text_model,
vision_model,
visual_projection,
text_projection,
logit_scale,
})
}
pub fn get_text_features(&self, input_ids: &Tensor) -> Result<Tensor> {
input_ids
.apply(&self.text_model)?
.apply(&self.text_projection)
}
pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> {
pixel_values
.apply(&self.vision_model)?
.apply(&self.visual_projection)
}
pub fn forward(&self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<(Tensor, Tensor)> {
let image_features = self.get_image_features(pixel_values)?;
let text_features = self.get_text_features(input_ids)?;
let image_features_normalized = div_l2_norm(&image_features)?;
let text_features_normalized = div_l2_norm(&text_features)?;
let logits_per_text = text_features_normalized.matmul(&image_features_normalized.t()?)?;
let logit_scale = self.logit_scale.exp()?;
let logits_per_text = logits_per_text.broadcast_mul(&logit_scale)?;
let logits_per_image = logits_per_text.t()?;
Ok((logits_per_text, logits_per_image))
}
}
pub fn div_l2_norm(v: &Tensor) -> Result<Tensor> {
let l2_norm = v.sqr()?.sum_keepdim(D::Minus1)?.sqrt()?;
v.broadcast_div(&l2_norm)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/segment_anything/mask_decoder.rs | candle-transformers/src/models/segment_anything/mask_decoder.rs | use candle::{IndexOp, Result, Tensor};
use candle_nn::{Module, VarBuilder};
use super::transformer::TwoWayTransformer;
#[derive(Debug)]
struct MlpMaskDecoder {
layers: Vec<super::Linear>,
sigmoid_output: bool,
span: tracing::Span,
}
impl MlpMaskDecoder {
fn new(
input_dim: usize,
hidden_dim: usize,
output_dim: usize,
num_layers: usize,
sigmoid_output: bool,
vb: VarBuilder,
) -> Result<Self> {
let mut layers = Vec::with_capacity(num_layers);
let vb = vb.pp("layers");
for i in 0..num_layers {
let in_dim = if i == 0 { input_dim } else { hidden_dim };
let out_dim = if i + 1 == num_layers {
output_dim
} else {
hidden_dim
};
let layer = super::linear(vb.pp(i), in_dim, out_dim, true)?;
layers.push(layer)
}
let span = tracing::span!(tracing::Level::TRACE, "mlp-mask-decoder");
Ok(Self {
layers,
sigmoid_output,
span,
})
}
}
impl Module for MlpMaskDecoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for (i, layer) in self.layers.iter().enumerate() {
xs = layer.forward(&xs)?;
if i + 1 < self.layers.len() {
xs = xs.relu()?
}
}
if self.sigmoid_output {
candle_nn::ops::sigmoid(&xs)
} else {
Ok(xs)
}
}
}
#[derive(Debug)]
pub struct MaskDecoder {
iou_token: candle_nn::Embedding,
mask_tokens: candle_nn::Embedding,
iou_prediction_head: MlpMaskDecoder,
output_upscaling_conv1: candle_nn::ConvTranspose2d,
output_upscaling_ln: super::LayerNorm2d,
output_upscaling_conv2: candle_nn::ConvTranspose2d,
num_mask_tokens: usize,
output_hypernetworks_mlps: Vec<MlpMaskDecoder>,
transformer: TwoWayTransformer,
span: tracing::Span,
}
impl MaskDecoder {
pub fn new(
transformer_dim: usize,
num_multimask_outputs: usize,
iou_head_depth: usize,
iou_head_hidden_dim: usize,
vb: VarBuilder,
) -> Result<Self> {
let num_mask_tokens = num_multimask_outputs + 1;
let iou_prediction_head = MlpMaskDecoder::new(
transformer_dim,
iou_head_hidden_dim,
num_mask_tokens,
iou_head_depth,
false,
vb.pp("iou_prediction_head"),
)?;
let iou_token = candle_nn::embedding(1, transformer_dim, vb.pp("iou_token"))?;
let mask_tokens =
candle_nn::embedding(num_mask_tokens, transformer_dim, vb.pp("mask_tokens"))?;
let cfg = candle_nn::ConvTranspose2dConfig {
stride: 2,
..Default::default()
};
let output_upscaling_conv1 = candle_nn::conv_transpose2d(
transformer_dim,
transformer_dim / 4,
2,
cfg,
vb.pp("output_upscaling.0"),
)?;
let output_upscaling_ln =
super::LayerNorm2d::new(transformer_dim / 4, 1e-6, vb.pp("output_upscaling.1"))?;
let output_upscaling_conv2 = candle_nn::conv_transpose2d(
transformer_dim / 4,
transformer_dim / 8,
2,
cfg,
vb.pp("output_upscaling.3"),
)?;
let mut output_hypernetworks_mlps = Vec::with_capacity(num_mask_tokens);
let vb_o = vb.pp("output_hypernetworks_mlps");
for i in 0..num_mask_tokens {
let mlp = MlpMaskDecoder::new(
transformer_dim,
transformer_dim,
transformer_dim / 8,
3,
false,
vb_o.pp(i),
)?;
output_hypernetworks_mlps.push(mlp)
}
let transformer = TwoWayTransformer::new(
/* depth */ 2,
/* embedding_dim */ transformer_dim,
/* num_heads */ 8,
/* mlp_dim */ 2048,
vb.pp("transformer"),
)?;
let span = tracing::span!(tracing::Level::TRACE, "mask-decoder");
Ok(Self {
iou_token,
mask_tokens,
iou_prediction_head,
output_upscaling_conv1,
output_upscaling_ln,
output_upscaling_conv2,
num_mask_tokens,
output_hypernetworks_mlps,
transformer,
span,
})
}
pub fn forward(
&self,
image_embeddings: &Tensor,
image_pe: &Tensor,
sparse_prompt_embeddings: &Tensor,
dense_prompt_embeddings: &Tensor,
multimask_output: bool,
) -> Result<(Tensor, Tensor)> {
let _enter = self.span.enter();
let (masks, iou_pred) = self.predict_masks(
image_embeddings,
image_pe,
sparse_prompt_embeddings,
dense_prompt_embeddings,
)?;
let masks = if multimask_output {
masks.i((.., 1..))?
} else {
masks.i((.., 0..1))?
};
let iou_pred = if multimask_output {
iou_pred.i((.., 1..))?
} else {
iou_pred.i((.., 0..1))?
};
Ok((masks, iou_pred))
}
fn predict_masks(
&self,
image_embeddings: &Tensor,
image_pe: &Tensor,
sparse_prompt_embeddings: &Tensor,
dense_prompt_embeddings: &Tensor,
) -> Result<(Tensor, Tensor)> {
// Concatenate output tokens.
let output_tokens = Tensor::cat(
&[self.iou_token.embeddings(), self.mask_tokens.embeddings()],
0,
)?;
let (d1, d2) = output_tokens.dims2()?;
let output_tokens =
output_tokens
.unsqueeze(0)?
.expand((sparse_prompt_embeddings.dim(0)?, d1, d2))?;
let tokens = Tensor::cat(&[&output_tokens, sparse_prompt_embeddings], 1)?;
// Expand per-image data in batch direction to be per mask
let src = repeat_interleave(image_embeddings, tokens.dim(0)?, 0)?;
let src = src.broadcast_add(dense_prompt_embeddings)?;
let pos_src = repeat_interleave(image_pe, tokens.dim(0)?, 0)?;
let (b, c, h, w) = src.dims4()?;
// Run the transformer
let (hs, src) = self.transformer.forward(&src, &pos_src, &tokens)?;
let iou_token_out = hs.i((.., 0))?;
let mask_tokens_out = hs.i((.., 1..1 + self.num_mask_tokens))?;
        // Upscale mask embeddings and predict masks using the mask tokens.
let src = src.transpose(1, 2)?.reshape((b, c, h, w))?;
let upscaled_embedding = self
.output_upscaling_conv1
.forward(&src)?
.apply(&self.output_upscaling_ln)?
.gelu()?
.apply(&self.output_upscaling_conv2)?
.gelu()?;
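        // Each mask token goes through its own hypernetwork MLP to produce a weight vector,
        // which is then matmul'ed with the upscaled image embedding to yield that mask's logits.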
let mut hyper_in_list = Vec::with_capacity(self.num_mask_tokens);
for (i, mlp) in self.output_hypernetworks_mlps.iter().enumerate() {
let h = mlp.forward(&mask_tokens_out.i((.., i))?)?;
hyper_in_list.push(h)
}
let hyper_in = Tensor::stack(hyper_in_list.as_slice(), 1)?.contiguous()?;
let (b, c, h, w) = upscaled_embedding.dims4()?;
let masks = hyper_in.matmul(&upscaled_embedding.reshape((b, c, h * w))?)?;
let masks = masks.reshape((b, (), h, w))?;
// Generate mask quality predictions.
let iou_pred = self.iou_prediction_head.forward(&iou_token_out)?;
Ok((masks, iou_pred))
}
}
// Equivalent to torch.repeat_interleave
fn repeat_interleave(img: &Tensor, repeats: usize, dim: usize) -> Result<Tensor> {
let img = img.unsqueeze(dim + 1)?;
let mut dims = img.dims().to_vec();
dims[dim + 1] = repeats;
img.broadcast_as(dims)?.flatten(dim, dim + 1)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/segment_anything/tiny_vit.rs | candle-transformers/src/models/segment_anything/tiny_vit.rs | // Adapted from:
// https://github.com/ChaoningZhang/MobileSAM/blob/master/mobile_sam/modeling/tiny_vit_sam.py
use candle::{IndexOp, Result, Tensor, D};
use candle_nn::{Conv2dConfig, Module, VarBuilder};
const MBCONV_EXPAND_RATIO: usize = 4;
const MLP_RATIO: usize = 4;
const LOCAL_CONV_SIZE: usize = 3;
const IMG_SIZE: usize = 1024;
const IN_CHANNELS: usize = 3;
#[derive(Debug)]
struct Conv2dBN {
c: candle_nn::Conv2d,
bn: candle_nn::BatchNorm,
span: tracing::Span,
}
impl Conv2dBN {
fn new(in_: usize, out: usize, ks: usize, cfg: Conv2dConfig, vb: VarBuilder) -> Result<Self> {
let c = candle_nn::conv2d_no_bias(in_, out, ks, cfg, vb.pp("c"))?;
let bn = candle_nn::batch_norm(out, 1e-5, vb.pp("bn"))?;
let span = tracing::span!(tracing::Level::TRACE, "conv2d-bn");
Ok(Self { c, bn, span })
}
}
impl Module for Conv2dBN {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.c)?.apply_t(&self.bn, false)
}
}
#[derive(Debug)]
struct PatchEmbed {
conv1: Conv2dBN,
conv2: Conv2dBN,
span: tracing::Span,
}
impl PatchEmbed {
fn new(in_chans: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> {
let cfg = candle_nn::Conv2dConfig {
stride: 2,
padding: 1,
..Default::default()
};
let conv1 = Conv2dBN::new(in_chans, embed_dim / 2, 3, cfg, vb.pp("seq.0"))?;
let conv2 = Conv2dBN::new(embed_dim / 2, embed_dim, 3, cfg, vb.pp("seq.2"))?;
let span = tracing::span!(tracing::Level::TRACE, "patch-embed");
Ok(Self { conv1, conv2, span })
}
}
impl Module for PatchEmbed {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.conv1)?.gelu()?.apply(&self.conv2)
}
}
#[derive(Debug)]
struct MBConv {
conv1: Conv2dBN,
conv2: Conv2dBN,
conv3: Conv2dBN,
span: tracing::Span,
}
impl MBConv {
fn new(in_: usize, out: usize, expand_ratio: usize, vb: VarBuilder) -> Result<Self> {
let hidden = in_ * expand_ratio;
let cfg2 = candle_nn::Conv2dConfig {
padding: 1,
groups: hidden,
..Default::default()
};
let conv1 = Conv2dBN::new(in_, hidden, 1, Default::default(), vb.pp("conv1"))?;
let conv2 = Conv2dBN::new(hidden, hidden, 3, cfg2, vb.pp("conv2"))?;
let conv3 = Conv2dBN::new(hidden, out, 1, Default::default(), vb.pp("conv3"))?;
let span = tracing::span!(tracing::Level::TRACE, "mb-conv");
Ok(Self {
conv1,
conv2,
conv3,
span,
})
}
}
impl Module for MBConv {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let shortcut = xs;
let xs = xs
.apply(&self.conv1)?
.gelu()?
.apply(&self.conv2)?
.gelu()?
.apply(&self.conv3)?;
(xs + shortcut)?.gelu()
}
}
#[derive(Debug)]
struct PatchMerging {
conv1: Conv2dBN,
conv2: Conv2dBN,
conv3: Conv2dBN,
input_resolution: (usize, usize),
span: tracing::Span,
}
impl PatchMerging {
fn new(
input_resolution: (usize, usize),
dim: usize,
out: usize,
vb: VarBuilder,
) -> Result<Self> {
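        // Widths 320/448/576 are the final-stage dims of the TinyViT-5M/11M/21M variants; the
        // MobileSAM adaptation keeps the spatial resolution there (stride 1) instead of halving it.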
let stride = if [320, 448, 576].contains(&out) { 1 } else { 2 };
let cfg2 = candle_nn::Conv2dConfig {
padding: 1,
stride,
groups: out,
..Default::default()
};
let conv1 = Conv2dBN::new(dim, out, 1, Default::default(), vb.pp("conv1"))?;
let conv2 = Conv2dBN::new(out, out, 3, cfg2, vb.pp("conv2"))?;
let conv3 = Conv2dBN::new(out, out, 1, Default::default(), vb.pp("conv3"))?;
let span = tracing::span!(tracing::Level::TRACE, "patch-merging");
Ok(Self {
conv1,
conv2,
conv3,
input_resolution,
span,
})
}
}
impl Module for PatchMerging {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = if xs.rank() == 3 {
let (h, w) = self.input_resolution;
let b = xs.dim(0)?;
xs.reshape((b, h, w, ()))?.permute((0, 3, 1, 2))?
} else {
xs.clone()
};
xs.apply(&self.conv1)?
.gelu()?
.apply(&self.conv2)?
.gelu()?
.apply(&self.conv3)?
.flatten_from(2)?
.transpose(1, 2)
}
}
#[derive(Debug)]
struct ConvLayer {
blocks: Vec<MBConv>,
downsample: Option<PatchMerging>,
span: tracing::Span,
}
impl ConvLayer {
fn new(
dim: usize,
out: usize,
input_resolution: (usize, usize),
depth: usize,
downsample: bool,
conv_expand_ratio: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb_b = vb.pp("blocks");
let mut blocks = Vec::with_capacity(depth);
for index in 0..depth {
let block = MBConv::new(dim, dim, conv_expand_ratio, vb_b.pp(index))?;
blocks.push(block)
}
let downsample = if downsample {
let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?;
Some(downsample)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "conv-layer");
Ok(Self {
blocks,
downsample,
span,
})
}
}
impl Module for ConvLayer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for block in self.blocks.iter() {
xs = block.forward(&xs)?
}
match &self.downsample {
None => Ok(xs),
Some(downsample) => downsample.forward(&xs),
}
}
}
#[derive(Debug)]
struct Mlp {
norm: candle_nn::LayerNorm,
fc1: super::Linear,
fc2: super::Linear,
span: tracing::Span,
}
impl Mlp {
fn new(in_: usize, hidden: usize, vb: VarBuilder) -> Result<Self> {
let norm = candle_nn::layer_norm(in_, 1e-5, vb.pp("norm"))?;
let fc1 = super::linear(vb.pp("fc1"), in_, hidden, true)?;
let fc2 = super::linear(vb.pp("fc2"), hidden, in_, true)?;
let span = tracing::span!(tracing::Level::TRACE, "mlp");
Ok(Self {
norm,
fc1,
fc2,
span,
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.norm)?
.apply(&self.fc1)?
.gelu()?
.apply(&self.fc2)
}
}
#[derive(Debug)]
struct Attention {
norm: candle_nn::LayerNorm,
qkv: super::Linear,
proj: super::Linear,
ab: Tensor,
key_dim: usize,
num_heads: usize,
d: usize,
dh: usize,
scale: f64,
span: tracing::Span,
span_matmul: tracing::Span,
span_softmax: tracing::Span,
}
impl Attention {
fn new(
dim: usize,
key_dim: usize,
num_heads: usize,
attn_ratio: usize,
resolution: (usize, usize),
vb: VarBuilder,
) -> Result<Self> {
let d = attn_ratio * key_dim;
let dh = d * num_heads;
let nh_kd = key_dim * num_heads;
let h = dh + nh_kd * 2;
let norm = candle_nn::layer_norm(dim, 1e-5, vb.pp("norm"))?;
let qkv = super::linear(vb.pp("qkv"), dim, h, true)?;
let proj = super::linear(vb.pp("proj"), dh, dim, true)?;
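        // Relative attention biases: every pair of grid positions (x1, y1), (x2, y2) shares a
        // learned per-head bias indexed by its absolute offset (|x2 - x1|, |y2 - y1|); the idxs
        // table built below maps each (query, key) pair to its offset slot.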
let points = (0..resolution.0)
.flat_map(|x| (0..resolution.1).map(move |y| (x as i64, y as i64)))
.collect::<Vec<_>>();
let mut idxs = Vec::with_capacity(points.len() * points.len());
let mut attention_offsets = std::collections::HashMap::new();
for &(x1, y1) in points.iter() {
for &(x2, y2) in points.iter() {
let offset = ((x2 - x1).abs(), (y2 - y1).abs());
let l = attention_offsets.len();
let idx = attention_offsets.entry(offset).or_insert(l);
idxs.push(*idx as u32)
}
}
let attention_biases = vb.get((num_heads, attention_offsets.len()), "attention_biases")?;
let idxs = Tensor::new(idxs, attention_biases.device())?;
let ab =
attention_biases
.index_select(&idxs, 1)?
.reshape(((), points.len(), points.len()))?;
let span = tracing::span!(tracing::Level::TRACE, "attention");
let span_matmul = tracing::span!(tracing::Level::TRACE, "attn-matmul");
let span_softmax = tracing::span!(tracing::Level::TRACE, "attn-sm");
Ok(Self {
norm,
qkv,
proj,
ab,
key_dim,
num_heads,
d,
dh,
scale: 1f64 / (key_dim as f64).sqrt(),
span,
span_matmul,
span_softmax,
})
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b, n, _) = xs.dims3()?;
let xs = xs.apply(&self.norm)?;
let qkv = xs.apply(&self.qkv)?.reshape((b, n, self.num_heads, ()))?;
let q = qkv
.narrow(D::Minus1, 0, self.key_dim)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let k = qkv
.narrow(D::Minus1, self.key_dim, self.key_dim)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let v = qkv
.narrow(D::Minus1, 2 * self.key_dim, self.d)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let attn = {
let _enter = self.span_matmul.enter();
(q.matmul(&k.t()?)? * self.scale)?
};
let attn = attn.broadcast_add(&self.ab)?;
let attn = {
let _enter = self.span_softmax.enter();
candle_nn::ops::softmax_last_dim(&attn)?
};
let attn = {
let _enter = self.span_matmul.enter();
attn.matmul(&v)?
};
attn.transpose(1, 2)?
.reshape((b, n, self.dh))?
.apply(&self.proj)
}
}
#[derive(Debug)]
struct TinyViTBlock {
attn: Attention,
local_conv: Conv2dBN,
mlp: Mlp,
window_size: usize,
input_resolution: (usize, usize),
span: tracing::Span,
}
impl TinyViTBlock {
fn new(
dim: usize,
input_resolution: (usize, usize),
num_heads: usize,
window_size: usize,
vb: VarBuilder,
) -> Result<Self> {
let head_dim = dim / num_heads;
let attn = Attention::new(
dim,
head_dim,
num_heads,
1,
(window_size, window_size),
vb.pp("attn"),
)?;
let mlp = Mlp::new(dim, dim * MLP_RATIO, vb.pp("mlp"))?;
let cfg = candle_nn::Conv2dConfig {
padding: LOCAL_CONV_SIZE / 2,
groups: dim,
..Default::default()
};
let local_conv = Conv2dBN::new(dim, dim, LOCAL_CONV_SIZE, cfg, vb.pp("local_conv"))?;
        let span = tracing::span!(tracing::Level::TRACE, "tiny-vit-block");
Ok(Self {
attn,
local_conv,
mlp,
window_size,
input_resolution,
span,
})
}
}
impl Module for TinyViTBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (h, w) = self.input_resolution;
let (b, l, c) = xs.dims3()?;
let res_x = xs;
let xs = if h == self.window_size && w == self.window_size {
self.attn.forward(xs)?
} else {
let xs = xs.reshape((b, h, w, c))?;
let pad_b = (self.window_size - h % self.window_size) % self.window_size;
let pad_r = (self.window_size - w % self.window_size) % self.window_size;
let xs = if pad_b > 0 {
xs.pad_with_zeros(1, 0, pad_b)?
} else {
xs
};
let xs = if pad_r > 0 {
xs.pad_with_zeros(2, 0, pad_r)?
} else {
xs
};
let (p_h, p_w) = (h + pad_b, w + pad_r);
let n_h = p_h / self.window_size;
let n_w = p_w / self.window_size;
let xs = xs
.reshape((b, n_h, self.window_size, n_w, self.window_size, c))?
.transpose(2, 3)?
.reshape((b * n_h * n_w, self.window_size * self.window_size, c))?;
let xs = self.attn.forward(&xs)?;
let xs = xs
.reshape((b, n_h, n_w, self.window_size, self.window_size, c))?
.transpose(2, 3)?
.reshape((b, p_h, p_w, c))?;
let xs = if pad_r > 0 {
xs.i((.., .., ..w))?.contiguous()?
} else {
xs
};
let xs = if pad_b > 0 {
xs.i((.., ..h, ..))?.contiguous()?
} else {
xs
};
xs.reshape((b, l, c))?
};
let xs = (xs + res_x)?;
let xs = xs
.transpose(1, 2)?
.reshape((b, c, h, w))?
.apply(&self.local_conv)?
.reshape((b, c, l))?
.transpose(1, 2)?;
&xs + self.mlp.forward(&xs)?
}
}
#[derive(Debug)]
struct BasicLayer {
blocks: Vec<TinyViTBlock>,
downsample: Option<PatchMerging>,
span: tracing::Span,
}
impl BasicLayer {
#[allow(clippy::too_many_arguments)]
fn new(
dim: usize,
input_resolution: (usize, usize),
depth: usize,
num_heads: usize,
window_size: usize,
downsample: bool,
out: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb_b = vb.pp("blocks");
let mut blocks = Vec::with_capacity(depth);
for index in 0..depth {
let block = TinyViTBlock::new(
dim,
input_resolution,
num_heads,
window_size,
vb_b.pp(index),
)?;
blocks.push(block)
}
let downsample = if downsample {
let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?;
Some(downsample)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "basic-layer");
Ok(Self {
blocks,
downsample,
span,
})
}
}
impl Module for BasicLayer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for block in self.blocks.iter() {
xs = block.forward(&xs)?
}
match &self.downsample {
None => Ok(xs),
Some(downsample) => downsample.forward(&xs),
}
}
}
#[derive(Debug)]
pub struct TinyViT {
patch_embed: PatchEmbed,
layer0: ConvLayer,
layers: Vec<BasicLayer>,
// norm_head: candle_nn::LayerNorm,
// head: candle_nn::Linear,
neck_conv1: candle_nn::Conv2d,
neck_ln1: super::LayerNorm2d,
neck_conv2: candle_nn::Conv2d,
neck_ln2: super::LayerNorm2d,
span: tracing::Span,
span_neck: tracing::Span,
}
impl TinyViT {
pub fn new(
embed_dims: &[usize],
depths: &[usize],
num_heads: &[usize],
window_sizes: &[usize],
_num_classes: usize,
vb: VarBuilder,
) -> Result<Self> {
let patch_embed = PatchEmbed::new(IN_CHANNELS, embed_dims[0], vb.pp("patch_embed"))?;
let patches_resolution = IMG_SIZE / 4;
let vb_l = vb.pp("layers");
let layer0 = ConvLayer::new(
/* dim */ embed_dims[0],
/* out */ embed_dims[1],
/* input_resolution */ (patches_resolution, patches_resolution),
/* depth */ depths[0],
/* downsample */ true,
/* conv_expand_ratio */ MBCONV_EXPAND_RATIO,
vb_l.pp(0),
)?;
let num_layers = embed_dims.len();
let mut layers = Vec::with_capacity(num_layers - 1);
for i_layer in 1..num_layers {
let patches_resolution = patches_resolution / (1 << usize::min(i_layer, 2));
let layer = BasicLayer::new(
/* dim */ embed_dims[i_layer],
/* input_resolution */ (patches_resolution, patches_resolution),
/* depth */ depths[i_layer],
/* num_heads */ num_heads[i_layer],
/* window_size */ window_sizes[i_layer],
/* downsample */ i_layer < num_layers - 1,
/* out */ embed_dims[usize::min(i_layer + 1, num_layers - 1)],
vb_l.pp(i_layer),
)?;
layers.push(layer)
}
let last_embed_dim = embed_dims[embed_dims.len() - 1];
// let norm_head = candle_nn::layer_norm(last_embed_dim, 1e-5, vb.pp("norm_head"))?;
// let head = candle_nn::linear(last_embed_dim, num_classes, vb.pp("head"))?;
let neck_conv1 =
candle_nn::conv2d_no_bias(last_embed_dim, 256, 1, Default::default(), vb.pp("neck.0"))?;
let neck_ln1 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.1"))?;
let cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let neck_conv2 = candle_nn::conv2d_no_bias(256, 256, 3, cfg, vb.pp("neck.2"))?;
let neck_ln2 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.3"))?;
let span = tracing::span!(tracing::Level::TRACE, "tiny-vit");
let span_neck = tracing::span!(tracing::Level::TRACE, "neck");
Ok(Self {
patch_embed,
layer0,
layers,
neck_conv1,
neck_ln1,
neck_conv2,
neck_ln2,
span,
span_neck,
})
}
}
impl Module for TinyViT {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = self.patch_embed.forward(xs)?;
let mut xs = self.layer0.forward(&xs)?;
for layer in self.layers.iter() {
xs = layer.forward(&xs)?
}
let (b, _, c) = xs.dims3()?;
let _enter = self.span_neck.enter();
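        // The remaining tokens form a 64x64 grid (1024px input with a 16x total downscale), so
        // reshape back to spatial form before the neck convolutions.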
xs.reshape((b, 64, 64, c))?
.permute((0, 3, 1, 2))?
.apply(&self.neck_conv1)?
.apply(&self.neck_ln1)?
.apply(&self.neck_conv2)?
.apply(&self.neck_ln2)
}
}
pub fn tiny_vit_5m(vb: VarBuilder) -> Result<TinyViT> {
TinyViT::new(
/* embed_dims */ &[64, 128, 160, 320],
/* depths */ &[2, 2, 6, 2],
/* num_heads */ &[2, 4, 5, 10],
/* window_sizes */ &[7, 7, 14, 7],
/* num_classes */ 1000,
vb,
)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/segment_anything/mod.rs | candle-transformers/src/models/segment_anything/mod.rs | //! Segment Anything Model (SAM)
//!
//! SAM is an architecture for image segmentation, capable of segmenting any object
//! in an image based on prompts like points or boxes.
//!
//! This model provides a robust and fast image segmentation pipeline that can be tweaked via
//! some prompting (requesting some points to be in the target mask, requesting some
//! points to be part of the background so _not_ in the target mask, specifying some
//! bounding box).
//!
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/candle-segment-anything-wasm)
//! - 💻 [GH Link](https://github.com/facebookresearch/segment-anything)
//! - 📝 [Paper](https://arxiv.org/abs/2304.02643)
//! - 💡 The default backbone can be replaced by the smaller and faster TinyViT model based on [MobileSAM](https://github.com/ChaoningZhang/MobileSAM).
//!
//!
//! ## Example
//!
//! ```bash
//! cargo run --example segment-anything --release -- \
//! --image candle-examples/examples/yolo-v8/assets/bike.jpg \
//! --use-tiny --point 0.6,0.6 --point 0.6,0.55
//! ```
//!
//! <div align=center style="display: flex; justify-content: center; gap: 10px;">
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width="30%">
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/segment-anything/assets/single_pt_prompt.jpg" alt="" width="30%">
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/segment-anything/assets/two_pt_prompt.jpg" alt="" width="30%">
//! </div>
//!
//!
//! > Original; Prompt with `--point 0.6,0.55`; Prompt with `--point 0.6,0.6 --point 0.6,0.55`
//!
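//! A minimal library-level sketch (hedged: it assumes the SAM weights are already loaded into a
//! `VarBuilder` named `vb` and that `img` is a `(3, h, w)` image tensor with `h, w <= 1024`):
//!
//! ```ignore
//! let sam = sam::Sam::new_tiny(vb)?; // TinyViT (MobileSAM-style) backbone
//! // Relative coordinates in [0, 1]; `true` marks foreground points, `false` background points.
//! let points = vec![(0.6, 0.6, true), (0.6, 0.55, true)];
//! let (mask, iou) = sam.forward(&img, &points, /* multimask_output */ false)?;
//! ```
//!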
pub use crate::models::with_tracing::Linear;
use candle::{Result, Tensor};
use candle_nn::{Module, VarBuilder};
pub mod image_encoder;
pub mod mask_decoder;
pub mod prompt_encoder;
pub mod sam;
pub mod tiny_vit;
pub mod transformer;
pub fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> {
if bias {
crate::models::with_tracing::linear(in_dim, out_dim, vb)
} else {
crate::models::with_tracing::linear_no_bias(in_dim, out_dim, vb)
}
}
#[derive(Debug)]
pub struct LayerNorm2d {
weight: Tensor,
bias: Tensor,
num_channels: usize,
eps: f64,
}
impl LayerNorm2d {
pub fn new(num_channels: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(num_channels, "weight")?;
let bias = vb.get(num_channels, "bias")?;
Ok(Self {
weight,
bias,
num_channels,
eps,
})
}
}
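// Channel-wise layer norm over dim 1 of a (b, c, h, w) tensor: each spatial position is
// normalized across its channels, then scaled and shifted by the per-channel weight and bias.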
impl Module for LayerNorm2d {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let u = xs.mean_keepdim(1)?;
let xs = xs.broadcast_sub(&u)?;
let s = xs.sqr()?.mean_keepdim(1)?;
let xs = xs.broadcast_div(&(s + self.eps)?.sqrt()?)?;
xs.broadcast_mul(&self.weight.reshape((1, self.num_channels, 1, 1))?)?
.broadcast_add(&self.bias.reshape((1, self.num_channels, 1, 1))?)
}
}
#[derive(Debug)]
pub struct MlpBlock {
lin1: Linear,
lin2: Linear,
activation: candle_nn::Activation,
span: tracing::Span,
}
impl MlpBlock {
pub fn new(
embedding_dim: usize,
mlp_dim: usize,
activation: candle_nn::Activation,
vb: VarBuilder,
) -> Result<Self> {
let lin1 = linear(vb.pp("lin1"), embedding_dim, mlp_dim, true)?;
let lin2 = linear(vb.pp("lin2"), mlp_dim, embedding_dim, true)?;
let span = tracing::span!(tracing::Level::TRACE, "mlp-block");
Ok(Self {
lin1,
lin2,
activation,
span,
})
}
}
impl Module for MlpBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.lin1)?
.apply(&self.activation)?
.apply(&self.lin2)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/segment_anything/sam.rs | candle-transformers/src/models/segment_anything/sam.rs | use candle::{DType, IndexOp, Result, Tensor};
use candle_nn::{Module, VarBuilder};
use super::image_encoder::ImageEncoderViT;
use super::mask_decoder::MaskDecoder;
use super::prompt_encoder::PromptEncoder;
use super::tiny_vit::{tiny_vit_5m, TinyViT};
const PROMPT_EMBED_DIM: usize = 256;
pub const IMAGE_SIZE: usize = 1024;
const VIT_PATCH_SIZE: usize = 16;
const PRED_IOU_THRESH: f32 = 0.88;
const STABILITY_SCORE_OFFSET: f32 = 1.0;
const STABILITY_SCORE_THRESHOLD: f32 = 0.95;
const MODEL_MASK_THRESHOLD: f32 = 0.0;
const CROP_NMS_THRESH: f32 = 0.7;
#[derive(Debug)]
enum ImageEncoder {
Original(Box<ImageEncoderViT>),
TinyViT(Box<TinyViT>),
}
impl Module for ImageEncoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::Original(vit) => vit.forward(xs),
Self::TinyViT(vit) => vit.forward(xs),
}
}
}
#[derive(Debug)]
pub struct Sam {
image_encoder: ImageEncoder,
prompt_encoder: PromptEncoder,
mask_decoder: MaskDecoder,
pixel_mean: Tensor,
pixel_std: Tensor,
}
impl Sam {
pub fn new(
encoder_embed_dim: usize,
encoder_depth: usize,
encoder_num_heads: usize,
encoder_global_attn_indexes: &[usize],
vb: VarBuilder,
) -> Result<Self> {
let image_embedding_size = IMAGE_SIZE / VIT_PATCH_SIZE;
let image_encoder = ImageEncoderViT::new(
IMAGE_SIZE,
VIT_PATCH_SIZE,
3,
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
PROMPT_EMBED_DIM,
/* qkv_bias */ true,
/* use_rel_pos */ true,
/* use_abs_pos */ true,
/* window_size */ 14,
/* global_attn_indexes */ encoder_global_attn_indexes,
vb.pp("image_encoder"),
)?;
let prompt_encoder = PromptEncoder::new(
PROMPT_EMBED_DIM,
(image_embedding_size, image_embedding_size),
(IMAGE_SIZE, IMAGE_SIZE),
16,
vb.pp("prompt_encoder"),
)?;
let mask_decoder = MaskDecoder::new(
PROMPT_EMBED_DIM,
            /* num_multimask_outputs */ 3,
/* iou_head_depth */ 3,
/* iou_head_hidden_dim */ 256,
vb.pp("mask_decoder"),
)?;
let pixel_mean =
Tensor::new(&[123.675f32, 116.28, 103.53], vb.device())?.reshape((3, 1, 1))?;
let pixel_std =
Tensor::new(&[58.395f32, 57.12, 57.375], vb.device())?.reshape((3, 1, 1))?;
Ok(Self {
image_encoder: ImageEncoder::Original(image_encoder.into()),
prompt_encoder,
mask_decoder,
pixel_std,
pixel_mean,
})
}
pub fn new_tiny(vb: VarBuilder) -> Result<Self> {
let image_embedding_size = IMAGE_SIZE / VIT_PATCH_SIZE;
let image_encoder = tiny_vit_5m(vb.pp("image_encoder"))?;
let prompt_encoder = PromptEncoder::new(
PROMPT_EMBED_DIM,
(image_embedding_size, image_embedding_size),
(IMAGE_SIZE, IMAGE_SIZE),
16,
vb.pp("prompt_encoder"),
)?;
let mask_decoder = MaskDecoder::new(
PROMPT_EMBED_DIM,
            /* num_multimask_outputs */ 3,
/* iou_head_depth */ 3,
/* iou_head_hidden_dim */ 256,
vb.pp("mask_decoder"),
)?;
let pixel_mean =
Tensor::new(&[123.675f32, 116.28, 103.53], vb.device())?.reshape((3, 1, 1))?;
let pixel_std =
Tensor::new(&[58.395f32, 57.12, 57.375], vb.device())?.reshape((3, 1, 1))?;
Ok(Self {
image_encoder: ImageEncoder::TinyViT(image_encoder.into()),
prompt_encoder,
mask_decoder,
pixel_std,
pixel_mean,
})
}
pub fn embeddings(&self, img: &Tensor) -> Result<Tensor> {
let img = self.preprocess(img)?.unsqueeze(0)?;
self.image_encoder.forward(&img)
}
pub fn forward(
&self,
img: &Tensor,
points: &[(f64, f64, bool)],
multimask_output: bool,
) -> Result<(Tensor, Tensor)> {
let (_c, original_h, original_w) = img.dims3()?;
let img = self.preprocess(img)?.unsqueeze(0)?;
let img_embeddings = self.image_encoder.forward(&img)?;
let (low_res_mask, iou) = self.forward_for_embeddings(
&img_embeddings,
original_h,
original_w,
points,
multimask_output,
)?;
let mask = low_res_mask
.upsample_nearest2d(IMAGE_SIZE, IMAGE_SIZE)?
.get(0)?
.i((.., ..original_h, ..original_w))?;
Ok((mask, iou))
}
/// Generate the mask and IOU predictions from some image embeddings and prompt.
///
/// The prompt is specified as a list of points `(x, y, b)`. `x` and `y` are the point
/// coordinates (between 0 and 1) and `b` is `true` for points that should be part of the mask
/// and `false` for points that should be part of the background and so excluded from the mask.
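    ///
    /// A hedged sketch of reusing precomputed embeddings across several prompts (variable names
    /// other than the `Sam` methods are illustrative):
    ///
    /// ```ignore
    /// let embeddings = sam.embeddings(&img)?; // computed once per image
    /// for points in prompt_sets.iter() {
    ///     let (low_res_mask, iou) = sam.forward_for_embeddings(&embeddings, h, w, points, false)?;
    /// }
    /// ```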
pub fn forward_for_embeddings(
&self,
img_embeddings: &Tensor,
original_h: usize,
original_w: usize,
points: &[(f64, f64, bool)],
multimask_output: bool,
) -> Result<(Tensor, Tensor)> {
let image_pe = self.prompt_encoder.get_dense_pe()?;
let points = if points.is_empty() {
None
} else {
let n_points = points.len();
let xys = points
.iter()
.flat_map(|(x, y, _b)| {
let x = (*x as f32) * (original_w as f32);
let y = (*y as f32) * (original_h as f32);
[x, y]
})
.collect::<Vec<_>>();
let labels = points
.iter()
.map(|(_x, _y, b)| if *b { 1f32 } else { 0f32 })
.collect::<Vec<_>>();
let points = Tensor::from_vec(xys, (1, n_points, 2), img_embeddings.device())?;
let labels = Tensor::from_vec(labels, (1, n_points), img_embeddings.device())?;
Some((points, labels))
};
let points = points.as_ref().map(|xy| (&xy.0, &xy.1));
let (sparse_prompt_embeddings, dense_prompt_embeddings) =
self.prompt_encoder.forward(points, None, None)?;
self.mask_decoder.forward(
img_embeddings,
&image_pe,
&sparse_prompt_embeddings,
&dense_prompt_embeddings,
multimask_output,
)
}
pub fn unpreprocess(&self, img: &Tensor) -> Result<Tensor> {
let img = img
.broadcast_mul(&self.pixel_std)?
.broadcast_add(&self.pixel_mean)?;
img.maximum(&img.zeros_like()?)?
.minimum(&(img.ones_like()? * 255.)?)
}
pub fn preprocess(&self, img: &Tensor) -> Result<Tensor> {
let (_c, h, w) = img.dims3()?;
let img = img
.to_dtype(DType::F32)?
.broadcast_sub(&self.pixel_mean)?
.broadcast_div(&self.pixel_std)?;
if h > IMAGE_SIZE || w > IMAGE_SIZE {
candle::bail!("image is too large ({w}, {h}), maximum size {IMAGE_SIZE}")
}
let img = img.pad_with_zeros(1, 0, IMAGE_SIZE - h)?;
img.pad_with_zeros(2, 0, IMAGE_SIZE - w)
}
fn process_crop(
&self,
img: &Tensor,
cb: CropBox,
point_grids: &[(f64, f64)],
) -> Result<Vec<crate::object_detection::Bbox<Tensor>>> {
// Crop the image and calculate embeddings.
let img = img.i((.., cb.y0..cb.y1, cb.x0..cb.x1))?;
let img = self.preprocess(&img)?.unsqueeze(0)?;
let img_embeddings = self.image_encoder.forward(&img)?;
let crop_w = cb.x1 - cb.x0;
let crop_h = cb.y1 - cb.y0;
// Generate masks for this crop.
let image_pe = self.prompt_encoder.get_dense_pe()?;
let points = point_grids
.iter()
.map(|&(x, y)| vec![x as f32 * crop_w as f32, y as f32 * crop_h as f32])
.collect::<Vec<_>>();
let mut bboxes = Vec::new();
for points in points.chunks(64) {
// Run the model on this batch.
let points_len = points.len();
let in_points = Tensor::new(points.to_vec(), img.device())?.unsqueeze(1)?;
let in_labels = Tensor::ones((points_len, 1), DType::F32, img.device())?;
let (sparse_prompt_embeddings, dense_prompt_embeddings) =
self.prompt_encoder
.forward(Some((&in_points, &in_labels)), None, None)?;
let (low_res_mask, iou_predictions) = self.mask_decoder.forward(
&img_embeddings,
&image_pe,
&sparse_prompt_embeddings,
&dense_prompt_embeddings,
/* multimask_output */ true,
)?;
let low_res_mask = low_res_mask.flatten(0, 1)?;
let iou_predictions = iou_predictions.flatten(0, 1)?.to_vec1::<f32>()?;
let dev = low_res_mask.device();
for (i, iou) in iou_predictions.iter().enumerate() {
// Filter by predicted IoU.
if *iou < PRED_IOU_THRESH {
continue;
}
let low_res_mask = low_res_mask.get(i)?;
// Calculate stability score.
let bound = Tensor::new(MODEL_MASK_THRESHOLD + STABILITY_SCORE_OFFSET, dev)?
.broadcast_as(low_res_mask.shape())?;
let intersections = low_res_mask
.ge(&bound)?
.to_dtype(DType::F32)?
.sum_all()?
.to_vec0::<f32>()?;
let bound = Tensor::new(MODEL_MASK_THRESHOLD - STABILITY_SCORE_OFFSET, dev)?
.broadcast_as(low_res_mask.shape())?;
let unions = low_res_mask
.ge(&bound)?
.to_dtype(DType::F32)?
.sum_all()?
.to_vec0::<f32>()?;
let stability_score = intersections / unions;
if stability_score < STABILITY_SCORE_THRESHOLD {
continue;
}
// Threshold masks and calculate boxes.
let low_res_mask = low_res_mask
.ge(&Tensor::new(0f32, dev)?.broadcast_as(low_res_mask.shape())?)?
.to_dtype(DType::U32)?;
let low_res_mask_per_x = low_res_mask.sum(0)?.to_vec1::<u32>()?;
let low_res_mask_per_y = low_res_mask.sum(1)?.to_vec1::<u32>()?;
let min_max_x = min_max_indexes(&low_res_mask_per_x);
let min_max_y = min_max_indexes(&low_res_mask_per_y);
if let Some(((x0, x1), (y0, y1))) = min_max_x.zip(min_max_y) {
let bbox = crate::object_detection::Bbox {
xmin: x0 as f32,
ymin: y0 as f32,
xmax: x1 as f32,
ymax: y1 as f32,
confidence: *iou,
data: low_res_mask,
};
bboxes.push(bbox);
}
// TODO:
// Filter boxes that touch crop boundaries
// Compress to RLE.
}
}
let mut bboxes = vec![bboxes];
// Remove duplicates within this crop.
crate::object_detection::non_maximum_suppression(&mut bboxes, CROP_NMS_THRESH);
// TODO: Return to the original image frame.
Ok(bboxes.remove(0))
}
pub fn generate_masks(
&self,
img: &Tensor,
points_per_side: usize,
crop_n_layer: usize,
crop_overlap_ratio: f64,
crop_n_points_downscale_factor: usize,
) -> Result<Vec<crate::object_detection::Bbox<Tensor>>> {
let (_c, h, w) = img.dims3()?;
let point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layer,
crop_n_points_downscale_factor,
);
let crop_boxes = generate_crop_boxes((h, w), crop_n_layer, crop_overlap_ratio);
let mut bboxes = Vec::new();
for crop_box in crop_boxes.into_iter() {
let layer_idx = crop_box.layer_idx;
let b = self.process_crop(img, crop_box, &point_grids[layer_idx])?;
bboxes.extend(b)
}
// TODO: remove duplicates
Ok(bboxes)
}
}
// Return the first and last indexes i for which values[i] > 0
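// e.g. min_max_indexes(&[0, 0, 3, 1, 0, 2, 0]) == Some((2, 5)) and min_max_indexes(&[0, 0]) == None.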
fn min_max_indexes(values: &[u32]) -> Option<(usize, usize)> {
let (mut min_i, mut max_i) = (usize::MAX, usize::MIN);
for (i, &s) in values.iter().enumerate() {
if s == 0 {
continue;
}
min_i = usize::min(i, min_i);
max_i = usize::max(i, max_i);
}
if max_i < min_i {
None
} else {
Some((min_i, max_i))
}
}
#[derive(Debug)]
struct CropBox {
x0: usize,
y0: usize,
x1: usize,
y1: usize,
layer_idx: usize,
}
impl CropBox {
fn new(x0: usize, y0: usize, x1: usize, y1: usize, layer_idx: usize) -> Self {
Self {
x0,
y0,
x1,
y1,
layer_idx,
}
}
}
fn generate_crop_boxes(
(im_h, im_w): (usize, usize),
n_layers: usize,
overlap_ratio: f64,
) -> Vec<CropBox> {
fn crop_len(orig_len: usize, n_crops: usize, overlap: usize) -> usize {
f64::ceil((overlap * (n_crops - 1) + orig_len) as f64 / n_crops as f64) as usize
}
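    // e.g. crop_len(1024, 2, 128) = ceil((128 + 1024) / 2) = 576: two 576px crops with a 128px
    // overlap cover the full 1024px side.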
let short_side = usize::min(im_h, im_w);
let mut crop_boxes = Vec::new();
// Original image.
crop_boxes.push(CropBox::new(0, 0, im_w, im_h, 0));
for layer_idx in 1..=n_layers {
let n_crops_per_side = 1 << layer_idx;
let overlap = (overlap_ratio * short_side as f64 * 2. / n_crops_per_side as f64) as usize;
let crop_w = crop_len(im_w, n_crops_per_side, overlap);
        let crop_h = crop_len(im_h, n_crops_per_side, overlap);
for i_x in 0..n_crops_per_side {
let x0 = (crop_w - overlap) * i_x;
for i_y in 0..n_crops_per_side {
let y0 = (crop_h - overlap) * i_y;
let x1 = usize::min(im_w, x0 + crop_w);
let y1 = usize::min(im_h, y0 + crop_h);
crop_boxes.push(CropBox::new(x0, y0, x1, y1, layer_idx));
}
}
}
crop_boxes
}
// Generates a 2D grid of points evenly spaced in [0,1]x[0,1].
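// e.g. n_per_side = 2 yields (0.25, 0.25), (0.25, 0.75), (0.75, 0.25), (0.75, 0.75).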
fn build_point_grid(n_per_side: usize) -> Vec<(f64, f64)> {
let offset = 1f64 / (2 * n_per_side) as f64;
let mut points = Vec::with_capacity(n_per_side * n_per_side);
for i_x in 0..n_per_side {
let x = offset + i_x as f64 / n_per_side as f64;
for i_y in 0..n_per_side {
let y = offset + i_y as f64 / n_per_side as f64;
points.push((x, y))
}
}
points
}
fn build_all_layer_point_grids(
n_per_side: usize,
n_layers: usize,
scale_per_layer: usize,
) -> Vec<Vec<(f64, f64)>> {
let mut points_by_layer = Vec::with_capacity(n_layers + 1);
for i in 0..=n_layers {
let n_points = n_per_side / scale_per_layer.pow(i as u32);
points_by_layer.push(build_point_grid(n_points))
}
points_by_layer
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/segment_anything/image_encoder.rs | candle-transformers/src/models/segment_anything/image_encoder.rs | use candle::{DType, IndexOp, Result, Tensor};
use candle_nn::{layer_norm, LayerNorm, Module, VarBuilder};
#[derive(Debug)]
struct PatchEmbed {
proj: candle_nn::Conv2d,
span: tracing::Span,
}
impl PatchEmbed {
fn new(
in_chans: usize,
embed_dim: usize,
k_size: usize,
stride: usize,
padding: usize,
vb: VarBuilder,
) -> Result<Self> {
let cfg = candle_nn::Conv2dConfig {
stride,
padding,
..Default::default()
};
let proj = candle_nn::conv2d(in_chans, embed_dim, k_size, cfg, vb.pp("proj"))?;
let span = tracing::span!(tracing::Level::TRACE, "patch-embed");
Ok(Self { proj, span })
}
}
impl Module for PatchEmbed {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.proj)?.permute((0, 2, 3, 1))
}
}
// A custom op to make add_decomposed_rel_pos faster. Most of the time is spent on the final
// addition in the case where b = 12, q_h = q_w = 4096, k_h = k_w = 4096
// (attn.reshape((b, q_h, q_w, k_h, k_w))?
// + rel_h.unsqueeze(4)?.broadcast_add(&rel_w.unsqueeze(3)?)?)?
// .reshape((b, q_h * q_w, k_h * k_w))
// Ideally we would perform this operation in place but this is not supported in candle at the
// moment. We should also investigate using f16 rather than f32.
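// The fields are (b, q_h, q_w, k_h, k_w); for every batch/query position the op computes
// dst[h, w] = attn[h, w] + rel_h[h] + rel_w[w] over the flattened (k_h, k_w) grid.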
struct Add3(usize, usize, usize, usize, usize);
impl candle::CustomOp3 for Add3 {
fn name(&self) -> &'static str {
"add3"
}
fn cpu_fwd(
&self,
s1: &candle::CpuStorage,
l1: &candle::Layout,
s2: &candle::CpuStorage,
l2: &candle::Layout,
s3: &candle::CpuStorage,
l3: &candle::Layout,
) -> Result<(candle::CpuStorage, candle::Shape)> {
use rayon::prelude::*;
let Add3(b, q_h, q_w, k_h, k_w) = *self;
let s1 = s1.as_slice::<f32>()?;
let s1 = match l1.contiguous_offsets() {
None => candle::bail!("input1 has to be contiguous"),
Some((o1, o2)) => &s1[o1..o2],
};
let s2 = s2.as_slice::<f32>()?;
let s2 = match l2.contiguous_offsets() {
None => candle::bail!("input2 has to be contiguous"),
Some((o1, o2)) => &s2[o1..o2],
};
let s3 = s3.as_slice::<f32>()?;
let s3 = match l3.contiguous_offsets() {
None => candle::bail!("input3 has to be contiguous"),
Some((o1, o2)) => &s3[o1..o2],
};
let mut dst = vec![0f32; b * q_h * q_w * k_h * k_w];
dst.par_chunks_exact_mut(k_h * k_w)
.enumerate()
.for_each(|(b_idx, dst)| {
let s1_idx = b_idx * k_h * k_w;
let s2_idx = b_idx * k_h;
let s3_idx = b_idx * k_w;
for h_idx in 0..k_h {
let s1_idx = s1_idx + h_idx * k_w;
let s2_idx = s2_idx + h_idx;
let dst_idx = h_idx * k_w;
for w_idx in 0..k_w {
let s1_idx = s1_idx + w_idx;
let s3_idx = s3_idx + w_idx;
let dst_idx = dst_idx + w_idx;
dst[dst_idx] = s1[s1_idx] + s2[s2_idx] + s3[s3_idx]
}
}
});
let dst = candle::WithDType::to_cpu_storage_owned(dst);
Ok((dst, (b, q_h * q_w, k_h * k_w).into()))
}
}
#[derive(Debug)]
struct Attention {
qkv: super::Linear,
proj: super::Linear,
num_heads: usize,
scale: f64,
rel_pos_hw: Option<(Tensor, Tensor)>,
span: tracing::Span,
span_matmul: tracing::Span,
span_rel_pos: tracing::Span,
span_softmax: tracing::Span,
}
impl Attention {
fn new(
dim: usize,
num_heads: usize,
qkv_bias: bool,
use_rel_pos: bool,
input_size: (usize, usize),
vb: VarBuilder,
) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "attention");
let span_matmul = tracing::span!(tracing::Level::TRACE, "attn-matmul");
let span_rel_pos = tracing::span!(tracing::Level::TRACE, "attn-rel-pos");
let span_softmax = tracing::span!(tracing::Level::TRACE, "attn-sm");
let qkv = super::linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?;
let proj = super::linear(vb.pp("proj"), dim, dim, true)?;
let head_dim = dim / num_heads;
let scale = 1. / (head_dim as f64).sqrt();
let rel_pos_hw = if use_rel_pos {
let h = vb.get((2 * input_size.0 - 1, head_dim), "rel_pos_h")?;
let w = vb.get((2 * input_size.1 - 1, head_dim), "rel_pos_w")?;
Some((h, w))
} else {
None
};
Ok(Self {
qkv,
proj,
num_heads,
scale,
rel_pos_hw,
span,
span_matmul,
span_rel_pos,
span_softmax,
})
}
fn add_decomposed_rel_pos(
&self,
attn: Tensor,
q: &Tensor,
(q_h, q_w): (usize, usize),
(k_h, k_w): (usize, usize),
) -> Result<Tensor> {
match &self.rel_pos_hw {
Some((rel_pos_h, rel_pos_w)) => {
let r_h = get_rel_pos(q_h, k_h, rel_pos_h)?;
let r_w = get_rel_pos(q_w, k_w, rel_pos_w)?;
let (b, _, dim) = q.dims3()?;
let r_q = q.reshape((b, q_h, q_w, dim))?;
// rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
let rel_h = r_q.matmul(&r_h.broadcast_left(b)?.t()?.contiguous()?)?;
// rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
let rel_w = r_q
.transpose(1, 2)? // -> bwhc
.contiguous()?
.matmul(&r_w.broadcast_left(b)?.t()?.contiguous()?)? // bwhc,bwck -> bwhk
.transpose(1, 2)?
.contiguous()?;
if attn.device().is_cpu() {
let op = Add3(b, q_h, q_w, k_h, k_w);
attn.apply_op3_no_bwd(&rel_h, &rel_w, &op)
} else {
(attn.reshape((b, q_h, q_w, k_h, k_w))?
+ rel_h.unsqueeze(4)?.broadcast_add(&rel_w.unsqueeze(3)?)?)?
.reshape((b, q_h * q_w, k_h * k_w))
}
}
None => Ok(attn),
}
}
}
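// Gather the relative positional embedding for every (query, key) index pair along one axis,
// following the decomposed relative position scheme of the SAM image encoder.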
fn get_rel_pos(q_size: usize, k_size: usize, rel_pos: &Tensor) -> Result<Tensor> {
let max_rel_dist = 2 * usize::max(q_size, k_size) - 1;
let dev = rel_pos.device();
let rel_pos_resized = if rel_pos.dim(0)? != max_rel_dist {
todo!("interpolation")
} else {
rel_pos
};
let q_coords = Tensor::arange(0u32, q_size as u32, dev)?
.reshape((q_size, 1))?
.to_dtype(DType::F32)?;
let k_coords = Tensor::arange(0u32, k_size as u32, dev)?
.reshape((1, k_size))?
.to_dtype(DType::F32)?;
let q_coords = (q_coords * f64::max(1f64, k_size as f64 / q_size as f64))?;
let k_coords = (k_coords * f64::max(1f64, q_size as f64 / k_size as f64))?;
let relative_coords = (q_coords.broadcast_sub(&k_coords)?
+ (k_size as f64 - 1.) * f64::max(1f64, q_size as f64 / k_size as f64))?;
let (d1, d2) = relative_coords.dims2()?;
let relative_coords = relative_coords.to_dtype(DType::U32)?;
rel_pos_resized
.index_select(&relative_coords.reshape(d1 * d2)?, 0)?
.reshape((d1, d2, ()))
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b, h, w, c) = xs.dims4()?;
let qkv = self
.qkv
.forward(&xs.flatten_to(1)?)?
.reshape((b, h * w, 3, self.num_heads, c / self.num_heads))?
.permute((2, 0, 3, 1, 4))?
.reshape((3, b * self.num_heads, h * w, c / self.num_heads))?;
let q = qkv.i(0)?;
let k = qkv.i(1)?;
let v = qkv.i(2)?;
let attn = {
let _enter = self.span_matmul.enter();
(&q * self.scale)?.matmul(&k.t()?)?
};
let attn = {
let _enter = self.span_rel_pos.enter();
self.add_decomposed_rel_pos(attn, &q, (h, w), (h, w))?
};
let attn = {
let _enter = self.span_softmax.enter();
candle_nn::ops::softmax_last_dim(&attn)?
};
let attn = {
let _enter = self.span_matmul.enter();
attn.matmul(&v)?
};
let attn = attn
.reshape((b, self.num_heads, h, w, c / self.num_heads))?
.permute((0, 2, 3, 1, 4))?
.reshape((b, h * w, c))?;
self.proj.forward(&attn)?.reshape((b, h, w, c))
}
}
#[derive(Debug)]
struct Block {
norm1: LayerNorm,
attn: Attention,
norm2: LayerNorm,
mlp: super::MlpBlock,
window_size: usize,
span: tracing::Span,
}
impl Block {
fn new(
dim: usize,
num_heads: usize,
qkv_bias: bool,
use_rel_pos: bool,
window_size: usize,
input_size: (usize, usize),
vb: VarBuilder,
) -> Result<Self> {
let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?;
let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?;
let input_size_attn = if window_size == 0 {
input_size
} else {
(window_size, window_size)
};
let attn = Attention::new(
dim,
num_heads,
qkv_bias,
use_rel_pos,
input_size_attn,
vb.pp("attn"),
)?;
let mlp = super::MlpBlock::new(dim, dim * 4, candle_nn::Activation::Gelu, vb.pp("mlp"))?;
let span = tracing::span!(tracing::Level::TRACE, "ie-block");
Ok(Self {
norm1,
attn,
norm2,
mlp,
window_size,
span,
})
}
}
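// Split a (b, h, w, c) tensor into non-overlapping windows of shape
// (b * n_windows, window_size, window_size, c), zero-padding h and w up to multiples of
// window_size; the padded (h_p, w_p) is returned so window_unpartition can undo the padding.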
fn window_partition(xs: Tensor, window_size: usize) -> Result<(Tensor, (usize, usize))> {
let (b, h, w, c) = xs.dims4()?;
let pad_h = (window_size - h % window_size) % window_size;
let pad_w = (window_size - w % window_size) % window_size;
let xs = if pad_h > 0 {
xs.pad_with_zeros(1, 0, pad_h)?
} else {
xs
};
let xs = if pad_w > 0 {
xs.pad_with_zeros(2, 0, pad_w)?
} else {
xs
};
let (h_p, w_p) = (h + pad_h, w + pad_w);
let windows = xs
.reshape((
b,
h_p / window_size,
window_size,
w_p / window_size,
window_size,
c,
))?
.transpose(2, 3)?
.contiguous()?
.flatten_to(2)?;
Ok((windows, (h_p, w_p)))
}
fn window_unpartition(
windows: Tensor,
window_size: usize,
(h_p, w_p): (usize, usize),
(h, w): (usize, usize),
) -> Result<Tensor> {
let b = windows.dim(0)? / (h_p * w_p / window_size / window_size);
let xs = windows
.reshape((
b,
h_p / window_size,
w_p / window_size,
window_size,
window_size,
windows.elem_count() / b / h_p / w_p,
))?
.transpose(2, 3)?
.contiguous()?
.reshape((b, h_p, w_p, ()))?;
let xs = if h_p > h { xs.narrow(1, 0, h)? } else { xs };
let xs = if w_p > w { xs.narrow(2, 0, w)? } else { xs };
Ok(xs)
}
impl Module for Block {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let shortcut = xs;
let xs = self.norm1.forward(xs)?;
let hw = (xs.dim(1)?, xs.dim(2)?);
let (xs, pad_hw) = if self.window_size > 0 {
window_partition(xs, self.window_size)?
} else {
(xs, (0, 0))
};
let xs = self.attn.forward(&xs)?;
let xs = if self.window_size > 0 {
window_unpartition(xs, self.window_size, pad_hw, hw)?
} else {
xs
};
let xs = (xs + shortcut)?;
&xs + xs.apply(&self.norm2)?.apply(&self.mlp)?
}
}
#[derive(Debug)]
pub struct ImageEncoderViT {
patch_embed: PatchEmbed,
blocks: Vec<Block>,
neck_conv1: candle_nn::Conv2d,
neck_ln1: super::LayerNorm2d,
neck_conv2: candle_nn::Conv2d,
neck_ln2: super::LayerNorm2d,
pos_embed: Option<Tensor>,
span: tracing::Span,
}
impl ImageEncoderViT {
#[allow(clippy::too_many_arguments)]
pub fn new(
img_size: usize,
patch_size: usize,
in_chans: usize,
embed_dim: usize,
depth: usize,
num_heads: usize,
out_chans: usize,
qkv_bias: bool,
use_rel_pos: bool,
use_abs_pos: bool,
window_size: usize,
global_attn_indexes: &[usize],
vb: VarBuilder,
) -> Result<Self> {
let patch_embed = PatchEmbed::new(
in_chans,
embed_dim,
patch_size,
patch_size,
0,
vb.pp("patch_embed"),
)?;
let mut blocks = Vec::with_capacity(depth);
let vb_b = vb.pp("blocks");
for i in 0..depth {
let window_size = if global_attn_indexes.contains(&i) {
0
} else {
window_size
};
let block = Block::new(
embed_dim,
num_heads,
qkv_bias,
use_rel_pos,
window_size,
(img_size / patch_size, img_size / patch_size),
vb_b.pp(i),
)?;
blocks.push(block)
}
let neck_conv1 = candle_nn::conv2d_no_bias(
embed_dim,
out_chans,
1,
Default::default(),
vb.pp("neck.0"),
)?;
let neck_ln1 = super::LayerNorm2d::new(out_chans, 1e-6, vb.pp("neck.1"))?;
let cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let neck_conv2 = candle_nn::conv2d_no_bias(out_chans, out_chans, 3, cfg, vb.pp("neck.2"))?;
let neck_ln2 = super::LayerNorm2d::new(out_chans, 1e-6, vb.pp("neck.3"))?;
let pos_embed = if use_abs_pos {
let p = vb.get(
(1, img_size / patch_size, img_size / patch_size, embed_dim),
"pos_embed",
)?;
Some(p)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "image-encoder-vit");
Ok(Self {
patch_embed,
blocks,
neck_conv1,
neck_ln1,
neck_conv2,
neck_ln2,
pos_embed,
span,
})
}
}
impl Module for ImageEncoderViT {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = self.patch_embed.forward(xs)?;
let mut xs = match &self.pos_embed {
Some(pos_embed) => (xs + pos_embed)?,
None => xs,
};
for block in self.blocks.iter() {
xs = block.forward(&xs)?
}
xs.permute((0, 3, 1, 2))?
.apply(&self.neck_conv1)?
.apply(&self.neck_ln1)?
.apply(&self.neck_conv2)?
.apply(&self.neck_ln2)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/segment_anything/prompt_encoder.rs | candle-transformers/src/models/segment_anything/prompt_encoder.rs | use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::VarBuilder;
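// Positional encoding from random spatial frequencies: coordinates in [0, 1] are mapped to
// [-1, 1], projected through a fixed Gaussian matrix, scaled by 2*pi and expanded into
// [sin, cos] features, matching SAM's PositionEmbeddingRandom.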
#[derive(Debug)]
struct PositionEmbeddingRandom {
positional_encoding_gaussian_matrix: Tensor,
}
impl PositionEmbeddingRandom {
fn new(num_pos_feats: usize, vb: VarBuilder) -> Result<Self> {
let positional_encoding_gaussian_matrix =
vb.get((2, num_pos_feats), "positional_encoding_gaussian_matrix")?;
Ok(Self {
positional_encoding_gaussian_matrix,
})
}
fn pe_encoding(&self, coords: &Tensor) -> Result<Tensor> {
let coords = coords.affine(2., -1.)?;
let coords = coords.broadcast_matmul(&self.positional_encoding_gaussian_matrix)?;
let coords = (coords * (2. * std::f64::consts::PI))?;
Tensor::cat(&[coords.sin()?, coords.cos()?], D::Minus1)
}
fn forward(&self, h: usize, w: usize) -> Result<Tensor> {
let device = self.positional_encoding_gaussian_matrix.device();
let x_embed = (Tensor::arange(0u32, w as u32, device)?.to_dtype(DType::F32)? + 0.5)?;
let y_embed = (Tensor::arange(0u32, h as u32, device)?.to_dtype(DType::F32)? + 0.5)?;
let x_embed = (x_embed / w as f64)?
.reshape((1, ()))?
.broadcast_as((h, w))?;
let y_embed = (y_embed / h as f64)?
.reshape(((), 1))?
.broadcast_as((h, w))?;
let coords = Tensor::stack(&[&x_embed, &y_embed], D::Minus1)?;
self.pe_encoding(&coords)?.permute((2, 0, 1))
}
fn forward_with_coords(
&self,
coords_input: &Tensor,
image_size: (usize, usize),
) -> Result<Tensor> {
let coords0 = (coords_input.narrow(D::Minus1, 0, 1)? / image_size.1 as f64)?;
let coords1 = (coords_input.narrow(D::Minus1, 1, 1)? / image_size.0 as f64)?;
let c = coords_input.dim(D::Minus1)?;
let coords_rest = coords_input.narrow(D::Minus1, 2, c - 2)?;
let coords = Tensor::cat(&[&coords0, &coords1, &coords_rest], D::Minus1)?;
self.pe_encoding(&coords)
}
}
#[derive(Debug)]
pub struct PromptEncoder {
pe_layer: PositionEmbeddingRandom,
point_embeddings: Vec<candle_nn::Embedding>,
not_a_point_embed: candle_nn::Embedding,
mask_downscaling_conv1: candle_nn::Conv2d,
mask_downscaling_ln1: super::LayerNorm2d,
mask_downscaling_conv2: candle_nn::Conv2d,
mask_downscaling_ln2: super::LayerNorm2d,
mask_downscaling_conv3: candle_nn::Conv2d,
no_mask_embed: candle_nn::Embedding,
image_embedding_size: (usize, usize),
input_image_size: (usize, usize),
embed_dim: usize,
span: tracing::Span,
}
impl PromptEncoder {
pub fn new(
embed_dim: usize,
image_embedding_size: (usize, usize),
input_image_size: (usize, usize),
mask_in_chans: usize,
vb: VarBuilder,
) -> Result<Self> {
let num_points_embeddings = 4;
let pe_layer = PositionEmbeddingRandom::new(embed_dim / 2, vb.pp("pe_layer"))?;
let not_a_point_embed = candle_nn::embedding(1, embed_dim, vb.pp("not_a_point_embed"))?;
let no_mask_embed = candle_nn::embedding(1, embed_dim, vb.pp("no_mask_embed"))?;
let cfg = candle_nn::Conv2dConfig {
stride: 2,
..Default::default()
};
let mask_downscaling_conv1 =
candle_nn::conv2d(1, mask_in_chans / 4, 2, cfg, vb.pp("mask_downscaling.0"))?;
let mask_downscaling_conv2 = candle_nn::conv2d(
mask_in_chans / 4,
mask_in_chans,
2,
cfg,
vb.pp("mask_downscaling.3"),
)?;
let mask_downscaling_conv3 = candle_nn::conv2d(
mask_in_chans,
embed_dim,
1,
Default::default(),
vb.pp("mask_downscaling.6"),
)?;
let mask_downscaling_ln1 =
super::LayerNorm2d::new(mask_in_chans / 4, 1e-6, vb.pp("mask_downscaling.1"))?;
let mask_downscaling_ln2 =
super::LayerNorm2d::new(mask_in_chans, 1e-6, vb.pp("mask_downscaling.4"))?;
let mut point_embeddings = Vec::with_capacity(num_points_embeddings);
let vb_e = vb.pp("point_embeddings");
for i in 0..num_points_embeddings {
let emb = candle_nn::embedding(1, embed_dim, vb_e.pp(i))?;
point_embeddings.push(emb)
}
let span = tracing::span!(tracing::Level::TRACE, "prompt-encoder");
Ok(Self {
pe_layer,
point_embeddings,
not_a_point_embed,
mask_downscaling_conv1,
mask_downscaling_ln1,
mask_downscaling_conv2,
mask_downscaling_ln2,
mask_downscaling_conv3,
no_mask_embed,
image_embedding_size,
input_image_size,
embed_dim,
span,
})
}
pub fn get_dense_pe(&self) -> Result<Tensor> {
self.pe_layer
.forward(self.image_embedding_size.0, self.image_embedding_size.1)?
.unsqueeze(0)
}
fn embed_masks(&self, masks: &Tensor) -> Result<Tensor> {
masks
.apply(&self.mask_downscaling_conv1)?
.apply(&self.mask_downscaling_ln1)?
.gelu()?
.apply(&self.mask_downscaling_conv2)?
.apply(&self.mask_downscaling_ln2)?
.gelu()?
.apply(&self.mask_downscaling_conv3)
}
fn embed_points(&self, points: &Tensor, labels: &Tensor, pad: bool) -> Result<Tensor> {
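        // Labels follow the SAM convention: 1 = foreground point, 0 = background point,
        // -1 = padding entry (added below when no box prompt accompanies the points).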
let points = (points + 0.5)?;
let dev = points.device();
let (points, labels) = if pad {
let padding_point = Tensor::zeros((points.dim(0)?, 1, 2), DType::F32, dev)?;
let padding_label = (Tensor::ones((labels.dim(0)?, 1), DType::F32, dev)? * (-1f64))?;
let points = Tensor::cat(&[&points, &padding_point], 1)?;
let labels = Tensor::cat(&[labels, &padding_label], 1)?;
(points, labels)
} else {
(points, labels.clone())
};
let point_embedding = self
.pe_layer
.forward_with_coords(&points, self.input_image_size)?;
let labels = labels.unsqueeze(2)?.broadcast_as(point_embedding.shape())?;
let zeros = point_embedding.zeros_like()?;
let point_embedding = labels.lt(0f32)?.where_cond(
&self
.not_a_point_embed
.embeddings()
.broadcast_as(zeros.shape())?,
&point_embedding,
)?;
let labels0 = labels.eq(0f32)?.where_cond(
&self.point_embeddings[0]
.embeddings()
.broadcast_as(zeros.shape())?,
&zeros,
)?;
let point_embedding = (point_embedding + labels0)?;
let labels1 = labels.eq(1f32)?.where_cond(
&self.point_embeddings[1]
.embeddings()
.broadcast_as(zeros.shape())?,
&zeros,
)?;
let point_embedding = (point_embedding + labels1)?;
Ok(point_embedding)
}
fn embed_boxes(&self, boxes: &Tensor) -> Result<Tensor> {
let boxes = (boxes + 0.5)?;
let coords = boxes.reshape(((), 2, 2))?;
let corner_embedding = self
.pe_layer
.forward_with_coords(&coords, self.input_image_size)?;
let ce1 = corner_embedding.i((.., 0))?;
let ce2 = corner_embedding.i((.., 1))?;
let ce1 = (ce1 + self.point_embeddings[2].embeddings())?;
let ce2 = (ce2 + self.point_embeddings[3].embeddings())?;
Tensor::cat(&[&ce1, &ce2], 1)
}
pub fn forward(
&self,
points: Option<(&Tensor, &Tensor)>,
boxes: Option<&Tensor>,
masks: Option<&Tensor>,
) -> Result<(Tensor, Tensor)> {
let _enter = self.span.enter();
let se_points = match points {
Some((coords, labels)) => Some(self.embed_points(coords, labels, boxes.is_none())?),
None => None,
};
let se_boxes = match boxes {
Some(boxes) => Some(self.embed_boxes(boxes)?),
None => None,
};
let sparse_embeddings = match (se_points, se_boxes) {
(Some(se_points), Some(se_boxes)) => Tensor::cat(&[se_points, se_boxes], 1)?,
(Some(se_points), None) => se_points,
(None, Some(se_boxes)) => se_boxes,
(None, None) => {
let dev = self.no_mask_embed.embeddings().device();
Tensor::zeros((1, 0, self.embed_dim), DType::F32, dev)?
}
};
let dense_embeddings = match masks {
None => {
let emb = self.no_mask_embed.embeddings();
emb.reshape((1, (), 1, 1))?.expand((
1,
emb.elem_count(),
self.image_embedding_size.0,
self.image_embedding_size.1,
))?
}
Some(masks) => self.embed_masks(masks)?,
};
Ok((sparse_embeddings, dense_embeddings))
}
}
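// Minimal usage sketch (the variable names are illustrative, not part of this module):
// a single foreground point prompt produces sparse embeddings of shape
// (batch, num_points (+1 pad), embed_dim) and dense embeddings matching the image
// embedding grid:
//
// let (sparse, dense) =
//     prompt_encoder.forward(Some((&point_coords, &point_labels)), None, None)?;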
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/segment_anything/transformer.rs | candle-transformers/src/models/segment_anything/transformer.rs | use candle::{Result, Tensor};
use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder};
#[derive(Debug)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
out_proj: Linear,
num_heads: usize,
}
impl Attention {
fn new(
embedding_dim: usize,
num_heads: usize,
downsample_rate: usize,
vb: VarBuilder,
) -> Result<Self> {
let internal_dim = embedding_dim / downsample_rate;
let q_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("q_proj"))?;
let k_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("k_proj"))?;
let v_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("v_proj"))?;
let out_proj = candle_nn::linear(internal_dim, embedding_dim, vb.pp("out_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
out_proj,
num_heads,
})
}
fn separate_heads(&self, x: &Tensor) -> Result<Tensor> {
let (b, n, c) = x.dims3()?;
x.reshape((b, n, self.num_heads, c / self.num_heads))?
.transpose(1, 2)?
.contiguous()
}
fn recombine_heads(&self, x: &Tensor) -> Result<Tensor> {
let (b, n_heads, n_tokens, c_per_head) = x.dims4()?;
x.transpose(1, 2)?
.reshape((b, n_tokens, n_heads * c_per_head))
}
fn forward(&self, q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let q = self.q_proj.forward(&q.contiguous()?)?;
let k = self.k_proj.forward(&k.contiguous()?)?;
let v = self.v_proj.forward(&v.contiguous()?)?;
let q = self.separate_heads(&q)?;
let k = self.separate_heads(&k)?;
let v = self.separate_heads(&v)?;
let (_, _, _, c_per_head) = q.dims4()?;
let attn = (q.matmul(&k.t()?)? / (c_per_head as f64).sqrt())?;
let attn = candle_nn::ops::softmax_last_dim(&attn)?;
let out = attn.matmul(&v)?;
self.recombine_heads(&out)?.apply(&self.out_proj)
}
}
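// Note: `downsample_rate` shrinks the internal attention width, e.g. with
// `downsample_rate = 2` the q/k/v projections map `embedding_dim` down to
// `embedding_dim / 2` before the heads are split, and `out_proj` maps back up.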
#[derive(Debug)]
struct TwoWayAttentionBlock {
self_attn: Attention,
norm1: LayerNorm,
cross_attn_token_to_image: Attention,
norm2: LayerNorm,
mlp: super::MlpBlock,
norm3: LayerNorm,
norm4: LayerNorm,
cross_attn_image_to_token: Attention,
skip_first_layer_pe: bool,
}
impl TwoWayAttentionBlock {
fn new(
embedding_dim: usize,
num_heads: usize,
mlp_dim: usize,
skip_first_layer_pe: bool,
vb: VarBuilder,
) -> Result<Self> {
let norm1 = layer_norm(embedding_dim, 1e-5, vb.pp("norm1"))?;
let norm2 = layer_norm(embedding_dim, 1e-5, vb.pp("norm2"))?;
let norm3 = layer_norm(embedding_dim, 1e-5, vb.pp("norm3"))?;
let norm4 = layer_norm(embedding_dim, 1e-5, vb.pp("norm4"))?;
let self_attn = Attention::new(embedding_dim, num_heads, 1, vb.pp("self_attn"))?;
let cross_attn_token_to_image = Attention::new(
embedding_dim,
num_heads,
2,
vb.pp("cross_attn_token_to_image"),
)?;
let cross_attn_image_to_token = Attention::new(
embedding_dim,
num_heads,
2,
vb.pp("cross_attn_image_to_token"),
)?;
let mlp = super::MlpBlock::new(
embedding_dim,
mlp_dim,
candle_nn::Activation::Relu,
vb.pp("mlp"),
)?;
Ok(Self {
self_attn,
norm1,
cross_attn_image_to_token,
norm2,
mlp,
norm3,
norm4,
cross_attn_token_to_image,
skip_first_layer_pe,
})
}
fn forward(
&self,
queries: &Tensor,
keys: &Tensor,
query_pe: &Tensor,
key_pe: &Tensor,
) -> Result<(Tensor, Tensor)> {
// Self attention block
let queries = if self.skip_first_layer_pe {
self.self_attn.forward(queries, queries, queries)?
} else {
let q = (queries + query_pe)?;
let attn_out = self.self_attn.forward(&q, &q, queries)?;
(queries + attn_out)?
};
let queries = self.norm1.forward(&queries)?;
// Cross attention block, tokens attending to image embedding
let q = (&queries + query_pe)?;
let k = (keys + key_pe)?;
let attn_out = self.cross_attn_token_to_image.forward(&q, &k, keys)?;
let queries = (&queries + attn_out)?;
let queries = self.norm2.forward(&queries)?;
// MLP block
let mlp_out = self.mlp.forward(&queries);
let queries = (queries + mlp_out)?;
let queries = self.norm3.forward(&queries)?;
// Cross attention block, image embedding attending to tokens
let q = (&queries + query_pe)?;
let k = (keys + key_pe)?;
let attn_out = self.cross_attn_image_to_token.forward(&k, &q, &queries)?;
let keys = (keys + attn_out)?;
let keys = self.norm4.forward(&keys)?;
Ok((queries, keys))
}
}
#[derive(Debug)]
pub struct TwoWayTransformer {
layers: Vec<TwoWayAttentionBlock>,
final_attn_token_to_image: Attention,
norm_final_attn: LayerNorm,
}
impl TwoWayTransformer {
pub fn new(
depth: usize,
embedding_dim: usize,
num_heads: usize,
mlp_dim: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb_l = vb.pp("layers");
let mut layers = Vec::with_capacity(depth);
for i in 0..depth {
let layer =
TwoWayAttentionBlock::new(embedding_dim, num_heads, mlp_dim, i == 0, vb_l.pp(i))?;
layers.push(layer)
}
let final_attn_token_to_image = Attention::new(
embedding_dim,
num_heads,
2,
vb.pp("final_attn_token_to_image"),
)?;
let norm_final_attn = layer_norm(embedding_dim, 1e-5, vb.pp("norm_final_attn"))?;
Ok(Self {
layers,
final_attn_token_to_image,
norm_final_attn,
})
}
pub fn forward(
&self,
image_embedding: &Tensor,
image_pe: &Tensor,
point_embedding: &Tensor,
) -> Result<(Tensor, Tensor)> {
let image_embedding = image_embedding.flatten_from(2)?.permute((0, 2, 1))?;
let image_pe = image_pe.flatten_from(2)?.permute((0, 2, 1))?;
let mut queries = point_embedding.clone();
let mut keys = image_embedding;
for layer in self.layers.iter() {
(queries, keys) = layer.forward(&queries, &keys, point_embedding, &image_pe)?
}
let q = (&queries + point_embedding)?;
let k = (&keys + image_pe)?;
let attn_out = self.final_attn_token_to_image.forward(&q, &k, &keys)?;
let queries = (queries + attn_out)?.apply(&self.norm_final_attn)?;
Ok((queries, keys))
}
}
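// Shape sketch for `TwoWayTransformer::forward` (illustrative sizes): an image
// embedding of (b, c, h, w) is flattened into keys of shape (b, h * w, c), the point
// embeddings act as queries of shape (b, n_tokens, c), and the returned pair is
// (queries, keys) with those same shapes after the two-way attention layers.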
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/chinese_clip/text_model.rs | candle-transformers/src/models/chinese_clip/text_model.rs | //! Chinese contrastive Language-Image Pre-Training
//!
//! Chinese contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - 💻 [Chinese-CLIP](https://github.com/OFA-Sys/Chinese-CLIP)
//! - 💻 [HF](https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/chinese_clip/modeling_chinese_clip.py)
use candle::{DType, Device, IndexOp, Module, Result, Tensor};
use candle_nn as nn;
use super::Activation;
/// Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, or `"relative_key_query"`;
/// standard absolute positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
/// [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
/// For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
/// with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
#[derive(Clone, Debug)]
pub enum PositionEmbeddingType {
Absolute,
RelativeKey,
RelativeKeyQuery,
}
#[derive(Clone, Debug)]
pub struct ChineseClipTextConfig {
pub vocab_size: usize,
pub hidden_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub intermediate_size: usize,
pub hidden_act: Activation,
pub hidden_dropout_prob: f32,
pub attention_probs_dropout_prob: f64,
pub max_position_embeddings: usize,
pub type_vocab_size: usize,
pub initializer_range: f64,
pub initializer_factor: f64,
pub layer_norm_eps: f64,
pub pad_token_id: usize,
pub position_embedding_type: PositionEmbeddingType,
pub use_cache: bool,
}
impl Default for ChineseClipTextConfig {
fn default() -> Self {
Self {
vocab_size: 30522,
hidden_size: 768,
num_hidden_layers: 12,
num_attention_heads: 12,
intermediate_size: 3072,
hidden_act: Activation::Gelu,
hidden_dropout_prob: 0.1,
attention_probs_dropout_prob: 0.1,
max_position_embeddings: 512,
type_vocab_size: 2,
initializer_range: 0.02,
initializer_factor: 1.0,
layer_norm_eps: 1e-12,
pad_token_id: 0,
position_embedding_type: PositionEmbeddingType::Absolute,
use_cache: true,
}
}
}
impl ChineseClipTextConfig {
/// [reference](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/blob/main/config.json)
pub fn clip_vit_base_patch16() -> Self {
Self {
vocab_size: 21128,
hidden_size: 768,
num_hidden_layers: 12,
num_attention_heads: 12,
intermediate_size: 3072,
hidden_act: Activation::Gelu,
hidden_dropout_prob: 0.1,
attention_probs_dropout_prob: 0.1,
max_position_embeddings: 512,
type_vocab_size: 2,
initializer_range: 0.02,
initializer_factor: 1.0,
layer_norm_eps: 1e-12,
pad_token_id: 0,
position_embedding_type: PositionEmbeddingType::Absolute,
use_cache: true,
}
}
}
#[derive(Clone, Debug)]
pub struct ChineseClipTextEmbeddings {
word_embeddings: nn::Embedding,
position_embeddings: nn::Embedding,
token_type_embeddings: nn::Embedding,
layer_norm: nn::LayerNorm,
dropout: nn::Dropout,
position_embedding_type: PositionEmbeddingType,
position_ids: Tensor,
token_type_ids: Tensor,
}
impl ChineseClipTextEmbeddings {
pub fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let word_embeddings = nn::embedding(
config.vocab_size,
config.hidden_size,
var.pp("word_embeddings"),
)?;
let position_embeddings = nn::embedding(
config.max_position_embeddings,
config.hidden_size,
var.pp("position_embeddings"),
)?;
let token_type_embeddings = nn::embedding(
config.type_vocab_size,
config.hidden_size,
var.pp("token_type_embeddings"),
)?;
let layer_norm = nn::layer_norm::<f64>(
config.hidden_size,
config.layer_norm_eps,
var.pp("LayerNorm"),
)?;
let dropout = nn::Dropout::new(config.hidden_dropout_prob);
let position_ids =
Tensor::arange(0u32, config.max_position_embeddings as u32, var.device())?
.unsqueeze(0)?;
let token_type_ids = Tensor::zeros(position_ids.shape(), DType::I64, var.device())?;
Ok(Self {
word_embeddings,
position_embeddings,
token_type_embeddings,
layer_norm,
dropout,
position_embedding_type: config.position_embedding_type.clone(),
position_ids,
token_type_ids,
})
}
fn forward(&self, xs: &Tensor, token_type_ids: Option<&Tensor>) -> Result<Tensor> {
let (_batch_size, seq_length) = xs.dims2()?;
let position_ids = (0..seq_length as u32).collect::<Vec<_>>();
let position_ids = self.position_ids.index_select(
&Tensor::new(&position_ids[..], self.position_ids.device())?,
1,
)?;
let word_embeddings = self.word_embeddings.forward(xs)?;
let token_type_ids = match token_type_ids {
Some(token_type_ids) => token_type_ids,
None => &self.token_type_ids.i((.., 0..seq_length))?,
};
let token_type_ids = token_type_ids.expand(xs.shape())?;
let token_type_embeddings = self.token_type_embeddings.forward(&token_type_ids)?;
let embeddings = (&word_embeddings + token_type_embeddings)?;
let embeddings = match self.position_embedding_type {
PositionEmbeddingType::Absolute => {
let position_embeddings = self.position_embeddings.forward(&position_ids)?;
let position_embeddings = position_embeddings.expand(embeddings.shape())?;
(embeddings + position_embeddings)?
}
_ => embeddings,
};
let embeddings = self.layer_norm.forward(&embeddings)?;
let embeddings = self.dropout.forward(&embeddings, false)?;
Ok(embeddings)
}
}
/// Copied from [`crate::models::bert::BertSelfOutput`] to [`ChineseClipTextSelfOutput`]
#[derive(Clone, Debug)]
struct ChineseClipTextSelfOutput {
dense: nn::Linear,
layer_norm: nn::LayerNorm,
dropout: nn::Dropout,
span: tracing::Span,
}
impl ChineseClipTextSelfOutput {
fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let dense = nn::linear(config.hidden_size, config.hidden_size, var.pp("dense"))?;
let layer_norm = nn::layer_norm(
config.hidden_size,
config.layer_norm_eps,
var.pp("LayerNorm"),
)?;
let dropout = nn::Dropout::new(config.hidden_dropout_prob);
Ok(Self {
dense,
layer_norm,
dropout,
span: tracing::span!(tracing::Level::TRACE, "self-out"),
})
}
fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = self.dropout.forward(&hidden_states, false)?;
self.layer_norm.forward(&(hidden_states + input_tensor)?)
}
}
/// Copied from [`crate::models::bert::BertSelfAttention`] to [`ChineseClipTextSelfAttention`]
#[derive(Clone, Debug)]
struct ChineseClipTextSelfAttention {
query: nn::Linear,
key: nn::Linear,
value: nn::Linear,
dropout: nn::Dropout,
num_attention_heads: usize,
attention_head_size: usize,
span: tracing::Span,
span_softmax: tracing::Span,
}
impl ChineseClipTextSelfAttention {
fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let attention_head_size = config.hidden_size / config.num_attention_heads;
let all_head_size = config.num_attention_heads * attention_head_size;
let dropout = nn::Dropout::new(config.hidden_dropout_prob);
let hidden_size = config.hidden_size;
let query = nn::linear(hidden_size, all_head_size, var.pp("query"))?;
let value = nn::linear(hidden_size, all_head_size, var.pp("value"))?;
let key = nn::linear(hidden_size, all_head_size, var.pp("key"))?;
Ok(Self {
query,
key,
value,
dropout,
num_attention_heads: config.num_attention_heads,
attention_head_size,
span: tracing::span!(tracing::Level::TRACE, "self-attn"),
span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"),
})
}
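/// Reshapes `(batch, seq_len, all_head_size)` into
/// `(batch, num_attention_heads, seq_len, attention_head_size)` so the per-head
/// attention scores can be computed with a single batched matmul.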
fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
let mut new_x_shape = xs.dims().to_vec();
new_x_shape.pop();
new_x_shape.push(self.num_attention_heads);
new_x_shape.push(self.attention_head_size);
let xs = xs.reshape(new_x_shape.as_slice())?.transpose(1, 2)?;
xs.contiguous()
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let query_layer = self.query.forward(hidden_states)?;
let key_layer = self.key.forward(hidden_states)?;
let value_layer = self.value.forward(hidden_states)?;
let query_layer = self.transpose_for_scores(&query_layer)?;
let key_layer = self.transpose_for_scores(&key_layer)?;
let value_layer = self.transpose_for_scores(&value_layer)?;
let attention_scores = query_layer.matmul(&key_layer.t()?)?;
let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?;
let attention_scores = attention_scores.broadcast_add(attention_mask)?;
let attention_probs = {
let _enter_sm = self.span_softmax.enter();
nn::ops::softmax(&attention_scores, candle::D::Minus1)?
};
let attention_probs = self.dropout.forward(&attention_probs, false)?;
let context_layer = attention_probs.matmul(&value_layer)?;
let context_layer = context_layer.transpose(1, 2)?.contiguous()?;
let context_layer = context_layer.flatten_from(candle::D::Minus2)?;
Ok(context_layer)
}
}
/// Copied from [`crate::models::bert::BertAttention`] to [`ChineseClipTextAttention`]
#[derive(Clone, Debug)]
struct ChineseClipTextAttention {
self_attention: ChineseClipTextSelfAttention,
self_output: ChineseClipTextSelfOutput,
span: tracing::Span,
}
impl ChineseClipTextAttention {
fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let self_attention = ChineseClipTextSelfAttention::new(var.pp("self"), config)?;
let self_output = ChineseClipTextSelfOutput::new(var.pp("output"), config)?;
Ok(Self {
self_attention,
self_output,
span: tracing::span!(tracing::Level::TRACE, "attn"),
})
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let self_outputs = self.self_attention.forward(hidden_states, attention_mask)?;
let attention_output = self.self_output.forward(&self_outputs, hidden_states)?;
Ok(attention_output)
}
}
type HiddenActLayer = Activation;
/// Copied from [`crate::models::bert::BertIntermediate`] to [`ChineseClipTextIntermediate`]
#[derive(Clone, Debug)]
struct ChineseClipTextIntermediate {
dense: nn::Linear,
intermediate_act: HiddenActLayer,
span: tracing::Span,
}
impl ChineseClipTextIntermediate {
fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let dense = nn::linear(
config.hidden_size,
config.intermediate_size,
var.pp("dense"),
)?;
Ok(Self {
dense,
intermediate_act: config.hidden_act,
span: tracing::span!(tracing::Level::TRACE, "inter"),
})
}
}
impl Module for ChineseClipTextIntermediate {
fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_states = self.dense.forward(hidden_states)?;
let ys = self.intermediate_act.forward(&hidden_states)?;
Ok(ys)
}
}
/// Copied from [`crate::models::bert::BertOutput`] to [`ChineseClipTextOutput`]
#[derive(Clone, Debug)]
struct ChineseClipTextOutput {
dense: nn::Linear,
layer_norm: nn::LayerNorm,
dropout: nn::Dropout,
span: tracing::Span,
}
impl ChineseClipTextOutput {
fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let dense = nn::linear(
config.intermediate_size,
config.hidden_size,
var.pp("dense"),
)?;
let layer_norm = nn::layer_norm(
config.hidden_size,
config.layer_norm_eps,
var.pp("LayerNorm"),
)?;
let dropout = nn::Dropout::new(config.hidden_dropout_prob);
Ok(Self {
dense,
layer_norm,
dropout,
span: tracing::span!(tracing::Level::TRACE, "out"),
})
}
fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = self.dropout.forward(&hidden_states, false)?;
self.layer_norm.forward(&(hidden_states + input_tensor)?)
}
}
/// Copied from [`crate::models::bert::BertLayer`] to [`ChineseClipTextLayer`]
#[derive(Clone, Debug)]
struct ChineseClipTextLayer {
attention: ChineseClipTextAttention,
intermediate: ChineseClipTextIntermediate,
output: ChineseClipTextOutput,
span: tracing::Span,
}
impl ChineseClipTextLayer {
fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let attention = ChineseClipTextAttention::new(var.pp("attention"), config)?;
let intermediate = ChineseClipTextIntermediate::new(var.pp("intermediate"), config)?;
let output = ChineseClipTextOutput::new(var.pp("output"), config)?;
Ok(Self {
attention,
intermediate,
output,
span: tracing::span!(tracing::Level::TRACE, "layer"),
})
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let attention_output = self.attention.forward(hidden_states, attention_mask)?;
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523
let intermediate_output = self.intermediate.forward(&attention_output)?;
let layer_output = self
.output
.forward(&intermediate_output, &attention_output)?;
Ok(layer_output)
}
}
#[derive(Clone, Debug)]
struct Tanh;
impl Tanh {
pub fn new() -> Self {
Self {}
}
}
impl Module for Tanh {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.tanh()
}
}
#[derive(Clone, Debug)]
struct ChineseClipTextPooler {
dense: nn::Linear,
activation: Tanh,
}
impl ChineseClipTextPooler {
pub fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let dense = nn::linear(config.hidden_size, config.hidden_size, var.pp("dense"))?;
let activation = Tanh::new();
Ok(Self { dense, activation })
}
}
impl Module for ChineseClipTextPooler {
fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
let first_token_tensor = hidden_states.i((.., 0))?;
let pooled_output = self.dense.forward(&first_token_tensor)?;
let pooled_output = self.activation.forward(&pooled_output)?;
Ok(pooled_output)
}
}
#[derive(Clone, Debug)]
struct ChineseClipTextEncoder {
layers: Vec<ChineseClipTextLayer>,
span: tracing::Span,
}
impl ChineseClipTextEncoder {
fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let layers = (0..config.num_hidden_layers)
.map(|index| ChineseClipTextLayer::new(var.pp(format!("layer.{index}")), config))
.collect::<Result<Vec<_>>>()?;
let span = tracing::span!(tracing::Level::TRACE, "encoder");
Ok(ChineseClipTextEncoder { layers, span })
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut hidden_states = hidden_states.clone();
// Use a loop rather than a fold as it's easier to modify when adding debug/...
for layer in self.layers.iter() {
hidden_states = layer.forward(&hidden_states, attention_mask)?
}
Ok(hidden_states)
}
}
#[derive(Clone, Debug)]
pub struct ChineseClipTextTransformer {
embeddings: ChineseClipTextEmbeddings,
encoder: ChineseClipTextEncoder,
pooler: Option<ChineseClipTextPooler>,
pub device: Device,
span: tracing::Span,
}
impl ChineseClipTextTransformer {
pub fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
let embeddings = ChineseClipTextEmbeddings::new(var.pp("embeddings"), config)?;
let encoder = ChineseClipTextEncoder::new(var.pp("encoder"), config)?;
// see: https://github.com/huggingface/transformers/blob/e40bb4845e0eefb52ec1e9cac9c2446ab36aef81/src/transformers/models/chinese_clip/modeling_chinese_clip.py#L1362
// In the original Python version of the code, the pooler is not used, and there are no parameters for the pooler in the weight file.
let pooler = if var.contains_tensor("pooler") {
Some(ChineseClipTextPooler::new(var.pp("pooler"), config)?)
} else {
None
};
Ok(Self {
embeddings,
encoder,
pooler,
device: var.device().clone(),
span: tracing::span!(tracing::Level::TRACE, "model"),
})
}
pub fn forward(
&self,
input_ids: &Tensor,
token_type_ids: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let _enter = self.span.enter();
let embedding_output = self.embeddings.forward(input_ids, token_type_ids)?;
let attention_mask = match attention_mask {
Some(attention_mask) => attention_mask.clone(),
None => input_ids.ones_like()?,
};
let dtype = embedding_output.dtype();
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L995
let attention_mask = get_extended_attention_mask(&attention_mask, dtype)?;
let encoder_outputs = self.encoder.forward(&embedding_output, &attention_mask)?;
let encoder_output = encoder_outputs.i((.., 0, ..))?;
let pooled_output = match &self.pooler {
Some(pooler) => pooler.forward(&encoder_output)?,
None => encoder_output,
};
Ok(pooled_output)
}
}
fn get_extended_attention_mask(attention_mask: &Tensor, dtype: DType) -> Result<Tensor> {
let attention_mask = match attention_mask.rank() {
3 => attention_mask.unsqueeze(1)?,
2 => attention_mask.unsqueeze(1)?.unsqueeze(1)?,
_ => candle::bail!("Wrong shape for input_ids or attention_mask"),
};
let attention_mask = attention_mask.to_dtype(dtype)?;
// The reference Python implementation scales by torch.finfo(dtype).min; f32::MIN plays the same role here.
(attention_mask.ones_like()? - &attention_mask)?.broadcast_mul(
&Tensor::try_from(f32::MIN)?
.to_device(attention_mask.device())?
.to_dtype(dtype)?,
)
}
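// Worked example for `get_extended_attention_mask`: a padding mask of [1, 1, 0]
// (rank 2, shape (batch, seq)) is unsqueezed to (batch, 1, 1, seq) and turned into an
// additive bias of [0, 0, f32::MIN], which the broadcast_add in self-attention uses to
// suppress the padded position after the softmax.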
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/chinese_clip/vision_model.rs | candle-transformers/src/models/chinese_clip/vision_model.rs | //! Chinese contrastive Language-Image Pre-Training
//!
//! Chinese contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - 💻 [Chinese-CLIP](https://github.com/OFA-Sys/Chinese-CLIP)
//! - 💻 [GH](https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/chinese_clip/modeling_chinese_clip.py)
use candle::{Context, DType, IndexOp, Module, Result, Shape, Tensor, D};
use candle_nn as nn;
use super::{Activation, EncoderConfig};
#[derive(Clone, Debug)]
pub struct ChineseClipVisionConfig {
pub hidden_size: usize,
pub intermediate_size: usize,
pub projection_dim: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_channels: usize,
pub image_size: usize,
pub patch_size: usize,
pub hidden_act: Activation,
pub layer_norm_eps: f64,
pub attention_dropout: f32,
pub initializer_range: f32,
pub initializer_factor: f32,
}
impl Default for ChineseClipVisionConfig {
fn default() -> Self {
ChineseClipVisionConfig {
hidden_size: 768,
intermediate_size: 3072,
projection_dim: 512,
num_hidden_layers: 12,
num_attention_heads: 12,
num_channels: 3,
image_size: 224,
patch_size: 32,
hidden_act: Activation::QuickGelu,
layer_norm_eps: 1e-5,
attention_dropout: 0.0,
initializer_range: 0.02,
initializer_factor: 1.0,
}
}
}
impl ChineseClipVisionConfig {
/// [reference](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/blob/main/config.json)
pub fn clip_vit_base_patch16() -> Self {
Self {
hidden_size: 768,
intermediate_size: 3072,
projection_dim: 512,
num_hidden_layers: 12,
num_attention_heads: 12,
num_channels: 3,
image_size: 224,
patch_size: 16,
hidden_act: Activation::QuickGelu,
layer_norm_eps: 1e-5,
attention_dropout: 0.0,
initializer_range: 0.02,
initializer_factor: 1.0,
}
}
}
#[derive(Clone, Debug)]
pub struct ChineseClipVisionEmbeddings {
patch_embedding: nn::Conv2d,
position_ids: Tensor,
class_embedding: Tensor,
position_embedding: nn::Embedding,
}
impl ChineseClipVisionEmbeddings {
pub fn new(var: nn::VarBuilder, config: &ChineseClipVisionConfig) -> Result<Self> {
let embed_dim = config.hidden_size;
// originally nn.Parameter
let class_embedding = if var.contains_tensor("class_embedding") {
var.get(embed_dim, "class_embedding")?
} else {
Tensor::randn(0f32, 1f32, embed_dim, var.device())?
};
let num_patches = (config.image_size / config.patch_size).pow(2);
let num_positions = num_patches + 1;
let position_ids = Tensor::arange(0, num_positions as i64, var.device())?;
let conv2dconfig = nn::Conv2dConfig {
stride: config.patch_size,
..Default::default()
};
let position_embedding =
nn::embedding(num_positions, embed_dim, var.pp("position_embedding"))?;
let patch_embedding = nn::conv2d_no_bias(
config.num_channels,
embed_dim,
config.patch_size,
conv2dconfig,
var.pp("patch_embedding"),
)?;
Ok(Self {
patch_embedding,
position_ids,
class_embedding,
position_embedding,
})
}
}
impl Module for ChineseClipVisionEmbeddings {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let batch_size = xs.shape().dims();
let patch_embeds = self
.patch_embedding
.forward(xs)?
.flatten_from(2)?
.transpose(1, 2)?;
let shape = Shape::from((batch_size[0], 1, self.class_embedding.dim(D::Minus1)?));
let class_embeds = self.class_embedding.expand(shape)?;
let embeddings = Tensor::cat(&[class_embeds, patch_embeds], 1)?;
let position_embedding = self.position_embedding.forward(&self.position_ids)?;
embeddings.broadcast_add(&position_embedding)
}
}
#[derive(Clone, Debug)]
struct ChineseClipVisionAttention {
k_proj: nn::Linear,
v_proj: nn::Linear,
q_proj: nn::Linear,
out_proj: nn::Linear,
head_dim: usize,
scale: f64,
num_attention_heads: usize,
}
impl ChineseClipVisionAttention {
fn new(var: nn::VarBuilder, config: &EncoderConfig) -> Result<Self> {
let embed_dim = config.embed_dim();
let num_attention_heads = config.num_attention_heads();
let k_proj = nn::linear(embed_dim, embed_dim, var.pp("k_proj"))?;
let v_proj = nn::linear(embed_dim, embed_dim, var.pp("v_proj"))?;
let q_proj = nn::linear(embed_dim, embed_dim, var.pp("q_proj"))?;
let out_proj = nn::linear(embed_dim, embed_dim, var.pp("out_proj"))?;
let head_dim = embed_dim / num_attention_heads;
let scale = (head_dim as f64).powf(-0.5);
Ok(ChineseClipVisionAttention {
k_proj,
v_proj,
q_proj,
out_proj,
head_dim,
scale,
num_attention_heads,
})
}
fn shape(&self, xs: &Tensor, seq_len: usize, bsz: usize) -> Result<Tensor> {
xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()
}
fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> {
let in_dtype = xs.dtype();
let (bsz, seq_len, embed_dim) = xs.dims3()?;
let proj_shape = (bsz * self.num_attention_heads, seq_len, self.head_dim);
let query_states = self
.shape(&(self.q_proj.forward(xs)? * self.scale)?, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let key_states = self
.shape(&self.k_proj.forward(xs)?, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let value_states = self
.shape(&self.v_proj.forward(xs)?, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?;
let src_len = key_states.dim(1)?;
let attn_weights = if let Some(causal_attention_mask) = causal_attention_mask {
attn_weights
.reshape((bsz, self.num_attention_heads, seq_len, src_len))?
.broadcast_add(causal_attention_mask)?
.reshape((bsz * self.num_attention_heads, seq_len, src_len))?
} else {
attn_weights
};
let attn_weights = nn::ops::softmax(&attn_weights, D::Minus1)?;
let attn_output = attn_weights.matmul(&value_states)?.to_dtype(in_dtype)?;
let attn_output = attn_output
.reshape((bsz, self.num_attention_heads, seq_len, self.head_dim))?
.transpose(1, 2)?
.reshape((bsz, seq_len, embed_dim))?;
self.out_proj.forward(&attn_output)
}
}
#[derive(Clone, Debug)]
struct ChineseClipVisionMlp {
fc1: nn::Linear,
fc2: nn::Linear,
activation: Activation,
}
impl ChineseClipVisionMlp {
fn new(var: nn::VarBuilder, config: &EncoderConfig) -> Result<Self> {
let fc1 = nn::linear(
config.embed_dim(),
config.intermediate_size(),
var.pp("fc1"),
)?;
let fc2 = nn::linear(
config.intermediate_size(),
config.embed_dim(),
var.pp("fc2"),
)?;
Ok(ChineseClipVisionMlp {
fc1,
fc2,
activation: config.activation(),
})
}
}
impl ChineseClipVisionMlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.fc1.forward(xs)?;
self.fc2.forward(&self.activation.forward(&xs)?)
}
}
#[derive(Clone, Debug)]
struct ChineseClipVisionEncoderLayer {
self_attn: ChineseClipVisionAttention,
layer_norm1: nn::LayerNorm,
mlp: ChineseClipVisionMlp,
layer_norm2: nn::LayerNorm,
}
impl ChineseClipVisionEncoderLayer {
fn new(var: nn::VarBuilder, config: &EncoderConfig) -> Result<Self> {
let self_attn = ChineseClipVisionAttention::new(var.pp("self_attn"), config)?;
let layer_norm1 = nn::layer_norm(
config.embed_dim(),
config.layer_norm_eps(),
var.pp("layer_norm1"),
)?;
let mlp = ChineseClipVisionMlp::new(var.pp("mlp"), config)?;
let layer_norm2 = nn::layer_norm(
config.embed_dim(),
config.layer_norm_eps(),
var.pp("layer_norm2"),
)?;
Ok(ChineseClipVisionEncoderLayer {
self_attn,
layer_norm1,
mlp,
layer_norm2,
})
}
fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> {
let residual = xs;
let xs = self.layer_norm1.forward(xs)?;
let xs = self.self_attn.forward(&xs, causal_attention_mask)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self.layer_norm2.forward(&xs)?;
let xs = self.mlp.forward(&xs)?;
xs + residual
}
}
#[derive(Clone, Debug)]
pub struct ChineseClipVisionEncoder {
layers: Vec<ChineseClipVisionEncoderLayer>,
}
impl ChineseClipVisionEncoder {
pub fn new(var: nn::VarBuilder, config: &EncoderConfig) -> Result<Self> {
let vs = var.pp("layers");
let mut layers: Vec<ChineseClipVisionEncoderLayer> = Vec::new();
for index in 0..config.num_hidden_layers() {
let layer = ChineseClipVisionEncoderLayer::new(vs.pp(index.to_string()), config)?;
layers.push(layer)
}
Ok(ChineseClipVisionEncoder { layers })
}
pub fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs, causal_attention_mask)?;
}
Ok(xs)
}
// required by LLaVA
pub fn output_hidden_states(
&self,
xs: &Tensor,
causal_attention_mask: Option<&Tensor>,
) -> Result<Vec<Tensor>> {
let mut xs = xs.clone();
let mut hidden_states = Vec::new();
for layer in self.layers.iter() {
xs = layer.forward(&xs, causal_attention_mask)?;
hidden_states.push(xs.clone());
}
Ok(hidden_states)
}
}
#[derive(Clone, Debug)]
pub struct ChineseClipVisionTransformer {
embeddings: ChineseClipVisionEmbeddings,
encoder: ChineseClipVisionEncoder,
pre_layer_norm: nn::LayerNorm,
final_layer_norm: nn::LayerNorm,
}
impl ChineseClipVisionTransformer {
pub fn new(var: nn::VarBuilder, config: &ChineseClipVisionConfig) -> Result<Self> {
let embed_dim = config.hidden_size;
let embeddings = ChineseClipVisionEmbeddings::new(var.pp("embeddings"), config)?;
let pre_layer_norm =
nn::layer_norm(embed_dim, config.layer_norm_eps, var.pp("pre_layrnorm"))?;
let encoder = ChineseClipVisionEncoder::new(
var.pp("encoder"),
&EncoderConfig::Vision(config.clone()),
)?;
let final_layer_norm =
nn::layer_norm(embed_dim, config.layer_norm_eps, var.pp("post_layernorm"))?;
Ok(Self {
embeddings,
encoder,
final_layer_norm,
pre_layer_norm,
})
}
// required by LLaVA
pub fn output_hidden_states(&self, pixel_values: &Tensor) -> Result<Vec<Tensor>> {
let hidden_states = pixel_values
.apply(&self.embeddings)?
.apply(&self.pre_layer_norm)?;
let mut result = self.encoder.output_hidden_states(&hidden_states, None)?;
let encoder_outputs = result.last().context("no last")?;
let pooled_output = encoder_outputs.i((.., 0, ..))?;
result.push(self.final_layer_norm.forward(&pooled_output)?.clone());
Ok(result)
}
}
impl Module for ChineseClipVisionTransformer {
fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> {
let hidden_states = pixel_values
.apply(&self.embeddings)?
.apply(&self.pre_layer_norm)?;
let encoder_outputs = self.encoder.forward(&hidden_states, None)?;
// reference: https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L787
let pooled_output = encoder_outputs.i((.., 0, ..))?;
self.final_layer_norm.forward(&pooled_output)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/chinese_clip/mod.rs | candle-transformers/src/models/chinese_clip/mod.rs | //! Chinese contrastive Language-Image Pre-Training
//!
//! Chinese contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - 💻 [GH Link](https://github.com/OFA-Sys/Chinese-CLIP)
//! - 💻 Transformers Python [reference implementation](https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/chinese_clip/modeling_chinese_clip.py)
//!
use candle::{Module, Result, Tensor, D};
use candle_nn as nn;
use text_model::ChineseClipTextTransformer;
use vision_model::ChineseClipVisionTransformer;
pub mod text_model;
pub mod vision_model;
#[derive(Debug, Clone, Copy)]
pub enum Activation {
QuickGelu,
Gelu,
GeluNew,
Relu,
}
impl From<String> for Activation {
fn from(value: String) -> Self {
match value.as_str() {
"quick_gelu" => Activation::QuickGelu,
"gelu" => Activation::Gelu,
"gelu_new" => Activation::GeluNew,
"relu" => Activation::Relu,
_ => panic!("Invalid activation function: {value}"),
}
}
}
impl Module for Activation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Activation::QuickGelu => xs * nn::ops::sigmoid(&(xs * 1.702f64)?)?,
Activation::Gelu => xs.gelu_erf(),
Activation::GeluNew => xs.gelu(),
Activation::Relu => xs.relu(),
}
}
}
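// QuickGelu is the sigmoid approximation used by the original CLIP code:
// quick_gelu(x) = x * sigmoid(1.702 * x), which is what the branch above computes.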
#[derive(Clone, Debug)]
pub struct ChineseClipConfig {
pub text_config: text_model::ChineseClipTextConfig,
pub vision_config: vision_model::ChineseClipVisionConfig,
pub projection_dim: usize,
pub logit_scale_init_value: f32,
pub image_size: usize,
}
impl ChineseClipConfig {
/// reference: https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/blob/main/config.json
pub fn clip_vit_base_patch16() -> Self {
let text_config = text_model::ChineseClipTextConfig::clip_vit_base_patch16();
let vision_config = vision_model::ChineseClipVisionConfig::clip_vit_base_patch16();
Self {
text_config,
vision_config,
projection_dim: 512,
logit_scale_init_value: 2.6592,
image_size: 512,
}
}
}
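// Construction sketch (assumes `vb` is a `candle_nn::VarBuilder` over the checkpoint
// weights; the variable names are illustrative):
//
// let config = ChineseClipConfig::clip_vit_base_patch16();
// let model = ChineseClipModel::new(vb, &config)?;
// let (logits_per_text, logits_per_image) =
//     model.forward(&pixel_values, &input_ids, None, None)?;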
#[derive(Clone, Debug)]
pub enum EncoderConfig {
Text(text_model::ChineseClipTextConfig),
Vision(vision_model::ChineseClipVisionConfig),
}
impl EncoderConfig {
pub fn embed_dim(&self) -> usize {
match self {
Self::Text(c) => c.hidden_size,
Self::Vision(c) => c.hidden_size,
}
}
pub fn num_attention_heads(&self) -> usize {
match self {
Self::Text(c) => c.num_attention_heads,
Self::Vision(c) => c.num_attention_heads,
}
}
pub fn intermediate_size(&self) -> usize {
match self {
Self::Text(c) => c.intermediate_size,
Self::Vision(c) => c.intermediate_size,
}
}
pub fn num_hidden_layers(&self) -> usize {
match self {
Self::Text(c) => c.num_hidden_layers,
Self::Vision(c) => c.num_hidden_layers,
}
}
pub fn activation(&self) -> Activation {
match self {
Self::Text(c) => c.hidden_act,
Self::Vision(c) => c.hidden_act,
}
}
pub fn layer_norm_eps(&self) -> f64 {
match self {
Self::Text(c) => c.layer_norm_eps,
Self::Vision(c) => c.layer_norm_eps,
}
}
}
#[derive(Clone, Debug)]
pub struct ChineseClipModel {
text_model: ChineseClipTextTransformer,
vision_model: ChineseClipVisionTransformer,
visual_projection: nn::Linear,
text_projection: nn::Linear,
logit_scale: Tensor,
}
impl ChineseClipModel {
pub fn new(vs: nn::VarBuilder, c: &ChineseClipConfig) -> Result<Self> {
let text_model = ChineseClipTextTransformer::new(vs.pp("text_model"), &c.text_config)?;
let vision_model =
ChineseClipVisionTransformer::new(vs.pp("vision_model"), &c.vision_config)?;
let vision_embed_dim = c.vision_config.hidden_size;
let vision_projection = nn::linear_no_bias(
vision_embed_dim,
c.projection_dim,
vs.pp("visual_projection"),
)?;
let text_embed_dim = c.text_config.hidden_size;
let text_projection =
nn::linear_no_bias(text_embed_dim, c.projection_dim, vs.pp("text_projection"))?;
let logit_scale = if vs.contains_tensor("logit_scale") {
vs.get(&[], "logit_scale")?
} else {
Tensor::new(&[c.logit_scale_init_value], vs.device())?
};
Ok(Self {
text_model,
vision_model,
visual_projection: vision_projection,
text_projection,
logit_scale,
})
}
pub fn get_text_features(
&self,
input_ids: &Tensor,
token_type_ids: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let output = self
.text_model
.forward(input_ids, token_type_ids, attention_mask)?
.contiguous()?;
self.text_projection.forward(&output)
}
pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> {
pixel_values
.apply(&self.vision_model)?
.apply(&self.visual_projection)
}
pub fn forward(
&self,
pixel_values: &Tensor,
input_ids: &Tensor,
token_type_ids: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<(Tensor, Tensor)> {
let image_features = self.get_image_features(pixel_values)?;
let text_features = self.get_text_features(input_ids, token_type_ids, attention_mask)?;
let image_features_normalized = div_l2_norm(&image_features)?;
let text_features_normalized = div_l2_norm(&text_features)?;
let logits_per_text = text_features_normalized.matmul(&image_features_normalized.t()?)?;
let logit_scale = self.logit_scale.exp()?;
let logits_per_text = logits_per_text.broadcast_mul(&logit_scale)?;
let logits_per_image = logits_per_text.t()?;
Ok((logits_per_text, logits_per_image))
}
}
pub fn div_l2_norm(v: &Tensor) -> Result<Tensor> {
let l2_norm = v.sqr()?.sum_keepdim(D::Minus1)?.sqrt()?;
v.broadcast_div(&l2_norm)
}
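// `div_l2_norm` rescales each row to unit L2 norm, so the matmul of the normalized
// text and image features in `forward` yields cosine similarities before the logit
// scale is applied.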
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/flux/sampling.rs | candle-transformers/src/models/flux/sampling.rs | use candle::{Device, Result, Tensor};
pub fn get_noise(
num_samples: usize,
height: usize,
width: usize,
device: &Device,
) -> Result<Tensor> {
let height = height.div_ceil(16) * 2;
let width = width.div_ceil(16) * 2;
Tensor::randn(0f32, 1., (num_samples, 16, height, width), device)
}
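// Shape note: the latent height/width are `div_ceil(size, 16) * 2`, i.e. roughly the
// pixel size divided by 8, e.g. a 1024x1024 request yields noise of shape
// (num_samples, 16, 128, 128).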
#[derive(Debug, Clone)]
pub struct State {
pub img: Tensor,
pub img_ids: Tensor,
pub txt: Tensor,
pub txt_ids: Tensor,
pub vec: Tensor,
}
impl State {
pub fn new(t5_emb: &Tensor, clip_emb: &Tensor, img: &Tensor) -> Result<Self> {
let dtype = img.dtype();
let (bs, c, h, w) = img.dims4()?;
let dev = img.device();
let img = img.reshape((bs, c, h / 2, 2, w / 2, 2))?; // (b, c, h, ph, w, pw)
let img = img.permute((0, 2, 4, 1, 3, 5))?; // (b, h, w, c, ph, pw)
let img = img.reshape((bs, h / 2 * w / 2, c * 4))?;
let img_ids = Tensor::stack(
&[
Tensor::full(0u32, (h / 2, w / 2), dev)?,
Tensor::arange(0u32, h as u32 / 2, dev)?
.reshape(((), 1))?
.broadcast_as((h / 2, w / 2))?,
Tensor::arange(0u32, w as u32 / 2, dev)?
.reshape((1, ()))?
.broadcast_as((h / 2, w / 2))?,
],
2,
)?
.to_dtype(dtype)?;
let img_ids = img_ids.reshape((1, h / 2 * w / 2, 3))?;
let img_ids = img_ids.repeat((bs, 1, 1))?;
let txt = t5_emb.repeat(bs)?;
let txt_ids = Tensor::zeros((bs, txt.dim(1)?, 3), dtype, dev)?;
let vec = clip_emb.repeat(bs)?;
Ok(Self {
img,
img_ids,
txt,
txt_ids,
vec,
})
}
}
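// Packing sketch for `State::new` (illustrative sizes): a latent of (1, 16, 128, 128)
// is rearranged into 2x2 patches, giving `img` of shape (1, 64 * 64, 16 * 4) =
// (1, 4096, 64) and matching `img_ids` of shape (1, 4096, 3) that carry the
// (0, row, col) position of each patch.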
fn time_shift(mu: f64, sigma: f64, t: f64) -> f64 {
let e = mu.exp();
e / (e + (1. / t - 1.).powf(sigma))
}
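// `time_shift` computes exp(mu) / (exp(mu) + (1/t - 1)^sigma): the endpoints stay
// fixed (t = 1 maps to 1, t -> 0 maps to 0) while `mu` moves the midpoint, e.g. with
// sigma = 1 the value at t = 0.5 is sigmoid(mu).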
/// `shift` is a triple `(image_seq_len, base_shift, max_shift)`.
pub fn get_schedule(num_steps: usize, shift: Option<(usize, f64, f64)>) -> Vec<f64> {
let timesteps: Vec<f64> = (0..=num_steps)
.map(|v| v as f64 / num_steps as f64)
.rev()
.collect();
match shift {
None => timesteps,
Some((image_seq_len, y1, y2)) => {
let (x1, x2) = (256., 4096.);
let m = (y2 - y1) / (x2 - x1);
let b = y1 - m * x1;
let mu = m * image_seq_len as f64 + b;
timesteps
.into_iter()
.map(|v| time_shift(mu, 1., v))
.collect()
}
}
}
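// Worked example: `get_schedule(4, None)` returns [1.0, 0.75, 0.5, 0.25, 0.0]. With
// `shift = Some((image_seq_len, base_shift, max_shift))`, `mu` is linearly
// interpolated between `base_shift` at 256 image tokens and `max_shift` at 4096
// tokens, and each timestep is warped through `time_shift(mu, 1.0, t)`.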
pub fn unpack(xs: &Tensor, height: usize, width: usize) -> Result<Tensor> {
let (b, _h_w, c_ph_pw) = xs.dims3()?;
let height = height.div_ceil(16);
let width = width.div_ceil(16);
xs.reshape((b, height, width, c_ph_pw / 4, 2, 2))? // (b, h, w, c, ph, pw)
.permute((0, 3, 1, 4, 2, 5))? // (b, c, h, ph, w, pw)
.reshape((b, c_ph_pw / 4, height * 2, width * 2))
}
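// `unpack` is the inverse of the patch packing done in `State::new`: it turns the
// (b, (h/2)*(w/2), c*4) sequence produced by the model back into a (b, c, h, w)
// latent, where h and w are the latent height/width (the pixel size divided by 8).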
#[allow(clippy::too_many_arguments)]
pub fn denoise<M: super::WithForward>(
model: &M,
img: &Tensor,
img_ids: &Tensor,
txt: &Tensor,
txt_ids: &Tensor,
vec_: &Tensor,
timesteps: &[f64],
guidance: f64,
) -> Result<Tensor> {
let b_sz = img.dim(0)?;
let dev = img.device();
let guidance = Tensor::full(guidance as f32, b_sz, dev)?;
let mut img = img.clone();
for window in timesteps.windows(2) {
let (t_curr, t_prev) = match window {
[a, b] => (a, b),
_ => continue,
};
let t_vec = Tensor::full(*t_curr as f32, b_sz, dev)?;
let pred = model.forward(&img, img_ids, txt, txt_ids, &t_vec, vec_, Some(&guidance))?;
img = (img + pred * (t_prev - t_curr))?
}
Ok(img)
}
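// Each loop iteration above is one Euler step of the flow: `t_prev` is smaller than
// `t_curr`, so `img + pred * (t_prev - t_curr)` moves the latent along the predicted
// velocity from the current timestep towards t = 0.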
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/flux/autoencoder.rs | candle-transformers/src/models/flux/autoencoder.rs | use candle::{Result, Tensor, D};
use candle_nn::{conv2d, group_norm, Conv2d, GroupNorm, VarBuilder};
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/modules/autoencoder.py#L9
#[derive(Debug, Clone)]
pub struct Config {
pub resolution: usize,
pub in_channels: usize,
pub ch: usize,
pub out_ch: usize,
pub ch_mult: Vec<usize>,
pub num_res_blocks: usize,
pub z_channels: usize,
pub scale_factor: f64,
pub shift_factor: f64,
}
impl Config {
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L47
pub fn dev() -> Self {
Self {
resolution: 256,
in_channels: 3,
ch: 128,
out_ch: 3,
ch_mult: vec![1, 2, 4, 4],
num_res_blocks: 2,
z_channels: 16,
scale_factor: 0.3611,
shift_factor: 0.1159,
}
}
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L79
pub fn schnell() -> Self {
Self {
resolution: 256,
in_channels: 3,
ch: 128,
out_ch: 3,
ch_mult: vec![1, 2, 4, 4],
num_res_blocks: 2,
z_channels: 16,
scale_factor: 0.3611,
shift_factor: 0.1159,
}
}
}
fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let dim = q.dim(D::Minus1)?;
let scale_factor = 1.0 / (dim as f64).sqrt();
let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v)
}
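// Standard softmax(q k^T / sqrt(d)) v attention; `AttnBlock` below applies it over
// the flattened h*w spatial positions of the feature map, with a single head.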
#[derive(Debug, Clone)]
struct AttnBlock {
q: Conv2d,
k: Conv2d,
v: Conv2d,
proj_out: Conv2d,
norm: GroupNorm,
}
impl AttnBlock {
fn new(in_c: usize, vb: VarBuilder) -> Result<Self> {
let q = conv2d(in_c, in_c, 1, Default::default(), vb.pp("q"))?;
let k = conv2d(in_c, in_c, 1, Default::default(), vb.pp("k"))?;
let v = conv2d(in_c, in_c, 1, Default::default(), vb.pp("v"))?;
let proj_out = conv2d(in_c, in_c, 1, Default::default(), vb.pp("proj_out"))?;
let norm = group_norm(32, in_c, 1e-6, vb.pp("norm"))?;
Ok(Self {
q,
k,
v,
proj_out,
norm,
})
}
}
impl candle::Module for AttnBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let init_xs = xs;
let xs = xs.apply(&self.norm)?;
let q = xs.apply(&self.q)?;
let k = xs.apply(&self.k)?;
let v = xs.apply(&self.v)?;
let (b, c, h, w) = q.dims4()?;
let q = q.flatten_from(2)?.t()?.unsqueeze(1)?;
let k = k.flatten_from(2)?.t()?.unsqueeze(1)?;
let v = v.flatten_from(2)?.t()?.unsqueeze(1)?;
let xs = scaled_dot_product_attention(&q, &k, &v)?;
let xs = xs.squeeze(1)?.t()?.reshape((b, c, h, w))?;
xs.apply(&self.proj_out)? + init_xs
}
}
#[derive(Debug, Clone)]
struct ResnetBlock {
norm1: GroupNorm,
conv1: Conv2d,
norm2: GroupNorm,
conv2: Conv2d,
nin_shortcut: Option<Conv2d>,
}
impl ResnetBlock {
fn new(in_c: usize, out_c: usize, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let norm1 = group_norm(32, in_c, 1e-6, vb.pp("norm1"))?;
let conv1 = conv2d(in_c, out_c, 3, conv_cfg, vb.pp("conv1"))?;
let norm2 = group_norm(32, out_c, 1e-6, vb.pp("norm2"))?;
let conv2 = conv2d(out_c, out_c, 3, conv_cfg, vb.pp("conv2"))?;
let nin_shortcut = if in_c == out_c {
None
} else {
Some(conv2d(
in_c,
out_c,
1,
Default::default(),
vb.pp("nin_shortcut"),
)?)
};
Ok(Self {
norm1,
conv1,
norm2,
conv2,
nin_shortcut,
})
}
}
impl candle::Module for ResnetBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let h = xs
.apply(&self.norm1)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv1)?
.apply(&self.norm2)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv2)?;
match self.nin_shortcut.as_ref() {
None => xs + h,
Some(c) => xs.apply(c)? + h,
}
}
}
#[derive(Debug, Clone)]
struct Downsample {
conv: Conv2d,
}
impl Downsample {
fn new(in_c: usize, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
stride: 2,
..Default::default()
};
let conv = conv2d(in_c, in_c, 3, conv_cfg, vb.pp("conv"))?;
Ok(Self { conv })
}
}
impl candle::Module for Downsample {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.pad_with_zeros(D::Minus1, 0, 1)?;
let xs = xs.pad_with_zeros(D::Minus2, 0, 1)?;
xs.apply(&self.conv)
}
}
#[derive(Debug, Clone)]
struct Upsample {
conv: Conv2d,
}
impl Upsample {
fn new(in_c: usize, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let conv = conv2d(in_c, in_c, 3, conv_cfg, vb.pp("conv"))?;
Ok(Self { conv })
}
}
impl candle::Module for Upsample {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (_, _, h, w) = xs.dims4()?;
xs.upsample_nearest2d(h * 2, w * 2)?.apply(&self.conv)
}
}
#[derive(Debug, Clone)]
struct DownBlock {
block: Vec<ResnetBlock>,
downsample: Option<Downsample>,
}
#[derive(Debug, Clone)]
pub struct Encoder {
conv_in: Conv2d,
mid_block_1: ResnetBlock,
mid_attn_1: AttnBlock,
mid_block_2: ResnetBlock,
norm_out: GroupNorm,
conv_out: Conv2d,
down: Vec<DownBlock>,
}
impl Encoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let mut block_in = cfg.ch;
let conv_in = conv2d(cfg.in_channels, block_in, 3, conv_cfg, vb.pp("conv_in"))?;
let mut down = Vec::with_capacity(cfg.ch_mult.len());
let vb_d = vb.pp("down");
for (i_level, ch_mult) in cfg.ch_mult.iter().enumerate() {
let mut block = Vec::with_capacity(cfg.num_res_blocks);
let vb_d = vb_d.pp(i_level);
let vb_b = vb_d.pp("block");
let in_ch_mult = if i_level == 0 {
1
} else {
cfg.ch_mult[i_level - 1]
};
block_in = cfg.ch * in_ch_mult;
let block_out = cfg.ch * ch_mult;
for i_block in 0..cfg.num_res_blocks {
let b = ResnetBlock::new(block_in, block_out, vb_b.pp(i_block))?;
block.push(b);
block_in = block_out;
}
let downsample = if i_level != cfg.ch_mult.len() - 1 {
Some(Downsample::new(block_in, vb_d.pp("downsample"))?)
} else {
None
};
let block = DownBlock { block, downsample };
down.push(block)
}
let mid_block_1 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_1"))?;
let mid_attn_1 = AttnBlock::new(block_in, vb.pp("mid.attn_1"))?;
let mid_block_2 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_2"))?;
let conv_out = conv2d(block_in, 2 * cfg.z_channels, 3, conv_cfg, vb.pp("conv_out"))?;
let norm_out = group_norm(32, block_in, 1e-6, vb.pp("norm_out"))?;
Ok(Self {
conv_in,
mid_block_1,
mid_attn_1,
mid_block_2,
norm_out,
conv_out,
down,
})
}
}
impl candle_nn::Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut h = xs.apply(&self.conv_in)?;
for block in self.down.iter() {
for b in block.block.iter() {
h = h.apply(b)?
}
if let Some(ds) = block.downsample.as_ref() {
h = h.apply(ds)?
}
}
h.apply(&self.mid_block_1)?
.apply(&self.mid_attn_1)?
.apply(&self.mid_block_2)?
.apply(&self.norm_out)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv_out)
}
}
#[derive(Debug, Clone)]
struct UpBlock {
block: Vec<ResnetBlock>,
upsample: Option<Upsample>,
}
#[derive(Debug, Clone)]
pub struct Decoder {
conv_in: Conv2d,
mid_block_1: ResnetBlock,
mid_attn_1: AttnBlock,
mid_block_2: ResnetBlock,
norm_out: GroupNorm,
conv_out: Conv2d,
up: Vec<UpBlock>,
}
impl Decoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let mut block_in = cfg.ch * cfg.ch_mult.last().unwrap_or(&1);
let conv_in = conv2d(cfg.z_channels, block_in, 3, conv_cfg, vb.pp("conv_in"))?;
let mid_block_1 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_1"))?;
let mid_attn_1 = AttnBlock::new(block_in, vb.pp("mid.attn_1"))?;
let mid_block_2 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_2"))?;
let mut up = Vec::with_capacity(cfg.ch_mult.len());
let vb_u = vb.pp("up");
for (i_level, ch_mult) in cfg.ch_mult.iter().enumerate().rev() {
let block_out = cfg.ch * ch_mult;
let vb_u = vb_u.pp(i_level);
let vb_b = vb_u.pp("block");
let mut block = Vec::with_capacity(cfg.num_res_blocks + 1);
for i_block in 0..=cfg.num_res_blocks {
let b = ResnetBlock::new(block_in, block_out, vb_b.pp(i_block))?;
block.push(b);
block_in = block_out;
}
let upsample = if i_level != 0 {
Some(Upsample::new(block_in, vb_u.pp("upsample"))?)
} else {
None
};
let block = UpBlock { block, upsample };
up.push(block)
}
up.reverse();
let norm_out = group_norm(32, block_in, 1e-6, vb.pp("norm_out"))?;
let conv_out = conv2d(block_in, cfg.out_ch, 3, conv_cfg, vb.pp("conv_out"))?;
Ok(Self {
conv_in,
mid_block_1,
mid_attn_1,
mid_block_2,
norm_out,
conv_out,
up,
})
}
}
impl candle_nn::Module for Decoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let h = xs.apply(&self.conv_in)?;
let mut h = h
.apply(&self.mid_block_1)?
.apply(&self.mid_attn_1)?
.apply(&self.mid_block_2)?;
for block in self.up.iter().rev() {
for b in block.block.iter() {
h = h.apply(b)?
}
if let Some(us) = block.upsample.as_ref() {
h = h.apply(us)?
}
}
h.apply(&self.norm_out)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv_out)
}
}
#[derive(Debug, Clone)]
pub struct DiagonalGaussian {
sample: bool,
chunk_dim: usize,
}
impl DiagonalGaussian {
pub fn new(sample: bool, chunk_dim: usize) -> Result<Self> {
Ok(Self { sample, chunk_dim })
}
}
impl candle_nn::Module for DiagonalGaussian {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let chunks = xs.chunk(2, self.chunk_dim)?;
if self.sample {
let std = (&chunks[1] * 0.5)?.exp()?;
&chunks[0] + (std * chunks[0].randn_like(0., 1.))?
} else {
Ok(chunks[0].clone())
}
}
}
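// The input is split along `chunk_dim` into a mean and a log-variance; when `sample`
// is true the forward pass returns mean + exp(0.5 * logvar) * eps with eps drawn from
// a standard normal, otherwise it returns the mean.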
#[derive(Debug, Clone)]
pub struct AutoEncoder {
encoder: Encoder,
decoder: Decoder,
reg: DiagonalGaussian,
shift_factor: f64,
scale_factor: f64,
}
impl AutoEncoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let decoder = Decoder::new(cfg, vb.pp("decoder"))?;
let reg = DiagonalGaussian::new(true, 1)?;
Ok(Self {
encoder,
decoder,
reg,
scale_factor: cfg.scale_factor,
shift_factor: cfg.shift_factor,
})
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let z = xs.apply(&self.encoder)?.apply(&self.reg)?;
(z - self.shift_factor)? * self.scale_factor
}
pub fn decode(&self, xs: &Tensor) -> Result<Tensor> {
let xs = ((xs / self.scale_factor)? + self.shift_factor)?;
xs.apply(&self.decoder)
}
}
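// Latent scaling: `encode` regularizes the encoder output and normalizes it as
// (z - shift_factor) * scale_factor; `decode` undoes this with
// z / scale_factor + shift_factor before running the decoder, so round-tripping
// through both stays consistent.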
impl candle::Module for AutoEncoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.decode(&self.encode(xs)?)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/flux/quantized_model.rs | candle-transformers/src/models/flux/quantized_model.rs | use super::model::{attention, timestep_embedding, Config, EmbedNd};
use crate::quantized_nn::{linear, linear_b, Linear};
use crate::quantized_var_builder::VarBuilder;
use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::{LayerNorm, RmsNorm};
fn layer_norm(dim: usize, vb: VarBuilder) -> Result<LayerNorm> {
let ws = Tensor::ones(dim, DType::F32, vb.device())?;
Ok(LayerNorm::new_no_bias(ws, 1e-6))
}
#[derive(Debug, Clone)]
pub struct MlpEmbedder {
in_layer: Linear,
out_layer: Linear,
}
impl MlpEmbedder {
fn new(in_sz: usize, h_sz: usize, vb: VarBuilder) -> Result<Self> {
let in_layer = linear(in_sz, h_sz, vb.pp("in_layer"))?;
let out_layer = linear(h_sz, h_sz, vb.pp("out_layer"))?;
Ok(Self {
in_layer,
out_layer,
})
}
}
impl candle::Module for MlpEmbedder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.in_layer)?.silu()?.apply(&self.out_layer)
}
}
#[derive(Debug, Clone)]
pub struct QkNorm {
query_norm: RmsNorm,
key_norm: RmsNorm,
}
impl QkNorm {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let query_norm = vb.get(dim, "query_norm.scale")?.dequantize(vb.device())?;
let query_norm = RmsNorm::new(query_norm, 1e-6);
let key_norm = vb.get(dim, "key_norm.scale")?.dequantize(vb.device())?;
let key_norm = RmsNorm::new(key_norm, 1e-6);
Ok(Self {
query_norm,
key_norm,
})
}
}
struct ModulationOut {
shift: Tensor,
scale: Tensor,
gate: Tensor,
}
impl ModulationOut {
fn scale_shift(&self, xs: &Tensor) -> Result<Tensor> {
xs.broadcast_mul(&(&self.scale + 1.)?)?
.broadcast_add(&self.shift)
}
fn gate(&self, xs: &Tensor) -> Result<Tensor> {
self.gate.broadcast_mul(xs)
}
}
#[derive(Debug, Clone)]
struct Modulation1 {
lin: Linear,
}
impl Modulation1 {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let lin = linear(dim, 3 * dim, vb.pp("lin"))?;
Ok(Self { lin })
}
fn forward(&self, vec_: &Tensor) -> Result<ModulationOut> {
let ys = vec_
.silu()?
.apply(&self.lin)?
.unsqueeze(1)?
.chunk(3, D::Minus1)?;
if ys.len() != 3 {
candle::bail!("unexpected len from chunk {ys:?}")
}
Ok(ModulationOut {
shift: ys[0].clone(),
scale: ys[1].clone(),
gate: ys[2].clone(),
})
}
}
#[derive(Debug, Clone)]
struct Modulation2 {
lin: Linear,
}
impl Modulation2 {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let lin = linear(dim, 6 * dim, vb.pp("lin"))?;
Ok(Self { lin })
}
fn forward(&self, vec_: &Tensor) -> Result<(ModulationOut, ModulationOut)> {
let ys = vec_
.silu()?
.apply(&self.lin)?
.unsqueeze(1)?
.chunk(6, D::Minus1)?;
if ys.len() != 6 {
candle::bail!("unexpected len from chunk {ys:?}")
}
let mod1 = ModulationOut {
shift: ys[0].clone(),
scale: ys[1].clone(),
gate: ys[2].clone(),
};
let mod2 = ModulationOut {
shift: ys[3].clone(),
scale: ys[4].clone(),
gate: ys[5].clone(),
};
Ok((mod1, mod2))
}
}
#[derive(Debug, Clone)]
pub struct SelfAttention {
qkv: Linear,
norm: QkNorm,
proj: Linear,
num_heads: usize,
}
impl SelfAttention {
fn new(dim: usize, num_heads: usize, qkv_bias: bool, vb: VarBuilder) -> Result<Self> {
let head_dim = dim / num_heads;
let qkv = linear_b(dim, dim * 3, qkv_bias, vb.pp("qkv"))?;
let norm = QkNorm::new(head_dim, vb.pp("norm"))?;
let proj = linear(dim, dim, vb.pp("proj"))?;
Ok(Self {
qkv,
norm,
proj,
num_heads,
})
}
fn qkv(&self, xs: &Tensor) -> Result<(Tensor, Tensor, Tensor)> {
let qkv = xs.apply(&self.qkv)?;
let (b, l, _khd) = qkv.dims3()?;
let qkv = qkv.reshape((b, l, 3, self.num_heads, ()))?;
let q = qkv.i((.., .., 0))?.transpose(1, 2)?;
let k = qkv.i((.., .., 1))?.transpose(1, 2)?;
let v = qkv.i((.., .., 2))?.transpose(1, 2)?;
let q = q.apply(&self.norm.query_norm)?;
let k = k.apply(&self.norm.key_norm)?;
Ok((q, k, v))
}
#[allow(unused)]
fn forward(&self, xs: &Tensor, pe: &Tensor) -> Result<Tensor> {
let (q, k, v) = self.qkv(xs)?;
attention(&q, &k, &v, pe)?.apply(&self.proj)
}
}
#[derive(Debug, Clone)]
struct Mlp {
lin1: Linear,
lin2: Linear,
}
impl Mlp {
fn new(in_sz: usize, mlp_sz: usize, vb: VarBuilder) -> Result<Self> {
let lin1 = linear(in_sz, mlp_sz, vb.pp("0"))?;
let lin2 = linear(mlp_sz, in_sz, vb.pp("2"))?;
Ok(Self { lin1, lin2 })
}
}
impl candle::Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.lin1)?.gelu()?.apply(&self.lin2)
}
}
#[derive(Debug, Clone)]
pub struct DoubleStreamBlock {
img_mod: Modulation2,
img_norm1: LayerNorm,
img_attn: SelfAttention,
img_norm2: LayerNorm,
img_mlp: Mlp,
txt_mod: Modulation2,
txt_norm1: LayerNorm,
txt_attn: SelfAttention,
txt_norm2: LayerNorm,
txt_mlp: Mlp,
}
impl DoubleStreamBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h_sz = cfg.hidden_size;
let mlp_sz = (h_sz as f64 * cfg.mlp_ratio) as usize;
let img_mod = Modulation2::new(h_sz, vb.pp("img_mod"))?;
let img_norm1 = layer_norm(h_sz, vb.pp("img_norm1"))?;
let img_attn = SelfAttention::new(h_sz, cfg.num_heads, cfg.qkv_bias, vb.pp("img_attn"))?;
let img_norm2 = layer_norm(h_sz, vb.pp("img_norm2"))?;
let img_mlp = Mlp::new(h_sz, mlp_sz, vb.pp("img_mlp"))?;
let txt_mod = Modulation2::new(h_sz, vb.pp("txt_mod"))?;
let txt_norm1 = layer_norm(h_sz, vb.pp("txt_norm1"))?;
let txt_attn = SelfAttention::new(h_sz, cfg.num_heads, cfg.qkv_bias, vb.pp("txt_attn"))?;
let txt_norm2 = layer_norm(h_sz, vb.pp("txt_norm2"))?;
let txt_mlp = Mlp::new(h_sz, mlp_sz, vb.pp("txt_mlp"))?;
Ok(Self {
img_mod,
img_norm1,
img_attn,
img_norm2,
img_mlp,
txt_mod,
txt_norm1,
txt_attn,
txt_norm2,
txt_mlp,
})
}
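// The text and image streams are modulated and projected separately, their q/k/v
// are concatenated along the sequence dimension for a joint attention pass, and
// the result is split back into per-stream residual updates.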
fn forward(
&self,
img: &Tensor,
txt: &Tensor,
vec_: &Tensor,
pe: &Tensor,
) -> Result<(Tensor, Tensor)> {
let (img_mod1, img_mod2) = self.img_mod.forward(vec_)?; // shift, scale, gate
let (txt_mod1, txt_mod2) = self.txt_mod.forward(vec_)?; // shift, scale, gate
let img_modulated = img.apply(&self.img_norm1)?;
let img_modulated = img_mod1.scale_shift(&img_modulated)?;
let (img_q, img_k, img_v) = self.img_attn.qkv(&img_modulated)?;
let txt_modulated = txt.apply(&self.txt_norm1)?;
let txt_modulated = txt_mod1.scale_shift(&txt_modulated)?;
let (txt_q, txt_k, txt_v) = self.txt_attn.qkv(&txt_modulated)?;
let q = Tensor::cat(&[txt_q, img_q], 2)?;
let k = Tensor::cat(&[txt_k, img_k], 2)?;
let v = Tensor::cat(&[txt_v, img_v], 2)?;
let attn = attention(&q, &k, &v, pe)?;
let txt_attn = attn.narrow(1, 0, txt.dim(1)?)?;
let img_attn = attn.narrow(1, txt.dim(1)?, attn.dim(1)? - txt.dim(1)?)?;
let img = (img + img_mod1.gate(&img_attn.apply(&self.img_attn.proj)?))?;
let img = (&img
+ img_mod2.gate(
&img_mod2
.scale_shift(&img.apply(&self.img_norm2)?)?
.apply(&self.img_mlp)?,
)?)?;
let txt = (txt + txt_mod1.gate(&txt_attn.apply(&self.txt_attn.proj)?))?;
let txt = (&txt
+ txt_mod2.gate(
&txt_mod2
.scale_shift(&txt.apply(&self.txt_norm2)?)?
.apply(&self.txt_mlp)?,
)?)?;
Ok((img, txt))
}
}
#[derive(Debug, Clone)]
pub struct SingleStreamBlock {
linear1: Linear,
linear2: Linear,
norm: QkNorm,
pre_norm: LayerNorm,
modulation: Modulation1,
h_sz: usize,
mlp_sz: usize,
num_heads: usize,
}
impl SingleStreamBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h_sz = cfg.hidden_size;
let mlp_sz = (h_sz as f64 * cfg.mlp_ratio) as usize;
let head_dim = h_sz / cfg.num_heads;
let linear1 = linear(h_sz, h_sz * 3 + mlp_sz, vb.pp("linear1"))?;
let linear2 = linear(h_sz + mlp_sz, h_sz, vb.pp("linear2"))?;
let norm = QkNorm::new(head_dim, vb.pp("norm"))?;
let pre_norm = layer_norm(h_sz, vb.pp("pre_norm"))?;
let modulation = Modulation1::new(h_sz, vb.pp("modulation"))?;
Ok(Self {
linear1,
linear2,
norm,
pre_norm,
modulation,
h_sz,
mlp_sz,
num_heads: cfg.num_heads,
})
}
fn forward(&self, xs: &Tensor, vec_: &Tensor, pe: &Tensor) -> Result<Tensor> {
let mod_ = self.modulation.forward(vec_)?;
let x_mod = mod_.scale_shift(&xs.apply(&self.pre_norm)?)?;
let x_mod = x_mod.apply(&self.linear1)?;
let qkv = x_mod.narrow(D::Minus1, 0, 3 * self.h_sz)?;
let (b, l, _khd) = qkv.dims3()?;
let qkv = qkv.reshape((b, l, 3, self.num_heads, ()))?;
let q = qkv.i((.., .., 0))?.transpose(1, 2)?;
let k = qkv.i((.., .., 1))?.transpose(1, 2)?;
let v = qkv.i((.., .., 2))?.transpose(1, 2)?;
let mlp = x_mod.narrow(D::Minus1, 3 * self.h_sz, self.mlp_sz)?;
let q = q.apply(&self.norm.query_norm)?;
let k = k.apply(&self.norm.key_norm)?;
let attn = attention(&q, &k, &v, pe)?;
let output = Tensor::cat(&[attn, mlp.gelu()?], 2)?.apply(&self.linear2)?;
xs + mod_.gate(&output)
}
}
#[derive(Debug, Clone)]
pub struct LastLayer {
norm_final: LayerNorm,
linear: Linear,
ada_ln_modulation: Linear,
}
impl LastLayer {
fn new(h_sz: usize, p_sz: usize, out_c: usize, vb: VarBuilder) -> Result<Self> {
let norm_final = layer_norm(h_sz, vb.pp("norm_final"))?;
let linear_ = linear(h_sz, p_sz * p_sz * out_c, vb.pp("linear"))?;
let ada_ln_modulation = linear(h_sz, 2 * h_sz, vb.pp("adaLN_modulation.1"))?;
Ok(Self {
norm_final,
linear: linear_,
ada_ln_modulation,
})
}
fn forward(&self, xs: &Tensor, vec: &Tensor) -> Result<Tensor> {
let chunks = vec.silu()?.apply(&self.ada_ln_modulation)?.chunk(2, 1)?;
let (shift, scale) = (&chunks[0], &chunks[1]);
let xs = xs
.apply(&self.norm_final)?
.broadcast_mul(&(scale.unsqueeze(1)? + 1.0)?)?
.broadcast_add(&shift.unsqueeze(1)?)?;
xs.apply(&self.linear)
}
}
#[derive(Debug, Clone)]
pub struct Flux {
img_in: Linear,
txt_in: Linear,
time_in: MlpEmbedder,
vector_in: MlpEmbedder,
guidance_in: Option<MlpEmbedder>,
pe_embedder: EmbedNd,
double_blocks: Vec<DoubleStreamBlock>,
single_blocks: Vec<SingleStreamBlock>,
final_layer: LastLayer,
}
impl Flux {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let img_in = linear(cfg.in_channels, cfg.hidden_size, vb.pp("img_in"))?;
let txt_in = linear(cfg.context_in_dim, cfg.hidden_size, vb.pp("txt_in"))?;
let mut double_blocks = Vec::with_capacity(cfg.depth);
let vb_d = vb.pp("double_blocks");
for idx in 0..cfg.depth {
let db = DoubleStreamBlock::new(cfg, vb_d.pp(idx))?;
double_blocks.push(db)
}
let mut single_blocks = Vec::with_capacity(cfg.depth_single_blocks);
let vb_s = vb.pp("single_blocks");
for idx in 0..cfg.depth_single_blocks {
let sb = SingleStreamBlock::new(cfg, vb_s.pp(idx))?;
single_blocks.push(sb)
}
let time_in = MlpEmbedder::new(256, cfg.hidden_size, vb.pp("time_in"))?;
let vector_in = MlpEmbedder::new(cfg.vec_in_dim, cfg.hidden_size, vb.pp("vector_in"))?;
let guidance_in = if cfg.guidance_embed {
let mlp = MlpEmbedder::new(256, cfg.hidden_size, vb.pp("guidance_in"))?;
Some(mlp)
} else {
None
};
let final_layer =
LastLayer::new(cfg.hidden_size, 1, cfg.in_channels, vb.pp("final_layer"))?;
let pe_dim = cfg.hidden_size / cfg.num_heads;
let pe_embedder = EmbedNd::new(pe_dim, cfg.theta, cfg.axes_dim.to_vec());
Ok(Self {
img_in,
txt_in,
time_in,
vector_in,
guidance_in,
pe_embedder,
double_blocks,
single_blocks,
final_layer,
})
}
}
impl super::WithForward for Flux {
#[allow(clippy::too_many_arguments)]
fn forward(
&self,
img: &Tensor,
img_ids: &Tensor,
txt: &Tensor,
txt_ids: &Tensor,
timesteps: &Tensor,
y: &Tensor,
guidance: Option<&Tensor>,
) -> Result<Tensor> {
if txt.rank() != 3 {
candle::bail!("unexpected shape for txt {:?}", txt.shape())
}
if img.rank() != 3 {
candle::bail!("unexpected shape for img {:?}", img.shape())
}
let dtype = img.dtype();
let pe = {
let ids = Tensor::cat(&[txt_ids, img_ids], 1)?;
ids.apply(&self.pe_embedder)?
};
let mut txt = txt.apply(&self.txt_in)?;
let mut img = img.apply(&self.img_in)?;
let vec_ = timestep_embedding(timesteps, 256, dtype)?.apply(&self.time_in)?;
let vec_ = match (self.guidance_in.as_ref(), guidance) {
(Some(g_in), Some(guidance)) => {
(vec_ + timestep_embedding(guidance, 256, dtype)?.apply(g_in))?
}
_ => vec_,
};
let vec_ = (vec_ + y.apply(&self.vector_in))?;
// Double blocks
for block in self.double_blocks.iter() {
(img, txt) = block.forward(&img, &txt, &vec_, &pe)?
}
// Single blocks
let mut img = Tensor::cat(&[&txt, &img], 1)?;
for block in self.single_blocks.iter() {
img = block.forward(&img, &vec_, &pe)?;
}
let img = img.i((.., txt.dim(1)?..))?;
self.final_layer.forward(&img, &vec_)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/flux/model.rs | candle-transformers/src/models/flux/model.rs | use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::{LayerNorm, Linear, RmsNorm, VarBuilder};
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/model.py#L12
#[derive(Debug, Clone)]
pub struct Config {
pub in_channels: usize,
pub vec_in_dim: usize,
pub context_in_dim: usize,
pub hidden_size: usize,
pub mlp_ratio: f64,
pub num_heads: usize,
pub depth: usize,
pub depth_single_blocks: usize,
pub axes_dim: Vec<usize>,
pub theta: usize,
pub qkv_bias: bool,
pub guidance_embed: bool,
}
impl Config {
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L32
pub fn dev() -> Self {
Self {
in_channels: 64,
vec_in_dim: 768,
context_in_dim: 4096,
hidden_size: 3072,
mlp_ratio: 4.0,
num_heads: 24,
depth: 19,
depth_single_blocks: 38,
axes_dim: vec![16, 56, 56],
theta: 10_000,
qkv_bias: true,
guidance_embed: true,
}
}
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L64
pub fn schnell() -> Self {
Self {
in_channels: 64,
vec_in_dim: 768,
context_in_dim: 4096,
hidden_size: 3072,
mlp_ratio: 4.0,
num_heads: 24,
depth: 19,
depth_single_blocks: 38,
axes_dim: vec![16, 56, 56],
theta: 10_000,
qkv_bias: true,
guidance_embed: false,
}
}
}
fn layer_norm(dim: usize, vb: VarBuilder) -> Result<LayerNorm> {
let ws = Tensor::ones(dim, vb.dtype(), vb.device())?;
Ok(LayerNorm::new_no_bias(ws, 1e-6))
}
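// Plain softmax(q k^T / sqrt(d)) v attention; the leading batch/head dimensions are
// flattened for the matmuls and restored on the output.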
fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let dim = q.dim(D::Minus1)?;
let scale_factor = 1.0 / (dim as f64).sqrt();
let mut batch_dims = q.dims().to_vec();
batch_dims.pop();
batch_dims.pop();
let q = q.flatten_to(batch_dims.len() - 1)?;
let k = k.flatten_to(batch_dims.len() - 1)?;
let v = v.flatten_to(batch_dims.len() - 1)?;
let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
let attn_scores = candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(&v)?;
batch_dims.push(attn_scores.dim(D::Minus2)?);
batch_dims.push(attn_scores.dim(D::Minus1)?);
attn_scores.reshape(batch_dims)
}
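// Builds rotary position-embedding factors: for each position and frequency pair the
// trailing two dims hold the 2x2 rotation matrix [[cos, -sin], [sin, cos]], with
// frequencies 1 / theta^(2i / dim).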
fn rope(pos: &Tensor, dim: usize, theta: usize) -> Result<Tensor> {
if dim % 2 == 1 {
candle::bail!("dim {dim} is odd")
}
let dev = pos.device();
let theta = theta as f64;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, 1, inv_freq_len), dev)?;
let inv_freq = inv_freq.to_dtype(pos.dtype())?;
let freqs = pos.unsqueeze(2)?.broadcast_mul(&inv_freq)?;
let cos = freqs.cos()?;
let sin = freqs.sin()?;
let out = Tensor::stack(&[&cos, &sin.neg()?, &sin, &cos], 3)?;
let (b, n, d, _ij) = out.dims4()?;
out.reshape((b, n, d, 2, 2))
}
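// Applies the precomputed rotation to consecutive (even, odd) feature pairs:
// out = [cos * x0 - sin * x1, sin * x0 + cos * x1] for each pair.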
fn apply_rope(x: &Tensor, freq_cis: &Tensor) -> Result<Tensor> {
let dims = x.dims();
let (b_sz, n_head, seq_len, n_embd) = x.dims4()?;
let x = x.reshape((b_sz, n_head, seq_len, n_embd / 2, 2))?;
let x0 = x.narrow(D::Minus1, 0, 1)?;
let x1 = x.narrow(D::Minus1, 1, 1)?;
let fr0 = freq_cis.get_on_dim(D::Minus1, 0)?;
let fr1 = freq_cis.get_on_dim(D::Minus1, 1)?;
(fr0.broadcast_mul(&x0)? + fr1.broadcast_mul(&x1)?)?.reshape(dims.to_vec())
}
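// Rotary embeddings are applied to q and k, attention is computed, and the heads are
// merged back into the last dimension.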
pub(crate) fn attention(q: &Tensor, k: &Tensor, v: &Tensor, pe: &Tensor) -> Result<Tensor> {
let q = apply_rope(q, pe)?.contiguous()?;
let k = apply_rope(k, pe)?.contiguous()?;
let x = scaled_dot_product_attention(&q, &k, v)?;
x.transpose(1, 2)?.flatten_from(2)
}
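// Sinusoidal timestep embedding as used in diffusion models: timesteps are scaled by
// 1000, multiplied by frequencies exp(-ln(10000) * i / (dim / 2)), and the cos/sin
// parts are concatenated into a vector of size `dim`.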
pub(crate) fn timestep_embedding(t: &Tensor, dim: usize, dtype: DType) -> Result<Tensor> {
const TIME_FACTOR: f64 = 1000.;
const MAX_PERIOD: f64 = 10000.;
if dim % 2 == 1 {
candle::bail!("{dim} is odd")
}
let dev = t.device();
let half = dim / 2;
let t = (t * TIME_FACTOR)?;
let arange = Tensor::arange(0, half as u32, dev)?.to_dtype(candle::DType::F32)?;
let freqs = (arange * (-MAX_PERIOD.ln() / half as f64))?.exp()?;
let args = t
.unsqueeze(1)?
.to_dtype(candle::DType::F32)?
.broadcast_mul(&freqs.unsqueeze(0)?)?;
let emb = Tensor::cat(&[args.cos()?, args.sin()?], D::Minus1)?.to_dtype(dtype)?;
Ok(emb)
}
#[derive(Debug, Clone)]
pub struct EmbedNd {
#[allow(unused)]
dim: usize,
theta: usize,
axes_dim: Vec<usize>,
}
impl EmbedNd {
pub fn new(dim: usize, theta: usize, axes_dim: Vec<usize>) -> Self {
Self {
dim,
theta,
axes_dim,
}
}
}
impl candle::Module for EmbedNd {
fn forward(&self, ids: &Tensor) -> Result<Tensor> {
let n_axes = ids.dim(D::Minus1)?;
let mut emb = Vec::with_capacity(n_axes);
for idx in 0..n_axes {
let r = rope(
&ids.get_on_dim(D::Minus1, idx)?,
self.axes_dim[idx],
self.theta,
)?;
emb.push(r)
}
let emb = Tensor::cat(&emb, 2)?;
emb.unsqueeze(1)
}
}
#[derive(Debug, Clone)]
pub struct MlpEmbedder {
in_layer: Linear,
out_layer: Linear,
}
impl MlpEmbedder {
fn new(in_sz: usize, h_sz: usize, vb: VarBuilder) -> Result<Self> {
let in_layer = candle_nn::linear(in_sz, h_sz, vb.pp("in_layer"))?;
let out_layer = candle_nn::linear(h_sz, h_sz, vb.pp("out_layer"))?;
Ok(Self {
in_layer,
out_layer,
})
}
}
impl candle::Module for MlpEmbedder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.in_layer)?.silu()?.apply(&self.out_layer)
}
}
#[derive(Debug, Clone)]
pub struct QkNorm {
query_norm: RmsNorm,
key_norm: RmsNorm,
}
impl QkNorm {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let query_norm = vb.get(dim, "query_norm.scale")?;
let query_norm = RmsNorm::new(query_norm, 1e-6);
let key_norm = vb.get(dim, "key_norm.scale")?;
let key_norm = RmsNorm::new(key_norm, 1e-6);
Ok(Self {
query_norm,
key_norm,
})
}
}
struct ModulationOut {
shift: Tensor,
scale: Tensor,
gate: Tensor,
}
impl ModulationOut {
fn scale_shift(&self, xs: &Tensor) -> Result<Tensor> {
xs.broadcast_mul(&(&self.scale + 1.)?)?
.broadcast_add(&self.shift)
}
fn gate(&self, xs: &Tensor) -> Result<Tensor> {
self.gate.broadcast_mul(xs)
}
}
#[derive(Debug, Clone)]
struct Modulation1 {
lin: Linear,
}
impl Modulation1 {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let lin = candle_nn::linear(dim, 3 * dim, vb.pp("lin"))?;
Ok(Self { lin })
}
fn forward(&self, vec_: &Tensor) -> Result<ModulationOut> {
let ys = vec_
.silu()?
.apply(&self.lin)?
.unsqueeze(1)?
.chunk(3, D::Minus1)?;
if ys.len() != 3 {
candle::bail!("unexpected len from chunk {ys:?}")
}
Ok(ModulationOut {
shift: ys[0].clone(),
scale: ys[1].clone(),
gate: ys[2].clone(),
})
}
}
#[derive(Debug, Clone)]
struct Modulation2 {
lin: Linear,
}
impl Modulation2 {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let lin = candle_nn::linear(dim, 6 * dim, vb.pp("lin"))?;
Ok(Self { lin })
}
fn forward(&self, vec_: &Tensor) -> Result<(ModulationOut, ModulationOut)> {
let ys = vec_
.silu()?
.apply(&self.lin)?
.unsqueeze(1)?
.chunk(6, D::Minus1)?;
if ys.len() != 6 {
candle::bail!("unexpected len from chunk {ys:?}")
}
let mod1 = ModulationOut {
shift: ys[0].clone(),
scale: ys[1].clone(),
gate: ys[2].clone(),
};
let mod2 = ModulationOut {
shift: ys[3].clone(),
scale: ys[4].clone(),
gate: ys[5].clone(),
};
Ok((mod1, mod2))
}
}
#[derive(Debug, Clone)]
pub struct SelfAttention {
qkv: Linear,
norm: QkNorm,
proj: Linear,
num_heads: usize,
}
impl SelfAttention {
fn new(dim: usize, num_heads: usize, qkv_bias: bool, vb: VarBuilder) -> Result<Self> {
let head_dim = dim / num_heads;
let qkv = candle_nn::linear_b(dim, dim * 3, qkv_bias, vb.pp("qkv"))?;
let norm = QkNorm::new(head_dim, vb.pp("norm"))?;
let proj = candle_nn::linear(dim, dim, vb.pp("proj"))?;
Ok(Self {
qkv,
norm,
proj,
num_heads,
})
}
fn qkv(&self, xs: &Tensor) -> Result<(Tensor, Tensor, Tensor)> {
let qkv = xs.apply(&self.qkv)?;
let (b, l, _khd) = qkv.dims3()?;
let qkv = qkv.reshape((b, l, 3, self.num_heads, ()))?;
let q = qkv.i((.., .., 0))?.transpose(1, 2)?;
let k = qkv.i((.., .., 1))?.transpose(1, 2)?;
let v = qkv.i((.., .., 2))?.transpose(1, 2)?;
let q = q.apply(&self.norm.query_norm)?;
let k = k.apply(&self.norm.key_norm)?;
Ok((q, k, v))
}
#[allow(unused)]
fn forward(&self, xs: &Tensor, pe: &Tensor) -> Result<Tensor> {
let (q, k, v) = self.qkv(xs)?;
attention(&q, &k, &v, pe)?.apply(&self.proj)
}
}
#[derive(Debug, Clone)]
struct Mlp {
lin1: Linear,
lin2: Linear,
}
impl Mlp {
fn new(in_sz: usize, mlp_sz: usize, vb: VarBuilder) -> Result<Self> {
let lin1 = candle_nn::linear(in_sz, mlp_sz, vb.pp("0"))?;
let lin2 = candle_nn::linear(mlp_sz, in_sz, vb.pp("2"))?;
Ok(Self { lin1, lin2 })
}
}
impl candle::Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.lin1)?.gelu()?.apply(&self.lin2)
}
}
#[derive(Debug, Clone)]
pub struct DoubleStreamBlock {
img_mod: Modulation2,
img_norm1: LayerNorm,
img_attn: SelfAttention,
img_norm2: LayerNorm,
img_mlp: Mlp,
txt_mod: Modulation2,
txt_norm1: LayerNorm,
txt_attn: SelfAttention,
txt_norm2: LayerNorm,
txt_mlp: Mlp,
}
impl DoubleStreamBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h_sz = cfg.hidden_size;
let mlp_sz = (h_sz as f64 * cfg.mlp_ratio) as usize;
let img_mod = Modulation2::new(h_sz, vb.pp("img_mod"))?;
let img_norm1 = layer_norm(h_sz, vb.pp("img_norm1"))?;
let img_attn = SelfAttention::new(h_sz, cfg.num_heads, cfg.qkv_bias, vb.pp("img_attn"))?;
let img_norm2 = layer_norm(h_sz, vb.pp("img_norm2"))?;
let img_mlp = Mlp::new(h_sz, mlp_sz, vb.pp("img_mlp"))?;
let txt_mod = Modulation2::new(h_sz, vb.pp("txt_mod"))?;
let txt_norm1 = layer_norm(h_sz, vb.pp("txt_norm1"))?;
let txt_attn = SelfAttention::new(h_sz, cfg.num_heads, cfg.qkv_bias, vb.pp("txt_attn"))?;
let txt_norm2 = layer_norm(h_sz, vb.pp("txt_norm2"))?;
let txt_mlp = Mlp::new(h_sz, mlp_sz, vb.pp("txt_mlp"))?;
Ok(Self {
img_mod,
img_norm1,
img_attn,
img_norm2,
img_mlp,
txt_mod,
txt_norm1,
txt_attn,
txt_norm2,
txt_mlp,
})
}
fn forward(
&self,
img: &Tensor,
txt: &Tensor,
vec_: &Tensor,
pe: &Tensor,
) -> Result<(Tensor, Tensor)> {
let (img_mod1, img_mod2) = self.img_mod.forward(vec_)?; // shift, scale, gate
let (txt_mod1, txt_mod2) = self.txt_mod.forward(vec_)?; // shift, scale, gate
let img_modulated = img.apply(&self.img_norm1)?;
let img_modulated = img_mod1.scale_shift(&img_modulated)?;
let (img_q, img_k, img_v) = self.img_attn.qkv(&img_modulated)?;
let txt_modulated = txt.apply(&self.txt_norm1)?;
let txt_modulated = txt_mod1.scale_shift(&txt_modulated)?;
let (txt_q, txt_k, txt_v) = self.txt_attn.qkv(&txt_modulated)?;
let q = Tensor::cat(&[txt_q, img_q], 2)?;
let k = Tensor::cat(&[txt_k, img_k], 2)?;
let v = Tensor::cat(&[txt_v, img_v], 2)?;
let attn = attention(&q, &k, &v, pe)?;
let txt_attn = attn.narrow(1, 0, txt.dim(1)?)?;
let img_attn = attn.narrow(1, txt.dim(1)?, attn.dim(1)? - txt.dim(1)?)?;
let img = (img + img_mod1.gate(&img_attn.apply(&self.img_attn.proj)?))?;
let img = (&img
+ img_mod2.gate(
&img_mod2
.scale_shift(&img.apply(&self.img_norm2)?)?
.apply(&self.img_mlp)?,
)?)?;
let txt = (txt + txt_mod1.gate(&txt_attn.apply(&self.txt_attn.proj)?))?;
let txt = (&txt
+ txt_mod2.gate(
&txt_mod2
.scale_shift(&txt.apply(&self.txt_norm2)?)?
.apply(&self.txt_mlp)?,
)?)?;
Ok((img, txt))
}
}
#[derive(Debug, Clone)]
pub struct SingleStreamBlock {
linear1: Linear,
linear2: Linear,
norm: QkNorm,
pre_norm: LayerNorm,
modulation: Modulation1,
h_sz: usize,
mlp_sz: usize,
num_heads: usize,
}
impl SingleStreamBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h_sz = cfg.hidden_size;
let mlp_sz = (h_sz as f64 * cfg.mlp_ratio) as usize;
let head_dim = h_sz / cfg.num_heads;
let linear1 = candle_nn::linear(h_sz, h_sz * 3 + mlp_sz, vb.pp("linear1"))?;
let linear2 = candle_nn::linear(h_sz + mlp_sz, h_sz, vb.pp("linear2"))?;
let norm = QkNorm::new(head_dim, vb.pp("norm"))?;
let pre_norm = layer_norm(h_sz, vb.pp("pre_norm"))?;
let modulation = Modulation1::new(h_sz, vb.pp("modulation"))?;
Ok(Self {
linear1,
linear2,
norm,
pre_norm,
modulation,
h_sz,
mlp_sz,
num_heads: cfg.num_heads,
})
}
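// A single fused `linear1` produces q/k/v (3 * hidden_size) and the MLP activation
// (mlp_sz) in one matmul; the attention output and gelu(mlp) are concatenated and
// projected back to hidden size by `linear2`.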
fn forward(&self, xs: &Tensor, vec_: &Tensor, pe: &Tensor) -> Result<Tensor> {
let mod_ = self.modulation.forward(vec_)?;
let x_mod = mod_.scale_shift(&xs.apply(&self.pre_norm)?)?;
let x_mod = x_mod.apply(&self.linear1)?;
let qkv = x_mod.narrow(D::Minus1, 0, 3 * self.h_sz)?;
let (b, l, _khd) = qkv.dims3()?;
let qkv = qkv.reshape((b, l, 3, self.num_heads, ()))?;
let q = qkv.i((.., .., 0))?.transpose(1, 2)?;
let k = qkv.i((.., .., 1))?.transpose(1, 2)?;
let v = qkv.i((.., .., 2))?.transpose(1, 2)?;
let mlp = x_mod.narrow(D::Minus1, 3 * self.h_sz, self.mlp_sz)?;
let q = q.apply(&self.norm.query_norm)?;
let k = k.apply(&self.norm.key_norm)?;
let attn = attention(&q, &k, &v, pe)?;
let output = Tensor::cat(&[attn, mlp.gelu()?], 2)?.apply(&self.linear2)?;
xs + mod_.gate(&output)
}
}
#[derive(Debug, Clone)]
pub struct LastLayer {
norm_final: LayerNorm,
linear: Linear,
ada_ln_modulation: Linear,
}
impl LastLayer {
fn new(h_sz: usize, p_sz: usize, out_c: usize, vb: VarBuilder) -> Result<Self> {
let norm_final = layer_norm(h_sz, vb.pp("norm_final"))?;
let linear = candle_nn::linear(h_sz, p_sz * p_sz * out_c, vb.pp("linear"))?;
let ada_ln_modulation = candle_nn::linear(h_sz, 2 * h_sz, vb.pp("adaLN_modulation.1"))?;
Ok(Self {
norm_final,
linear,
ada_ln_modulation,
})
}
fn forward(&self, xs: &Tensor, vec: &Tensor) -> Result<Tensor> {
let chunks = vec.silu()?.apply(&self.ada_ln_modulation)?.chunk(2, 1)?;
let (shift, scale) = (&chunks[0], &chunks[1]);
let xs = xs
.apply(&self.norm_final)?
.broadcast_mul(&(scale.unsqueeze(1)? + 1.0)?)?
.broadcast_add(&shift.unsqueeze(1)?)?;
xs.apply(&self.linear)
}
}
#[derive(Debug, Clone)]
pub struct Flux {
img_in: Linear,
txt_in: Linear,
time_in: MlpEmbedder,
vector_in: MlpEmbedder,
guidance_in: Option<MlpEmbedder>,
pe_embedder: EmbedNd,
double_blocks: Vec<DoubleStreamBlock>,
single_blocks: Vec<SingleStreamBlock>,
final_layer: LastLayer,
}
impl Flux {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let img_in = candle_nn::linear(cfg.in_channels, cfg.hidden_size, vb.pp("img_in"))?;
let txt_in = candle_nn::linear(cfg.context_in_dim, cfg.hidden_size, vb.pp("txt_in"))?;
let mut double_blocks = Vec::with_capacity(cfg.depth);
let vb_d = vb.pp("double_blocks");
for idx in 0..cfg.depth {
let db = DoubleStreamBlock::new(cfg, vb_d.pp(idx))?;
double_blocks.push(db)
}
let mut single_blocks = Vec::with_capacity(cfg.depth_single_blocks);
let vb_s = vb.pp("single_blocks");
for idx in 0..cfg.depth_single_blocks {
let sb = SingleStreamBlock::new(cfg, vb_s.pp(idx))?;
single_blocks.push(sb)
}
let time_in = MlpEmbedder::new(256, cfg.hidden_size, vb.pp("time_in"))?;
let vector_in = MlpEmbedder::new(cfg.vec_in_dim, cfg.hidden_size, vb.pp("vector_in"))?;
let guidance_in = if cfg.guidance_embed {
let mlp = MlpEmbedder::new(256, cfg.hidden_size, vb.pp("guidance_in"))?;
Some(mlp)
} else {
None
};
let final_layer =
LastLayer::new(cfg.hidden_size, 1, cfg.in_channels, vb.pp("final_layer"))?;
let pe_dim = cfg.hidden_size / cfg.num_heads;
let pe_embedder = EmbedNd::new(pe_dim, cfg.theta, cfg.axes_dim.to_vec());
Ok(Self {
img_in,
txt_in,
time_in,
vector_in,
guidance_in,
pe_embedder,
double_blocks,
single_blocks,
final_layer,
})
}
}
impl super::WithForward for Flux {
#[allow(clippy::too_many_arguments)]
fn forward(
&self,
img: &Tensor,
img_ids: &Tensor,
txt: &Tensor,
txt_ids: &Tensor,
timesteps: &Tensor,
y: &Tensor,
guidance: Option<&Tensor>,
) -> Result<Tensor> {
if txt.rank() != 3 {
candle::bail!("unexpected shape for txt {:?}", txt.shape())
}
if img.rank() != 3 {
candle::bail!("unexpected shape for img {:?}", img.shape())
}
let dtype = img.dtype();
let pe = {
let ids = Tensor::cat(&[txt_ids, img_ids], 1)?;
ids.apply(&self.pe_embedder)?
};
let mut txt = txt.apply(&self.txt_in)?;
let mut img = img.apply(&self.img_in)?;
let vec_ = timestep_embedding(timesteps, 256, dtype)?.apply(&self.time_in)?;
let vec_ = match (self.guidance_in.as_ref(), guidance) {
(Some(g_in), Some(guidance)) => {
(vec_ + timestep_embedding(guidance, 256, dtype)?.apply(g_in))?
}
_ => vec_,
};
let vec_ = (vec_ + y.apply(&self.vector_in))?;
// Double blocks
for block in self.double_blocks.iter() {
(img, txt) = block.forward(&img, &txt, &vec_, &pe)?
}
// Single blocks
let mut img = Tensor::cat(&[&txt, &img], 1)?;
for block in self.single_blocks.iter() {
img = block.forward(&img, &vec_, &pe)?;
}
let img = img.i((.., txt.dim(1)?..))?;
self.final_layer.forward(&img, &vec_)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/flux/mod.rs | candle-transformers/src/models/flux/mod.rs | //! Flux Model
//!
//! Flux is a 12B rectified flow transformer capable of generating images from text descriptions.
//!
//! - 🤗 [Hugging Face Model](https://huggingface.co/black-forest-labs/FLUX.1-schnell)
//! - 💻 [GitHub Repository](https://github.com/black-forest-labs/flux)
//! - 📝 [Blog Post](https://blackforestlabs.ai/announcing-black-forest-labs/)
//!
//! # Usage
//!
//! ```bash
//! cargo run --features cuda \
//! --example flux -r -- \
//! --height 1024 --width 1024 \
//! --prompt "a rusty robot walking on a beach holding a small torch, \
//! the robot has the word \"rust\" written on it, high quality, 4k"
//! ```
//!
//! <div align=center>
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/flux/assets/flux-robot.jpg" alt="" width=320>
//! </div>
//!
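//! For programmatic use, a minimal sketch (assuming a `VarBuilder` named `vb` already
//! holds the transformer weights and that the image/text tensors and their ids are
//! prepared as in the `flux` example) might look like:
//!
//! ```ignore
//! use candle_transformers::models::flux::{model, WithForward};
//!
//! let cfg = model::Config::schnell();
//! let flux = model::Flux::new(&cfg, vb)?;
//! // `guidance` is `None` for the schnell checkpoint, which has no guidance embedding.
//! let out = flux.forward(&img, &img_ids, &txt, &txt_ids, &timesteps, &y, None)?;
//! ```
//!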
use candle::{Result, Tensor};
pub trait WithForward {
#[allow(clippy::too_many_arguments)]
fn forward(
&self,
img: &Tensor,
img_ids: &Tensor,
txt: &Tensor,
txt_ids: &Tensor,
timesteps: &Tensor,
y: &Tensor,
guidance: Option<&Tensor>,
) -> Result<Tensor>;
}
pub mod autoencoder;
pub mod model;
pub mod quantized_model;
pub mod sampling;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/voxtral/voxtral_llama.rs | candle-transformers/src/models/voxtral/voxtral_llama.rs | use crate::models::with_tracing::{linear_no_bias as linear, Linear, RmsNorm};
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{embedding, Embedding, Module, VarBuilder};
use serde::Deserialize;
use std::collections::HashMap;
pub const DEFAULT_MAX_SEQ_LEN: usize = 4096;
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct VoxtralLlamaConfig {
pub hidden_size: usize,
pub intermediate_size: usize,
pub vocab_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub head_dim: Option<usize>, // explicit head_dim from config
pub use_flash_attn: bool,
pub rms_norm_eps: f64,
pub rope_theta: f32,
pub max_position_embeddings: usize,
pub tie_word_embeddings: bool,
}
impl VoxtralLlamaConfig {
/// Voxtral 3B text model configuration
pub fn voxtral_3b() -> Self {
Self {
hidden_size: 3072,
intermediate_size: 8192,
vocab_size: 131072,
num_hidden_layers: 30,
num_attention_heads: 32,
num_key_value_heads: 8,
head_dim: Some(128), // Voxtral uses explicit head_dim=128
use_flash_attn: true,
rms_norm_eps: 1e-5,
rope_theta: 100_000_000.0,
max_position_embeddings: 131072,
tie_word_embeddings: false,
}
}
/// Voxtral 24B text model configuration
pub fn voxtral_24b() -> Self {
Self {
hidden_size: 5120,
intermediate_size: 32768,
vocab_size: 131072,
num_hidden_layers: 40,
num_attention_heads: 32,
num_key_value_heads: 8,
head_dim: Some(128), // Voxtral uses explicit head_dim=128
use_flash_attn: true,
rms_norm_eps: 1e-5,
rope_theta: 100_000_000.0,
max_position_embeddings: 131072,
tie_word_embeddings: false,
}
}
}
#[derive(Debug, Clone)]
pub struct VoxtralLlamaCache {
masks: HashMap<usize, Tensor>,
pub use_kv_cache: bool,
kvs: Vec<Option<(Tensor, Tensor)>>,
cos: Tensor,
sin: Tensor,
device: Device,
}
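// RoPE inverse frequencies: 1 / rope_theta^(2i / head_dim) for i = 0..head_dim/2,
// using the explicit head_dim from the config when provided.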
fn calculate_default_inv_freq(cfg: &VoxtralLlamaConfig) -> Vec<f32> {
let head_dim = cfg
.head_dim
.unwrap_or(cfg.hidden_size / cfg.num_attention_heads);
(0..head_dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f32 / head_dim as f32))
.collect()
}
impl VoxtralLlamaCache {
pub fn new(
use_kv_cache: bool,
dtype: DType,
config: &VoxtralLlamaConfig,
device: &Device,
) -> Result<Self> {
// precompute freqs_cis
let theta = calculate_default_inv_freq(config);
let theta = Tensor::new(theta, device)?;
let idx_theta = Tensor::arange(0, config.max_position_embeddings as u32, device)?
.to_dtype(DType::F32)?
.reshape((config.max_position_embeddings, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
// This is different from the paper, see:
// https://github.com/huggingface/transformers/blob/6112b1c6442aaf7affd2b0676a1cd4eee30c45cf/src/transformers/models/llama/modeling_llama.py#L112 # trufflehog:ignore
let cos = idx_theta.cos()?.to_dtype(dtype)?;
let sin = idx_theta.sin()?.to_dtype(dtype)?;
Ok(Self {
masks: HashMap::new(),
use_kv_cache,
kvs: vec![None; config.num_hidden_layers],
device: device.clone(),
cos,
sin,
})
}
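// Builds (and memoizes) a t x t causal mask where entry (i, j) is 1 for j > i,
// i.e. the future positions that must be masked out.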
fn mask(&mut self, t: usize) -> Result<Tensor> {
if let Some(mask) = self.masks.get(&t) {
Ok(mask.clone())
} else {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), &self.device)?;
self.masks.insert(t, mask.clone());
Ok(mask)
}
}
}
#[derive(Debug, Clone)]
struct CausalSelfAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_attention_heads: usize,
num_key_value_heads: usize,
head_dim: usize,
use_flash_attn: bool,
span: tracing::Span,
span_rot: tracing::Span,
max_position_embeddings: usize,
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
impl CausalSelfAttention {
fn apply_rotary_emb(
&self,
x: &Tensor,
index_pos: usize,
cache: &VoxtralLlamaCache,
) -> Result<Tensor> {
let _enter = self.span_rot.enter();
let (_b_sz, _, seq_len, _hidden_size) = x.dims4()?;
let cos = cache.cos.narrow(0, index_pos, seq_len)?;
let sin = cache.sin.narrow(0, index_pos, seq_len)?;
// Ensure dtype consistency between input tensor and position embeddings
let x_dtype = x.dtype();
let cos = if cos.dtype() != x_dtype {
cos.to_dtype(x_dtype)?
} else {
cos
};
let sin = if sin.dtype() != x_dtype {
sin.to_dtype(x_dtype)?
} else {
sin
};
candle_nn::rotary_emb::rope(x, &cos, &sin)
}
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut VoxtralLlamaCache,
) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_sz, seq_len, _hidden_size) = x.dims3()?;
let q = self.q_proj.forward(x)?;
let k = self.k_proj.forward(x)?;
let v = self.v_proj.forward(x)?;
let q = q
.reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let mut v = v
.reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?;
let q = self.apply_rotary_emb(&q, index_pos, cache)?;
let mut k = self.apply_rotary_emb(&k, index_pos, cache)?;
if cache.use_kv_cache {
if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] {
k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?;
v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?;
// Cap the cached keys/values along the sequence dimension (dim 2 of b, heads, seq, head_dim).
let k_seq_len = k.dims()[2];
if k_seq_len > self.max_position_embeddings {
    k = k
        .narrow(
            2,
            k_seq_len - self.max_position_embeddings,
            self.max_position_embeddings,
        )?
        .contiguous()?
}
let v_seq_len = v.dims()[2];
if v_seq_len > self.max_position_embeddings {
    v = v
        .narrow(
            2,
            v_seq_len - self.max_position_embeddings,
            self.max_position_embeddings,
        )?
        .contiguous()?
}
}
cache.kvs[block_idx] = Some((k.clone(), v.clone()))
}
let k = self.repeat_kv(k)?;
let v = self.repeat_kv(v)?;
let y = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = q.transpose(1, 2)?;
let k = k.transpose(1, 2)?;
let v = v.transpose(1, 2)?;
let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)?.transpose(1, 2)?
} else {
let in_dtype = q.dtype();
let q = q.to_dtype(DType::F32)?;
let k = k.to_dtype(DType::F32)?;
let v = v.to_dtype(DType::F32)?;
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = if seq_len == 1 {
att
} else {
let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?;
masked_fill(&att, &mask, f32::NEG_INFINITY)?
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
// Convert to contiguous as matmul doesn't support strided value tensors for now.
att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?
};
// Use the actual tensor dimensions from attention computation
let actual_hidden_size = self.num_attention_heads * self.head_dim;
let y = y
.transpose(1, 2)?
.reshape(&[b_sz, seq_len, actual_hidden_size])?;
let y = self.o_proj.forward(&y)?;
Ok(y)
}
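// Grouped-query attention: each key/value head is repeated so the kv tensors match
// the number of query heads.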
fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
crate::utils::repeat_kv(x, self.num_attention_heads / self.num_key_value_heads)
}
fn load(vb: VarBuilder, cfg: &VoxtralLlamaConfig) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "attn");
let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
let size_in = cfg.hidden_size;
// Use explicit head_dim if provided, otherwise calculate from hidden_size
let head_dim = cfg
.head_dim
.unwrap_or(cfg.hidden_size / cfg.num_attention_heads);
let size_q = head_dim * cfg.num_attention_heads;
let size_kv = head_dim * cfg.num_key_value_heads;
let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?;
let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?;
let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?;
let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_attention_heads: cfg.num_attention_heads,
num_key_value_heads: cfg.num_key_value_heads,
head_dim, // use the calculated head_dim from above
use_flash_attn: cfg.use_flash_attn,
span,
span_rot,
max_position_embeddings: cfg.max_position_embeddings,
})
}
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Clone)]
struct Mlp {
c_fc1: Linear,
c_fc2: Linear,
c_proj: Linear,
span: tracing::Span,
}
impl Mlp {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let x = (candle_nn::ops::silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?;
self.c_proj.forward(&x)
}
fn load(vb: VarBuilder, cfg: &VoxtralLlamaConfig) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "mlp");
let h_size = cfg.hidden_size;
let i_size = cfg.intermediate_size;
let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?;
let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?;
let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?;
Ok(Self {
c_fc1,
c_fc2,
c_proj,
span,
})
}
}
#[derive(Debug, Clone)]
struct Block {
rms_1: RmsNorm,
attn: CausalSelfAttention,
rms_2: RmsNorm,
mlp: Mlp,
span: tracing::Span,
}
impl Block {
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut VoxtralLlamaCache,
) -> Result<Tensor> {
let _enter = self.span.enter();
let residual = x;
let x = self.rms_1.forward(x)?;
let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?;
let residual = &x;
let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?;
Ok(x)
}
fn load(vb: VarBuilder, cfg: &VoxtralLlamaConfig) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "block");
let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?;
let mlp = Mlp::load(vb.pp("mlp"), cfg)?;
let rms_1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let rms_2 = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
rms_1,
attn,
rms_2,
mlp,
span,
})
}
}
#[derive(Debug, Clone)]
pub struct VoxtralLlama {
wte: Embedding,
blocks: Vec<Block>,
ln_f: RmsNorm,
lm_head: Linear,
}
impl VoxtralLlama {
// required by LLaVA
pub fn embed(&self, x: &Tensor) -> Result<Tensor> {
self.wte.forward(x)
}
// required by LLaVA
pub fn forward_input_embed(
&self,
input_embed: &Tensor,
index_pos: usize,
cache: &mut VoxtralLlamaCache,
) -> Result<Tensor> {
let (_, seq_len, _) = input_embed.dims3()?;
let mut x = input_embed.clone();
for (block_idx, block) in self.blocks.iter().enumerate() {
x = block.forward(&x, index_pos, block_idx, cache)?;
}
let x = self.ln_f.forward(&x)?;
// Handle both single token and multi-token sequences properly
let x = if seq_len == 1 {
x.i((.., 0, ..))?
} else {
x.i((.., seq_len - 1, ..))?
}
.contiguous()?;
let logits = self.lm_head.forward(&x)?;
logits.to_dtype(DType::F32)
}
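// Standard text-only decode step: embeds token ids, runs all transformer blocks,
// and returns F32 logits for the last position only.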
pub fn forward(
&self,
x: &Tensor,
index_pos: usize,
cache: &mut VoxtralLlamaCache,
) -> Result<Tensor> {
let (_b_sz, seq_len) = x.dims2()?;
let mut x = self.wte.forward(x)?;
for (block_idx, block) in self.blocks.iter().enumerate() {
x = block.forward(&x, index_pos, block_idx, cache)?;
}
let x = self.ln_f.forward(&x)?;
let x = x.i((.., seq_len - 1, ..))?.contiguous()?;
let logits = self.lm_head.forward(&x)?;
logits.to_dtype(DType::F32)
}
pub fn load(vb: VarBuilder, cfg: &VoxtralLlamaConfig) -> Result<Self> {
let wte = embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?;
let lm_head = if cfg.tie_word_embeddings {
Linear::from_weights(wte.embeddings().clone(), None)
} else {
linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
};
let ln_f = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?;
let blocks: Vec<_> = (0..cfg.num_hidden_layers)
.map(|i| Block::load(vb.pp(format!("model.layers.{i}")), cfg).unwrap())
.collect();
Ok(Self {
wte,
blocks,
ln_f,
lm_head,
})
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/voxtral/model.rs | candle-transformers/src/models/voxtral/model.rs | use super::voxtral_llama::{VoxtralLlama, VoxtralLlamaCache, VoxtralLlamaConfig};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{
layer_norm, linear, linear_no_bias, Conv1d, Dropout, LayerNorm, Linear, VarBuilder,
};
use rand::Rng;
#[derive(Debug, Clone)]
pub struct VoxtralEncoderConfig {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub head_dim: usize,
pub scale_embedding: bool,
pub activation_function: String,
pub num_mel_bins: usize,
pub max_source_positions: usize,
pub initializer_range: f64,
pub attention_dropout: f64,
// These are set to 0.0 for compatibility with the Whisper modular architecture
pub dropout: f64,
pub layerdrop: f64,
pub activation_dropout: f64,
}
#[derive(Debug, Clone)]
pub struct VoxtralConfig {
pub audio_config: VoxtralEncoderConfig,
pub text_config: VoxtralLlamaConfig,
pub audio_token_id: usize,
pub projector_hidden_act: String,
}
impl Default for VoxtralConfig {
fn default() -> Self {
Self {
audio_config: VoxtralEncoderConfig::default(),
text_config: VoxtralLlamaConfig::voxtral_3b(),
audio_token_id: 24,
projector_hidden_act: "gelu".to_string(),
}
}
}
impl Default for VoxtralEncoderConfig {
fn default() -> Self {
Self {
vocab_size: 51866,
hidden_size: 1280,
intermediate_size: 5120,
num_hidden_layers: 32,
num_attention_heads: 20,
num_key_value_heads: 20,
head_dim: 64,
scale_embedding: false,
activation_function: "gelu".to_string(),
num_mel_bins: 128,
max_source_positions: 1500,
initializer_range: 0.02,
attention_dropout: 0.0,
// Set for Whisper compatibility
dropout: 0.0,
layerdrop: 0.0,
activation_dropout: 0.0,
}
}
}
impl VoxtralEncoderConfig {
/// Ensures dropout values are properly set for Whisper compatibility
pub fn with_whisper_compatibility(mut self) -> Self {
self.dropout = 0.0;
self.layerdrop = 0.0;
self.activation_dropout = 0.0;
self
}
}
/// Custom cache for multimodal inputs
#[derive(Debug, Clone)]
pub struct VoxtralCache {
cache: VoxtralLlamaCache,
audio_processed: bool,
cached_audio_embeds: Option<Tensor>,
cached_audio_positions: Option<Vec<(usize, usize)>>,
}
#[derive(Debug, Clone)]
pub struct VoxtralGenerationConfig {
pub max_new_tokens: usize,
pub temperature: f64,
pub top_p: Option<f64>,
pub device: Device,
/// If cache is None, the model will create a new cache.
pub cache: Option<VoxtralCache>,
}
impl VoxtralGenerationConfig {
pub fn new(device: Device) -> Self {
Self {
max_new_tokens: 500,
temperature: 0.0,
top_p: None,
device,
cache: None,
}
}
}
impl VoxtralCache {
pub fn new(
use_kv_cache: bool,
dtype: DType,
config: &VoxtralLlamaConfig,
device: &Device,
) -> Result<Self> {
Ok(Self {
cache: VoxtralLlamaCache::new(use_kv_cache, dtype, config, device)?,
audio_processed: false,
cached_audio_embeds: None,
cached_audio_positions: None,
})
}
pub fn reset(&mut self) {
// Reset the audio cache state
self.audio_processed = false;
self.cached_audio_embeds = None;
self.cached_audio_positions = None;
// Note: LlamaCache reset needs to be handled at a higher level
// as it requires device access
}
}
/// Safely clamp tensor values for different dtypes
fn safe_clamp(x: &Tensor) -> Result<Tensor> {
match x.dtype() {
DType::F16 => {
// Match PyTorch exactly: torch.finfo(torch.float16).max - 1000 = 64504.0
let max_val = 64504.0;
x.clamp(-max_val, max_val)
}
DType::BF16 => {
// BF16 has larger range, typically doesn't need clamping
Ok(x.clone())
}
_ => Ok(x.clone()),
}
}
/// Replace audio tokens in embeddings with projected audio features
pub fn replace_audio_tokens(
inputs_embeds: &Tensor,
audio_embeds: &Tensor,
audio_positions: &[(usize, usize)],
device: &Device,
) -> Result<Tensor> {
if audio_positions.is_empty() {
return Ok(inputs_embeds.clone());
}
let (batch_size, seq_len, hidden_size) = inputs_embeds.dims3()?;
let num_audio_tokens = audio_positions.len();
// HF-style: audio_embeds shape is (total_audio_seq_len, hidden_size)
let audio_embeds_dims = audio_embeds.dims2()?;
let total_audio_embeds = audio_embeds_dims.0;
// HF-style: Use audio embeddings one-to-one with audio tokens
// We should now have the right number of audio tokens in the input sequence
let audio_embeds = if total_audio_embeds >= num_audio_tokens {
// Take the first num_audio_tokens embeddings to match the audio tokens
if num_audio_tokens == total_audio_embeds {
audio_embeds.clone()
} else {
audio_embeds.i(0..num_audio_tokens)?
}
} else {
candle::bail!(
"Not enough audio embeddings: need {}, got {}. Input sequence should have {} audio tokens.",
num_audio_tokens,
total_audio_embeds,
total_audio_embeds
);
};
// Create result tensor starting with text embeddings
let mut result = inputs_embeds.clone();
// Replace audio tokens with audio embeddings
// Since we don't have scatter operations, we'll do this manually
for (idx, &(batch_idx, seq_idx)) in audio_positions.iter().enumerate() {
if batch_idx >= batch_size || seq_idx >= seq_len {
candle::bail!(
"Invalid audio position: ({}, {}) for tensor shape ({}, {}, {})",
batch_idx,
seq_idx,
batch_size,
seq_len,
hidden_size
);
}
// Get the audio embedding for this position
let audio_embed = audio_embeds.i(idx)?;
// Create a mask for this specific position
let mut position_mask = vec![0f32; batch_size * seq_len];
position_mask[batch_idx * seq_len + seq_idx] = 1.0;
let position_mask = Tensor::new(position_mask.as_slice(), device)?
.reshape((batch_size, seq_len, 1))?
.to_dtype(inputs_embeds.dtype())?;
// Broadcast audio embedding to full tensor shape
let audio_embed_broadcast = audio_embed.unsqueeze(0)?.unsqueeze(0)?.broadcast_as((
batch_size,
seq_len,
hidden_size,
))?;
// Update result: keep original where mask is 0, use audio where mask is 1
let inverse_mask = (1.0 - &position_mask)?;
result = (result.broadcast_mul(&inverse_mask)?
+ audio_embed_broadcast.broadcast_mul(&position_mask)?)?;
}
Ok(result)
}
/// Find positions of audio tokens in input sequences
pub fn find_audio_token_positions(
input_ids: &Tensor,
audio_token_id: usize,
) -> Result<Vec<(usize, usize)>> {
// Handle both i64 and u32 token types by converting to i64 first if needed
let input_ids = if input_ids.dtype() == candle::DType::U32 {
input_ids.to_dtype(candle::DType::I64)?
} else {
input_ids.clone()
};
let input_ids = input_ids.to_vec2::<i64>()?;
let mut positions = Vec::new();
for (batch_idx, sequence) in input_ids.iter().enumerate() {
for (seq_idx, &token_id) in sequence.iter().enumerate() {
if token_id as usize == audio_token_id {
positions.push((batch_idx, seq_idx));
}
}
}
Ok(positions)
}
#[derive(Debug, Clone)]
struct VoxtralAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
out_proj: Linear,
num_heads: usize,
head_dim: usize,
scaling: f64,
attention_dropout: Dropout,
}
impl VoxtralAttention {
fn new(cfg: &VoxtralEncoderConfig, vb: VarBuilder) -> Result<Self> {
let embed_dim = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let head_dim = embed_dim / num_heads;
if head_dim * num_heads != embed_dim {
candle::bail!(
"embed_dim must be divisible by num_heads ({} % {} != 0)",
embed_dim,
num_heads
);
}
let scaling = (head_dim as f64).powf(-0.5);
let q_proj = linear(embed_dim, embed_dim, vb.pp("q_proj"))?;
let k_proj = linear_no_bias(embed_dim, embed_dim, vb.pp("k_proj"))?;
let v_proj = linear(embed_dim, embed_dim, vb.pp("v_proj"))?;
let out_proj = linear(embed_dim, embed_dim, vb.pp("out_proj"))?;
let attention_dropout = Dropout::new(cfg.attention_dropout as f32);
Ok(Self {
q_proj,
k_proj,
v_proj,
out_proj,
num_heads,
head_dim,
scaling,
attention_dropout,
})
}
fn reshape_for_scores(&self, x: &Tensor, seq_len: usize, bsz: usize) -> Result<Tensor> {
x.reshape((bsz, seq_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()
}
}
impl Module for VoxtralAttention {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let (bsz, seq_len, _) = x.dims3()?;
// Project queries, keys, and values - apply scaling to queries to match PyTorch SDPA
let q = (self.q_proj.forward(x)? * self.scaling)?;
let k = self.k_proj.forward(x)?;
let v = self.v_proj.forward(x)?;
// Reshape for multi-head attention: (batch, seq_len, num_heads, head_dim) -> (batch, num_heads, seq_len, head_dim)
let q = self.reshape_for_scores(&q, seq_len, bsz)?;
let k = self.reshape_for_scores(&k, seq_len, bsz)?;
let v = self.reshape_for_scores(&v, seq_len, bsz)?;
// Manual SDPA-like implementation to match Python's numerical behavior exactly
// Use F16 precision throughout to match PyTorch's F16 model
let scores = q.matmul(&k.transpose(D::Minus2, D::Minus1)?)?;
// Apply softmax in same precision as input (F16) to match Python
let attn_weights = candle_nn::ops::softmax_last_dim(&scores)?;
// Apply attention dropout (disabled during inference)
let attn_weights = self.attention_dropout.forward(&attn_weights, false)?;
// Apply attention to values
let attn_output = attn_weights.matmul(&v)?;
// Reshape back to (batch, seq_len, embed_dim)
let attn_output = attn_output.transpose(1, 2)?.contiguous()?.reshape((
bsz,
seq_len,
self.num_heads * self.head_dim,
))?;
self.out_proj.forward(&attn_output)
}
}
#[derive(Debug, Clone)]
struct VoxtralEncoderLayer {
self_attn: VoxtralAttention,
self_attn_layer_norm: LayerNorm,
fc1: Linear,
fc2: Linear,
final_layer_norm: LayerNorm,
activation: candle_nn::Activation,
dropout: Dropout,
activation_dropout: Dropout,
}
impl VoxtralEncoderLayer {
fn new(cfg: &VoxtralEncoderConfig, vb: VarBuilder) -> Result<Self> {
let embed_dim = cfg.hidden_size;
let self_attn = VoxtralAttention::new(cfg, vb.pp("self_attn"))?;
let self_attn_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("self_attn_layer_norm"))?;
let fc1 = linear(embed_dim, cfg.intermediate_size, vb.pp("fc1"))?;
let fc2 = linear(cfg.intermediate_size, embed_dim, vb.pp("fc2"))?;
let final_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("final_layer_norm"))?;
let activation = match cfg.activation_function.as_str() {
"gelu" => candle_nn::Activation::Gelu,
"relu" => candle_nn::Activation::Relu,
_ => candle::bail!(
"Unsupported activation function: {}",
cfg.activation_function
),
};
let dropout = Dropout::new(cfg.dropout as f32);
let activation_dropout = Dropout::new(cfg.activation_dropout as f32);
Ok(Self {
self_attn,
self_attn_layer_norm,
fc1,
fc2,
final_layer_norm,
activation,
dropout,
activation_dropout,
})
}
pub fn get_fc1_out_dim(&self) -> usize {
// Return the fc1 output dimension (the intermediate size) from the weight shape,
// since Linear doesn't expose its output dimension directly.
self.fc1.weight().dims()[0]
}
fn forward(&self, x: &Tensor, training: bool) -> Result<Tensor> {
// Self-attention with residual connection
let residual = x;
let x = self.self_attn_layer_norm.forward(x)?;
let x = self.self_attn.forward(&x)?;
let x = self.dropout.forward(&x, training)?;
let x = (x + residual)?;
// Feed-forward network with residual connection
let residual = &x;
let x = self.final_layer_norm.forward(&x)?;
let x = self.fc1.forward(&x)?;
let x = x.apply(&self.activation)?;
let x = self.activation_dropout.forward(&x, training)?;
let x = self.fc2.forward(&x)?;
let x = self.dropout.forward(&x, training)?;
let x = (x + residual)?;
// Safe clamping for numerical stability
safe_clamp(&x)
}
}
#[derive(Debug, Clone)]
pub struct VoxtralEncoder {
conv1: Conv1d,
conv2: Conv1d,
embed_positions: Tensor,
layers: Vec<VoxtralEncoderLayer>,
layer_norm: LayerNorm,
dropout: Dropout,
layerdrop: f64,
}
impl VoxtralEncoder {
pub fn new(cfg: &VoxtralEncoderConfig, vb: VarBuilder) -> Result<Self> {
// Ensure Whisper compatibility
let cfg = cfg.clone().with_whisper_compatibility();
let embed_dim = cfg.hidden_size;
// Convolutional layers for processing mel features
let conv1 = candle_nn::conv1d(
cfg.num_mel_bins,
embed_dim,
3,
candle_nn::Conv1dConfig {
padding: 1,
..Default::default()
},
vb.pp("conv1"),
)?;
let conv2 = candle_nn::conv1d(
embed_dim,
embed_dim,
3,
candle_nn::Conv1dConfig {
stride: 2,
padding: 1,
..Default::default()
},
vb.pp("conv2"),
)?;
// Position embeddings
let embed_positions = vb.get(
(cfg.max_source_positions, embed_dim),
"embed_positions.weight",
)?;
// Transformer layers
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
for i in 0..cfg.num_hidden_layers {
layers.push(VoxtralEncoderLayer::new(
&cfg,
vb.pp(format!("layers.{i}")),
)?);
}
let layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("layer_norm"))?;
let dropout = Dropout::new(cfg.dropout as f32);
Ok(Self {
conv1,
conv2,
embed_positions,
layers,
layer_norm,
dropout,
layerdrop: cfg.layerdrop,
})
}
pub fn forward(&self, input_features: &Tensor) -> Result<Tensor> {
self.forward_with_training(input_features, false)
}
pub fn forward_with_training(&self, input_features: &Tensor, training: bool) -> Result<Tensor> {
// Keep conv layers in F16 to avoid shape issues
let expected_dtype = self.conv1.weight().dtype();
let input_features = if input_features.dtype() != expected_dtype {
input_features.to_dtype(expected_dtype)?
} else {
input_features.clone()
};
// Apply convolutional layers with GELU activation
let x = if false {
// Keep conv layers in F16
// Convert conv1 weights to F32 for computation
let conv1_weight_f32 = self.conv1.weight().to_dtype(DType::F32)?;
let conv1_bias_f32 = if let Some(bias) = self.conv1.bias() {
Some(bias.to_dtype(DType::F32)?)
} else {
None
};
// Manual conv1d operation with F32 precision - conv1 has stride=1, padding=1
let mut conv_result = input_features.conv1d(&conv1_weight_f32, 1, 1, 1, 1)?;
if let Some(bias) = conv1_bias_f32 {
conv_result = conv_result.broadcast_add(&bias.unsqueeze(0)?.unsqueeze(2)?)?;
}
conv_result
} else {
self.conv1.forward(&input_features)?
};
// Apply GELU activation after conv1 (matches Python: conv1 -> GELU)
let x = x.gelu()?;
// Apply conv2 (matches Python: conv2)
let x = if false {
// Keep conv layers in F16
// Convert conv2 weights to F32 for computation
let conv2_weight_f32 = self.conv2.weight().to_dtype(DType::F32)?;
let conv2_bias_f32 = if let Some(bias) = self.conv2.bias() {
Some(bias.to_dtype(DType::F32)?)
} else {
None
};
// Manual conv1d operation with F32 precision - conv2 has stride=2, padding=1
let mut conv_result = x.conv1d(&conv2_weight_f32, 2, 1, 1, 1)?;
if let Some(bias) = conv2_bias_f32 {
conv_result = conv_result.broadcast_add(&bias.unsqueeze(0)?.unsqueeze(2)?)?;
}
conv_result
} else {
self.conv2.forward(&x)?
};
// Apply GELU activation after conv2 (FIX: matches Python: conv2 -> GELU)
let x = x.gelu()?;
// Reshape: (batch, embed_dim, seq_len) -> (batch, seq_len, embed_dim)
let x = x.transpose(1, 2)?;
// Add position embeddings - handle F32 position embeddings + F16 hidden states like PyTorch
let seq_len = x.dim(1)?;
let positions = self.embed_positions.i(..seq_len)?;
// PyTorch automatically promotes F16 + F32 -> F32, then converts back to original dtype
// We need to match this behavior exactly
let x = if false {
// Keep position embeddings in mixed precision
// Force F32 computation for position embeddings
let x_f32 = x.to_dtype(candle::DType::F32)?;
let positions_f32 = positions.to_dtype(candle::DType::F32)?;
x_f32.broadcast_add(&positions_f32)? // Keep result in F32
} else if x.dtype() != positions.dtype() {
// Convert hidden states to F32 for addition (positions are already F32)
let x_f32 = x.to_dtype(candle::DType::F32)?;
let result_f32 = x_f32.broadcast_add(&positions)?;
// Convert back to original hidden states dtype (F16)
result_f32.to_dtype(x.dtype())?
} else {
x.broadcast_add(&positions)?
};
// Apply dropout
let mut x = self.dropout.forward(&x, training)?;
for (idx, layer) in self.layers.iter().enumerate() {
// Keep all computation in F16
x = self.forward_layer_with_dropout(&x, layer, idx, training)?;
}
// Apply final layer normalization (critical for proper output values!)
let x = self.layer_norm.forward(&x)?;
Ok(x)
}
/// Forward a single layer with stochastic depth (layer dropout)
fn forward_layer_with_dropout(
&self,
x: &Tensor,
layer: &VoxtralEncoderLayer,
_layer_idx: usize,
training: bool,
) -> Result<Tensor> {
if training && self.layerdrop > 0.0 {
// Apply stochastic depth with proper randomization
let mut rng = rand::rng();
let keep_prob = 1.0 - self.layerdrop;
let keep: bool = rng.random::<f64>() < keep_prob;
if !keep {
// Skip layer entirely (identity mapping)
return Ok(x.clone());
}
}
layer.forward(x, training)
}
/// Get the output dimension of the first FC layer (needed for projector)
pub fn get_intermediate_size(&self) -> usize {
if !self.layers.is_empty() {
self.layers[0].get_fc1_out_dim()
} else {
// Fallback: hard-coded default intermediate size for the Voxtral encoder
5120
}
}
/// Process long audio sequences in chunks to save memory
pub fn process_long_audio(
&self,
input_features: &Tensor,
chunk_size: usize,
overlap: usize,
) -> Result<Tensor> {
let (_batch_size, _num_mel, seq_len) = input_features.dims3()?;
if seq_len <= chunk_size {
return self.forward(input_features);
}
let mut outputs = Vec::new();
let step = chunk_size - overlap;
for start in (0..seq_len).step_by(step) {
let end = (start + chunk_size).min(seq_len);
let chunk = input_features.i((.., .., start..end))?;
// Process chunk
let output = self.forward(&chunk)?;
// Handle overlap by averaging
if !outputs.is_empty() && overlap > 0 {
let overlap_frames = overlap / 2; // Account for conv2 stride
let last_output: &mut Tensor = outputs.last_mut().unwrap();
let last_len = last_output.dim(1)?;
// Average overlapping regions
let overlap_start = last_len.saturating_sub(overlap_frames);
let overlap_new = output.i((.., ..overlap_frames, ..))?;
let overlap_old = last_output.i((.., overlap_start.., ..))?;
let averaged = ((overlap_old + overlap_new)? * 0.5)?;
// Update last output
*last_output =
Tensor::cat(&[&last_output.i((.., ..overlap_start, ..))?, &averaged], 1)?;
// Add non-overlapping part of current chunk
outputs.push(output.i((.., overlap_frames.., ..))?);
} else {
outputs.push(output);
}
}
// Concatenate all outputs
let outputs_ref: Vec<&Tensor> = outputs.iter().collect();
Tensor::cat(&outputs_ref, 1)
}
}
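// Illustrative sketch (added for exposition, not part of the upstream file): the chunk-start
// and overlap arithmetic that `process_long_audio` relies on, checked on plain numbers. The
// concrete seq_len / chunk_size / overlap values below are assumptions for the example; the
// conv2 stride of 2 is what halves the overlap on the output side.
#[cfg(test)]
mod long_audio_chunking_math {
    #[test]
    fn chunk_starts_cover_the_sequence() {
        let (seq_len, chunk_size, overlap) = (7000usize, 3000usize, 200usize);
        let step = chunk_size - overlap;
        let starts: Vec<usize> = (0..seq_len).step_by(step).collect();
        // Each chunk begins `step` frames after the previous one; the last one still
        // reaches the end of the sequence once clamped to seq_len.
        assert_eq!(starts, vec![0usize, 2800, 5600]);
        assert!(starts.last().unwrap() + chunk_size >= seq_len);
        // After conv2 (stride 2) the overlapping region is half as long in output frames.
        assert_eq!(overlap / 2, 100);
    }
}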
#[derive(Debug, Clone)]
pub struct VoxtralMultiModalProjector {
linear_1: Linear,
linear_2: Linear,
activation: candle_nn::Activation,
}
impl VoxtralMultiModalProjector {
pub fn new(cfg: &VoxtralConfig, vb: VarBuilder) -> Result<Self> {
let linear_1 = linear_no_bias(
cfg.audio_config.intermediate_size,
cfg.text_config.hidden_size,
vb.pp("linear_1"),
)?;
let linear_2 = linear_no_bias(
cfg.text_config.hidden_size,
cfg.text_config.hidden_size,
vb.pp("linear_2"),
)?;
let activation = match cfg.projector_hidden_act.as_str() {
"gelu" => candle_nn::Activation::Gelu,
"relu" => candle_nn::Activation::Relu,
_ => candle::bail!(
"Unsupported projector activation: {}",
cfg.projector_hidden_act
),
};
Ok(Self {
linear_1,
linear_2,
activation,
})
}
pub fn forward(&self, audio_features: &Tensor) -> Result<Tensor> {
let x = self.linear_1.forward(audio_features)?;
let x = x.apply(&self.activation)?;
self.linear_2.forward(&x)
}
}
#[derive(Debug, Clone)]
pub struct VoxtralForConditionalGeneration {
audio_tower: VoxtralEncoder,
language_model: VoxtralLlama,
multi_modal_projector: VoxtralMultiModalProjector,
audio_token_id: usize,
audio_config: VoxtralEncoderConfig,
text_config: VoxtralLlamaConfig,
}
impl VoxtralForConditionalGeneration {
pub fn new(cfg: &VoxtralConfig, vb: VarBuilder) -> Result<Self> {
let audio_tower = VoxtralEncoder::new(&cfg.audio_config, vb.pp("audio_tower"))?;
let language_model = VoxtralLlama::load(vb.pp("language_model"), &cfg.text_config)?;
let multi_modal_projector =
VoxtralMultiModalProjector::new(cfg, vb.pp("multi_modal_projector"))?;
Ok(Self {
audio_tower,
language_model,
multi_modal_projector,
audio_token_id: cfg.audio_token_id,
audio_config: cfg.audio_config.clone(),
text_config: cfg.text_config.clone(),
})
}
/// Get the audio token ID used for this model
pub fn audio_token_id(&self) -> usize {
self.audio_token_id
}
/// Get the text model configuration
pub fn text_config(&self) -> &VoxtralLlamaConfig {
&self.text_config
}
/// Get the audio encoder configuration
pub fn audio_config(&self) -> &VoxtralEncoderConfig {
&self.audio_config
}
/// Process audio features through encoder and projector
pub fn get_audio_embeds(&self, input_features: &Tensor) -> Result<Tensor> {
let audio_outputs = self.audio_tower.forward(input_features)?;
// Following the HF implementation: flatten to (-1, audio_config.intermediate_size) before
// the projection, i.e. Python's audio_hidden_states.reshape(-1, intermediate_size).
// E.g. [1, 1500, 1280] -> [375, 5120]: the element count (1_920_000) is preserved, so
// new_batch_size = (batch_size * seq_len * hidden_size) / intermediate_size.
let (batch_size, seq_len, hidden_size) = audio_outputs.dims3()?;
let total_elements = batch_size * seq_len * hidden_size;
let new_batch_size = total_elements / self.audio_config.intermediate_size;
// Verify the flattening is exact before reshaping
if total_elements % self.audio_config.intermediate_size != 0 {
candle::bail!(
"audio encoder output of shape ({batch_size}, {seq_len}, {hidden_size}) cannot be reshaped into rows of intermediate_size {}",
self.audio_config.intermediate_size
);
}
let audio_hidden =
audio_outputs.reshape((new_batch_size, self.audio_config.intermediate_size))?;
// Project to text space - this gives us embeddings for each audio position
let projected = self.multi_modal_projector.forward(&audio_hidden)?;
// Return shape: (batch_size * seq_len, text_hidden_size)
// This matches HF implementation - no pooling, keep all audio token embeddings
Ok(projected)
}
/// Process long audio sequences efficiently
pub fn get_audio_embeds_chunked(
&self,
input_features: &Tensor,
chunk_size: usize,
overlap: usize,
) -> Result<Tensor> {
let audio_outputs =
self.audio_tower
.process_long_audio(input_features, chunk_size, overlap)?;
// Reshape and project (now outputs hidden_size, needs reshape to intermediate_size)
let (batch_size, seq_len, hidden_size) = audio_outputs.dims3()?;
// Apply same reshape logic as get_audio_embeds
let total_elements = batch_size * seq_len * hidden_size;
let new_batch_size = total_elements / self.audio_config.intermediate_size;
let audio_hidden =
audio_outputs.reshape((new_batch_size, self.audio_config.intermediate_size))?;
let projected = self.multi_modal_projector.forward(&audio_hidden)?;
// Reshape back to (batch_size, seq_len, text_hidden_size) for pooling
let text_hidden_size = self.text_config.hidden_size;
let projected = projected.reshape((batch_size, seq_len, text_hidden_size))?;
// Apply mean pooling to reduce to single audio embedding per batch
let pooled = projected.mean(1)?; // Mean across sequence dimension
// Return shape: (batch_size, text_hidden_size)
Ok(pooled)
}
/// Forward pass with audio features and text input
pub fn forward(
&self,
input_ids: &Tensor,
input_features: Option<&Tensor>,
cache: &mut VoxtralCache,
index_pos: usize,
) -> Result<Tensor> {
// Get text embeddings
let mut inputs_embeds = self.language_model.embed(input_ids)?;
// If audio features are provided and not yet processed
if let Some(features) = input_features {
if !cache.audio_processed {
let audio_embeds = self.get_audio_embeds(features)?;
let audio_positions = find_audio_token_positions(input_ids, self.audio_token_id)?;
// Cache for future use
cache.cached_audio_embeds = Some(audio_embeds.clone());
cache.cached_audio_positions = Some(audio_positions.clone());
cache.audio_processed = true;
inputs_embeds = replace_audio_tokens(
&inputs_embeds,
&audio_embeds,
&audio_positions,
input_ids.device(),
)?;
}
}
// Forward through language model using forward_input_embed
self.language_model
.forward_input_embed(&inputs_embeds, index_pos, &mut cache.cache)
}
/// Generate text given audio input
pub fn generate(
&self,
input_ids: &Tensor,
input_features: Option<&Tensor>,
config: VoxtralGenerationConfig,
) -> Result<Vec<u32>> {
// Validate inputs
if config.max_new_tokens == 0 {
return input_ids.i(0)?.to_vec1::<u32>(); // Get first batch
}
if config.temperature < 0.0 {
candle::bail!(
"Temperature must be non-negative, got {}",
config.temperature
);
}
if let Some(p) = config.top_p {
if !(0.0..=1.0).contains(&p) {
candle::bail!("top_p must be between 0 and 1, got {}", p);
}
}
let mut final_cache = if let Some(cache) = config.cache {
cache
} else {
// Get the dtype from the language model by creating a small embedding
let dummy_token = Tensor::new(&[1u32], &config.device)?;
let dummy_embed = self.language_model.embed(&dummy_token)?;
let model_dtype = dummy_embed.dtype();
VoxtralCache::new(true, model_dtype, &self.text_config, &config.device)?
};
let mut tokens = input_ids.i(0)?.to_vec1::<u32>()?; // Get first batch
let initial_len = tokens.len();
for idx in 0..config.max_new_tokens {
let (input, index_pos) = if idx == 0 {
(input_ids.clone(), 0)
} else {
// For subsequent generation steps, use only the last token
let last_token = tokens[tokens.len() - 1];
let calculated_pos = initial_len + idx - 1;
(
Tensor::new(&[last_token], &config.device)?.unsqueeze(0)?,
calculated_pos,
)
};
let logits = if idx == 0 {
// First pass - include audio features
match self.forward(&input, input_features, &mut final_cache, index_pos) {
Ok(logits) => logits,
Err(e) => {
return Err(candle::Error::Msg(format!(
"Failed to generate tokens: {e}"
)));
}
}
} else {
// Subsequent passes - text only
match self.forward(&input, None, &mut final_cache, index_pos) {
Ok(logits) => logits,
Err(e) => {
return Err(candle::Error::Msg(format!(
"Failed to generate tokens: {e}"
)));
}
}
};
// Handle both 2D [batch, vocab] and 3D [batch, seq_len, vocab] logits
let logits = if logits.dims().len() == 3 {
// 3D case: [batch, seq_len, vocab] -> get last token
logits.i((.., logits.dim(1)? - 1, ..))?
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/voxtral/mod.rs | candle-transformers/src/models/voxtral/mod.rs | pub mod audio;
pub mod model;
pub mod voxtral_llama;
pub use audio::extract_features;
pub use model::{
VoxtralCache, VoxtralConfig, VoxtralEncoder, VoxtralEncoderConfig,
VoxtralForConditionalGeneration, VoxtralGenerationConfig, VoxtralMultiModalProjector,
};
pub use voxtral_llama::{VoxtralLlama, VoxtralLlamaCache, VoxtralLlamaConfig};
pub const N_FFT: usize = 400;
pub const HOP_LENGTH: usize = 160;
pub const N_MELS: usize = 128;
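// Illustrative sketch (added for exposition, not part of the upstream file): how these
// constants relate to audio duration, assuming the 16 kHz sample rate used by the Whisper
// feature extractor this module builds on. One mel frame covers HOP_LENGTH samples, so a
// 30 s clip fills exactly one 3000-frame encoder chunk.
#[cfg(test)]
mod frame_geometry {
    use super::{HOP_LENGTH, N_FFT, N_MELS};

    #[test]
    fn thirty_seconds_is_one_encoder_chunk() {
        let sample_rate = 16_000usize; // assumed, matches the Whisper front-end
        assert_eq!(N_FFT * 1000 / sample_rate, 25); // 25 ms analysis window
        assert_eq!(HOP_LENGTH * 1000 / sample_rate, 10); // 10 ms hop -> 100 frames per second
        assert_eq!(30 * sample_rate / HOP_LENGTH, 3000); // 30 s -> one 3000-frame chunk
        assert_eq!(N_MELS, 128); // mel bins expected by the encoder conv stack
    }
}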
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/voxtral/audio.rs | candle-transformers/src/models/voxtral/audio.rs | use candle::{DType, Device, Error, Tensor};
use crate::models::whisper::audio::{log_mel_spectrogram_, Float};
pub fn pcm_to_mel<T: Float>(samples: &[T], filters: &[T]) -> Vec<T> {
log_mel_spectrogram_(
samples,
filters,
super::N_FFT,
super::HOP_LENGTH,
super::N_MELS,
false,
)
}
/// Process audio with the Whisper feature-extraction algorithm, then apply
/// VoxtralProcessor-style chunking into fixed-size mel windows.
pub fn extract_features(audio: &[f32], filters: &[f32], device: &Device) -> Result<Tensor, Error> {
const N_MELS: usize = super::N_MELS;
// Compute the log-mel spectrogram with the Whisper implementation from the parent module
let mel_vec = pcm_to_mel(audio, filters);
// The whisper implementation returns Vec<f32> in shape (n_mel * n_len)
// We need to reshape it to match the expected tensor format
let n_mel = super::N_MELS;
let n_len = mel_vec.len() / n_mel;
// Create tensor with shape (n_mel, n_len) then add batch dimension
let mel_tensor = Tensor::from_vec(mel_vec, (n_mel, n_len), device)?;
let mel_tensor = mel_tensor.unsqueeze(0)?; // Add batch dimension -> (1, n_mel, n_len)
// Convert tensor back to Vec<f32> for compatibility with existing code
let mel = mel_tensor.flatten_all()?.to_vec1::<f32>()?;
let mel_len = mel.len();
// Apply VoxtralProcessor chunking exactly like Python
let total_frames = mel_len / N_MELS;
let max_source_positions = 3000; // From VoxtralProcessor defaults
// Python approach: reshape (feature_size, total_frames) -> (feature_size, -1, max_source_positions)
// First, create mel tensor with shape (N_MELS, total_frames)
let mel_tensor = Tensor::from_vec(mel, (N_MELS, total_frames), device)
.map_err(|e| Error::Msg(format!("Failed to create mel tensor: {e}")))?;
// Calculate number of chunks (equivalent to Python's -1 dimension in reshape)
let num_chunks = total_frames.div_ceil(max_source_positions);
// Pad the mel tensor to be divisible by max_source_positions
let padded_frames = num_chunks * max_source_positions;
let padding_needed = padded_frames - total_frames;
let mel_padded = if padding_needed > 0 {
let padding = Tensor::zeros((N_MELS, padding_needed), DType::F32, device)?;
Tensor::cat(&[&mel_tensor, &padding], 1)?
} else {
mel_tensor
};
// Reshape to (N_MELS, num_chunks, max_source_positions)
let reshaped = mel_padded.reshape((N_MELS, num_chunks, max_source_positions))?;
// Transpose to (num_chunks, N_MELS, max_source_positions) - matching Python's transpose(0,1)
let audio_features = reshaped.transpose(0, 1)?;
Ok(audio_features)
}
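// Illustrative sketch (added for exposition, not part of the upstream file): the tensor
// returned by `extract_features` is (num_chunks, N_MELS, 3000); slicing along dim 0 with
// `narrow` keeps the batch dimension and yields the (1, N_MELS, 3000) inputs the encoder's
// conv stack consumes. A zero tensor stands in for real features here.
#[cfg(test)]
mod feature_layout {
    use candle::{DType, Device, Tensor};

    #[test]
    fn chunks_keep_the_batch_dimension() -> candle::Result<()> {
        let device = Device::Cpu;
        let features = Tensor::zeros((2, super::super::N_MELS, 3000), DType::F32, &device)?;
        for idx in 0..features.dim(0)? {
            let chunk = features.narrow(0, idx, 1)?;
            assert_eq!(chunk.dims3()?, (1, super::super::N_MELS, 3000));
        }
        Ok(())
    }
}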
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/llava/config.rs | candle-transformers/src/models/llava/config.rs | use std::collections::HashMap;
use crate::models::{
clip::{text_model::Activation, vision_model::ClipVisionConfig},
llama::{Config, LlamaEosToks},
};
use serde::{Deserialize, Serialize};
// original config from liuhaotian/llava
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LLaVAConfig {
pub architectures: Vec<String>,
pub bos_token_id: usize,
pub eos_token_id: usize,
pub hidden_size: usize,
#[serde(default = "default_image_aspect_ratio")]
pub image_aspect_ratio: String,
pub image_crop_resolution: usize,
pub image_grid_pinpoints: Vec<(u32, u32)>,
pub image_split_resolution: usize,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub mm_hidden_size: usize,
#[serde(default = "default_mm_patch_merge_type")]
pub mm_patch_merge_type: String,
pub mm_projector_type: String,
pub mm_use_im_start_end: bool,
pub mm_vision_select_feature: String,
pub mm_vision_select_layer: isize,
pub mm_vision_tower: Option<String>,
pub model_type: String,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub pad_token_id: usize,
pub rms_norm_eps: f32,
pub rope_theta: f32,
pub tokenizer_model_max_length: Option<usize>,
pub torch_dtype: String,
pub use_cache: bool,
pub vocab_size: usize,
#[serde(default = "default_image_token_index")]
pub image_token_index: isize,
#[serde(default = "default_hf")]
pub hf: bool,
pub tie_word_embeddings: Option<bool>,
}
fn default_hf() -> bool {
false
}
fn default_image_token_index() -> isize {
-200
}
fn default_mm_patch_merge_type() -> String {
"flat".to_string()
}
fn default_image_aspect_ratio() -> String {
"square".to_string()
}
impl LLaVAConfig {
pub fn to_llama_config(&self) -> Config {
Config {
hidden_size: self.hidden_size,
intermediate_size: self.intermediate_size,
vocab_size: self.vocab_size,
num_hidden_layers: self.num_hidden_layers,
num_attention_heads: self.num_attention_heads,
num_key_value_heads: self.num_key_value_heads,
rms_norm_eps: self.rms_norm_eps as f64,
rope_theta: self.rope_theta,
bos_token_id: Some(self.bos_token_id as u32),
eos_token_id: Some(LlamaEosToks::Single(self.eos_token_id as u32)),
use_flash_attn: false,
rope_scaling: None, // assume the base model is not a Llama 3.1 variant, so no rope scaling
max_position_embeddings: self.max_position_embeddings,
tie_word_embeddings: self.tie_word_embeddings.unwrap_or(false),
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFLLaVATextConfig {
pub architectures: Vec<String>,
#[serde(default = "default_hidden_size")]
pub hidden_size: usize,
#[serde(default = "default_intermediate_size")]
pub intermediate_size: usize,
#[serde(default = "default_max_length")]
pub max_length: usize,
pub max_position_embeddings: usize,
pub model_type: String,
#[serde(default = "default_num_attention_heads")]
pub num_attention_heads: usize,
#[serde(default = "default_num_hidden_layers")]
pub num_hidden_layers: usize,
#[serde(default = "default_num_key_value_heads")]
pub num_key_value_heads: usize,
pub pad_token_id: usize,
pub rms_norm_eps: f32,
#[serde(default = "default_rope_theta")]
pub rope_theta: f32,
pub torch_dtype: String,
#[serde(default = "default_use_cache")]
pub use_cache: bool,
pub vocab_size: usize,
}
fn default_num_hidden_layers() -> usize {
32
}
fn default_use_cache() -> bool {
true
}
fn default_hidden_size() -> usize {
4096
}
fn default_intermediate_size() -> usize {
11008
}
fn default_max_length() -> usize {
4096
}
fn default_num_attention_heads() -> usize {
32
}
fn default_num_key_value_heads() -> usize {
32
}
fn default_rope_theta() -> f32 {
10000.0
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFLLaVAVisionConfig {
pub hidden_size: usize,
pub image_size: usize,
pub intermediate_size: usize,
pub model_type: String,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub patch_size: usize,
pub projection_dim: usize,
pub vocab_size: usize,
}
// config from llava-v1.6-vicuna-7b-hf
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFLLaVAConfig {
pub architectures: Vec<String>,
pub ignore_index: isize,
pub image_grid_pinpoints: Vec<(u32, u32)>,
pub image_token_index: isize,
pub model_type: String,
pub projector_hidden_act: String,
pub text_config: HFLLaVATextConfig,
pub torch_dtype: String,
pub use_image_newline_parameter: bool,
pub vision_config: HFLLaVAVisionConfig,
pub vision_feature_layer: isize,
pub vision_feature_select_strategy: String,
pub vocab_size: usize,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFGenerationConfig {
pub bos_token_id: usize,
pub eos_token_id: usize,
#[serde(default = "default_max_length")]
pub max_length: usize,
pub pad_token_id: usize,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFPreProcessorConfig {
pub aspect_ratio_setting: String,
pub crop_size: HashMap<String, usize>,
pub do_center_crop: bool,
pub do_convert_rgb: bool,
pub do_normalize: bool,
pub do_rescale: bool,
pub do_resize: bool,
pub image_mean: Vec<f32>,
pub image_std: Vec<f32>,
pub resample: u32,
pub rescale_factor: f32,
pub size: HashMap<String, f32>,
}
impl HFLLaVAConfig {
pub fn to_clip_vision_config(&self) -> ClipVisionConfig {
ClipVisionConfig {
embed_dim: self.vision_config.hidden_size,
activation: Activation::QuickGelu,
intermediate_size: self.vision_config.intermediate_size,
num_hidden_layers: self.vision_config.num_hidden_layers,
num_attention_heads: self.vision_config.num_attention_heads,
projection_dim: self.vision_config.projection_dim,
num_channels: 3,
image_size: self.vision_config.image_size,
patch_size: self.vision_config.patch_size,
}
}
fn map_projector_type(s: &str) -> String {
if s == "gelu" {
"mlp2x_gelu".to_string()
} else {
s.to_string()
}
}
fn map_select_feature(s: &str) -> String {
if s == "default" {
"patch".to_string()
} else {
"cls_patch".to_string()
}
}
pub fn to_llava_config(
&self,
generation_config: &HFGenerationConfig,
preprocessor_config: &HFPreProcessorConfig,
) -> LLaVAConfig {
LLaVAConfig {
hf: true,
architectures: self.architectures.clone(),
bos_token_id: generation_config.bos_token_id,
eos_token_id: generation_config.eos_token_id,
hidden_size: self.text_config.hidden_size,
image_aspect_ratio: preprocessor_config.aspect_ratio_setting.clone(),
image_crop_resolution: 224,
image_grid_pinpoints: self.image_grid_pinpoints.clone(),
image_split_resolution: 224,
intermediate_size: self.text_config.intermediate_size,
max_position_embeddings: self.text_config.max_position_embeddings,
mm_hidden_size: 1024,
mm_patch_merge_type: "spatial_unpad".to_string(),
mm_projector_type: Self::map_projector_type(&self.projector_hidden_act),
mm_use_im_start_end: false,
mm_vision_select_feature: Self::map_select_feature(
&self.vision_feature_select_strategy,
),
mm_vision_select_layer: self.vision_feature_layer,
mm_vision_tower: None,
model_type: self.model_type.clone(),
num_attention_heads: self.text_config.num_attention_heads,
num_hidden_layers: self.text_config.num_hidden_layers,
num_key_value_heads: self.text_config.num_key_value_heads,
pad_token_id: self.text_config.pad_token_id,
rms_norm_eps: self.text_config.rms_norm_eps,
rope_theta: self.text_config.rope_theta,
tokenizer_model_max_length: Some(4096),
torch_dtype: self.torch_dtype.clone(),
use_cache: self.text_config.use_cache,
vocab_size: self.vocab_size,
image_token_index: self.image_token_index,
tie_word_embeddings: None,
}
}
}
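// Quick illustration (added for exposition, not part of the upstream file) of the two
// private mapping helpers above: HF activation / feature-selection names are translated
// into the strings the original liuhaotian/llava configs use.
#[cfg(test)]
mod hf_mapping_tests {
    use super::HFLLaVAConfig;

    #[test]
    fn hf_names_map_to_original_llava_strings() {
        // "gelu" corresponds to the original repo's two-layer GELU MLP projector
        assert_eq!(HFLLaVAConfig::map_projector_type("gelu"), "mlp2x_gelu");
        assert_eq!(HFLLaVAConfig::map_projector_type("linear"), "linear");
        // "default" keeps patch tokens only; anything else also keeps the CLS token
        assert_eq!(HFLLaVAConfig::map_select_feature("default"), "patch");
        assert_eq!(HFLLaVAConfig::map_select_feature("full"), "cls_patch");
    }
}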
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/llava/utils.rs | candle-transformers/src/models/llava/utils.rs | pub fn get_anyres_image_grid_shape(
image_size: (u32, u32),
grid_pinpoints: &[(u32, u32)],
patch_size: u32,
) -> (u32, u32) {
let (width, height) = select_best_resolution(image_size, grid_pinpoints);
(width / patch_size, height / patch_size)
}
pub fn select_best_resolution(
original_size: (u32, u32),
possible_resolutions: &[(u32, u32)],
) -> (u32, u32) {
let (original_width, original_height) = original_size;
let mut best_fit = (0, 0);
let original_width_f = original_width as f32;
let original_height_f = original_height as f32;
let mut max_effective_resolution = 0_u32;
let mut min_wasted_resolution = u32::MAX;
for (width, height) in possible_resolutions {
let width_f = *width as f32;
let height_f = *height as f32;
let scale = (width_f / original_width_f).min(height_f / original_height_f);
let (downscaled_width, downscaled_height) = (
(original_width_f * scale) as u32,
(original_height_f * scale) as u32,
);
let effective_resolution =
std::cmp::min((*width) * (*height), downscaled_width * downscaled_height);
let wasted_resolution = (*width) * (*height) - effective_resolution;
if effective_resolution > max_effective_resolution
|| (effective_resolution == max_effective_resolution
&& wasted_resolution < min_wasted_resolution)
{
best_fit = (*width, *height);
max_effective_resolution = effective_resolution;
min_wasted_resolution = wasted_resolution;
}
}
best_fit
}
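// Worked example (added for exposition, not part of the upstream file): when the image
// already matches one of the candidate resolutions, `select_best_resolution` picks it
// (maximal effective resolution, zero waste) and the anyres grid comes out in whole
// 336px patches. The pinpoint list is an assumption chosen for round numbers.
#[cfg(test)]
mod anyres_tests {
    use super::{get_anyres_image_grid_shape, select_best_resolution};

    #[test]
    fn picks_the_exact_pinpoint_for_a_matching_image() {
        let pinpoints = [(336, 672), (672, 336), (672, 672)];
        // (width, height) of the original image
        let best = select_best_resolution((672, 336), &pinpoints);
        assert_eq!(best, (672, 336));
        // two 336px patches across, one down
        assert_eq!(
            get_anyres_image_grid_shape((672, 336), &pinpoints, 336),
            (2, 1)
        );
    }
}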
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/llava/mod.rs | candle-transformers/src/models/llava/mod.rs | //! The LLaVA (Large Language and Vision Assistant) model.
//!
//! This provides the main model implementation, combining a vision tower (CLIP) with a
//! language model (Llama) for multimodal capabilities. Vision features are mapped into the
//! language model's embedding space by a small projector (linear or MLP).
//!
//! - 💻[GH Link](https://github.com/haotian-liu/LLaVA/tree/main)
//! - 📝 [Paper](https://arxiv.org/abs/2304.08485): Visual Instruction Tuning
//!
pub mod config;
pub mod utils;
use crate::models::clip::vision_model::{ClipVisionConfig, ClipVisionTransformer};
use crate::models::llama::{Cache, Llama};
use crate::models::with_tracing::linear;
use candle::{bail, Context, Device, IndexOp, Result, Tensor};
use candle_nn::{seq, Activation, Module, Sequential, VarBuilder};
use fancy_regex::Regex;
use utils::get_anyres_image_grid_shape;
use config::LLaVAConfig;
fn mlp_gelu_match(mm_projector_type: &str) -> Option<usize> {
let mlp_gelu_regex = Regex::new(r"^mlp(\d+)x_gelu$").unwrap();
if let Ok(Some(captures)) = mlp_gelu_regex.captures(mm_projector_type) {
if let Some(match_str) = captures.get(1) {
let match_str = match_str.as_str();
match_str.parse::<usize>().ok()
} else {
None
}
} else {
None
}
}
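// Quick illustration (added for exposition, not part of the upstream file) of the
// projector-type strings `mlp_gelu_match` accepts: "mlpNx_gelu" encodes an N-layer
// GELU MLP, anything else is rejected and handled by the other branches in
// `MMProjector::load`.
#[cfg(test)]
mod projector_type_tests {
    use super::mlp_gelu_match;

    #[test]
    fn parses_the_mlp_depth_from_the_projector_type() {
        assert_eq!(mlp_gelu_match("mlp2x_gelu"), Some(2));
        assert_eq!(mlp_gelu_match("mlp4x_gelu"), Some(4));
        assert_eq!(mlp_gelu_match("linear"), None);
        assert_eq!(mlp_gelu_match("identity"), None);
    }
}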
fn unpad_image(tensor: &Tensor, original_size: &(u32, u32)) -> Result<Tensor> {
assert_eq!(tensor.dims().len(), 3);
let (original_width, original_height) = *original_size;
let tensor_dims = tensor.dims();
let current_height = tensor_dims[1];
let current_width = tensor_dims[2];
let original_aspect_ratio = (original_width as f32) / (original_height as f32);
let current_aspect_ratio = (current_width as f32) / (current_height as f32);
if original_aspect_ratio > current_aspect_ratio {
let scale_factor = (current_width as f32) / (original_width as f32);
let new_height = (original_height as f32 * scale_factor).floor() as usize;
let padding = (current_height - new_height) / 2;
tensor.i((.., padding..current_width - padding, ..))
} else {
let scale_factor = (current_height as f32) / (original_height as f32);
let new_width = (original_width as f32 * scale_factor).floor() as usize;
let padding = (current_width - new_width) / 2;
tensor.i((.., .., padding..current_width - padding))
}
}
pub struct IdentityMap {}
impl Module for IdentityMap {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
Ok(x.clone())
}
}
pub struct MMProjector {
pub modules: Sequential,
}
impl MMProjector {
pub fn load(vb: &VarBuilder, config: &LLaVAConfig) -> Result<Self> {
if config.mm_projector_type == "linear" {
let vb_prefix = if config.hf {
"multi_modal_projector.linear_1"
} else {
"model.mm_projector.0"
};
let linear = linear(config.mm_hidden_size, config.hidden_size, vb.pp(vb_prefix))?;
let modules = seq().add(linear);
Ok(Self { modules })
} else if let Some(mlp_depth) = mlp_gelu_match(&config.mm_projector_type) {
let modules = if config.hf {
let mut modules = seq().add(linear(
config.mm_hidden_size,
config.hidden_size,
vb.pp("multi_modal_projector.linear_1"),
)?);
for i in 1..mlp_depth {
modules = modules.add(Activation::Gelu).add(linear(
config.hidden_size,
config.hidden_size,
vb.pp(format!("multi_modal_projector.linear_{}", i + 1)),
)?);
}
modules
} else {
let mut modules = seq().add(linear(
config.mm_hidden_size,
config.hidden_size,
vb.pp("model.mm_projector.0"),
)?);
for i in 1..mlp_depth {
modules = modules.add(Activation::Gelu).add(linear(
config.hidden_size,
config.hidden_size,
vb.pp(format!("model.mm_projector.{}", i * 2)),
)?);
}
modules
};
Ok(Self { modules })
} else if config.mm_projector_type == "identity" {
Ok(Self {
modules: seq().add(IdentityMap {}),
})
} else {
bail!(
"Unsupported MM projector type: {}",
config.mm_projector_type
)
}
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
self.modules.forward(x)
}
}
pub struct ClipVisionTower {
model: ClipVisionTransformer,
select_layer: isize,
select_feature_method: String,
pub config: ClipVisionConfig,
}
impl ClipVisionTower {
pub fn new(
vb: VarBuilder,
select_layer: isize,
select_feature_method: &str,
config: &Option<ClipVisionConfig>,
) -> Result<Self> {
let config = if config.is_none() {
ClipVisionConfig::clip_vit_large_patch14_336()
} else {
config.clone().context("no config")?
};
let select_layer = match select_layer {
-1 | -2 => select_layer,
_ => bail!("Unsupported select layer: {}", select_layer),
};
let model = ClipVisionTransformer::new(vb, &config)?;
Ok(Self {
model,
select_layer,
select_feature_method: select_feature_method.to_string(),
config,
})
}
pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
let result = self.model.output_hidden_states(x)?;
let index = result.len() as isize + self.select_layer;
let result = result[index as usize].clone();
if self.select_feature_method == "cls_patch" {
Ok(result)
} else {
result.i((.., 1..))
}
}
pub fn num_patches_per_side(&self) -> usize {
self.config.image_size / self.config.patch_size
}
}
pub struct LLaVA {
pub clip_vision_tower: ClipVisionTower,
pub image_newline: Tensor,
pub mm_projector: MMProjector,
pub llama: Llama,
config: LLaVAConfig,
device: Device,
}
impl LLaVA {
pub fn load(
vb: VarBuilder,
config: &LLaVAConfig,
clip_vision_config: Option<ClipVisionConfig>,
) -> Result<Self> {
let device = vb.device().clone();
let llama_config = config.to_llama_config();
let mm_projector = MMProjector::load(&vb, config)?;
let (clip_vision_tower, image_newline, llama) = if config.hf {
(
ClipVisionTower::new(
vb.pp("vision_tower.vision_model"),
config.mm_vision_select_layer,
&config.mm_vision_select_feature,
&clip_vision_config,
)?,
vb.get(&[config.hidden_size], "image_newline")?
.to_device(&device)?,
Llama::load(vb.pp("language_model"), &llama_config)?,
)
} else {
(
ClipVisionTower::new(
vb.pp("model.vision_tower.vision_tower.vision_model"),
config.mm_vision_select_layer,
&config.mm_vision_select_feature,
&clip_vision_config,
)?,
vb.get(&[config.hidden_size], "model.image_newline")?
.to_device(&device)?,
Llama::load(vb, &llama_config)?,
)
};
Ok(Self {
clip_vision_tower,
image_newline,
mm_projector,
llama,
config: (*config).clone(),
device,
})
}
pub fn encode_images(&self, x: &Tensor) -> Result<Tensor> {
let image_features = self.clip_vision_tower.forward(x)?;
let image_features = self.mm_projector.forward(&image_features)?;
Ok(image_features)
}
// currently only supports a single image provided as a 4-dim tensor
pub fn prepare_inputs_labels_for_multimodal(
&self,
input_ids: &Tensor,
images: &[Tensor],
image_sizes: &[(u32, u32)],
) -> Result<Tensor> {
// TODO: handle multiple images / newline tokens
// 576 patch tokens per image: 336 (input size) / 14 (patch size) = 24; 24 * 24 + 1 (CLS) = 577; 577 - 1 = 576
let concat_images = Tensor::cat(images, 0)?;
let image_features_together = self.encode_images(&concat_images)?;
let split_sizes = images
.iter()
.map(|x| x.shape().dims()[0])
.collect::<Vec<usize>>();
// can be replaced by split
let mut index_pos = 0;
let mut image_features = Vec::new();
for split_size in split_sizes.iter() {
image_features.push(image_features_together.i(index_pos..index_pos + (*split_size))?);
index_pos += *split_size;
}
let mm_patch_merge_type = &self.config.mm_patch_merge_type;
let image_aspect_ratio = &self.config.image_aspect_ratio;
let image_features = if mm_patch_merge_type == "flat" {
image_features
.iter()
.map(|x| x.flatten(0, 1))
.collect::<Result<Vec<Tensor>>>()?
} else if mm_patch_merge_type.starts_with("spatial") {
let mut new_image_features = Vec::new();
for (image_idx, image_feature) in image_features.iter().enumerate() {
let new_image_feature = if image_feature.dims()[0] > 1 {
let base_image_feature = image_feature.get(0)?;
let patch_image_feature = image_feature.i(1..)?;
let height = self.clip_vision_tower.num_patches_per_side();
let width = height;
assert_eq!(height * width, base_image_feature.dims()[0]);
let image_size = image_sizes[image_idx];
let new_image_feature = if image_aspect_ratio == "anyres" {
let (num_patch_width, num_patch_height) = get_anyres_image_grid_shape(
image_size,
&self.config.image_grid_pinpoints,
self.clip_vision_tower.config.image_size as u32,
);
patch_image_feature.reshape((
num_patch_height as usize,
num_patch_width as usize,
height,
width,
(),
))?
} else {
bail!("not implemented in original python LLaVA yet")
};
let new_image_feature = if mm_patch_merge_type.contains("unpad") {
let new_image_feature = new_image_feature
.permute((4, 0, 2, 1, 3))?
.flatten(1, 2)?
.flatten(2, 3)?;
let new_image_feature = unpad_image(&new_image_feature, &image_size)?;
let new_image_feature_dims = new_image_feature.dims();
let image_new_line = self
.image_newline
.reshape((self.config.hidden_size, 1, 1))?
.broadcast_as((
new_image_feature_dims[0],
new_image_feature_dims[1],
1,
))?;
let new_image_feature =
Tensor::cat(&[new_image_feature, image_new_line], 2)?;
new_image_feature.flatten(1, 2)?.transpose(0, 1)?
} else {
new_image_feature.permute((0, 2, 1, 3, 4))?.flatten(0, 3)?
};
Tensor::cat(&[base_image_feature, new_image_feature], 0)?
} else {
let new_image_feature = image_feature.get(0)?;
if mm_patch_merge_type.contains("unpad") {
Tensor::cat(
&[new_image_feature, self.image_newline.clone().unsqueeze(0)?],
0,
)?
} else {
new_image_feature
}
};
new_image_features.push(new_image_feature);
}
new_image_features
} else {
bail!("Unexpected mm_patch_merge_type: {mm_patch_merge_type}")
};
// can easily be replaced by nonzero if it is implemented in candle
let input_ids_vec = input_ids.squeeze(0)?.to_vec1::<i64>()?;
let mut image_indices = {
let mut image_indices = vec![0_i64];
image_indices.extend(
input_ids_vec
.iter()
.enumerate()
.filter_map(|(i, x)| {
if *x == self.config.image_token_index as i64 {
Some(i as i64)
} else {
None
}
})
.collect::<Vec<i64>>(),
);
image_indices
};
if image_indices.len() == 1 {
// no image tokens found, only the leading 0 sentinel
return self.llama.embed(input_ids);
}
let input_ids_noim = input_ids_vec
.iter()
.filter_map(|x| {
if *x != self.config.image_token_index as i64 {
Some(*x)
} else {
None
}
})
.collect::<Vec<i64>>();
let input_ids_noim_len = input_ids_noim.len();
image_indices.push((input_ids_noim_len) as i64);
let input_ids_noim = Tensor::from_vec(input_ids_noim, input_ids_noim_len, &self.device)?;
let cur_input_embeds = self.llama.embed(&input_ids_noim)?;
// could be replaced by a split op once it is implemented in candle
let input_embed_no_ims = {
let mut input_embeds = Vec::new();
for i in 0..image_indices.len() - 1 {
let start = (image_indices[i]) as usize;
let end = image_indices[i + 1] as usize;
input_embeds.push(cur_input_embeds.i((start..end, ..))?)
}
input_embeds
};
let mut cur_new_input_embeds = Vec::new();
for (i, image_feature) in image_features.iter().enumerate() {
cur_new_input_embeds.push(input_embed_no_ims[i].clone());
cur_new_input_embeds.push(image_feature.clone());
}
cur_new_input_embeds.push(input_embed_no_ims[image_features.len()].clone());
let new_input_embeds = Tensor::cat(&cur_new_input_embeds, 0)?;
// truncate to the tokenizer's maximum length if needed
let new_input_embeds =
if let Some(tokenizer_model_max_length) = self.config.tokenizer_model_max_length {
let (new_input_embeds_length, _) = new_input_embeds.shape().dims2()?;
if new_input_embeds_length > tokenizer_model_max_length {
new_input_embeds.i((..tokenizer_model_max_length, ..))?
} else {
new_input_embeds
}
} else {
new_input_embeds
};
new_input_embeds.unsqueeze(0)
}
pub fn forward(
&self,
input_embeds: &Tensor,
position_id: usize,
cache: &mut Cache,
) -> Result<Tensor> {
self.llama
.forward_input_embed(input_embeds, position_id, cache)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mmdit/projections.rs | candle-transformers/src/models/mmdit/projections.rs | use candle::{Module, Result, Tensor};
use candle_nn as nn;
pub struct Qkv {
pub q: Tensor,
pub k: Tensor,
pub v: Tensor,
}
pub struct Mlp {
fc1: nn::Linear,
act: nn::Activation,
fc2: nn::Linear,
}
impl Mlp {
pub fn new(
in_features: usize,
hidden_features: usize,
vb: candle_nn::VarBuilder,
) -> Result<Self> {
let fc1 = nn::linear(in_features, hidden_features, vb.pp("fc1"))?;
let act = nn::Activation::GeluPytorchTanh;
let fc2 = nn::linear(hidden_features, in_features, vb.pp("fc2"))?;
Ok(Self { fc1, act, fc2 })
}
}
impl Module for Mlp {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.fc1.forward(x)?;
let x = self.act.forward(&x)?;
self.fc2.forward(&x)
}
}
pub struct QkvOnlyAttnProjections {
qkv: nn::Linear,
head_dim: usize,
}
impl QkvOnlyAttnProjections {
pub fn new(dim: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let head_dim = dim / num_heads;
let qkv = nn::linear(dim, dim * 3, vb.pp("qkv"))?;
Ok(Self { qkv, head_dim })
}
pub fn pre_attention(&self, x: &Tensor) -> Result<Qkv> {
let qkv = self.qkv.forward(x)?;
split_qkv(&qkv, self.head_dim)
}
}
pub struct AttnProjections {
head_dim: usize,
qkv: nn::Linear,
ln_k: Option<candle_nn::RmsNorm>,
ln_q: Option<candle_nn::RmsNorm>,
proj: nn::Linear,
}
impl AttnProjections {
pub fn new(dim: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let head_dim = dim / num_heads;
let qkv = nn::linear(dim, dim * 3, vb.pp("qkv"))?;
let proj = nn::linear(dim, dim, vb.pp("proj"))?;
let (ln_k, ln_q) = if vb.contains_tensor("ln_k.weight") {
let ln_k = candle_nn::rms_norm(head_dim, 1e-6, vb.pp("ln_k"))?;
let ln_q = candle_nn::rms_norm(head_dim, 1e-6, vb.pp("ln_q"))?;
(Some(ln_k), Some(ln_q))
} else {
(None, None)
};
Ok(Self {
head_dim,
qkv,
proj,
ln_k,
ln_q,
})
}
pub fn pre_attention(&self, x: &Tensor) -> Result<Qkv> {
let qkv = self.qkv.forward(x)?;
let Qkv { q, k, v } = split_qkv(&qkv, self.head_dim)?;
let q = match self.ln_q.as_ref() {
None => q,
Some(l) => {
let (b, t, h) = q.dims3()?;
l.forward(&q.reshape((b, t, (), self.head_dim))?)?
.reshape((b, t, h))?
}
};
let k = match self.ln_k.as_ref() {
None => k,
Some(l) => {
let (b, t, h) = k.dims3()?;
l.forward(&k.reshape((b, t, (), self.head_dim))?)?
.reshape((b, t, h))?
}
};
Ok(Qkv { q, k, v })
}
pub fn post_attention(&self, x: &Tensor) -> Result<Tensor> {
self.proj.forward(x)
}
}
fn split_qkv(qkv: &Tensor, head_dim: usize) -> Result<Qkv> {
let (batch_size, seq_len, _) = qkv.dims3()?;
let qkv = qkv.reshape((batch_size, seq_len, 3, (), head_dim))?;
let q = qkv.get_on_dim(2, 0)?;
let q = q.reshape((batch_size, seq_len, ()))?;
let k = qkv.get_on_dim(2, 1)?;
let k = k.reshape((batch_size, seq_len, ()))?;
let v = qkv.get_on_dim(2, 2)?;
Ok(Qkv { q, k, v })
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mmdit/embedding.rs | candle-transformers/src/models/mmdit/embedding.rs | use candle::{bail, DType, Module, Result, Tensor};
use candle_nn as nn;
pub struct PatchEmbedder {
proj: nn::Conv2d,
}
impl PatchEmbedder {
pub fn new(
patch_size: usize,
in_channels: usize,
embed_dim: usize,
vb: nn::VarBuilder,
) -> Result<Self> {
let proj = nn::conv2d(
in_channels,
embed_dim,
patch_size,
nn::Conv2dConfig {
stride: patch_size,
..Default::default()
},
vb.pp("proj"),
)?;
Ok(Self { proj })
}
}
impl Module for PatchEmbedder {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.proj.forward(x)?;
// flatten spatial dim and transpose to channels last
let (b, c, h, w) = x.dims4()?;
x.reshape((b, c, h * w))?.transpose(1, 2)
}
}
pub struct Unpatchifier {
patch_size: usize,
out_channels: usize,
}
impl Unpatchifier {
pub fn new(patch_size: usize, out_channels: usize) -> Result<Self> {
Ok(Self {
patch_size,
out_channels,
})
}
pub fn unpatchify(&self, x: &Tensor, h: usize, w: usize) -> Result<Tensor> {
let h = (h + 1) / self.patch_size;
let w = (w + 1) / self.patch_size;
let x = x.reshape((
x.dim(0)?,
h,
w,
self.patch_size,
self.patch_size,
self.out_channels,
))?;
let x = x.permute((0, 5, 1, 3, 2, 4))?; // "nhwpqc->nchpwq"
x.reshape((
x.dim(0)?,
self.out_channels,
self.patch_size * h,
self.patch_size * w,
))
}
}
pub struct PositionEmbedder {
pos_embed: Tensor,
patch_size: usize,
pos_embed_max_size: usize,
}
impl PositionEmbedder {
pub fn new(
hidden_size: usize,
patch_size: usize,
pos_embed_max_size: usize,
vb: nn::VarBuilder,
) -> Result<Self> {
let pos_embed = vb.get(
(1, pos_embed_max_size * pos_embed_max_size, hidden_size),
"pos_embed",
)?;
Ok(Self {
pos_embed,
patch_size,
pos_embed_max_size,
})
}
pub fn get_cropped_pos_embed(&self, h: usize, w: usize) -> Result<Tensor> {
let h = (h + 1) / self.patch_size;
let w = (w + 1) / self.patch_size;
if h > self.pos_embed_max_size || w > self.pos_embed_max_size {
bail!("Input size is too large for the position embedding")
}
let top = (self.pos_embed_max_size - h) / 2;
let left = (self.pos_embed_max_size - w) / 2;
let pos_embed =
self.pos_embed
.reshape((1, self.pos_embed_max_size, self.pos_embed_max_size, ()))?;
let pos_embed = pos_embed.narrow(1, top, h)?.narrow(2, left, w)?;
pos_embed.reshape((1, h * w, ()))
}
}
pub struct TimestepEmbedder {
mlp: nn::Sequential,
frequency_embedding_size: usize,
}
impl TimestepEmbedder {
pub fn new(
hidden_size: usize,
frequency_embedding_size: usize,
vb: nn::VarBuilder,
) -> Result<Self> {
let mlp = nn::seq()
.add(nn::linear(
frequency_embedding_size,
hidden_size,
vb.pp("mlp.0"),
)?)
.add(nn::Activation::Silu)
.add(nn::linear(hidden_size, hidden_size, vb.pp("mlp.2"))?);
Ok(Self {
mlp,
frequency_embedding_size,
})
}
fn timestep_embedding(t: &Tensor, dim: usize, max_period: f64) -> Result<Tensor> {
if !dim.is_multiple_of(2) {
bail!("Embedding dimension must be even")
}
if t.dtype() != DType::F32 && t.dtype() != DType::F64 {
bail!("Input tensor must be floating point")
}
let half = dim / 2;
let freqs = Tensor::arange(0f32, half as f32, t.device())?
.to_dtype(candle::DType::F32)?
.mul(&Tensor::full(
(-f64::ln(max_period) / half as f64) as f32,
half,
t.device(),
)?)?
.exp()?;
let args = t
.unsqueeze(1)?
.to_dtype(candle::DType::F32)?
.matmul(&freqs.unsqueeze(0)?)?;
let embedding = Tensor::cat(&[args.cos()?, args.sin()?], 1)?;
embedding.to_dtype(candle::DType::F16)
}
}
impl Module for TimestepEmbedder {
fn forward(&self, t: &Tensor) -> Result<Tensor> {
let t_freq = Self::timestep_embedding(t, self.frequency_embedding_size, 10000.0)?;
self.mlp.forward(&t_freq)
}
}
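// Illustrative sketch (added for exposition, not part of the upstream file): the sinusoidal
// timestep embedding above lays out [cos(t*f_0) .. cos(t*f_{d/2-1}), sin(t*f_0) .. ] and
// returns it in F16. At t = 0 every angle is zero, which makes the expected values exact.
#[cfg(test)]
mod timestep_embedding_tests {
    use super::TimestepEmbedder;
    use candle::{DType, Device, Tensor};

    #[test]
    fn t_zero_embeds_to_cos_one_sin_zero() -> candle::Result<()> {
        let dev = Device::Cpu;
        let t = Tensor::new(&[0f32], &dev)?;
        // dim = 4 -> two frequencies; cos parts first, then sin parts
        let emb = TimestepEmbedder::timestep_embedding(&t, 4, 10000.0)?;
        let values = emb.to_dtype(DType::F32)?.to_vec2::<f32>()?;
        assert_eq!(values, vec![vec![1.0f32, 1.0, 0.0, 0.0]]);
        Ok(())
    }
}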
pub struct VectorEmbedder {
mlp: nn::Sequential,
}
impl VectorEmbedder {
pub fn new(input_dim: usize, hidden_size: usize, vb: nn::VarBuilder) -> Result<Self> {
let mlp = nn::seq()
.add(nn::linear(input_dim, hidden_size, vb.pp("mlp.0"))?)
.add(nn::Activation::Silu)
.add(nn::linear(hidden_size, hidden_size, vb.pp("mlp.2"))?);
Ok(Self { mlp })
}
}
impl Module for VectorEmbedder {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
self.mlp.forward(x)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mmdit/model.rs | candle-transformers/src/models/mmdit/model.rs | // Implement the MMDiT model originally introduced for Stable Diffusion 3 (https://arxiv.org/abs/2403.03206),
// as well as the MMDiT-X variant introduced for Stable Diffusion 3.5-medium (https://huggingface.co/stabilityai/stable-diffusion-3.5-medium)
// This follows the implementation of the MMDiT model in the ComfyUI repository.
// https://github.com/comfyanonymous/ComfyUI/blob/78e133d0415784924cd2674e2ee48f3eeca8a2aa/comfy/ldm/modules/diffusionmodules/mmdit.py#L1
// with MMDiT-X support following the Stability-AI/sd3.5 repository.
// https://github.com/Stability-AI/sd3.5/blob/4e484e05308d83fb77ae6f680028e6c313f9da54/mmditx.py#L1
use candle::{Module, Result, Tensor, D};
use candle_nn as nn;
use super::blocks::{
ContextQkvOnlyJointBlock, FinalLayer, JointBlock, MMDiTJointBlock, MMDiTXJointBlock,
};
use super::embedding::{
PatchEmbedder, PositionEmbedder, TimestepEmbedder, Unpatchifier, VectorEmbedder,
};
#[derive(Debug, Clone)]
pub struct Config {
pub patch_size: usize,
pub in_channels: usize,
pub out_channels: usize,
pub depth: usize,
pub head_size: usize,
pub adm_in_channels: usize,
pub pos_embed_max_size: usize,
pub context_embed_size: usize,
pub frequency_embedding_size: usize,
}
impl Config {
pub fn sd3_medium() -> Self {
Self {
patch_size: 2,
in_channels: 16,
out_channels: 16,
depth: 24,
head_size: 64,
adm_in_channels: 2048,
pos_embed_max_size: 192,
context_embed_size: 4096,
frequency_embedding_size: 256,
}
}
pub fn sd3_5_medium() -> Self {
Self {
patch_size: 2,
in_channels: 16,
out_channels: 16,
depth: 24,
head_size: 64,
adm_in_channels: 2048,
pos_embed_max_size: 384,
context_embed_size: 4096,
frequency_embedding_size: 256,
}
}
pub fn sd3_5_large() -> Self {
Self {
patch_size: 2,
in_channels: 16,
out_channels: 16,
depth: 38,
head_size: 64,
adm_in_channels: 2048,
pos_embed_max_size: 192,
context_embed_size: 4096,
frequency_embedding_size: 256,
}
}
}
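// Illustrative check (added for exposition, not part of the upstream file): `MMDiT::new`
// below derives the token width as head_size * depth, so these presets correspond to the
// published hidden sizes (1536 for SD3-medium, 2432 for SD3.5-large).
#[cfg(test)]
mod config_tests {
    use super::Config;

    #[test]
    fn hidden_size_is_head_size_times_depth() {
        let medium = Config::sd3_medium();
        assert_eq!(medium.head_size * medium.depth, 1536);
        let large = Config::sd3_5_large();
        assert_eq!(large.head_size * large.depth, 2432);
    }
}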
pub struct MMDiT {
core: MMDiTCore,
patch_embedder: PatchEmbedder,
pos_embedder: PositionEmbedder,
timestep_embedder: TimestepEmbedder,
vector_embedder: VectorEmbedder,
context_embedder: nn::Linear,
unpatchifier: Unpatchifier,
}
impl MMDiT {
pub fn new(cfg: &Config, use_flash_attn: bool, vb: nn::VarBuilder) -> Result<Self> {
let hidden_size = cfg.head_size * cfg.depth;
let core = MMDiTCore::new(
cfg.depth,
hidden_size,
cfg.depth,
cfg.patch_size,
cfg.out_channels,
use_flash_attn,
vb.clone(),
)?;
let patch_embedder = PatchEmbedder::new(
cfg.patch_size,
cfg.in_channels,
hidden_size,
vb.pp("x_embedder"),
)?;
let pos_embedder = PositionEmbedder::new(
hidden_size,
cfg.patch_size,
cfg.pos_embed_max_size,
vb.clone(),
)?;
let timestep_embedder = TimestepEmbedder::new(
hidden_size,
cfg.frequency_embedding_size,
vb.pp("t_embedder"),
)?;
let vector_embedder =
VectorEmbedder::new(cfg.adm_in_channels, hidden_size, vb.pp("y_embedder"))?;
let context_embedder = nn::linear(
cfg.context_embed_size,
hidden_size,
vb.pp("context_embedder"),
)?;
let unpatchifier = Unpatchifier::new(cfg.patch_size, cfg.out_channels)?;
Ok(Self {
core,
patch_embedder,
pos_embedder,
timestep_embedder,
vector_embedder,
context_embedder,
unpatchifier,
})
}
pub fn forward(
&self,
x: &Tensor,
t: &Tensor,
y: &Tensor,
context: &Tensor,
skip_layers: Option<&[usize]>,
) -> Result<Tensor> {
// Following the convention of the ComfyUI implementation.
// https://github.com/comfyanonymous/ComfyUI/blob/78e133d0415784924cd2674e2ee48f3eeca8a2aa/comfy/ldm/modules/diffusionmodules/mmdit.py#L919
//
// Forward pass of DiT.
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
// t: (N,) tensor of diffusion timesteps
// y: (N,) tensor of class labels
let h = x.dim(D::Minus2)?;
let w = x.dim(D::Minus1)?;
let cropped_pos_embed = self.pos_embedder.get_cropped_pos_embed(h, w)?;
let x = self
.patch_embedder
.forward(x)?
.broadcast_add(&cropped_pos_embed)?;
let c = self.timestep_embedder.forward(t)?;
let y = self.vector_embedder.forward(y)?;
let c = (c + y)?;
let context = self.context_embedder.forward(context)?;
let x = self.core.forward(&context, &x, &c, skip_layers)?;
let x = self.unpatchifier.unpatchify(&x, h, w)?;
x.narrow(2, 0, h)?.narrow(3, 0, w)
}
}
pub struct MMDiTCore {
joint_blocks: Vec<Box<dyn JointBlock>>,
context_qkv_only_joint_block: ContextQkvOnlyJointBlock,
final_layer: FinalLayer,
}
impl MMDiTCore {
pub fn new(
depth: usize,
hidden_size: usize,
num_heads: usize,
patch_size: usize,
out_channels: usize,
use_flash_attn: bool,
vb: nn::VarBuilder,
) -> Result<Self> {
let mut joint_blocks = Vec::with_capacity(depth - 1);
for i in 0..depth - 1 {
let joint_block_vb_pp = format!("joint_blocks.{i}");
let joint_block: Box<dyn JointBlock> =
if vb.contains_tensor(&format!("{joint_block_vb_pp}.x_block.attn2.qkv.weight")) {
Box::new(MMDiTXJointBlock::new(
hidden_size,
num_heads,
use_flash_attn,
vb.pp(&joint_block_vb_pp),
)?)
} else {
Box::new(MMDiTJointBlock::new(
hidden_size,
num_heads,
use_flash_attn,
vb.pp(&joint_block_vb_pp),
)?)
};
joint_blocks.push(joint_block);
}
Ok(Self {
joint_blocks,
context_qkv_only_joint_block: ContextQkvOnlyJointBlock::new(
hidden_size,
num_heads,
use_flash_attn,
vb.pp(format!("joint_blocks.{}", depth - 1)),
)?,
final_layer: FinalLayer::new(
hidden_size,
patch_size,
out_channels,
vb.pp("final_layer"),
)?,
})
}
pub fn forward(
&self,
context: &Tensor,
x: &Tensor,
c: &Tensor,
skip_layers: Option<&[usize]>,
) -> Result<Tensor> {
let (mut context, mut x) = (context.clone(), x.clone());
for (i, joint_block) in self.joint_blocks.iter().enumerate() {
if let Some(skip_layers) = &skip_layers {
if skip_layers.contains(&i) {
continue;
}
}
(context, x) = joint_block.forward(&context, &x, c)?;
}
let x = self.context_qkv_only_joint_block.forward(&context, &x, c)?;
self.final_layer.forward(&x, c)
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mmdit/mod.rs | candle-transformers/src/models/mmdit/mod.rs | //! Mix of Multi-scale Dilated and Traditional Convolutions
//!
//! Mix of Multi-scale Dilated and Traditional Convolutions (MMDiT) is an architecture
//! introduced for Stable Diffusion 3, with the MMDiT-X variant used in Stable Diffusion 3.5.
//!
//! - 📝 [Research Paper](https://arxiv.org/abs/2403.03206)
//! - 💻 ComfyUI [reference implementation](https://github.com/comfyanonymous/ComfyUI/blob/78e133d0415784924cd2674e2ee48f3eeca8a2aa/comfy/ldm/modules/diffusionmodules/mmdit.py)
//! - 💻 Stability-AI [MMDiT-X implementation](https://github.com/Stability-AI/sd3.5/blob/4e484e05308d83fb77ae6f680028e6c313f9da54/mmditx.py)
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-BLIP-Image-Captioning)
//! - 💻 [GH Link](https://github.com/salesforce/BLIP)
//! - 🤗 [HF Link](https://huggingface.co/Salesforce/blip-image-captioning-base)
//! - 📝 [Paper](https://arxiv.org/abs/2201.12086)
//!
pub mod blocks;
pub mod embedding;
pub mod model;
pub mod projections;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mmdit/blocks.rs | candle-transformers/src/models/mmdit/blocks.rs | use candle::{Module, Result, Tensor, D};
use candle_nn as nn;
use super::projections::{AttnProjections, Mlp, Qkv, QkvOnlyAttnProjections};
pub struct ModulateIntermediates {
gate_msa: Tensor,
shift_mlp: Tensor,
scale_mlp: Tensor,
gate_mlp: Tensor,
}
pub struct DiTBlock {
norm1: LayerNormNoAffine,
attn: AttnProjections,
norm2: LayerNormNoAffine,
mlp: Mlp,
ada_ln_modulation: nn::Sequential,
}
pub struct LayerNormNoAffine {
eps: f64,
}
impl LayerNormNoAffine {
pub fn new(eps: f64) -> Self {
Self { eps }
}
}
impl Module for LayerNormNoAffine {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
nn::LayerNorm::new_no_bias(Tensor::ones_like(x)?, self.eps).forward(x)
}
}
impl DiTBlock {
pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let norm1 = LayerNormNoAffine::new(1e-6);
let attn = AttnProjections::new(hidden_size, num_heads, vb.pp("attn"))?;
let norm2 = LayerNormNoAffine::new(1e-6);
let mlp_ratio = 4;
let mlp = Mlp::new(hidden_size, hidden_size * mlp_ratio, vb.pp("mlp"))?;
let n_mods = 6;
let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear(
hidden_size,
n_mods * hidden_size,
vb.pp("adaLN_modulation.1"),
)?);
Ok(Self {
norm1,
attn,
norm2,
mlp,
ada_ln_modulation,
})
}
pub fn pre_attention(&self, x: &Tensor, c: &Tensor) -> Result<(Qkv, ModulateIntermediates)> {
let modulation = self.ada_ln_modulation.forward(c)?;
let chunks = modulation.chunk(6, D::Minus1)?;
let (shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp) = (
chunks[0].clone(),
chunks[1].clone(),
chunks[2].clone(),
chunks[3].clone(),
chunks[4].clone(),
chunks[5].clone(),
);
let norm_x = self.norm1.forward(x)?;
let modulated_x = modulate(&norm_x, &shift_msa, &scale_msa)?;
let qkv = self.attn.pre_attention(&modulated_x)?;
Ok((
qkv,
ModulateIntermediates {
gate_msa,
shift_mlp,
scale_mlp,
gate_mlp,
},
))
}
pub fn post_attention(
&self,
attn: &Tensor,
x: &Tensor,
mod_interm: &ModulateIntermediates,
) -> Result<Tensor> {
let attn_out = self.attn.post_attention(attn)?;
let x = x.add(&attn_out.broadcast_mul(&mod_interm.gate_msa.unsqueeze(1)?)?)?;
let norm_x = self.norm2.forward(&x)?;
let modulated_x = modulate(&norm_x, &mod_interm.shift_mlp, &mod_interm.scale_mlp)?;
let mlp_out = self.mlp.forward(&modulated_x)?;
let x = x.add(&mlp_out.broadcast_mul(&mod_interm.gate_mlp.unsqueeze(1)?)?)?;
Ok(x)
}
}
pub struct SelfAttnModulateIntermediates {
gate_msa: Tensor,
shift_mlp: Tensor,
scale_mlp: Tensor,
gate_mlp: Tensor,
gate_msa2: Tensor,
}
pub struct SelfAttnDiTBlock {
norm1: LayerNormNoAffine,
attn: AttnProjections,
attn2: AttnProjections,
norm2: LayerNormNoAffine,
mlp: Mlp,
ada_ln_modulation: nn::Sequential,
}
impl SelfAttnDiTBlock {
pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let norm1 = LayerNormNoAffine::new(1e-6);
let attn = AttnProjections::new(hidden_size, num_heads, vb.pp("attn"))?;
let attn2 = AttnProjections::new(hidden_size, num_heads, vb.pp("attn2"))?;
let norm2 = LayerNormNoAffine::new(1e-6);
let mlp_ratio = 4;
let mlp = Mlp::new(hidden_size, hidden_size * mlp_ratio, vb.pp("mlp"))?;
let n_mods = 9;
let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear(
hidden_size,
n_mods * hidden_size,
vb.pp("adaLN_modulation.1"),
)?);
Ok(Self {
norm1,
attn,
attn2,
norm2,
mlp,
ada_ln_modulation,
})
}
pub fn pre_attention(
&self,
x: &Tensor,
c: &Tensor,
) -> Result<(Qkv, Qkv, SelfAttnModulateIntermediates)> {
let modulation = self.ada_ln_modulation.forward(c)?;
let chunks = modulation.chunk(9, D::Minus1)?;
let (
shift_msa,
scale_msa,
gate_msa,
shift_mlp,
scale_mlp,
gate_mlp,
shift_msa2,
scale_msa2,
gate_msa2,
) = (
chunks[0].clone(),
chunks[1].clone(),
chunks[2].clone(),
chunks[3].clone(),
chunks[4].clone(),
chunks[5].clone(),
chunks[6].clone(),
chunks[7].clone(),
chunks[8].clone(),
);
let norm_x = self.norm1.forward(x)?;
let modulated_x = modulate(&norm_x, &shift_msa, &scale_msa)?;
let qkv = self.attn.pre_attention(&modulated_x)?;
let modulated_x2 = modulate(&norm_x, &shift_msa2, &scale_msa2)?;
let qkv2 = self.attn2.pre_attention(&modulated_x2)?;
Ok((
qkv,
qkv2,
SelfAttnModulateIntermediates {
gate_msa,
shift_mlp,
scale_mlp,
gate_mlp,
gate_msa2,
},
))
}
pub fn post_attention(
&self,
attn: &Tensor,
attn2: &Tensor,
x: &Tensor,
mod_interm: &SelfAttnModulateIntermediates,
) -> Result<Tensor> {
let attn_out = self.attn.post_attention(attn)?;
let x = x.add(&attn_out.broadcast_mul(&mod_interm.gate_msa.unsqueeze(1)?)?)?;
let attn_out2 = self.attn2.post_attention(attn2)?;
let x = x.add(&attn_out2.broadcast_mul(&mod_interm.gate_msa2.unsqueeze(1)?)?)?;
let norm_x = self.norm2.forward(&x)?;
let modulated_x = modulate(&norm_x, &mod_interm.shift_mlp, &mod_interm.scale_mlp)?;
let mlp_out = self.mlp.forward(&modulated_x)?;
let x = x.add(&mlp_out.broadcast_mul(&mod_interm.gate_mlp.unsqueeze(1)?)?)?;
Ok(x)
}
}
pub struct QkvOnlyDiTBlock {
norm1: LayerNormNoAffine,
attn: QkvOnlyAttnProjections,
ada_ln_modulation: nn::Sequential,
}
impl QkvOnlyDiTBlock {
pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let norm1 = LayerNormNoAffine::new(1e-6);
let attn = QkvOnlyAttnProjections::new(hidden_size, num_heads, vb.pp("attn"))?;
let n_mods = 2;
let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear(
hidden_size,
n_mods * hidden_size,
vb.pp("adaLN_modulation.1"),
)?);
Ok(Self {
norm1,
attn,
ada_ln_modulation,
})
}
pub fn pre_attention(&self, x: &Tensor, c: &Tensor) -> Result<Qkv> {
let modulation = self.ada_ln_modulation.forward(c)?;
let chunks = modulation.chunk(2, D::Minus1)?;
let (shift_msa, scale_msa) = (chunks[0].clone(), chunks[1].clone());
let norm_x = self.norm1.forward(x)?;
let modulated_x = modulate(&norm_x, &shift_msa, &scale_msa)?;
self.attn.pre_attention(&modulated_x)
}
}
pub struct FinalLayer {
norm_final: LayerNormNoAffine,
linear: nn::Linear,
ada_ln_modulation: nn::Sequential,
}
impl FinalLayer {
pub fn new(
hidden_size: usize,
patch_size: usize,
out_channels: usize,
vb: nn::VarBuilder,
) -> Result<Self> {
let norm_final = LayerNormNoAffine::new(1e-6);
let linear = nn::linear(
hidden_size,
patch_size * patch_size * out_channels,
vb.pp("linear"),
)?;
let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear(
hidden_size,
2 * hidden_size,
vb.pp("adaLN_modulation.1"),
)?);
Ok(Self {
norm_final,
linear,
ada_ln_modulation,
})
}
pub fn forward(&self, x: &Tensor, c: &Tensor) -> Result<Tensor> {
let modulation = self.ada_ln_modulation.forward(c)?;
let chunks = modulation.chunk(2, D::Minus1)?;
let (shift, scale) = (chunks[0].clone(), chunks[1].clone());
let norm_x = self.norm_final.forward(x)?;
let modulated_x = modulate(&norm_x, &shift, &scale)?;
let output = self.linear.forward(&modulated_x)?;
Ok(output)
}
}
fn modulate(x: &Tensor, shift: &Tensor, scale: &Tensor) -> Result<Tensor> {
let shift = shift.unsqueeze(1)?;
let scale = scale.unsqueeze(1)?;
let scale_plus_one = scale.add(&Tensor::ones_like(&scale)?)?;
shift.broadcast_add(&x.broadcast_mul(&scale_plus_one)?)
}
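// Worked example (added for exposition, not part of the upstream file): `modulate` computes
// shift + x * (scale + 1) with shift/scale broadcast over the sequence dimension, which is
// the adaLN modulation used throughout the blocks above.
#[cfg(test)]
mod modulate_tests {
    use super::modulate;
    use candle::{Device, Tensor};

    #[test]
    fn modulate_is_shift_plus_x_times_scale_plus_one() -> candle::Result<()> {
        let dev = Device::Cpu;
        let x = Tensor::new(&[[[1f32, 2.0]]], &dev)?; // (batch=1, seq=1, dim=2)
        let shift = Tensor::new(&[[10f32, 20.0]], &dev)?; // (batch=1, dim=2)
        let scale = Tensor::new(&[[0f32, 1.0]], &dev)?; // (batch=1, dim=2)
        let y = modulate(&x, &shift, &scale)?;
        // 10 + 1 * (0 + 1) = 11 and 20 + 2 * (1 + 1) = 24
        assert_eq!(y.to_vec3::<f32>()?, vec![vec![vec![11.0f32, 24.0]]]);
        Ok(())
    }
}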
pub trait JointBlock {
fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<(Tensor, Tensor)>;
}
pub struct MMDiTJointBlock {
x_block: DiTBlock,
context_block: DiTBlock,
num_heads: usize,
use_flash_attn: bool,
}
impl MMDiTJointBlock {
pub fn new(
hidden_size: usize,
num_heads: usize,
use_flash_attn: bool,
vb: nn::VarBuilder,
) -> Result<Self> {
let x_block = DiTBlock::new(hidden_size, num_heads, vb.pp("x_block"))?;
let context_block = DiTBlock::new(hidden_size, num_heads, vb.pp("context_block"))?;
Ok(Self {
x_block,
context_block,
num_heads,
use_flash_attn,
})
}
}
impl JointBlock for MMDiTJointBlock {
fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<(Tensor, Tensor)> {
let (context_qkv, context_interm) = self.context_block.pre_attention(context, c)?;
let (x_qkv, x_interm) = self.x_block.pre_attention(x, c)?;
let (context_attn, x_attn) =
joint_attn(&context_qkv, &x_qkv, self.num_heads, self.use_flash_attn)?;
let context_out =
self.context_block
.post_attention(&context_attn, context, &context_interm)?;
let x_out = self.x_block.post_attention(&x_attn, x, &x_interm)?;
Ok((context_out, x_out))
}
}
pub struct MMDiTXJointBlock {
x_block: SelfAttnDiTBlock,
context_block: DiTBlock,
num_heads: usize,
use_flash_attn: bool,
}
impl MMDiTXJointBlock {
pub fn new(
hidden_size: usize,
num_heads: usize,
use_flash_attn: bool,
vb: nn::VarBuilder,
) -> Result<Self> {
let x_block = SelfAttnDiTBlock::new(hidden_size, num_heads, vb.pp("x_block"))?;
let context_block = DiTBlock::new(hidden_size, num_heads, vb.pp("context_block"))?;
Ok(Self {
x_block,
context_block,
num_heads,
use_flash_attn,
})
}
}
impl JointBlock for MMDiTXJointBlock {
fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<(Tensor, Tensor)> {
let (context_qkv, context_interm) = self.context_block.pre_attention(context, c)?;
let (x_qkv, x_qkv2, x_interm) = self.x_block.pre_attention(x, c)?;
let (context_attn, x_attn) =
joint_attn(&context_qkv, &x_qkv, self.num_heads, self.use_flash_attn)?;
let x_attn2 = attn(&x_qkv2, self.num_heads, self.use_flash_attn)?;
let context_out =
self.context_block
.post_attention(&context_attn, context, &context_interm)?;
let x_out = self
.x_block
.post_attention(&x_attn, &x_attn2, x, &x_interm)?;
Ok((context_out, x_out))
}
}
pub struct ContextQkvOnlyJointBlock {
x_block: DiTBlock,
context_block: QkvOnlyDiTBlock,
num_heads: usize,
use_flash_attn: bool,
}
impl ContextQkvOnlyJointBlock {
pub fn new(
hidden_size: usize,
num_heads: usize,
use_flash_attn: bool,
vb: nn::VarBuilder,
) -> Result<Self> {
let x_block = DiTBlock::new(hidden_size, num_heads, vb.pp("x_block"))?;
let context_block = QkvOnlyDiTBlock::new(hidden_size, num_heads, vb.pp("context_block"))?;
Ok(Self {
x_block,
context_block,
num_heads,
use_flash_attn,
})
}
pub fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<Tensor> {
let context_qkv = self.context_block.pre_attention(context, c)?;
let (x_qkv, x_interm) = self.x_block.pre_attention(x, c)?;
let (_, x_attn) = joint_attn(&context_qkv, &x_qkv, self.num_heads, self.use_flash_attn)?;
let x_out = self.x_block.post_attention(&x_attn, x, &x_interm)?;
Ok(x_out)
}
}
// A QKV-attention that is compatible with the interface of candle_flash_attn::flash_attn
// Flash attention regards q, k, v dimensions as (batch_size, seqlen, nheads, headdim)
fn flash_compatible_attention(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
) -> Result<Tensor> {
let q_dims_for_matmul = q.transpose(1, 2)?.dims().to_vec();
let rank = q_dims_for_matmul.len();
let q = q.transpose(1, 2)?.flatten_to(rank - 3)?;
let k = k.transpose(1, 2)?.flatten_to(rank - 3)?;
let v = v.transpose(1, 2)?.flatten_to(rank - 3)?;
let attn_weights = (q.matmul(&k.t()?)? * softmax_scale as f64)?;
let attn_scores = candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(&v)?;
attn_scores.reshape(q_dims_for_matmul)?.transpose(1, 2)
}
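// A small sketch checking the flash-compatible fallback path: attention
// weights sum to one, so a constant value tensor must pass through unchanged;
// the (batch=1, seq=3, heads=2, headdim=4) shapes are illustrative assumptions.
#[cfg(test)]
mod flash_compatible_attention_sketch {
use candle::{DType, Device, Result, Tensor};
#[test]
fn constant_values_pass_through() -> Result<()> {
let dev = Device::Cpu;
let q = Tensor::randn(0f32, 1f32, (1, 3, 2, 4), &dev)?;
let k = Tensor::randn(0f32, 1f32, (1, 3, 2, 4), &dev)?;
let v = (Tensor::ones((1, 3, 2, 4), DType::F32, &dev)? * 0.5)?;
let out = super::flash_compatible_attention(&q, &k, &v, 0.5)?;
assert_eq!(out.dims(), &[1, 3, 2, 4]);
let flat = out.flatten_all()?.to_vec1::<f32>()?;
assert!(flat.iter().all(|x| (x - 0.5).abs() < 1e-5));
Ok(())
}
}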
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
// Joint attention: context and x tokens are concatenated along the sequence
// axis, attended to together, and the result is split back at the original
// context length.
fn joint_attn(
context_qkv: &Qkv,
x_qkv: &Qkv,
num_heads: usize,
use_flash_attn: bool,
) -> Result<(Tensor, Tensor)> {
let qkv = Qkv {
q: Tensor::cat(&[&context_qkv.q, &x_qkv.q], 1)?,
k: Tensor::cat(&[&context_qkv.k, &x_qkv.k], 1)?,
v: Tensor::cat(&[&context_qkv.v, &x_qkv.v], 1)?,
};
let seqlen = qkv.q.dim(1)?;
let attn = attn(&qkv, num_heads, use_flash_attn)?;
let context_qkv_seqlen = context_qkv.q.dim(1)?;
let context_attn = attn.narrow(1, 0, context_qkv_seqlen)?;
let x_attn = attn.narrow(1, context_qkv_seqlen, seqlen - context_qkv_seqlen)?;
Ok((context_attn, x_attn))
}
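// A minimal sketch of the joint attention split, using the non-flash path;
// the sizes (hidden=8, heads=2, context length 2, x length 3) are illustrative
// assumptions rather than real model dimensions.
#[cfg(test)]
mod joint_attn_sketch {
use candle::{Device, Result, Tensor};
#[test]
fn split_preserves_sequence_lengths() -> Result<()> {
let dev = Device::Cpu;
let t = |s: usize| Tensor::randn(0f32, 1f32, (1, s, 8), &dev);
let context_qkv = super::Qkv { q: t(2)?, k: t(2)?, v: t(2)? };
let x_qkv = super::Qkv { q: t(3)?, k: t(3)?, v: t(3)? };
let (ctx_out, x_out) = super::joint_attn(&context_qkv, &x_qkv, 2, false)?;
assert_eq!(ctx_out.dims(), &[1, 2, 8]);
assert_eq!(x_out.dims(), &[1, 3, 8]);
Ok(())
}
}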
// Multi-head attention over flat (batch, seq, hidden) projections: split out
// the heads, run (flash or fallback) attention, then merge the heads again.
fn attn(qkv: &Qkv, num_heads: usize, use_flash_attn: bool) -> Result<Tensor> {
let batch_size = qkv.q.dim(0)?;
let seqlen = qkv.q.dim(1)?;
let qkv = Qkv {
q: qkv.q.reshape((batch_size, seqlen, num_heads, ()))?,
k: qkv.k.reshape((batch_size, seqlen, num_heads, ()))?,
v: qkv.v.reshape((batch_size, seqlen, num_heads, ()))?,
};
let headdim = qkv.q.dim(D::Minus1)?;
let softmax_scale = 1.0 / (headdim as f64).sqrt();
let attn = if use_flash_attn {
flash_attn(&qkv.q, &qkv.k, &qkv.v, softmax_scale as f32, false)?
} else {
flash_compatible_attention(&qkv.q, &qkv.k, &qkv.v, softmax_scale as f32)?
};
attn.reshape((batch_size, seqlen, ()))
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/whisper/quantized_model.rs | candle-transformers/src/models/whisper/quantized_model.rs | use super::Config;
use crate::quantized_nn::{layer_norm, linear, linear_no_bias, Embedding, Linear};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{Device, IndexOp, Result, Tensor, D};
use candle_nn::{Conv1d, Conv1dConfig, LayerNorm, Module};
fn conv1d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
config: Conv1dConfig,
vb: VarBuilder,
) -> Result<Conv1d> {
let weight = vb
.get((out_channels, in_channels, kernel_size), "weight")?
.dequantize(vb.device())?;
let bias = vb.get(out_channels, "bias")?.dequantize(vb.device())?;
Ok(Conv1d::new(weight, Some(bias), config))
}
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L62
#[derive(Debug, Clone)]
struct MultiHeadAttention {
query: Linear,
key: Linear,
value: Linear,
out: Linear,
n_head: usize,
span: tracing::Span,
softmax_span: tracing::Span,
matmul_span: tracing::Span,
kv_cache: Option<(Tensor, Tensor)>,
}
impl MultiHeadAttention {
fn load(n_state: usize, n_head: usize, vb: VarBuilder) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "multi-head-attn");
let softmax_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-softmax");
let matmul_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-matmul");
let query = linear(n_state, n_state, vb.pp("q_proj"))?;
let value = linear(n_state, n_state, vb.pp("v_proj"))?;
let key = linear_no_bias(n_state, n_state, vb.pp("k_proj"))?;
let out = linear(n_state, n_state, vb.pp("out_proj"))?;
Ok(Self {
query,
key,
value,
out,
n_head,
span,
softmax_span,
matmul_span,
kv_cache: None,
})
}
fn forward(
&mut self,
x: &Tensor,
xa: Option<&Tensor>,
mask: Option<&Tensor>,
flush_cache: bool,
) -> Result<Tensor> {
let _enter = self.span.enter();
let q = self.query.forward(x)?;
let (k, v) = match xa {
None => {
let k = self.key.forward(x)?;
let v = self.value.forward(x)?;
(k, v)
}
Some(x) => {
if flush_cache {
self.kv_cache = None;
}
if let Some((k, v)) = &self.kv_cache {
(k.clone(), v.clone())
} else {
let k = self.key.forward(x)?;
let v = self.value.forward(x)?;
self.kv_cache = Some((k.clone(), v.clone()));
(k, v)
}
}
};
let wv = self.qkv_attention(&q, &k, &v, mask)?;
let out = self.out.forward(&wv)?;
Ok(out)
}
fn reshape_head(&self, x: &Tensor) -> Result<Tensor> {
let (n_batch, n_ctx, n_state) = x.dims3()?;
let target_dims = &[n_batch, n_ctx, self.n_head, n_state / self.n_head];
x.reshape(target_dims)?.transpose(1, 2)
}
fn qkv_attention(
&self,
q: &Tensor,
k: &Tensor,
v: &Tensor,
mask: Option<&Tensor>,
) -> Result<Tensor> {
let (_, n_ctx, n_state) = q.dims3()?;
// Scale q and k by head_dim^-0.25 each so that their product carries the
// usual 1/sqrt(head_dim) attention scaling.
let scale = ((n_state / self.n_head) as f64).powf(-0.25);
let q = (self.reshape_head(q)? * scale)?;
let k = (self.reshape_head(k)?.transpose(2, 3)? * scale)?;
let v = self.reshape_head(v)?.contiguous()?;
let mut qk = {
let _enter = self.matmul_span.enter();
q.matmul(&k)?
};
if let Some(mask) = mask {
let mask = mask.i((0..n_ctx, 0..n_ctx))?;
qk = qk.broadcast_add(&mask)?
}
let w = {
let _enter = self.softmax_span.enter();
candle_nn::ops::softmax_last_dim(&qk)?
};
let wv = {
let _enter = self.matmul_span.enter();
w.matmul(&v)?
}
.transpose(1, 2)?
.flatten_from(2)?;
Ok(wv)
}
fn reset_kv_cache(&mut self) {
self.kv_cache = None;
}
}
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L111
#[derive(Debug, Clone)]
struct ResidualAttentionBlock {
attn: MultiHeadAttention,
attn_ln: LayerNorm,
cross_attn: Option<(MultiHeadAttention, LayerNorm)>,
mlp_linear1: Linear,
mlp_linear2: Linear,
mlp_ln: LayerNorm,
span: tracing::Span,
}
impl ResidualAttentionBlock {
fn load(n_state: usize, n_head: usize, ca: bool, vb: VarBuilder) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "residual-attn");
let attn = MultiHeadAttention::load(n_state, n_head, vb.pp("self_attn"))?;
let attn_ln = layer_norm(n_state, 1e-5, vb.pp("self_attn_layer_norm"))?;
let cross_attn = if ca {
let cross_attn = MultiHeadAttention::load(n_state, n_head, vb.pp("encoder_attn"))?;
let cross_attn_ln = layer_norm(n_state, 1e-5, vb.pp("encoder_attn_layer_norm"))?;
Some((cross_attn, cross_attn_ln))
} else {
None
};
let n_mlp = n_state * 4;
let mlp_linear1 = linear(n_state, n_mlp, vb.pp("fc1"))?;
let mlp_linear2 = linear(n_mlp, n_state, vb.pp("fc2"))?;
let mlp_ln = layer_norm(n_state, 1e-5, vb.pp("final_layer_norm"))?;
Ok(Self {
attn,
attn_ln,
cross_attn,
mlp_linear1,
mlp_linear2,
mlp_ln,
span,
})
}
fn forward(
&mut self,
x: &Tensor,
xa: Option<&Tensor>,
mask: Option<&Tensor>,
flush_kv_cache: bool,
) -> Result<Tensor> {
let _enter = self.span.enter();
let attn = self
.attn
.forward(&self.attn_ln.forward(x)?, None, mask, flush_kv_cache)?;
let mut x = (x + attn)?;
if let Some((attn, ln)) = &mut self.cross_attn {
x = (&x + attn.forward(&ln.forward(&x)?, xa, None, flush_kv_cache)?)?;
}
let mlp = x
.apply(&self.mlp_ln)?
.apply(&self.mlp_linear1)?
.gelu()?
.apply(&self.mlp_linear2)?;
x + mlp
}
fn reset_kv_cache(&mut self) {
self.attn.reset_kv_cache();
if let Some((attn, _)) = &mut self.cross_attn {
attn.reset_kv_cache();
}
}
}
// Fixed sinusoidal positional embedding table of shape (length, channels),
// matching the encoding used by the reference Whisper implementation.
fn sinusoids(length: usize, channels: usize, device: &Device) -> Result<Tensor> {
let max_timescale = 10000f32;
let log_timescale_increment = max_timescale.ln() / (channels / 2 - 1) as f32;
let inv_timescales: Vec<_> = (0..channels / 2)
.map(|i| (i as f32 * (-log_timescale_increment)).exp())
.collect();
let inv_timescales = Tensor::new(inv_timescales.as_slice(), device)?.unsqueeze(0)?;
let arange = Tensor::arange(0, length as u32, device)?
.to_dtype(candle::DType::F32)?
.unsqueeze(1)?;
let sh = (length, channels / 2);
let scaled_time = (arange.broadcast_as(sh)? * inv_timescales.broadcast_as(sh)?)?;
let sincos = Tensor::cat(&[scaled_time.sin()?, scaled_time.cos()?], 1)?;
Ok(sincos)
}
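// A small sketch of the sinusoidal table: position 0 is sin(0) = 0 for the
// first channels/2 entries and cos(0) = 1 for the rest; the length/channel
// counts below are illustrative assumptions.
#[cfg(test)]
mod sinusoids_sketch {
use candle::{Device, Result};
#[test]
fn first_position_is_zeros_then_ones() -> Result<()> {
let table = super::sinusoids(6, 8, &Device::Cpu)?;
assert_eq!(table.dims(), &[6, 8]);
let row0 = table.get(0)?.to_vec1::<f32>()?;
assert!(row0[..4].iter().all(|x| x.abs() < 1e-6));
assert!(row0[4..].iter().all(|x| (x - 1.0).abs() < 1e-6));
Ok(())
}
}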
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L143
#[derive(Debug, Clone)]
pub struct AudioEncoder {
conv1: Conv1d,
conv2: Conv1d,
positional_embedding: Tensor,
blocks: Vec<ResidualAttentionBlock>,
ln_post: LayerNorm,
span: tracing::Span,
conv1_span: tracing::Span,
conv2_span: tracing::Span,
}
impl AudioEncoder {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "audio-encoder");
let conv1_span = tracing::span!(tracing::Level::TRACE, "conv1");
let conv2_span = tracing::span!(tracing::Level::TRACE, "conv2");
let n_state = cfg.d_model;
let n_head = cfg.encoder_attention_heads;
let n_ctx = cfg.max_source_positions;
let cfg1 = Conv1dConfig {
padding: 1,
stride: 1,
groups: 1,
dilation: 1,
cudnn_fwd_algo: None,
};
let cfg2 = Conv1dConfig {
padding: 1,
stride: 2,
groups: 1,
dilation: 1,
cudnn_fwd_algo: None,
};
let conv1 = conv1d(cfg.num_mel_bins, n_state, 3, cfg1, vb.pp("conv1"))?;
let conv2 = conv1d(n_state, n_state, 3, cfg2, vb.pp("conv2"))?;
let positional_embedding = sinusoids(n_ctx, n_state, vb.device())?;
let blocks = (0..cfg.encoder_layers)
.map(|i| {
ResidualAttentionBlock::load(n_state, n_head, false, vb.pp(format!("layers.{i}")))
})
.collect::<Result<Vec<_>>>()?;
let ln_post = layer_norm(n_state, 1e-5, vb.pp("layer_norm"))?;
Ok(Self {
conv1,
conv2,
positional_embedding,
blocks,
ln_post,
conv1_span,
conv2_span,
span,
})
}
pub fn forward(&mut self, x: &Tensor, flush_kv_cache: bool) -> Result<Tensor> {
let _enter = self.span.enter();
let x = {
let _enter = self.conv1_span.enter();
self.conv1.forward(x)?.gelu()?
};
let x = {
let _enter = self.conv2_span.enter();
self.conv2.forward(&x)?.gelu()?
};
let x = x.transpose(1, 2)?;
let (_bsize, seq_len, _hidden) = x.dims3()?;
let positional_embedding = self.positional_embedding.narrow(0, 0, seq_len)?;
let mut x = x.broadcast_add(&positional_embedding)?;
for block in self.blocks.iter_mut() {
x = block.forward(&x, None, None, flush_kv_cache)?
}
let x = self.ln_post.forward(&x)?;
Ok(x)
}
pub fn reset_kv_cache(&mut self) {
for block in self.blocks.iter_mut() {
block.reset_kv_cache();
}
}
}
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L176
#[derive(Debug, Clone)]
pub struct TextDecoder {
token_embedding: Embedding,
positional_embedding: Tensor,
blocks: Vec<ResidualAttentionBlock>,
ln: LayerNorm,
mask: Tensor,
span: tracing::Span,
span_final: tracing::Span,
}
impl TextDecoder {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "text-decoder");
let span_final = tracing::span!(tracing::Level::TRACE, "text-decoder-final");
let n_state = cfg.d_model;
let n_head = cfg.decoder_attention_heads;
let n_ctx = cfg.max_target_positions;
let token_embedding = Embedding::new(cfg.vocab_size, n_state, vb.pp("embed_tokens"))?;
let positional_embedding = vb
.get((n_ctx, n_state), "embed_positions.weight")?
.dequantize(vb.device())?;
let blocks = (0..cfg.decoder_layers)
.map(|i| {
ResidualAttentionBlock::load(n_state, n_head, true, vb.pp(format!("layers.{i}")))
})
.collect::<Result<Vec<_>>>()?;
let ln = layer_norm(n_state, 1e-5, vb.pp("layer_norm"))?;
let mask: Vec<_> = (0..n_ctx)
.flat_map(|i| (0..n_ctx).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 }))
.collect();
let mask = Tensor::from_vec(mask, (n_ctx, n_ctx), vb.device())?;
Ok(Self {
token_embedding,
positional_embedding,
blocks,
ln,
mask,
span,
span_final,
})
}
pub fn forward(&mut self, x: &Tensor, xa: &Tensor, flush_kv_cache: bool) -> Result<Tensor> {
let _enter = self.span.enter();
let last = x.dim(D::Minus1)?;
let token_embedding = self.token_embedding.forward(x)?;
let positional_embedding = self.positional_embedding.narrow(0, 0, last)?;
let mut x = token_embedding.broadcast_add(&positional_embedding)?;
for block in self.blocks.iter_mut() {
x = block.forward(&x, Some(xa), Some(&self.mask), flush_kv_cache)?;
}
self.ln.forward(&x)
}
pub fn final_linear(&self, x: &Tensor) -> Result<Tensor> {
let b_size = x.dim(0)?;
let w = self.token_embedding.embeddings().broadcast_left(b_size)?;
let logits = {
let _enter = self.span_final.enter();
x.matmul(&w.t()?)?
};
Ok(logits)
}
pub fn reset_kv_cache(&mut self) {
for block in self.blocks.iter_mut() {
block.reset_kv_cache();
}
}
}
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L221
#[derive(Debug, Clone)]
pub struct Whisper {
pub encoder: AudioEncoder,
pub decoder: TextDecoder,
pub config: Config,
}
impl Whisper {
pub fn load(vb: &VarBuilder, config: Config) -> Result<Self> {
let encoder = AudioEncoder::load(vb.pp("model.encoder"), &config)?;
let decoder = TextDecoder::load(vb.pp("model.decoder"), &config)?;
Ok(Self {
encoder,
decoder,
config,
})
}
pub fn reset_kv_cache(&mut self) {
self.encoder.reset_kv_cache();
self.decoder.reset_kv_cache();
}
}
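// A minimal loading sketch, assuming a quantized gguf checkpoint and the
// matching HF config.json are already on disk; both file names below are
// illustrative placeholders rather than paths tied to a specific model.
#[allow(dead_code)]
fn load_whisper_sketch(device: &Device) -> Result<Whisper> {
let vb = VarBuilder::from_gguf("model-q4_0.gguf", device)?;
let config: Config = serde_json::from_str(&std::fs::read_to_string("config.json")?)
.map_err(candle::Error::wrap)?;
Whisper::load(&vb, config)
}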
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |