repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/layer_norm.rs | candle-nn/tests/layer_norm.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use candle::{test_utils, Device, Tensor};
use candle_nn::{LayerNorm, Module};
#[test]
fn layer_norm() -> Result<()> {
    let dev = &Device::Cpu;
    // Scale 3 and shift 0.5, broadcast via concatenation to the sizes below.
    let weight = Tensor::new(&[3f32], dev)?;
    let bias = Tensor::new(&[0.5f32], dev)?;
    let ln2 = LayerNorm::new(
        Tensor::cat(&[&weight, &weight], 0)?,
        Tensor::cat(&[&bias, &bias], 0)?,
        1e-8,
    );
    let ln3 = LayerNorm::new(
        Tensor::cat(&[&weight, &weight, &weight], 0)?,
        Tensor::cat(&[&bias, &bias, &bias], 0)?,
        1e-8,
    );
    let ln1 = LayerNorm::new(weight, bias, 1e-8);

    // A single element normalizes to zero, so only the bias remains.
    let single = Tensor::new(&[[[2f32]]], dev)?;
    let out = ln1.forward(&single)?.flatten_all()?;
    assert_eq!(out.to_vec1::<f32>()?, [0.5f32]);

    // Two elements normalize to +/-1, then get scaled by 3 and shifted by 0.5.
    let pair = Tensor::new(&[[[4f32, 0f32]]], dev)?;
    let out = ln2.forward(&pair)?;
    assert_eq!(out.to_vec3::<f32>()?, [[[3.5f32, -2.5]]]);

    // Three elements per row, including a descending row.
    let inp = Tensor::new(&[[[1f32, 2., 3.], [4., 5., 6.], [9., 8., 7.]]], dev)?;
    let out = ln3.forward(&inp)?;
    assert_eq!(
        test_utils::to_vec3_round(&out, 4)?,
        [[
            [-3.1742, 0.5, 4.1742],
            [-3.1742, 0.5, 4.1742],
            [4.1742, 0.5, -3.1742]
        ]]
    );
    // The per-row average of the output should equal the bias `b`.
    let mean = (out.sum_keepdim(2)? / 3.0)?;
    assert_eq!(
        test_utils::to_vec3_round(&mean, 4)?,
        [[[0.5], [0.5], [0.5]]]
    );
    // The per-row standard deviation should be sqrt(`w`).
    let std = (out.broadcast_sub(&mean)?.sqr()?.sum_keepdim(2)?.sqrt()? / 3.0)?;
    assert_eq!(
        test_utils::to_vec3_round(&std, 4)?,
        [[[1.7321], [1.7321], [1.7321]]]
    );
    Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/sdpa.rs | candle-nn/tests/sdpa.rs | #[cfg(feature = "metal")]
mod metal_sdpa_tests {
use candle::{DType, Device, Result, Shape, Tensor};
use rand::SeedableRng;
use rand_distr::Distribution;
use std::ops::{Div, Mul};
/// Fills a tensor of the given shape with samples from a standard normal
/// distribution, drawn from the provided (seeded) RNG for reproducibility.
fn randn<S: Into<Shape>>(
rng: &mut rand::rngs::StdRng,
shape: S,
dev: &Device,
) -> Result<Tensor> {
let shape = shape.into();
let elem_count = shape.elem_count();
let normal = rand_distr::Normal::new(0.0, 1.0).unwrap();
let vs: Vec<f32> = (0..elem_count).map(|_| normal.sample(rng)).collect();
Tensor::from_vec(vs, &shape, dev)
}
/// Compares the Metal SDPA kernel against a naive softmax(q k^T * scale) v
/// reference for a query length that forces the "full" kernel path.
#[test]
fn sdpa_full() -> Result<()> {
// Test the full SDPA kernel path (q_seq > 8)
const BS: usize = 4;
const R: usize = 16;
const L: usize = 16;
const DK: usize = 64;
const H: usize = 3;
// Usual attention scaling: 1 / sqrt(head_dim).
let scale: f64 = f64::from(DK as u32).sqrt().recip();
let device = Device::new_metal(0)?;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let q = randn(&mut rng, (BS, H, R, DK), &device)?;
let k = randn(&mut rng, (BS, H, L, DK), &device)?;
let v = randn(&mut rng, (BS, H, L, DK), &device)?;
// Naive reference; the softmax is computed in f32 for accuracy.
let ground_truth = {
let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?;
let att = candle_nn::ops::softmax_last_dim(&att.to_dtype(DType::F32)?)?
.to_dtype(q.dtype())?;
att.matmul(&v.clone())?
};
let sdpa_output = candle_nn::ops::sdpa(&q, &k, &v, None, false, scale as f32, 1.)?;
assert_eq!(ground_truth.shape(), sdpa_output.shape());
// Sum of element-wise relative errors versus the reference.
let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)?
.sum_all()?
.to_scalar()?;
// Larger sequences have higher accumulated error
assert!(error <= 0.02, "{}", error);
Ok(())
}
/// Exercises the vectorized SDPA kernel (query length 1, key length 1).
#[test]
fn sdpa_vector() -> Result<()> {
// Allow vectorized, seqlen = 1
const BS: usize = 4;
const R: usize = 1;
const L: usize = 1;
const DK: usize = 64;
const H: usize = 3;
let scale: f64 = f64::from(DK as u32).sqrt().recip();
let device = Device::new_metal(0)?;
let mut rng = rand::rngs::StdRng::seed_from_u64(4242);
let q = randn(&mut rng, (BS, H, R, DK), &device)?;
let k = randn(&mut rng, (BS, H, L, DK), &device)?;
let v = randn(&mut rng, (BS, H, L, DK), &device)?;
let ground_truth = {
let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?;
let att = candle_nn::ops::softmax_last_dim(&att.to_dtype(DType::F32)?)?
.to_dtype(q.dtype())?;
att.matmul(&v.clone())?
};
let sdpa_output = candle_nn::ops::sdpa(&q, &k, &v, None, false, scale as f32, 1.)?;
assert_eq!(ground_truth.shape(), sdpa_output.shape());
let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)?
.sum_all()?
.to_scalar()?;
// With R = L = 1 the softmax over a single key is exactly 1, so the output
// must equal v bit-for-bit: zero error is expected.
assert!(error <= 0.000, "{}", error);
Ok(())
}
/// Softcapped attention checked against a tanh-based reference.
/// NOTE(review): despite the name, this uses R = 1 and therefore hits the
/// vector kernel path, as the inline comments below explain.
#[test]
fn sdpa_full_softcapping() -> Result<()> {
// Test softcapping with sdpa_vector kernel (q_seq = 1)
// NOTE: Vector kernel only supports q_seq = 1 correctly
// Full kernel does NOT support softcapping
const BS: usize = 4;
const R: usize = 1; // Vector kernel requires q_seq = 1
const L: usize = 4;
const DK: usize = 64;
const H: usize = 3;
const SOFTCAP: f64 = 50.;
let scale: f64 = f64::from(DK as u32).sqrt().recip();
let device = Device::new_metal(0)?;
let mut rng = rand::rngs::StdRng::seed_from_u64(424242);
let q = randn(&mut rng, (BS, H, R, DK), &device)?;
let k = randn(&mut rng, (BS, H, L, DK), &device)?;
let v = randn(&mut rng, (BS, H, L, DK), &device)?;
// Reference softcapping: softmax(tanh(att / cap) * cap).
let ground_truth = {
let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?;
let att = candle_nn::ops::softmax_last_dim(
&att.to_dtype(DType::F32)?
.div(SOFTCAP)?
.tanh()?
.mul(SOFTCAP)?,
)?
.to_dtype(q.dtype())?;
att.matmul(&v.clone())?
};
let sdpa_output =
candle_nn::ops::sdpa(&q, &k, &v, None, false, scale as f32, SOFTCAP as f32)?;
assert_eq!(ground_truth.shape(), sdpa_output.shape());
let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)?
.sum_all()?
.to_scalar()?;
// Slightly higher error for cross-attention case (R=1, L=4)
assert!(error <= 0.002, "{}", error);
Ok(())
}
/// Softcapped attention on the vector kernel path (R = L = 1).
#[test]
fn sdpa_vector_softcapping() -> Result<()> {
// Allow vectorized, seqlen = 1
const BS: usize = 4;
const R: usize = 1;
const L: usize = 1;
const DK: usize = 64;
const H: usize = 3;
const SOFTCAP: f64 = 50.;
let scale: f64 = f64::from(DK as u32).sqrt().recip();
let device = Device::new_metal(0)?;
let mut rng = rand::rngs::StdRng::seed_from_u64(42424242);
let q = randn(&mut rng, (BS, H, R, DK), &device)?;
let k = randn(&mut rng, (BS, H, L, DK), &device)?;
let v = randn(&mut rng, (BS, H, L, DK), &device)?;
let ground_truth = {
let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?;
let att = candle_nn::ops::softmax_last_dim(
&att.to_dtype(DType::F32)?
.div(SOFTCAP)?
.tanh()?
.mul(SOFTCAP)?,
)?
.to_dtype(q.dtype())?;
att.matmul(&v.clone())?
};
let sdpa_output =
candle_nn::ops::sdpa(&q, &k, &v, None, false, scale as f32, SOFTCAP as f32)?;
assert_eq!(ground_truth.shape(), sdpa_output.shape());
let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)?
.sum_all()?
.to_scalar()?;
assert!(error <= 0.0001, "{}", error);
Ok(())
}
/// Vector kernel with differing query/key lengths (cross-attention shape).
#[test]
fn sdpa_vector_cross() -> Result<()> {
// Allow vectorized, seqlen = 1. Simulate cross attention case where R != L, R = 1
const BS: usize = 4;
const R: usize = 1;
const L: usize = 24;
const DK: usize = 64;
const H: usize = 3;
let scale: f64 = f64::from(DK as u32).sqrt().recip();
let device = Device::new_metal(0)?;
let mut rng = rand::rngs::StdRng::seed_from_u64(4242424242);
let q = randn(&mut rng, (BS, H, R, DK), &device)?;
let k = randn(&mut rng, (BS, H, L, DK), &device)?;
let v = randn(&mut rng, (BS, H, L, DK), &device)?;
let ground_truth = {
let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?;
let att = candle_nn::ops::softmax_last_dim(&att.to_dtype(DType::F32)?)?
.to_dtype(q.dtype())?;
att.matmul(&v.clone())?
};
let sdpa_output = candle_nn::ops::sdpa(&q, &k, &v, None, false, scale as f32, 1.)?;
assert_eq!(ground_truth.shape(), sdpa_output.shape());
let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)?
.sum_all()?
.to_scalar()?;
assert!(error <= 0.0013, "{}", error);
Ok(())
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/rnn.rs | candle-nn/tests/rnn.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::{test_utils::to_vec2_round, DType, Device, Result, Tensor};
use candle_nn::RNN;
/* The following test can be verified against PyTorch using the following snippet.
import torch
from torch import nn
lstm = nn.LSTM(2, 3, 1)
lstm.weight_ih_l0 = torch.nn.Parameter(torch.arange(0., 24.).reshape(12, 2).cos())
lstm.weight_hh_l0 = torch.nn.Parameter(torch.arange(0., 36.).reshape(12, 3).sin())
lstm.bias_ih_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1, 1, -0.5, 2]))
lstm.bias_hh_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1, 1, -0.5, 2]).cos())
state = torch.zeros((1, 3)), torch.zeros((1, 3))
for inp in [3., 1., 4., 1., 5., 9., 2.]:
inp = torch.tensor([[inp, inp * 0.5]])
_out, state = lstm(inp, state)
print(state)
# (tensor([[ 0.9919, 0.1738, -0.1451]], grad_fn=...), tensor([[ 5.7250, 0.4458, -0.2908]], grad_fn=...))
*/
#[test]
fn lstm() -> Result<()> {
    let dev = &Device::Cpu;
    // Deterministic weights matching the PyTorch snippet above: cos/sin of a range.
    let weight_ih = Tensor::arange(0f32, 24f32, dev)?.reshape((12, 2))?.cos()?;
    let weight_hh = Tensor::arange(0f32, 36f32, dev)?.reshape((12, 3))?.sin()?;
    let bias_ih = Tensor::new(
        &[-1f32, 1., -0.5, 2., -1., 1., -0.5, 2., -1., 1., -0.5, 2.],
        dev,
    )?;
    let bias_hh = bias_ih.cos()?;
    // Load the weights through a VarBuilder using the PyTorch parameter names.
    let tensors: std::collections::HashMap<_, _> = [
        ("weight_ih_l0".to_string(), weight_ih),
        ("weight_hh_l0".to_string(), weight_hh),
        ("bias_ih_l0".to_string(), bias_ih),
        ("bias_hh_l0".to_string(), bias_hh),
    ]
    .into_iter()
    .collect();
    let vb = candle_nn::VarBuilder::from_tensors(tensors, DType::F32, dev);
    let lstm = candle_nn::lstm(2, 3, Default::default(), vb)?;
    // Run the cell step by step over a short input sequence.
    let mut state = lstm.zero_state(1)?;
    for x in [3f32, 1., 4., 1., 5., 9., 2.] {
        let input = Tensor::new(&[[x, x * 0.5]], dev)?;
        state = lstm.step(&input, &state)?;
    }
    // Final hidden and cell states, checked against the PyTorch reference.
    assert_eq!(to_vec2_round(state.h(), 4)?, &[[0.9919, 0.1738, -0.1451]]);
    assert_eq!(to_vec2_round(state.c(), 4)?, &[[5.725, 0.4458, -0.2908]]);
    Ok(())
}
/* The following test can be verified against PyTorch using the following snippet.
import torch
from torch import nn
gru = nn.GRU(2, 3, 1)
gru.weight_ih_l0 = torch.nn.Parameter(torch.arange(0., 18.).reshape(9, 2).cos())
gru.weight_hh_l0 = torch.nn.Parameter(torch.arange(0., 27.).reshape(9, 3).sin())
gru.bias_ih_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1]))
gru.bias_hh_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1]).cos())
state = torch.zeros((1, 3))
for inp in [3., 1., 4., 1., 5., 9., 2.]:
inp = torch.tensor([[inp, inp * 0.5]])
_out, state = gru(inp, state)
print(state)
# tensor([[ 0.0579, 0.8836, -0.9991]], grad_fn=<SqueezeBackward1>)
*/
#[test]
fn gru() -> Result<()> {
    let dev = &Device::Cpu;
    // Deterministic weights matching the PyTorch snippet above: cos/sin of a range.
    let weight_ih = Tensor::arange(0f32, 18f32, dev)?.reshape((9, 2))?.cos()?;
    let weight_hh = Tensor::arange(0f32, 27f32, dev)?.reshape((9, 3))?.sin()?;
    let bias_ih = Tensor::new(&[-1f32, 1., -0.5, 2., -1., 1., -0.5, 2., -1.], dev)?;
    let bias_hh = bias_ih.cos()?;
    // Load the weights through a VarBuilder using the PyTorch parameter names.
    let tensors: std::collections::HashMap<_, _> = [
        ("weight_ih_l0".to_string(), weight_ih),
        ("weight_hh_l0".to_string(), weight_hh),
        ("bias_ih_l0".to_string(), bias_ih),
        ("bias_hh_l0".to_string(), bias_hh),
    ]
    .into_iter()
    .collect();
    let vb = candle_nn::VarBuilder::from_tensors(tensors, DType::F32, dev);
    let gru = candle_nn::gru(2, 3, Default::default(), vb)?;
    // Run the cell step by step over a short input sequence.
    let mut state = gru.zero_state(1)?;
    for x in [3f32, 1., 4., 1., 5., 9., 2.] {
        let input = Tensor::new(&[[x, x * 0.5]], dev)?;
        state = gru.step(&input, &state)?;
    }
    // Final hidden state, checked against the PyTorch reference.
    assert_eq!(to_vec2_round(state.h(), 4)?, &[[0.0579, 0.8836, -0.9991]]);
    Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/optim.rs | candle-nn/tests/optim.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::test_utils::{to_vec0_round, to_vec2_round};
use anyhow::Result;
use candle::{DType, Device, Tensor, Var};
use candle_nn::{AdamW, Linear, Module, Optimizer, ParamsAdamW, SGD};
#[test]
fn sgd_optim() -> Result<()> {
    // Minimize (x - 4.2)^2 starting from x = 0 with plain SGD, lr = 0.1.
    let x = Var::new(0f32, &Device::Cpu)?;
    let mut optimizer = SGD::new(vec![x.clone()], 0.1)?;
    for _ in 0..100 {
        let xt = x.as_tensor();
        let loss = ((xt - 4.2)? * (xt - 4.2)?)?;
        optimizer.backward_step(&loss)?;
    }
    // After 100 steps x should be (numerically) at the optimum.
    assert_eq!(x.to_scalar::<f32>()?, 4.199999);
    Ok(())
}
/* The results of this test have been checked against the following PyTorch code.
import torch
from torch import optim
w_gen = torch.tensor([[3., 1.]])
b_gen = torch.tensor([-2.])
sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]])
sample_ys = sample_xs.matmul(w_gen.t()) + b_gen
m = torch.nn.Linear(2, 1)
with torch.no_grad():
m.weight.zero_()
m.bias.zero_()
optimizer = optim.SGD(m.parameters(), lr=0.004, momentum=0.)
for _step in range(1000):
optimizer.zero_grad()
ys = m(sample_xs)
loss = ((ys - sample_ys)**2).sum()
loss.backward()
optimizer.step()
print(m.weight)
print(m.bias)
*/
#[test]
fn sgd_linear_regression() -> Result<()> {
    let dev = &Device::Cpu;
    // Generate some linear data, y = 3.x1 + x2 - 2.
    let w_gen = Tensor::new(&[[3f32, 1.]], dev)?;
    let b_gen = Tensor::new(-2f32, dev)?;
    let gen = Linear::new(w_gen, Some(b_gen));
    let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], dev)?;
    let sample_ys = gen.forward(&sample_xs)?;
    // Fit a zero-initialized linear model and recover the coefficients with SGD.
    let w = Var::new(&[[0f32, 0.]], dev)?;
    let b = Var::new(0f32, dev)?;
    let mut optimizer = SGD::new(vec![w.clone(), b.clone()], 0.004)?;
    let model = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone()));
    for _ in 0..1000 {
        let predictions = model.forward(&sample_xs)?;
        let loss = predictions.sub(&sample_ys)?.sqr()?.sum_all()?;
        optimizer.backward_step(&loss)?;
    }
    // Recovered coefficients, matching the PyTorch reference values.
    assert_eq!(w.to_vec2::<f32>()?, &[[2.9983196, 0.99790204]]);
    assert_eq!(b.to_scalar::<f32>()?, -1.9796902);
    Ok(())
}
/* The following test returns the same values as the PyTorch code below.
import torch
from torch import optim
w_gen = torch.tensor([[3., 1.]])
b_gen = torch.tensor([-2.])
sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]])
sample_ys = sample_xs.matmul(w_gen.t()) + b_gen
m = torch.nn.Linear(2, 1)
with torch.no_grad():
m.weight.zero_()
m.bias.zero_()
optimizer = optim.AdamW(m.parameters(), lr=0.1)
for _step in range(100):
optimizer.zero_grad()
ys = m(sample_xs)
loss = ((ys - sample_ys)**2).sum()
loss.backward()
optimizer.step()
print(m.weight)
print(m.bias)
*/
#[test]
fn adamw_linear_regression() -> Result<()> {
    let dev = &Device::Cpu;
    // Samples drawn from the ground-truth model y = 3.x1 + x2 - 2.
    let w_gen = Tensor::new(&[[3f32, 1.]], dev)?;
    let b_gen = Tensor::new(-2f32, dev)?;
    let gen = Linear::new(w_gen, Some(b_gen));
    let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], dev)?;
    let sample_ys = gen.forward(&sample_xs)?;
    // Fit a zero-initialized linear model with AdamW, lr = 0.1.
    let w = Var::new(&[[0f32, 0.]], dev)?;
    let b = Var::new(0f32, dev)?;
    let params = ParamsAdamW {
        lr: 0.1,
        ..Default::default()
    };
    let mut opt = AdamW::new(vec![w.clone(), b.clone()], params)?;
    let lin = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone()));
    for _ in 0..100 {
        let ys = lin.forward(&sample_xs)?;
        let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?;
        opt.backward_step(&loss)?;
    }
    // Values after 100 steps, matching the PyTorch reference.
    assert_eq!(to_vec2_round(w.as_tensor(), 4)?, &[[2.7257, 0.7097]]);
    assert_eq!(to_vec0_round(b.as_tensor(), 4)?, 0.7873);
    Ok(())
}
#[test]
fn adamw_linear_regression_varmap() -> Result<()> {
    use candle_nn::Init::Const;
    // Same regression as above, but the trainable variables live in a VarMap.
    let dev = &Device::Cpu;
    let w_gen = Tensor::new(&[[3f32, 1.]], dev)?;
    let b_gen = Tensor::new(-2f32, dev)?;
    let gen = Linear::new(w_gen, Some(b_gen));
    let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], dev)?;
    let sample_ys = gen.forward(&sample_xs)?;
    let mut var_map = candle_nn::VarMap::new();
    let w = var_map.get((1, 2), "w", Const(0.), DType::F32, dev)?;
    let b = var_map.get((), "b", Const(0.), DType::F32, dev)?;
    let params = ParamsAdamW {
        lr: 0.1,
        ..Default::default()
    };
    let mut opt = AdamW::new(var_map.all_vars(), params)?;
    let lin = Linear::new(w, Some(b));
    for _ in 0..100 {
        let ys = lin.forward(&sample_xs)?;
        let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?;
        opt.backward_step(&loss)?;
    }
    // Same reference values as the non-VarMap test above.
    assert_eq!(to_vec2_round(lin.weight(), 4)?, &[[2.7257, 0.7097]]);
    assert_eq!(to_vec0_round(lin.bias().unwrap(), 4)?, 0.7873);
    // Values written through the VarMap must be visible from the linear layer.
    var_map.set([("w", Tensor::zeros((1, 2), DType::F32, dev)?)].into_iter())?;
    var_map.set([("b", Tensor::ones((), DType::F32, dev)?)].into_iter())?;
    assert_eq!(to_vec2_round(lin.weight(), 4)?, &[[0., 0.]]);
    assert_eq!(to_vec0_round(lin.bias().unwrap(), 4)?, 1.);
    Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/batch_norm.rs | candle-nn/tests/batch_norm.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use candle::{test_utils, DType, Device, Tensor};
use candle_nn::{batch_norm, BatchNorm, BatchNormConfig, VarBuilder, VarMap};
/* The test below has been generated using the following PyTorch code:
import torch
torch.manual_seed(19551105)
m = torch.nn.BatchNorm2d(5, affine=False)
input = torch.randn(2, 5, 3, 4)
output = m(input)
print(input.flatten())
print(output.flatten())
print(m.running_mean)
print(m.running_var)
*/
#[test]
fn batch_norm_test() -> Result<()> {
// Batch norm over 5 channels, starting from the default statistics
// (running mean 0, running variance 1) and no affine parameters.
let running_mean = Tensor::zeros(5, DType::F32, &Device::Cpu)?;
let running_var = Tensor::ones(5, DType::F32, &Device::Cpu)?;
let bn = BatchNorm::new_no_bias(5, running_mean.clone(), running_var.clone(), 1e-8)?;
// Input values generated by the PyTorch snippet above (torch.randn(2, 5, 3, 4)).
let input: [f32; 120] = [
-0.7493, -1.0410, 1.6977, -0.6579, 1.7982, -0.0087, 0.2812, -0.1190, 0.2908, -0.5975,
-0.0278, -0.2138, -1.3130, -1.6048, -2.2028, 0.9452, 0.4002, 0.0831, 1.0004, 0.1860,
0.5004, 0.5539, 0.9991, -0.2540, -0.0703, -0.3752, -0.1096, -0.2374, 1.0258, -2.2208,
-0.0257, 0.6073, -1.1627, -0.0964, -1.9718, 1.6577, 0.1931, -0.3692, -0.8011, 0.9059,
0.4797, 0.6521, -0.0165, -0.6683, -0.4148, 2.0649, -0.8276, 1.7947, -0.2061, 0.5812,
-1.3598, 1.6192, 1.0466, -0.4423, 0.4202, 0.1749, 0.6969, 0.2616, -0.0369, -1.4951,
-0.0814, -0.1877, 0.0267, 0.6150, 0.2402, -1.1440, -2.0068, 0.6032, -2.6639, 0.8260,
0.1085, -0.1693, 1.2805, 0.7654, -0.4930, 0.3770, 1.1309, 0.2303, 0.2949, -0.2634, -0.5225,
0.4269, 0.6341, 1.5736, 0.9827, -1.2499, 0.3509, -1.6243, -0.8123, 0.7634, -0.3047, 0.0143,
-0.4032, 0.0537, 0.7022, 0.8405, -1.2221, -1.6847, -0.0714, -0.1608, 0.5579, -1.5858,
0.4617, -0.6480, 0.1332, 0.0419, -0.9784, 0.4173, 1.2313, -1.9046, -0.1656, 0.1259, 0.0763,
1.4252, -0.9115, -0.1093, -0.3100, -0.6734, -1.4357, 0.9205,
];
let input = Tensor::new(&input, &Device::Cpu)?.reshape((2, 5, 3, 4))?;
// forward_train normalizes with batch statistics (training mode).
let output = bn.forward_train(&input)?;
assert_eq!(output.dims(), &[2, 5, 3, 4]);
let output = output.flatten_all()?;
// Expected normalized values, taken from the PyTorch reference output.
assert_eq!(
test_utils::to_vec1_round(&output, 4)?,
&[
-0.6391, -0.9414, 1.8965, -0.5444, 2.0007, 0.1283, 0.4287, 0.014, 0.4387, -0.4818,
0.1085, -0.0842, -1.6809, -2.0057, -2.6714, 0.8328, 0.2262, -0.1268, 0.8943, -0.0123,
0.3377, 0.3973, 0.8928, -0.5021, 0.0861, -0.2324, 0.0451, -0.0884, 1.2311, -2.1603,
0.1327, 0.7939, -1.055, 0.0589, -1.9002, 1.8912, 0.2918, -0.3253, -0.7993, 1.0741,
0.6063, 0.7955, 0.0617, -0.6536, -0.3754, 2.3461, -0.8284, 2.0495, -0.201, 0.6476,
-1.4446, 1.7665, 1.1493, -0.4556, 0.4741, 0.2097, 0.7723, 0.3031, -0.0186, -1.5905,
0.053, -0.0572, 0.165, 0.7746, 0.3862, -1.0481, -1.9422, 0.7624, -2.6231, 0.9933,
0.2498, -0.0381, 1.2061, 0.6327, -0.7681, 0.2004, 1.0396, 0.037, 0.109, -0.5125,
-0.8009, 0.2559, 0.4865, 1.5324, 1.1861, -1.1461, 0.5261, -1.5372, -0.689, 0.957,
-0.1587, 0.1745, -0.2616, 0.2156, 0.8931, 1.0375, -1.2614, -1.7691, 0.0015, -0.0966,
0.6921, -1.6605, 0.5866, -0.6313, 0.226, 0.1258, -0.9939, 0.5378, 1.3484, -2.0319,
-0.1574, 0.1568, 0.1034, 1.5574, -0.9614, -0.0967, -0.313, -0.7047, -1.5264, 1.0134
]
);
// A second layer with weight 0.5 and bias -1.5 broadcast over the channels:
// its output must equal 0.5 * output - 1.5 of the no-affine layer.
let bn2 = BatchNorm::new(
5,
running_mean,
running_var,
Tensor::new(&[0.5f32], &Device::Cpu)?.broadcast_as(5)?,
Tensor::new(&[-1.5f32], &Device::Cpu)?.broadcast_as(5)?,
1e-8,
)?;
let output2 = bn2.forward_train(&input)?;
assert_eq!(output2.dims(), &[2, 5, 3, 4]);
let output2 = output2.flatten_all()?;
// Squared difference against the affine-transformed first output should vanish.
let diff2 = ((output2 - (output * 0.5)?)? + 1.5)?.sqr()?;
let sum_diff2 = diff2.sum_keepdim(0)?;
assert_eq!(test_utils::to_vec1_round(&sum_diff2, 4)?, &[0f32]);
// forward_train also updates the running statistics; check them against the
// PyTorch reference running_mean / running_var.
assert_eq!(
test_utils::to_vec1_round(bn.running_mean(), 4)?,
&[-0.0133, 0.0197, -0.0153, -0.0073, -0.0020]
);
assert_eq!(
test_utils::to_vec1_round(bn.running_var(), 4)?,
&[0.9972, 0.9842, 0.9956, 0.9866, 0.9898]
);
Ok(())
}
// This test makes sure that we can train a batch norm layer using a VarMap.
#[test]
fn train_batch_norm() -> Result<()> {
    let vm = VarMap::new();
    let vb = VarBuilder::from_varmap(&vm, DType::F32, &Device::Cpu);
    let bn = batch_norm(1, BatchNormConfig::default(), vb)?;
    // Snapshot the running mean so we can later check that it gets updated.
    let original_mean = bn.running_mean().detach().copy()?;
    let var_map_mean = vm
        .data()
        .lock()
        .unwrap()
        .get("running_mean")
        .unwrap()
        .clone();
    // The var map mean starts out identical to the layer's running mean.
    assert_eq!(
        test_utils::to_vec1_round(bn.running_mean(), 4)?,
        test_utils::to_vec1_round(var_map_mean.as_tensor(), 4)?,
    );
    // Feed an input guaranteed to differ from the current running mean.
    let mean_plus_one = original_mean
        .add(&original_mean.ones_like()?)?
        .reshape((1, 1))?;
    bn.forward_train(&mean_plus_one)?;
    // The running mean must have moved away from its original value...
    assert_ne!(
        test_utils::to_vec1_round(bn.running_mean(), 4)?,
        test_utils::to_vec1_round(&original_mean, 4)?,
    );
    // ...and the var map entry must track the updated value.
    assert_eq!(
        test_utils::to_vec1_round(bn.running_mean(), 4)?,
        test_utils::to_vec1_round(var_map_mean.as_tensor(), 4)?,
    );
    Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/loss.rs | candle-nn/tests/loss.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::test_utils::to_vec0_round;
use candle::{Device, Result, Tensor};
/* Equivalent python code:
import torch
import torch.nn.functional as F
input = torch.tensor([
[ 1.1050, 0.3013, -1.5394, -2.1528, -0.8634],
[ 1.0730, -0.9419, -0.1670, -0.6582, 0.5061],
[ 0.8318, 1.1154, -0.3610, 0.5351, 1.0830]])
target = torch.tensor([1, 0, 4])
print(F.nll_loss(F.log_softmax(input, dim=1), target))
print(F.cross_entropy(input, target))
*/
#[test]
fn nll_and_cross_entropy() -> Result<()> {
    let dev = Device::Cpu;
    // Logits for 3 samples over 5 classes, matching the python snippet above.
    let input = Tensor::new(
        &[
            [1.1050f32, 0.3013, -1.5394, -2.1528, -0.8634],
            [1.0730, -0.9419, -0.1670, -0.6582, 0.5061],
            [0.8318, 1.1154, -0.3610, 0.5351, 1.0830],
        ],
        &dev,
    )?;
    let target = Tensor::new(&[1u32, 0, 4], &dev)?;
    // NLL on log-softmax and cross-entropy on raw logits must agree.
    let log_softmax = candle_nn::ops::log_softmax(&input, 1)?;
    let nll = candle_nn::loss::nll(&log_softmax, &target)?;
    assert_eq!(to_vec0_round(&nll, 4)?, 1.1312);
    let ce = candle_nn::loss::cross_entropy(&input, &target)?;
    assert_eq!(to_vec0_round(&ce, 4)?, 1.1312);
    Ok(())
}
/* Equivalent python code:
import torch
import torch.nn.functional as F
inp = torch.Tensor([[ 2.3611, -0.8813, -0.5006, -0.2178],
[ 0.0419, 0.0763, -1.0457, -1.6692],
[-1.0494, 0.8111, 1.5723, 1.2315],
[ 1.3081, 0.6641, 1.1802, -0.2547],
[ 0.5292, 0.7636, 0.3692, -0.8318]])
target = torch.Tensor([[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
print(F.binary_cross_entropy_with_logits(inp, target))
*/
#[test]
fn binary_cross_entropy_with_logit() -> Result<()> {
    let dev = Device::Cpu;
    // Logits and one-hot targets from the python snippet above.
    let inp = Tensor::new(
        &[
            [2.3611f32, -0.8813, -0.5006, -0.2178],
            [0.0419, 0.0763, -1.0457, -1.6692],
            [-1.0494, 0.8111, 1.5723, 1.2315],
            [1.3081, 0.6641, 1.1802, -0.2547],
            [0.5292, 0.7636, 0.3692, -0.8318],
        ],
        &dev,
    )?;
    let target = Tensor::new(
        &[
            [0.0f32, 1., 0., 0.],
            [0., 1., 0., 0.],
            [0., 0., 0., 1.],
            [1., 0., 0., 0.],
            [0., 0., 1., 0.],
        ],
        &dev,
    )?;
    // Mean BCE-with-logits, checked against the PyTorch reference value.
    let loss = candle_nn::loss::binary_cross_entropy_with_logit(&inp, &target)?;
    assert_eq!(to_vec0_round(&loss, 4)?, 0.8224);
    Ok(())
}
/* Equivalent python code:
import torch
import torch.nn.functional as F
inp = torch.Tensor([[ 2.3611, -0.8813, -0.5006, -0.2178],
[ 0.0419, 0.0763, -1.0457, -1.6692],
[-1.0494, 0.8111, 1.5723, 1.2315],
[ 1.3081, 0.6641, 1.1802, -0.2547],
[ 0.5292, 0.7636, 0.3692, -0.8318]])
target = torch.Tensor([[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
print(F.huber_loss(inp, target))
print(F.huber_loss(inp,target,delta=0.88))
*/
#[test]
fn huber_loss() -> Result<()> {
    let dev = Device::Cpu;
    // Same inputs/targets as the BCE test, from the python snippet above.
    let inp = Tensor::new(
        &[
            [2.3611f32, -0.8813, -0.5006, -0.2178],
            [0.0419, 0.0763, -1.0457, -1.6692],
            [-1.0494, 0.8111, 1.5723, 1.2315],
            [1.3081, 0.6641, 1.1802, -0.2547],
            [0.5292, 0.7636, 0.3692, -0.8318],
        ],
        &dev,
    )?;
    let target = Tensor::new(
        &[
            [0.0f32, 1., 0., 0.],
            [0., 1., 0., 0.],
            [0., 0., 0., 1.],
            [1., 0., 0., 0.],
            [0., 0., 1., 0.],
        ],
        &dev,
    )?;
    // Default delta = 1.0 and a custom delta = 0.88, both checked against PyTorch.
    let loss = candle_nn::loss::huber(&inp, &target, 1.0)?;
    assert_eq!(to_vec0_round(&loss, 4)?, 0.4734);
    let loss = candle_nn::loss::huber(&inp, &target, 0.88)?;
    assert_eq!(to_vec0_round(&loss, 4)?, 0.4483);
    Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/group_norm.rs | candle-nn/tests/group_norm.rs | /* Equivalent PyTorch code.
import torch
from torch.nn.functional import group_norm
t = torch.tensor(
[[[-0.3034, 0.2726, -0.9659],
[-1.1845, -1.3236, 0.0172],
[ 1.9507, 1.2554, -0.8625],
[ 1.0682, 0.3604, 0.3985],
[-0.4957, -0.4461, -0.9721],
[ 1.5157, -0.1546, -0.5596]],
[[-1.6698, -0.4040, -0.7927],
[ 0.3736, -0.0975, -0.1351],
[-0.9461, 0.5461, -0.6334],
[-1.0919, -0.1158, 0.1213],
[-0.9535, 0.1281, 0.4372],
[-0.2845, 0.3488, 0.5641]]])
print(group_norm(t, num_groups=2))
print(group_norm(t, num_groups=3))
*/
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use candle::test_utils::to_vec3_round;
use candle::{Device, Tensor};
use candle_nn::{GroupNorm, Module};
#[test]
fn group_norm() -> Result<()> {
let device = &Device::Cpu;
// Identity affine parameters (weight 1, bias 0) over 6 channels so the test
// only checks the normalization itself.
let w = Tensor::from_vec(vec![1f32; 6], 6, device)?;
let b = Tensor::from_vec(vec![0f32; 6], 6, device)?;
// Same 6 channels split into 2 groups and 3 groups respectively.
let gn2 = GroupNorm::new(w.clone(), b.clone(), 6, 2, 1e-5)?;
let gn3 = GroupNorm::new(w, b, 6, 3, 1e-5)?;
// Input from the PyTorch snippet above.
let input = Tensor::new(
&[
[
[-0.3034f32, 0.2726, -0.9659],
[-1.1845, -1.3236, 0.0172],
[1.9507, 1.2554, -0.8625],
[1.0682, 0.3604, 0.3985],
[-0.4957, -0.4461, -0.9721],
[1.5157, -0.1546, -0.5596],
],
[
[-1.6698, -0.4040, -0.7927],
[0.3736, -0.0975, -0.1351],
[-0.9461, 0.5461, -0.6334],
[-1.0919, -0.1158, 0.1213],
[-0.9535, 0.1281, 0.4372],
[-0.2845, 0.3488, 0.5641],
],
],
device,
)?;
// Expected values from torch group_norm with num_groups=2.
assert_eq!(
to_vec3_round(&gn2.forward(&input)?, 4)?,
&[
[
[-0.1653, 0.3748, -0.7866],
[-0.9916, -1.1220, 0.1353],
[1.9485, 1.2965, -0.6896],
[1.2769, 0.3628, 0.4120],
[-0.7427, -0.6786, -1.3578],
[1.8547, -0.3022, -0.8252]
],
[
[-1.9342, 0.0211, -0.5793],
[1.2223, 0.4945, 0.4365],
[-0.8163, 1.4887, -0.3333],
[-1.7960, -0.0392, 0.3875],
[-1.5469, 0.3998, 0.9561],
[-0.3428, 0.7970, 1.1845]
]
]
);
// Expected values from torch group_norm with num_groups=3.
assert_eq!(
to_vec3_round(&gn3.forward(&input)?, 4)?,
&[
[
[0.4560, 1.4014, -0.6313],
[-0.9901, -1.2184, 0.9822],
[1.4254, 0.6360, -1.7682],
[0.4235, -0.3800, -0.3367],
[-0.3890, -0.3268, -0.9862],
[2.1325, 0.0386, -0.4691]
],
[
[-1.8797, 0.0777, -0.5234],
[1.2802, 0.5517, 0.4935],
[-1.0102, 1.5327, -0.4773],
[-1.2587, 0.4047, 0.8088],
[-1.9074, 0.1691, 0.7625],
[-0.6230, 0.5928, 1.0061]
]
]
);
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/kv_cache.rs | candle-nn/tests/kv_cache.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::{Device, Result, Tensor};
#[test]
fn kv_cache() -> Result<()> {
    let dev = &Device::Cpu;
    let mut cache = candle_nn::kv_cache::Cache::new(0, 16);
    // Run the whole scenario twice to check that reset() restores a blank state.
    for _round in 0..2 {
        assert_eq!(cache.current_seq_len(), 0);
        assert!(cache.current_data()?.is_none());
        // Successive appends concatenate along the sequence dimension.
        cache.append(&Tensor::new(&[1f32, 2., 3.], dev)?)?;
        let data = cache.current_data()?.unwrap();
        assert_eq!(data.to_vec1::<f32>()?, [1., 2., 3.]);
        cache.append(&Tensor::new(&[4f32], dev)?)?;
        let data = cache.current_data()?.unwrap();
        assert_eq!(data.to_vec1::<f32>()?, [1., 2., 3., 4.]);
        cache.append(&Tensor::new(&[0f32, 5., 6., 7.], dev)?)?;
        let data = cache.current_data()?.unwrap();
        assert_eq!(data.to_vec1::<f32>()?, [1., 2., 3., 4., 0., 5., 6., 7.]);
        assert_eq!(cache.current_seq_len(), 8);
        cache.reset();
    }
    Ok(())
}
#[test]
fn rotating_kv_cache() -> Result<()> {
// Ring-buffer style cache with a capacity of 6 entries along dim 0.
let mut cache = candle_nn::kv_cache::RotatingCache::new(0, 6);
// Run the whole scenario twice to check that reset() restores a blank state.
for _ in [0, 1] {
assert_eq!(cache.offset(), 0);
assert_eq!(cache.current_seq_len(), 0);
let data = cache.current_data()?;
assert!(data.is_none());
// Positions for hypothetical appends on an empty cache.
assert_eq!(cache.positions(1), &[0]);
assert_eq!(cache.positions(2), &[0, 1]);
let t = Tensor::new(&[1., 2., 3.], &Device::Cpu)?;
let data = cache.append(&t)?;
assert_eq!(data.to_vec1::<f64>()?, [1., 2., 3.]);
assert_eq!(cache.positions(0), &[0, 1, 2]);
assert_eq!(cache.positions(1), &[0, 1, 2, 3]);
assert_eq!(cache.positions(2), &[0, 1, 2, 3, 4]);
assert_eq!(cache.positions(3), &[0, 1, 2, 3, 4, 5]);
// A 4-element append would exceed the capacity: position 6 wraps to slot 0.
assert_eq!(cache.positions(4), &[6, 1, 2, 3, 4, 5]);
let t = Tensor::new(&[4.], &Device::Cpu)?;
let data = cache.append(&t)?;
assert_eq!(data.to_vec1::<f64>()?, [1., 2., 3., 4.]);
// Appending past the capacity wraps around and overwrites the oldest slots.
let t = Tensor::new(&[0., 5., 6., 7.], &Device::Cpu)?;
let data = cache.append(&t)?;
assert_eq!(data.to_vec1::<f64>()?, [6., 7., 3., 4., 0., 5.]);
// current_seq_len keeps counting the total appended elements (8 so far),
// while offset tracks the next write slot inside the ring.
assert_eq!(cache.current_seq_len(), 8);
assert_eq!(cache.offset(), 2);
let t = Tensor::new(&[8.], &Device::Cpu)?;
let data = cache.append(&t)?;
assert_eq!(data.to_vec1::<f64>()?, [6., 7., 8., 4., 0., 5.]);
assert_eq!(cache.current_seq_len(), 9);
assert_eq!(cache.offset(), 3);
let t = Tensor::new(&[9., 10., 11.], &Device::Cpu)?;
let data = cache.append(&t)?;
assert_eq!(data.to_vec1::<f64>()?, [6., 7., 8., 9., 10., 11.]);
assert_eq!(cache.current_seq_len(), 12);
assert_eq!(cache.offset(), 0);
let t = Tensor::new(&[12.], &Device::Cpu)?;
let data = cache.append(&t)?;
assert_eq!(data.to_vec1::<f64>()?, [12., 7., 8., 9., 10., 11.]);
assert_eq!(cache.current_seq_len(), 13);
assert_eq!(cache.offset(), 1);
// Attention masks over the rotated layout: 1 marks masked-out slots.
let mask = cache.attn_mask(2, &Device::Cpu)?.unwrap();
assert_eq!(
mask.to_vec2::<u8>()?,
&[[0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
);
let mask = cache.attn_mask(3, &Device::Cpu)?.unwrap();
assert_eq!(
mask.to_vec2::<u8>()?,
&[[0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0]],
);
assert_eq!(cache.positions(0), &[12, 7, 8, 9, 10, 11]);
assert_eq!(cache.positions(2), &[12, 13, 14, 9, 10, 11]);
assert_eq!(cache.positions(3), &[12, 13, 14, 15, 10, 11]);
// A query longer than the capacity reports one position per query element.
assert_eq!(cache.positions(8), &[13, 14, 15, 16, 17, 18, 19, 20]);
// Appending more elements than the capacity: the returned data is the raw
// input and the cache keeps only the last 6 values.
let t = Tensor::new(&[0., 1., 2., 3., 4., 5., 6., 7., 8.], &Device::Cpu)?;
let data = cache.append(&t)?;
assert_eq!(data.to_vec1::<f64>()?, [0., 1., 2., 3., 4., 5., 6., 7., 8.]);
assert_eq!(cache.current_seq_len(), 22);
assert_eq!(cache.offset(), 0);
assert_eq!(cache.positions(0), &[16, 17, 18, 19, 20, 21]);
assert_eq!(cache.positions(1), &[22, 17, 18, 19, 20, 21]);
// A single-step query needs no mask at all.
let mask = cache.attn_mask(1, &Device::Cpu)?;
assert!(mask.is_none());
let mask = cache.attn_mask(2, &Device::Cpu)?.unwrap();
assert_eq!(
mask.to_vec2::<u8>()?,
&[[0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
);
let mask = cache.attn_mask(3, &Device::Cpu)?.unwrap();
assert_eq!(
mask.to_vec2::<u8>()?,
&[[0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
);
let t = Tensor::new(&[42.], &Device::Cpu)?;
let data = cache.append(&t)?;
assert_eq!(data.to_vec1::<f64>()?, [42., 4., 5., 6., 7., 8.]);
assert_eq!(cache.current_seq_len(), 23);
assert_eq!(cache.offset(), 1);
cache.reset();
}
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/ops.rs | candle-nn/tests/ops.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::{test_device, test_utils::to_vec3_round, Device, IndexOp, Result, Tensor};
/// Checks `softmax` along each axis of a 2x2x3 tensor against hand-computed
/// values, then checks that the fused `softmax_last_dim` matches the generic
/// axis-2 result.
///
/// The input to softmax is `tensor.log()`, so softmax(log(x)) simplifies to
/// x / sum(x) along the reduced axis — the fractions in the inline comments
/// below spell out those ratios.
fn softmax(device: &Device) -> Result<()> {
    let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
    let tensor = Tensor::new(data, device)?;
    // Softmax along each of the three axes in turn.
    let t0 = candle_nn::ops::softmax(&tensor.log()?, 0)?;
    let t1 = candle_nn::ops::softmax(&tensor.log()?, 1)?;
    let t2 = candle_nn::ops::softmax(&tensor.log()?, 2)?;
    assert_eq!(
        to_vec3_round(&t0, 4)?,
        &[
            // 3/5, 1/2, 4/11
            [[0.6, 0.5, 0.3636], [0.1111, 0.7143, 0.5294]],
            // 2/5, 1/2, 7/11
            [[0.4, 0.5, 0.6364], [0.8889, 0.2857, 0.4706]]
        ]
    );
    assert_eq!(
        to_vec3_round(&t1, 4)?,
        &[
            // 3/4, 1/6, 4/13
            [[0.75, 0.1667, 0.3077], [0.25, 0.8333, 0.6923]],
            // 2/10, 1/3, 7/15
            [[0.2, 0.3333, 0.4667], [0.8, 0.6667, 0.5333]]
        ]
    );
    assert_eq!(
        to_vec3_round(&t2, 4)?,
        &[
            // (3, 1, 4) / 8, (1, 5, 9) / 15
            [[0.375, 0.125, 0.5], [0.0667, 0.3333, 0.6]],
            // (2, 1, 7) / 10, (8, 2, 8) / 18
            [[0.2, 0.1, 0.7], [0.4444, 0.1111, 0.4444]]
        ]
    );
    // The specialized last-dim kernel must agree with the axis-2 result above.
    let t2 = candle_nn::ops::softmax_last_dim(&tensor.log()?)?;
    assert_eq!(
        to_vec3_round(&t2, 4)?,
        &[
            // (3, 1, 4) / 8, (1, 5, 9) / 15
            [[0.375, 0.125, 0.5], [0.0667, 0.3333, 0.6]],
            // (2, 1, 7) / 10, (8, 2, 8) / 18
            [[0.2, 0.1, 0.7], [0.4444, 0.1111, 0.4444]]
        ]
    );
    Ok(())
}
/// Checks the fused `rms_norm` and the reference `rms_norm_slow` against the
/// same hard-coded expected values (per-channel scale `alpha`), and checks
/// that the two implementations agree with each other.
fn rms_norm(device: &Device) -> Result<()> {
    let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
    let tensor = Tensor::new(data, device)?;
    let alpha = Tensor::new(&[1f32, 2f32, 3f32], device)?;
    let t = candle_nn::ops::rms_norm(&tensor, &alpha, 1e-5)?;
    assert_eq!(
        to_vec3_round(&t, 4)?,
        &[
            [[1.019, 0.6794, 4.0762], [0.1674, 1.6744, 4.521]],
            [[0.4714, 0.4714, 4.9497], [1.206, 0.603, 3.6181]]
        ]
    );
    // The slow reference implementation must yield the same rounded values...
    let t2 = candle_nn::ops::rms_norm_slow(&tensor, &alpha, 1e-5)?;
    assert_eq!(
        to_vec3_round(&t2, 4)?,
        &[
            [[1.019, 0.6794, 4.0762], [0.1674, 1.6744, 4.521]],
            [[0.4714, 0.4714, 4.9497], [1.206, 0.603, 3.6181]]
        ]
    );
    // ...and also agree with the fused kernel before rounding.
    let diff = (t - t2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
    assert!(diff < 1e-5);
    Ok(())
}
/// Compares `rms_norm` against `rms_norm_slow` on a seeded random
/// (24, 70, 64) input with a unit scale, requiring the maximum absolute
/// difference to stay below 1e-5.
fn rms_norml(device: &Device) -> Result<()> {
    use rand::{rngs::StdRng, Rng, SeedableRng};
    let (b_size, seq_len, head_dim) = (24, 70, 64);
    let el_count = b_size * seq_len * head_dim;
    // Fixed seed so the comparison is reproducible across runs.
    let mut rng = StdRng::seed_from_u64(299792458);
    let src: Vec<f32> = (0..el_count).map(|_| rng.random::<f32>()).collect();
    let tensor = Tensor::new(src, device)?.reshape((b_size, seq_len, head_dim))?;
    let alpha = Tensor::ones(head_dim, candle::DType::F32, device)?;
    let t = candle_nn::ops::rms_norm(&tensor, &alpha, 1e-5)?;
    let t2 = candle_nn::ops::rms_norm_slow(&tensor, &alpha, 1e-5)?;
    // Max absolute element-wise difference between the two implementations.
    let diff = (t - t2)?
        .abs()?
        .flatten_all()?
        .max(0)?
        .reshape(())?
        .to_vec0::<f32>()?;
    assert!(diff < 1e-5);
    Ok(())
}
/// Checks the fused `layer_norm` and the reference `layer_norm_slow` against
/// the same hard-coded expected values (scale `alpha`, shift `beta`), and
/// checks that the two implementations agree with each other.
fn layer_norm(device: &Device) -> Result<()> {
    let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
    let tensor = Tensor::new(data, device)?;
    let alpha = Tensor::new(&[1f32, 2f32, 3f32], device)?;
    let beta = Tensor::new(&[0.5f32, 0f32, -0.2f32], device)?;
    let t = candle_nn::ops::layer_norm(&tensor, &alpha, &beta, 1e-5)?;
    assert_eq!(
        to_vec3_round(&t, 4)?,
        &[
            [[0.7673, -2.6726, 3.0071], [-0.7247, 0.0, 3.4742]],
            [[-0.008, -1.778, 3.991], [1.2071, -2.8284, 1.9213]]
        ]
    );
    // The slow reference implementation must yield the same rounded values...
    let t2 = candle_nn::ops::layer_norm_slow(&tensor, &alpha, &beta, 1e-5)?;
    assert_eq!(
        to_vec3_round(&t2, 4)?,
        &[
            [[0.7673, -2.6726, 3.0071], [-0.7247, 0.0, 3.4742]],
            [[-0.008, -1.778, 3.991], [1.2071, -2.8284, 1.9213]]
        ]
    );
    // ...and also agree with the fused kernel before rounding.
    let diff = (t - t2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
    assert!(diff < 1e-5);
    Ok(())
}
/// Compares `layer_norm` against `layer_norm_slow` on a seeded random
/// (24, 70, 64) input with unit scale and zero shift, requiring the maximum
/// absolute difference to stay below 1e-5.
fn layer_norml(device: &Device) -> Result<()> {
    use rand::{rngs::StdRng, Rng, SeedableRng};
    let (b_size, seq_len, head_dim) = (24, 70, 64);
    let el_count = b_size * seq_len * head_dim;
    // Fixed seed so the comparison is reproducible across runs.
    let mut rng = StdRng::seed_from_u64(299792458);
    let src: Vec<f32> = (0..el_count).map(|_| rng.random::<f32>()).collect();
    let tensor = Tensor::new(src, device)?.reshape((b_size, seq_len, head_dim))?;
    let alpha = Tensor::ones(head_dim, candle::DType::F32, device)?;
    let beta = Tensor::zeros(head_dim, candle::DType::F32, device)?;
    let t = candle_nn::ops::layer_norm(&tensor, &alpha, &beta, 1e-5)?;
    let t2 = candle_nn::ops::layer_norm_slow(&tensor, &alpha, &beta, 1e-5)?;
    // Max absolute element-wise difference between the two implementations.
    let diff = (t - t2)?
        .abs()?
        .flatten_all()?
        .max(0)?
        .reshape(())?
        .to_vec0::<f32>()?;
    assert!(diff < 1e-5);
    Ok(())
}
#[test]
fn softmax_numerical_stability() -> Result<()> {
    // A naive exp(x)/sum(exp(x)) would overflow on 1234 in f32; the result
    // must still come out finite and exact.
    let device = &Device::Cpu;
    let input = Tensor::new(&[1234f32, 0.], device)?;
    let probs = candle_nn::ops::softmax(&input, 0)?.to_vec1::<f32>()?;
    assert_eq!(probs, &[1f32, 0.]);
    Ok(())
}
/// Checks the interleaved rope kernel `rope_i` against `rope_i_slow` on seeded
/// random data, then checks that a batched (3d) cos/sin table is equivalent to
/// applying each batch entry's 2d table separately.
///
/// NOTE: the expected values depend on the exact order of the `rng.random()`
/// draws below (src, then cos, then sin, ...); do not reorder them.
fn ropei(device: &Device) -> Result<()> {
    use rand::{rngs::StdRng, Rng, SeedableRng};
    let (b_size, num_head, seq_len, head_dim) = (2, 5, 10, 16);
    let el_count = b_size * num_head * seq_len * head_dim;
    let mut rng = StdRng::seed_from_u64(299792458);
    let src: Vec<f32> = (0..el_count).map(|_| rng.random::<f32>()).collect();
    let cos: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let sin: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let src = Tensor::from_vec(src, (b_size, num_head, seq_len, head_dim), device)?;
    let cos = Tensor::from_vec(cos, (seq_len, head_dim / 2), device)?;
    let sin = Tensor::from_vec(sin, (seq_len, head_dim / 2), device)?;
    let rope1 = candle_nn::rotary_emb::rope_i(&src, &cos, &sin)?;
    let rope2 = candle_nn::rotary_emb::rope_i_slow(&src, &cos, &sin)?;
    let sum_diff = (rope1 - rope2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
    // Exact equality is expected on cpu, a small tolerance elsewhere.
    if device.is_cpu() {
        assert_eq!(sum_diff, 0.);
    } else {
        assert!(sum_diff < 1e-4);
    }
    // Test with a 3d cos/sin
    let cos2: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let sin2: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let cos2 = Tensor::from_vec(cos2, (seq_len, head_dim / 2), device)?;
    let sin2 = Tensor::from_vec(sin2, (seq_len, head_dim / 2), device)?;
    // Per-batch application with each table must match one batched call with
    // the stacked (3d) tables.
    let rope1 = candle_nn::rotary_emb::rope_i(&src.i(0..1)?, &cos, &sin)?;
    let rope2 = candle_nn::rotary_emb::rope_i(&src.i(1..2)?, &cos2, &sin2)?;
    let both_cos = Tensor::stack(&[cos, cos2], 0)?;
    let both_sin = Tensor::stack(&[sin, sin2], 0)?;
    let both_rope = candle_nn::rotary_emb::rope_i(&src, &both_cos, &both_sin)?;
    let both_rope2 = Tensor::cat(&[rope1, rope2], 0)?;
    let sum_diff = (both_rope - both_rope2)?
        .abs()?
        .sum_all()?
        .to_vec0::<f32>()?;
    assert_eq!(sum_diff, 0.);
    Ok(())
}
/// Checks the non-interleaved rope kernel `rope` against `rope_slow` on seeded
/// random data, then checks that a batched (3d) cos/sin table is equivalent to
/// applying each batch entry's 2d table separately.
///
/// NOTE: the expected values depend on the exact order of the `rng.random()`
/// draws below; do not reorder them.
fn rope(device: &Device) -> Result<()> {
    use rand::{rngs::StdRng, Rng, SeedableRng};
    let (b_size, num_head, seq_len, head_dim) = (2, 5, 10, 16);
    let el_count = b_size * num_head * seq_len * head_dim;
    let mut rng = StdRng::seed_from_u64(299792458);
    let src: Vec<f32> = (0..el_count).map(|_| rng.random::<f32>()).collect();
    let cos: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let sin: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let src = Tensor::from_vec(src, (b_size, num_head, seq_len, head_dim), device)?;
    let cos = Tensor::from_vec(cos, (seq_len, head_dim / 2), device)?;
    let sin = Tensor::from_vec(sin, (seq_len, head_dim / 2), device)?;
    let rope1 = candle_nn::rotary_emb::rope(&src, &cos, &sin)?;
    let rope2 = candle_nn::rotary_emb::rope_slow(&src, &cos, &sin)?;
    let sum_diff = (rope1 - rope2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
    // Exact equality is expected on cpu, a small tolerance elsewhere.
    if device.is_cpu() {
        assert_eq!(sum_diff, 0.);
    } else {
        assert!(sum_diff < 1e-4);
    }
    // Test with a 3d cos/sin
    let cos2: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let sin2: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let cos2 = Tensor::from_vec(cos2, (seq_len, head_dim / 2), device)?;
    let sin2 = Tensor::from_vec(sin2, (seq_len, head_dim / 2), device)?;
    // Per-batch application with each table must match one batched call with
    // the stacked (3d) tables.
    let rope1 = candle_nn::rotary_emb::rope(&src.i(0..1)?, &cos, &sin)?;
    let rope2 = candle_nn::rotary_emb::rope(&src.i(1..2)?, &cos2, &sin2)?;
    let both_cos = Tensor::stack(&[cos, cos2], 0)?;
    let both_sin = Tensor::stack(&[sin, sin2], 0)?;
    let both_rope = candle_nn::rotary_emb::rope(&src, &both_cos, &both_sin)?;
    let both_rope2 = Tensor::cat(&[rope1, rope2], 0)?;
    let sum_diff = (both_rope - both_rope2)?
        .abs()?
        .sum_all()?
        .to_vec0::<f32>()?;
    assert_eq!(sum_diff, 0.);
    Ok(())
}
/// Checks the `rope_thd` kernel, which takes the (batch, seq, head, dim)
/// layout, against `rope_slow` (which takes (batch, head, seq, dim)) by
/// transposing around each call; also checks batched (3d) cos/sin support.
///
/// NOTE: the expected values depend on the exact order of the `rng.random()`
/// draws below; do not reorder them.
fn rope_thd(device: &Device) -> Result<()> {
    use rand::{rngs::StdRng, Rng, SeedableRng};
    let (b_size, num_head, seq_len, head_dim) = (2, 5, 10, 16);
    let el_count = b_size * num_head * seq_len * head_dim;
    let mut rng = StdRng::seed_from_u64(299792458);
    let src: Vec<f32> = (0..el_count).map(|_| rng.random::<f32>()).collect();
    let cos: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let sin: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let src = Tensor::from_vec(src, (b_size, num_head, seq_len, head_dim), device)?;
    let cos = Tensor::from_vec(cos, (seq_len, head_dim / 2), device)?;
    let sin = Tensor::from_vec(sin, (seq_len, head_dim / 2), device)?;
    // Transpose into (b, seq, head, dim) for the kernel, then back for the
    // comparison against the slow path.
    let rope1 = {
        let src = src.transpose(1, 2)?.contiguous()?;
        candle_nn::rotary_emb::rope_thd(&src, &cos, &sin)?.transpose(1, 2)?
    };
    let rope2 = candle_nn::rotary_emb::rope_slow(&src, &cos, &sin)?;
    let sum_diff = (rope1 - rope2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
    // Exact equality is expected on cpu, a small tolerance elsewhere.
    if device.is_cpu() {
        assert_eq!(sum_diff, 0.);
    } else {
        assert!(sum_diff < 1e-4);
    }
    // Test with a 3d cos/sin
    let cos2: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let sin2: Vec<f32> = (0..seq_len * head_dim / 2)
        .map(|_| rng.random::<f32>())
        .collect();
    let cos2 = Tensor::from_vec(cos2, (seq_len, head_dim / 2), device)?;
    let sin2 = Tensor::from_vec(sin2, (seq_len, head_dim / 2), device)?;
    // Per-batch application with each table must match one batched call with
    // the stacked (3d) tables.
    let rope1 = {
        let src = src.transpose(1, 2)?.contiguous()?;
        candle_nn::rotary_emb::rope_thd(&src.i(0..1)?, &cos, &sin)?
    };
    let rope2 = {
        let src = src.transpose(1, 2)?.contiguous()?;
        candle_nn::rotary_emb::rope_thd(&src.i(1..2)?, &cos2, &sin2)?
    };
    let both_cos = Tensor::stack(&[cos, cos2], 0)?;
    let both_sin = Tensor::stack(&[sin, sin2], 0)?;
    let both_rope = {
        let src = src.transpose(1, 2)?.contiguous()?;
        candle_nn::rotary_emb::rope_thd(&src, &both_cos, &both_sin)?
    };
    let both_rope2 = Tensor::cat(&[rope1, rope2], 0)?;
    let sum_diff = (both_rope - both_rope2)?
        .abs()?
        .sum_all()?
        .to_vec0::<f32>()?;
    assert_eq!(sum_diff, 0.);
    Ok(())
}
/// Compares the `sigmoid` op against the closed form 1 / (1 + e^(-x)); the two
/// must match bit-for-bit on the tested device.
fn sigmoid(device: &Device) -> Result<()> {
    let values = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
    let xs = Tensor::new(values, device)?;
    let fast = candle_nn::ops::sigmoid(&xs)?;
    let reference = (1. / (1. + xs.neg()?.exp()?)?)?;
    let total_diff = (fast - reference)?.abs()?.sum_all()?.to_vec0::<f32>()?;
    assert_eq!(total_diff, 0.);
    Ok(())
}
// Expand each check above into per-backend test cases (cpu / cuda / metal)
// via the `test_device!` macro.
test_device!(ropei, ropei_cpu, ropei_gpu, ropei_metal);
test_device!(rope, rope_cpu, rope_gpu, rope_metal);
test_device!(rope_thd, rope_thd_cpu, rope_thd_gpu, rope_thd_metal);
test_device!(softmax, softmax_cpu, softmax_gpu, softmax_metal);
test_device!(rms_norm, rms_norm_cpu, rms_norm_gpu, rms_norm_metal);
test_device!(rms_norml, rms_norml_cpu, rms_norml_gpu, rms_norml_metal);
test_device!(layer_norm, ln_cpu, ln_gpu, ln_metal);
test_device!(layer_norml, lnl_cpu, lnl_gpu, lnl_metal);
test_device!(sigmoid, sigmoid_cpu, sigmoid_gpu, sigmoid_metal);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/tests/cpu_flash_attn.rs | candle-nn/tests/cpu_flash_attn.rs | use candle::{DType, Device, Result, Tensor};
use candle_nn::cpu_flash_attention::run_flash_attn_cpu;
#[test]
fn cpu_flash_attn() -> Result<()> {
    // Tiny problem: batch 1, 1 head, sequence length 2, head dim 4.
    let (batch, seq, heads, dim) = (1, 2, 1, 4);
    let scale = 1.0f32 / (dim as f32).sqrt();
    let q = Tensor::randn(0f32, 1f32, (batch, heads, seq, dim), &Device::Cpu)?;
    let k = Tensor::randn(0f32, 1f32, (batch, heads, seq, dim), &Device::Cpu)?;
    let v = Tensor::randn(0f32, 1f32, (batch, heads, seq, dim), &Device::Cpu)?;
    // Reference SDPA, computed in the (b, h, s, d) layout.
    let reference = {
        let scores = (q.clone() * scale as f64)?.matmul(&k.clone().t()?)?;
        let probs =
            candle_nn::ops::softmax_last_dim(&scores.to_dtype(DType::F32)?)?.to_dtype(q.dtype())?;
        probs.matmul(&v.clone())?
    };
    // The flash attention kernel expects the (b, s, h, d) layout instead.
    let flash = run_flash_attn_cpu::<f32>(
        &q.transpose(1, 2)?,
        &k.transpose(1, 2)?,
        &v.transpose(1, 2)?,
        None,
        scale,
        None,
        None,
    )?;
    let flash_vals: Vec<f32> = flash.flatten_all()?.to_vec1()?;
    let ref_vals: Vec<f32> = reference.flatten_all()?.to_vec1()?;
    for (got, want) in flash_vals.iter().zip(ref_vals.iter()) {
        assert!((got - want).abs() < 1e-5, "{got} {want}");
    }
    Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/benches/bench_main.rs | candle-nn/benches/bench_main.rs | mod benchmarks;
use criterion::criterion_main;
// Register all criterion benchmark groups as the benchmark binary entry point.
criterion_main!(
    benchmarks::softmax::benches,
    benchmarks::layer_norm::benches,
    benchmarks::conv::benches
);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/benches/benchmarks/layer_norm.rs | candle-nn/benches/benchmarks/layer_norm.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle::{DType, Device, Module, Tensor};
use candle_nn::LayerNorm;
use criterion::{criterion_group, Criterion};
use std::hint::black_box;
use std::time::Instant;
/// One benchmark iteration: build a `LayerNorm` from the given parameters and
/// run a forward pass, discarding the result.
fn run(input: &Tensor, weight: &Tensor, bias: &Tensor) {
    let layer = LayerNorm::new(weight.clone(), bias.clone(), 1e-5);
    let _ = layer.forward(input);
}
const B: usize = 1;
const M: usize = 1024;
const K: usize = 1024;
/// Benchmarks a `LayerNorm` forward pass over `B * M * K` elements of the
/// given dtype.
///
/// NOTE(review): `weight`, `bias` and `input` are all flat tensors of
/// `B * M * K` elements (there is no reshape to (B, M, K)), so this times a
/// layer norm over one huge last dimension — confirm that's the intended
/// workload.
fn run_layer_norm_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
    let elements = B * M * K;
    let weight = Tensor::arange(0.0, elements as f32, device)
        .unwrap()
        .to_dtype(dtype)
        .unwrap();
    let bias = weight.ones_like().unwrap();
    let input = weight.ones_like().unwrap();
    let mut group = c.benchmark_group(device.bench_name(name));
    group.bench_function("iter", move |b| {
        b.iter_custom(|iters| {
            let start = Instant::now();
            for _i in 0..iters {
                run(black_box(&input), black_box(&weight), black_box(&bias));
            }
            // Wait for queued device work before reading the clock.
            device.sync().unwrap();
            start.elapsed()
        })
    });
    group.finish();
}
/// Entry point: run the layer-norm benchmark on every available device for
/// f32, bf16 and f16.
fn criterion_benchmark(c: &mut Criterion) {
    let handler = BenchDeviceHandler::new().unwrap();
    for device in handler.devices {
        for (dtype, name) in [
            (DType::F32, "layer_norm_f32"),
            (DType::BF16, "layer_norm_bf16"),
            (DType::F16, "layer_norm_f16"),
        ] {
            run_layer_norm_benchmark(c, &device, dtype, name);
        }
    }
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/benches/benchmarks/softmax.rs | candle-nn/benches/benchmarks/softmax.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle::{DType, Device, Tensor};
use candle_nn::ops::softmax_last_dim;
use criterion::Throughput;
use criterion::{criterion_group, Criterion};
use std::hint::black_box;
use std::time::Instant;
/// One benchmark iteration: apply the fused last-dim softmax to `input`,
/// discarding the result.
fn run(input: &Tensor) {
    softmax_last_dim(input).unwrap();
}
const B: usize = 1;
const M: usize = 1024;
const K: usize = 1024;
/// Benchmarks the fused last-dim softmax on a `B x M x K` tensor of the given
/// dtype, reporting throughput in input bytes per second.
fn run_softmax_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
    let elements = B * M * K;
    // Values in [-1000, 1000] exercise the max-subtraction stabilization path.
    let input = Tensor::rand(-1000.0f32, 1000.0f32, (B, M, K), device)
        .unwrap()
        .to_dtype(dtype)
        .unwrap();
    // Throughput is fed to `Throughput::Bytes`, so compute input bytes here
    // (the previous local was misleadingly named `flops`).
    let bytes = elements * dtype.size_in_bytes();
    let mut group = c.benchmark_group(device.bench_name(name));
    group.throughput(Throughput::Bytes(bytes as u64));
    group.bench_function("iter", move |b| {
        b.iter_custom(|iters| {
            let start = Instant::now();
            for _i in 0..iters {
                run(black_box(&input));
            }
            // Wait for queued device work before reading the clock.
            device.sync().unwrap();
            start.elapsed()
        })
    });
    group.finish();
}
/// Entry point: run the softmax benchmark on every available device for f32,
/// bf16 and f16.
fn criterion_benchmark(c: &mut Criterion) {
    let handler = BenchDeviceHandler::new().unwrap();
    for device in handler.devices {
        for (dtype, name) in [
            (DType::F32, "softmax_f32"),
            (DType::BF16, "softmax_bf16"),
            (DType::F16, "softmax_f16"),
        ] {
            run_softmax_benchmark(c, &device, dtype, name);
        }
    }
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/benches/benchmarks/mod.rs | candle-nn/benches/benchmarks/mod.rs | pub(crate) mod conv;
pub(crate) mod layer_norm;
pub(crate) mod softmax;
use candle::{Device, Result};
/// Device helpers used by the benchmarks: waiting for pending work and
/// deriving a backend-specific benchmark name.
pub(crate) trait BenchDevice {
    /// Blocks until all queued work on the device has completed.
    fn sync(&self) -> Result<()>;
    /// Prefixes `name` with an identifier for this device/backend.
    fn bench_name<S: Into<String>>(&self, name: S) -> String;
}
impl BenchDevice for Device {
    /// Waits for the device to finish queued work. The cpu arm returns
    /// immediately; the cuda/metal arms delegate to the backend and panic if
    /// the matching cargo feature was not compiled in.
    fn sync(&self) -> Result<()> {
        match self {
            Device::Cpu => Ok(()),
            Device::Cuda(device) => {
                #[cfg(feature = "cuda")]
                {
                    use candle::backend::BackendDevice;
                    return Ok(device.synchronize()?);
                }
                #[cfg(not(feature = "cuda"))]
                panic!("Cuda device without cuda feature enabled: {device:?}")
            }
            Device::Metal(device) => {
                #[cfg(feature = "metal")]
                return device.wait_until_completed();
                #[cfg(not(feature = "metal"))]
                panic!("Metal device without metal feature enabled: {device:?}")
            }
        }
    }
    /// Prefixes `name` with the backend in use; for the cpu the prefix also
    /// records which accelerated math feature (accelerate/mkl) is active.
    fn bench_name<S: Into<String>>(&self, name: S) -> String {
        match self {
            Device::Cpu => {
                let cpu_type = if cfg!(feature = "accelerate") {
                    "accelerate"
                } else if cfg!(feature = "mkl") {
                    "mkl"
                } else {
                    "cpu"
                };
                format!("{}_{}", cpu_type, name.into())
            }
            Device::Cuda(_) => format!("cuda_{}", name.into()),
            Device::Metal(_) => format!("metal_{}", name.into()),
        }
    }
}
/// Holds the list of devices the benchmarks should run on.
struct BenchDeviceHandler {
    devices: Vec<Device>,
}
impl BenchDeviceHandler {
pub fn new() -> Result<Self> {
let mut devices = Vec::new();
if cfg!(feature = "metal") {
devices.push(Device::new_metal(0)?);
} else if cfg!(feature = "cuda") {
devices.push(Device::new_cuda(0)?);
}
devices.push(Device::Cpu);
Ok(Self { devices })
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/benches/benchmarks/conv.rs | candle-nn/benches/benchmarks/conv.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle::{DType, Device, Module, Tensor};
use candle_nn::{Conv2d, Conv2dConfig};
use criterion::{criterion_group, Criterion};
use std::hint::black_box;
use std::time::Instant;
const B: usize = 1;
const C: usize = 1;
/// One benchmark iteration: build a `Conv2d` layer and run a forward pass,
/// discarding the result.
fn run(input: Tensor, weight: Tensor, bias: Option<Tensor>, config: Conv2dConfig) {
    let conv = Conv2d::new(weight, bias, config);
    conv.forward(&input).unwrap();
}
/// Benchmarks a 2d convolution with a `k_size x k_size` kernel over a
/// `B x C x m x m` input of the given dtype, with or without a bias term.
fn run_conv2d_benchmark(
    c: &mut Criterion,
    device: &Device,
    dtype: DType,
    k_size: usize,
    m: usize,
    bias: bool,
) {
    // `Tensor::ones` already produces the requested dtype; the previous
    // redundant `.to_dtype(dtype)` round-trip has been dropped.
    let weight = Tensor::ones((1, C, k_size, k_size), dtype, device).unwrap();
    // NOTE(review): the bias is sized by the input width `m`, while the weight
    // has a single output channel — confirm this matches the intended setup.
    let bias_t = if bias {
        Some(Tensor::zeros(m, dtype, device).unwrap())
    } else {
        None
    };
    let input = Tensor::ones((B, C, m, m), dtype, device).unwrap();
    let name = format!(
        "conv2d_{dtype:?}_i{m}_k{k_size}x{k_size}_{}",
        if bias { "b" } else { "nb" }
    );
    let mut group = c.benchmark_group(device.bench_name(name));
    group.bench_function("iter", move |b| {
        b.iter_custom(|iters| {
            let start = Instant::now();
            for _i in 0..iters {
                run(
                    black_box(input.clone()),
                    black_box(weight.clone()),
                    black_box(bias_t.clone()),
                    Default::default(),
                );
            }
            // Wait for queued device work before reading the clock.
            device.sync().unwrap();
            start.elapsed()
        })
    });
    group.finish();
}
/// Entry point: run a fixed grid of conv2d configurations on every available
/// device.
fn criterion_benchmark(c: &mut Criterion) {
    let handler = BenchDeviceHandler::new().unwrap();
    // (dtype, kernel size, input size, with bias)
    let configs = [
        (DType::F32, 3, 128, true),
        (DType::F32, 1, 128, false),
        (DType::F32, 5, 128, false),
        (DType::F32, 3, 512, false),
        (DType::F16, 3, 128, true),
        (DType::F16, 1, 128, false),
        (DType::F16, 5, 128, false),
        (DType::F16, 5, 512, false),
    ];
    for device in handler.devices {
        for (dtype, k_size, m, with_bias) in configs {
            run_conv2d_benchmark(c, &device, dtype, k_size, m, with_bias);
        }
    }
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/examples/basic_optimizer.rs | candle-nn/examples/basic_optimizer.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::{DType, Device, Result, Tensor};
use candle_nn::{linear, AdamW, Linear, Module, Optimizer, ParamsAdamW, VarBuilder, VarMap};
/// Builds a tiny regression dataset: targets come from the fixed affine map
/// y = [3, 1] . x - 2 applied to four hand-picked 2d sample points.
fn gen_data() -> Result<(Tensor, Tensor)> {
    let true_w = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?;
    let true_b = Tensor::new(-2f32, &Device::Cpu)?;
    let generator = Linear::new(true_w, Some(true_b));
    let xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?;
    let ys = generator.forward(&xs)?;
    Ok((xs, ys))
}
/// Recovers the generating coefficients by fitting a one-output linear layer
/// with AdamW on the squared-error loss, printing the loss at every step.
fn main() -> Result<()> {
    let (xs, ys) = gen_data()?;
    // Track the trainable variables in a varmap so the optimizer can see them.
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &Device::Cpu);
    let model = linear(2, 1, vb.pp("linear"))?;
    let mut optimizer = AdamW::new(
        varmap.all_vars(),
        ParamsAdamW {
            lr: 0.1,
            ..Default::default()
        },
    )?;
    for step in 0..10000 {
        let predictions = model.forward(&xs)?;
        let loss = predictions.sub(&ys)?.sqr()?.sum_all()?;
        optimizer.backward_step(&loss)?;
        println!("{step} {}", loss.to_vec0::<f32>()?);
    }
    Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-nn/examples/cpu_benchmarks.rs | candle-nn/examples/cpu_benchmarks.rs | /// This example contains some simple benchmarks so that it's easy to run them in perf etc.
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::quantized::GgmlType;
use candle::{CpuStorage, Device, Layout, Module, Result, Shape, Tensor, D};
use clap::{Parser, Subcommand};
const CHECK_CONV2D: bool = false;
/// A micro-benchmark: `preprocess` builds the inputs once (outside the timed
/// loop), `run_one` is the timed operation, `ITERS` the default repeat count.
trait Benchmark {
    /// Input data built once before timing starts.
    type PreProcessData;
    /// Result of a single timed run.
    type RunResult;
    /// Builds the benchmark inputs.
    fn preprocess() -> Result<Self::PreProcessData>;
    /// Runs the operation under test once.
    fn run_one(_: &Self::PreProcessData) -> Result<Self::RunResult>;
    /// Default number of timed iterations.
    const ITERS: usize;
}
/// Parameters of an im2col expansion for a 2d convolution.
struct Im2Col {
    // Kernel height.
    h_k: usize,
    // Kernel width.
    w_k: usize,
    // Convolution stride (same in both spatial dimensions).
    stride: usize,
    // Kernel dilation factor.
    dilation: usize,
    // Zero padding applied on each spatial border.
    padding: usize,
}
impl Im2Col {
    /// Output spatial size (height, width) of the convolution for an input of
    /// size `h` x `w`, using the standard convolution arithmetic formula.
    fn hw_out(&self, h: usize, w: usize) -> (usize, usize) {
        let out_size = |size: usize, k: usize| {
            (size + 2 * self.padding - self.dilation * (k - 1) - 1) / self.stride + 1
        };
        (out_size(h, self.h_k), out_size(w, self.w_k))
    }
}
impl candle::CustomOp1 for Im2Col {
    fn name(&self) -> &'static str {
        "im2col"
    }
    /// CPU forward pass: expands a (b, c, h, w) f32 input into a
    /// (b * h_out * w_out, c * h_k * w_k) matrix so the convolution can then
    /// be computed with a single matmul against the flattened kernel.
    fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
        let &Self {
            h_k,
            w_k,
            stride,
            dilation,
            padding,
        } = self;
        let (b, c, h, w) = layout.shape().dims4()?;
        let (h_out, w_out) = self.hw_out(h, w);
        let slice = storage.as_slice::<f32>()?;
        let src = &slice[layout.start_offset()..];
        // The destination starts zeroed; positions that fall in the padding
        // region are skipped below and therefore stay 0.
        let mut dst = vec![0f32; b * h_out * w_out * c * h_k * w_k];
        // Source strides let this handle non-contiguous input layouts.
        let (src_s0, src_s1, src_s2, src_s3) = {
            let s = layout.stride();
            (s[0], s[1], s[2], s[3])
        };
        // TODO: provide specialized kernels for the common use cases.
        // - h_k = w_k = 1
        // - padding = 0
        // - stride = 1
        // - dilation = 1
        for b_idx in 0..b {
            let src_idx = b_idx * src_s0;
            let dst_idx = b_idx * h_out * w_out * c * h_k * w_k;
            for h_idx in 0..h_out {
                let dst_idx = dst_idx + h_idx * w_out * c * h_k * w_k;
                for w_idx in 0..w_out {
                    let dst_idx = dst_idx + w_idx * c * h_k * w_k;
                    for c_idx in 0..c {
                        let dst_idx = dst_idx + c_idx * h_k * w_k;
                        let src_idx = c_idx * src_s1 + src_idx;
                        for h_k_idx in 0..h_k {
                            // Source row of this kernel tap; indices are kept
                            // padding-shifted until the bounds check passes.
                            let src_h = h_idx * stride + h_k_idx * dilation;
                            if padding != 0 && (src_h < padding || src_h >= h + padding) {
                                continue;
                            }
                            let src_h = src_h - padding;
                            let src_idx = src_idx + src_h * src_s2;
                            let dst_idx = dst_idx + h_k_idx * w_k;
                            for w_k_idx in 0..w_k {
                                let src_w = w_idx * stride + w_k_idx * dilation;
                                if padding != 0 && (src_w < padding || src_w >= w + padding) {
                                    continue;
                                }
                                let src_w = src_w - padding;
                                let src_idx = src_idx + src_w * src_s3;
                                let dst_idx = dst_idx + w_k_idx;
                                dst[dst_idx] = src[src_idx]
                            }
                        }
                    }
                }
            }
        }
        let storage = candle::WithDType::to_cpu_storage_owned(dst);
        Ok((storage, (b * h_out * w_out, c * h_k * w_k).into()))
    }
}
// Conv1d example as used in whisper.
struct Conv1d;
impl Benchmark for Conv1d {
    type PreProcessData = (Tensor, Tensor);
    type RunResult = Tensor;
    /// Whisper-sized input (1, 384, 3000) with a (384, 384, 3) kernel.
    fn preprocess() -> Result<Self::PreProcessData> {
        let input = Tensor::randn(0f32, 1., (1, 384, 3000), &Device::Cpu)?;
        let kernel = Tensor::randn(0f32, 1., (384, 384, 3), &Device::Cpu)?;
        Ok((input, kernel))
    }
    fn run_one(data: &Self::PreProcessData) -> Result<Self::RunResult> {
        let (input, kernel) = data;
        input.conv1d(kernel, 0, 1, 1, 1)
    }
    const ITERS: usize = 5;
}
// Conv2d example as used in stable-diffusion.
struct Conv2d;
impl Benchmark for Conv2d {
    type PreProcessData = (Tensor, Tensor);
    type RunResult = Tensor;
    /// Stable-diffusion-sized activation (2, 320, 96, 96) with a 3x3 kernel.
    fn preprocess() -> Result<Self::PreProcessData> {
        let input = Tensor::randn(0f32, 1., (2, 320, 96, 96), &Device::Cpu)?;
        let kernel = Tensor::randn(0f32, 1., (320, 320, 3, 3), &Device::Cpu)?;
        Ok((input, kernel))
    }
    fn run_one(data: &Self::PreProcessData) -> Result<Self::RunResult> {
        let (input, kernel) = data;
        input.conv2d(kernel, 0, 1, 1, 1)
    }
    const ITERS: usize = 5;
}
// Conv2d example as used in stable-diffusion, im2col implementation.
struct Conv2dIm2Col;
impl Benchmark for Conv2dIm2Col {
    type PreProcessData = (Tensor, Tensor);
    type RunResult = Tensor;
    fn preprocess() -> Result<Self::PreProcessData> {
        let inp = Tensor::randn(0f32, 1., (2, 320, 96, 96), &Device::Cpu)?;
        let w = Tensor::randn(0f32, 1., (320, 320, 3, 3), &Device::Cpu)?;
        Ok((inp, w))
    }
    /// Computes the convolution as im2col + matmul, then reshapes the result
    /// back to the usual (b, c_out, h_out, w_out) layout.
    fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
        // d.0.conv2d(&d.1, 0, 1, 1, 1)
        let (b, _, h, w) = d.0.dims4()?;
        let (_, _, h_k, w_k) = d.1.dims4()?;
        let op = Im2Col {
            h_k,
            w_k,
            stride: 1,
            dilation: 1,
            padding: 0,
        };
        let (h_out, w_out) = op.hw_out(h, w);
        let col = d.0.apply_op1_no_bwd(&op)?;
        let res = col.matmul(&d.1.flatten_from(1)?.t()?)?;
        let res = res
            .reshape((b, h_out, w_out, ()))?
            .permute((0, 3, 1, 2))?
            .contiguous()?;
        // When the compile-time flag is set, compare against the built-in
        // conv2d and print the mean squared difference.
        if CHECK_CONV2D {
            let res2 = d.0.conv2d(&d.1, op.padding, op.stride, op.dilation, 1);
            let diff = (&res - res2)?.sqr()?.mean_all()?;
            println!("{diff}");
        }
        Ok(res)
    }
    const ITERS: usize = 5;
}
struct MatMul;
impl Benchmark for MatMul {
    type PreProcessData = (Tensor, Tensor);
    type RunResult = Tensor;
    /// Square 1024 x 1024 random matrix product.
    fn preprocess() -> Result<Self::PreProcessData> {
        let lhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?;
        let rhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?;
        Ok((lhs, rhs))
    }
    fn run_one(data: &Self::PreProcessData) -> Result<Self::RunResult> {
        let (lhs, rhs) = data;
        lhs.matmul(rhs)
    }
    const ITERS: usize = 100;
}
struct MatVec;
impl Benchmark for MatVec {
    type PreProcessData = (Tensor, Tensor);
    type RunResult = Tensor;
    /// 4096 x 4096 matrix times a 4096 x 1 column vector.
    fn preprocess() -> Result<Self::PreProcessData> {
        let matrix = Tensor::randn(0f32, 1., (1024 * 4, 1024 * 4), &Device::Cpu)?;
        let vector = Tensor::randn(0f32, 1., (1024 * 4, 1), &Device::Cpu)?;
        Ok((matrix, vector))
    }
    fn run_one(data: &Self::PreProcessData) -> Result<Self::RunResult> {
        let (matrix, vector) = data;
        matrix.matmul(vector)
    }
    const ITERS: usize = 100;
}
// This benchmark is similar to:
// https://github.com/ggerganov/llama.cpp/blob/master/examples/benchmark/benchmark-matmult.cpp
struct QMatMul;
impl Benchmark for QMatMul {
    type PreProcessData = (candle::quantized::QMatMul, Tensor);
    type RunResult = Tensor;
    /// Builds an all-zero 4-bit-quantized (4096, 11008) weight and a random
    /// (128, 11008) f32 activation.
    fn preprocess() -> Result<Self::PreProcessData> {
        // Each BlockQ4_0 packs 32 weights, hence the division by 32.
        let zeros = vec![candle::quantized::k_quants::BlockQ4_0::zeros(); 4096 * 11008 / 32];
        let mm = candle::quantized::QTensor::new(
            candle::quantized::QStorage::Cpu(Box::new(zeros)),
            (4096, 11008),
        )?;
        let mm = candle::quantized::QMatMul::from_qtensor(mm)?;
        let arg = Tensor::randn(0f32, 1., (128, 11008), &Device::Cpu)?;
        Ok((mm, arg))
    }
    fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
        d.0.forward(&d.1)
    }
    const ITERS: usize = 100;
}
struct Cat;
impl Benchmark for Cat {
    type PreProcessData = (Tensor, Tensor);
    type RunResult = Tensor;
    /// Concatenates a (1, 32, 2000, 128) tensor with a single-step
    /// (1, 32, 1, 128) tensor along dimension 2.
    fn preprocess() -> Result<Self::PreProcessData> {
        let large = Tensor::randn(0f32, 1., (1, 32, 2000, 128), &Device::Cpu)?;
        let small = Tensor::randn(0f32, 1., (1, 32, 1, 128), &Device::Cpu)?;
        Ok((large, small))
    }
    fn run_one(data: &Self::PreProcessData) -> Result<Self::RunResult> {
        let (large, small) = data;
        Tensor::cat(&[large, small], 2)
    }
    const ITERS: usize = 1000;
}
struct Softmax;
impl Benchmark for Softmax {
    type PreProcessData = Tensor;
    type RunResult = Tensor;
    fn preprocess() -> Result<Self::PreProcessData> {
        // Typical whisper tiny size.
        Tensor::randn(0f32, 1., (1, 6, 200, 1500), &Device::Cpu)
    }
    /// Times the generic softmax along the last dimension.
    fn run_one(data: &Self::PreProcessData) -> Result<Self::RunResult> {
        candle_nn::ops::softmax(data, D::Minus1)
    }
    const ITERS: usize = 100;
}
struct SoftmaxLastDim;
impl Benchmark for SoftmaxLastDim {
    type PreProcessData = Tensor;
    type RunResult = Tensor;
    fn preprocess() -> Result<Self::PreProcessData> {
        // Typical whisper tiny size.
        Tensor::randn(0f32, 1., (1, 6, 200, 1500), &Device::Cpu)
    }
    /// Times the specialized last-dim softmax kernel.
    fn run_one(data: &Self::PreProcessData) -> Result<Self::RunResult> {
        candle_nn::ops::softmax_last_dim(data)
    }
    const ITERS: usize = 100;
}
/// Runs benchmark `B`: builds its inputs once, times `iters` calls of the
/// operation (defaulting to `B::ITERS`) and prints the mean duration per call.
fn run<B: Benchmark>(iters: Option<usize>) -> Result<()> {
    use std::hint::black_box;
    let iters = iters.unwrap_or(B::ITERS);
    let data = B::preprocess()?;
    let start = std::time::Instant::now();
    for _ in 0..iters {
        // black_box keeps the optimizer from eliding the benchmarked call.
        let _ = black_box(B::run_one(black_box(&data))?);
    }
    println!("{:?}", start.elapsed() / iters as u32);
    Ok(())
}
/// The set of benchmarks selectable as a command-line subcommand.
#[derive(Subcommand, Debug, Clone)]
enum Task {
    Conv1d,
    Conv2d,
    Conv2dIm2Col,
    Matmul,
    Matvec,
    Qmatmul,
    Softmax,
    SoftmaxLastDim,
    Cat,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
    /// The benchmark to be run.
    #[command(subcommand)]
    task: Task,
    /// Overrides the benchmark's default number of timed iterations.
    #[arg(long)]
    iters: Option<usize>,
}
fn main() -> Result<()> {
let args = Args::parse();
match args.task {
Task::Conv1d => run::<Conv1d>(args.iters)?,
Task::Conv2d => run::<Conv2d>(args.iters)?,
Task::Conv2dIm2Col => run::<Conv2dIm2Col>(args.iters)?,
Task::Matmul => run::<MatMul>(args.iters)?,
Task::Matvec => run::<MatVec>(args.iters)?,
Task::Softmax => run::<Softmax>(args.iters)?,
Task::SoftmaxLastDim => run::<SoftmaxLastDim>(args.iters)?,
Task::Qmatmul => run::<QMatMul>(args.iters)?,
Task::Cat => run::<Cat>(args.iters)?,
}
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-kernels/build.rs | candle-kernels/build.rs | use std::env;
use std::path::PathBuf;
fn main() {
println!("cargo::rerun-if-changed=build.rs");
println!("cargo::rerun-if-changed=src/compatibility.cuh");
println!("cargo::rerun-if-changed=src/cuda_utils.cuh");
println!("cargo::rerun-if-changed=src/binary_op_macros.cuh");
// Build for PTX
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
let ptx_path = out_dir.join("ptx.rs");
let mut builder = bindgen_cuda::Builder::default()
.arg("--expt-relaxed-constexpr")
.arg("-std=c++17")
.arg("-O3")
.arg("--use_fast_math");
println!("cargo::warning={builder:?}");
let bindings = builder.build_ptx().unwrap();
bindings.write(&ptx_path).unwrap();
// Remove unwanted MOE PTX constants from ptx.rs
remove_lines(&ptx_path, &["MOE_GGUF", "MOE_WMMA", "MOE_WMMA_GGUF"]);
// Build for FFI binding (must use custom bindgen_cuda, which supports simutanously build PTX and lib)
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
let mut is_target_msvc = false;
if let Ok(target) = std::env::var("TARGET") {
if target.contains("msvc") {
is_target_msvc = true;
builder = builder.arg("-D_USE_MATH_DEFINES");
}
}
if !is_target_msvc {
builder = builder.arg("-Xcompiler").arg("-fPIC");
}
let builder = builder.kernel_paths(vec![
"src/moe/moe_gguf.cu",
"src/moe/moe_wmma.cu",
"src/moe/moe_wmma_gguf.cu",
]);
println!("cargo::warning={builder:?}");
builder.build_lib(out_dir.join("libmoe.a"));
println!("cargo:rustc-link-search={}", out_dir.display());
println!("cargo:rustc-link-lib=moe");
println!("cargo:rustc-link-lib=dylib=cudart");
if !is_target_msvc {
println!("cargo:rustc-link-lib=stdc++");
}
}
fn remove_lines<P: AsRef<std::path::Path>>(file: P, patterns: &[&str]) {
let content = std::fs::read_to_string(&file).unwrap();
let filtered = content
.lines()
.filter(|line| !patterns.iter().any(|p| line.contains(p)))
.collect::<Vec<_>>()
.join("\n");
std::fs::write(file, filtered).unwrap();
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-kernels/src/lib.rs | candle-kernels/src/lib.rs | mod ptx {
include!(concat!(env!("OUT_DIR"), "/ptx.rs"));
}
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Id {
Affine,
Binary,
Cast,
Conv,
Fill,
Indexing,
Quantized,
Reduce,
Sort,
Ternary,
Unary,
}
pub const ALL_IDS: [Id; 11] = [
Id::Affine,
Id::Binary,
Id::Cast,
Id::Conv,
Id::Fill,
Id::Indexing,
Id::Quantized,
Id::Reduce,
Id::Sort,
Id::Ternary,
Id::Unary,
];
pub struct Module {
index: usize,
ptx: &'static str,
}
impl Module {
pub fn index(&self) -> usize {
self.index
}
pub fn ptx(&self) -> &'static str {
self.ptx
}
}
const fn module_index(id: Id) -> usize {
let mut i = 0;
while i < ALL_IDS.len() {
if ALL_IDS[i] as u32 == id as u32 {
return i;
}
i += 1;
}
panic!("id not found")
}
macro_rules! mdl {
($cst:ident, $id:ident) => {
pub const $cst: Module = Module {
index: module_index(Id::$id),
ptx: ptx::$cst,
};
};
}
mdl!(AFFINE, Affine);
mdl!(BINARY, Binary);
mdl!(CAST, Cast);
mdl!(CONV, Conv);
mdl!(FILL, Fill);
mdl!(INDEXING, Indexing);
mdl!(QUANTIZED, Quantized);
mdl!(REDUCE, Reduce);
mdl!(SORT, Sort);
mdl!(TERNARY, Ternary);
mdl!(UNARY, Unary);
pub mod ffi;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-kernels/src/ptx.rs | candle-kernels/src/ptx.rs | pub const AFFINE: &str = include_str!(concat!(env!("OUT_DIR"), "/affine.ptx"));
pub const BINARY: &str = include_str!(concat!(env!("OUT_DIR"), "/binary.ptx"));
pub const CAST: &str = include_str!(concat!(env!("OUT_DIR"), "/cast.ptx"));
pub const CONV: &str = include_str!(concat!(env!("OUT_DIR"), "/conv.ptx"));
pub const FILL: &str = include_str!(concat!(env!("OUT_DIR"), "/fill.ptx"));
pub const INDEXING: &str = include_str!(concat!(env!("OUT_DIR"), "/indexing.ptx"));
pub const QUANTIZED: &str = include_str!(concat!(env!("OUT_DIR"), "/quantized.ptx"));
pub const REDUCE: &str = include_str!(concat!(env!("OUT_DIR"), "/reduce.ptx"));
pub const SORT: &str = include_str!(concat!(env!("OUT_DIR"), "/sort.ptx"));
pub const TERNARY: &str = include_str!(concat!(env!("OUT_DIR"), "/ternary.ptx"));
pub const UNARY: &str = include_str!(concat!(env!("OUT_DIR"), "/unary.ptx"));
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-kernels/src/ffi.rs | candle-kernels/src/ffi.rs | use core::ffi::c_void;
#[allow(dead_code)]
extern "C" {
// for unquntized models
pub fn moe_gemm_wmma(
input: *const c_void, // device pointer [size_m, size_k]
weights: *const c_void, // device pointer [num_experts, size_n, size_k]
sorted_token_ids: *const i32, // device pointer [size_m]
expert_ids: *const i32, // host array [size_m] (expert id per sorted token)
topk_weights: *const f32,
output: *mut c_void, // device pointer [size_m, size_n]
expert_counts: *mut i32, // pre-allocated buffer [num_experts]
expert_offsets: *mut i32, // pre-allocated buffer [num_experts + 1]
num_experts: i32,
topk: i32,
size_m: i32,
size_n: i32,
size_k: i32,
dtype: i32, // 0=float16, 1=bf16 (for input/output)
is_prefill: bool,
stream: i64,
);
pub fn moe_gemm_gguf(
input: *const f32, // input [size_m, size_k]
weights: *const c_void, // weights [num_experts, size_n, size_k]
sorted_token_ids: *const i32,
expert_ids: *const i32,
topk_weights: *const f32, // device ptr or nullptr
output: *mut c_void, // float output [size_m, size_n]
num_experts: i32,
topk: i32,
size_m: i32,
size_n: i32,
size_k: i32,
gguf_dtype: i32, // Q8_0: 0, Q4K: 1, Q2K: 2, Q3k: 3, Q5K: 4, Q6K: 5 (for weights)
stream: i64,
);
pub fn moe_gemm_gguf_prefill(
input: *const c_void, // input [size_m, size_k]
weights: *const u8, // weights [num_experts, size_n, size_k]
sorted_token_ids: *const i32,
expert_ids: *const i32, //must be host ptr
topk_weights: *const f32, // device ptr or nullptr
output: *mut c_void, // float output [size_m, size_n]
num_experts: i32,
topk: i32,
size_m: i32,
size_n: i32,
size_k: i32,
input_dtype: i32, // 0=f16, 1=bf16 (for inputs)
gguf_dtype: i32, //Q8_0: 0, Q4K: 1, Q2K: 2, Q3k: 3, Q5K: 4, Q6K: 5 (for weights)
stream: i64,
);
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-flash-attn-build/src/lib.rs | candle-flash-attn-build/src/lib.rs | //! Build utilities for fetching cutlass headers on-demand.
//!
//! This crate provides a function to fetch NVIDIA's cutlass library headers
//! during build time, avoiding the need for git submodules.
use anyhow::{Context, Result};
use std::path::PathBuf;
use std::process::Command;
const CUTLASS_REPO: &str = "https://github.com/NVIDIA/cutlass.git";
/// Fetch cutlass headers if not already present at the specified commit.
///
/// The headers are cloned to `out_dir/cutlass` using sparse checkout to only
/// fetch the `include/` directory, minimizing download size.
///
/// # Arguments
/// * `out_dir` - The output directory (typically from `OUT_DIR` env var)
/// * `commit` - The git commit hash to checkout
///
/// # Returns
/// The path to the cutlass directory containing the `include/` subdirectory.
pub fn fetch_cutlass(out_dir: &PathBuf, commit: &str) -> Result<PathBuf> {
let cutlass_dir = out_dir.join("cutlass");
// Check if cutlass is already fetched and at the right commit
if cutlass_dir.join("include").exists() {
let output = Command::new("git")
.args(["rev-parse", "HEAD"])
.current_dir(&cutlass_dir)
.output();
if let Ok(output) = output {
let current_commit = String::from_utf8_lossy(&output.stdout).trim().to_string();
if current_commit == commit {
return Ok(cutlass_dir);
}
}
}
// Clone cutlass if the directory doesn't exist
if !cutlass_dir.exists() {
println!("cargo::warning=Cloning cutlass from {}", CUTLASS_REPO);
let status = Command::new("git")
.args([
"clone",
"--depth",
"1",
CUTLASS_REPO,
cutlass_dir.to_str().unwrap(),
])
.status()
.context("Failed to clone cutlass repository")?;
if !status.success() {
anyhow::bail!("git clone failed with status: {}", status);
}
// Set up sparse checkout to only get the include directory
let status = Command::new("git")
.args(["sparse-checkout", "set", "include"])
.current_dir(&cutlass_dir)
.status()
.context("Failed to set sparse checkout for cutlass")?;
if !status.success() {
anyhow::bail!("git sparse-checkout failed with status: {}", status);
}
}
// Fetch and checkout the specific commit
println!("cargo::warning=Checking out cutlass commit {}", commit);
let status = Command::new("git")
.args(["fetch", "origin", commit])
.current_dir(&cutlass_dir)
.status()
.context("Failed to fetch cutlass commit")?;
if !status.success() {
anyhow::bail!("git fetch failed with status: {}", status);
}
let status = Command::new("git")
.args(["checkout", commit])
.current_dir(&cutlass_dir)
.status()
.context("Failed to checkout cutlass commit")?;
if !status.success() {
anyhow::bail!("git checkout failed with status: {}", status);
}
Ok(cutlass_dir)
}
/// Returns the include path argument for nvcc/compiler.
///
/// # Arguments
/// * `cutlass_dir` - Path returned from `fetch_cutlass`
pub fn cutlass_include_arg(cutlass_dir: &PathBuf) -> String {
format!("-I{}/include", cutlass_dir.display())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-wasm-tests/src/lib.rs | candle-wasm-tests/src/lib.rs | pub fn add(left: usize, right: usize) -> usize {
left + right
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
let result = add(2, 2);
assert_eq!(result, 4);
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-wasm-tests/tests/quantized_tests.rs | candle-wasm-tests/tests/quantized_tests.rs | #![allow(unused)]
use candle::{
quantized::{self, k_quants, GgmlDType, GgmlType},
test_utils::to_vec2_round,
Device, Module, Result, Tensor,
};
use wasm_bindgen_test::*;
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn quantized_matmul_neg() -> Result<()> {
let cpu = &Device::Cpu;
let (m, k, n) = (3, 64, 4);
let lhs = (0..(m * k))
.map(|v| v as f32 - (m * k) as f32 / 2.0)
.collect::<Vec<_>>();
let tensor_lhs = Tensor::from_slice(&lhs, (m, k), cpu)?;
let mut dst = vec![42.; 3 * 4];
let mut rhs_t = vec![k_quants::BlockQ4_0::zeros(); 8];
let rhs = (0..k * n)
.map(|v| v as f32 - (k * n) as f32 / 3.0)
.collect::<Vec<_>>();
let tensor_rhs = Tensor::from_slice(&rhs, (n, k), cpu)?.t()?;
k_quants::BlockQ4_0::from_float(&rhs, &mut rhs_t);
k_quants::matmul((m, k, n), &lhs, &rhs_t, &mut dst)?;
assert_eq!(
dst.iter().map(|x| x.round()).collect::<Vec<_>>(),
&[
243524.0, -19596.0, -285051.0, -549815.0, 23777.0, 21651.0, 19398.0, 18367.0,
-196472.0, 63012.0, 324585.0, 587902.0
]
);
let mm = tensor_lhs.matmul(&tensor_rhs)?;
assert_eq!(
to_vec2_round(&mm, 0)?,
&[
[244064.0, -20128.0, -284320.0, -548512.0],
[23563.0, 21515.0, 19467.0, 17419.0],
[-196939.0, 63157.0, 323253.0, 583349.0]
]
);
let qtensor = quantized::QTensor::new(quantized::QStorage::Cpu(Box::new(rhs_t)), (4, 64))?;
let matmul = quantized::QMatMul::from_qtensor(qtensor)?;
let res = matmul.forward(&tensor_lhs)?;
assert_eq!(
to_vec2_round(&res, 0)?,
&[
[243524.0, -19596.0, -285051.0, -549815.0],
[23777.0, 21651.0, 19398.0, 18367.0],
[-196472.0, 63012.0, 324585.0, 587902.0]
]
);
Ok(())
}
/// Creates a vector similarly to the one used in GGML unit tests: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L26-L30
fn create_ggml_like_vector(offset: f32) -> Vec<f32> {
const GGML_TEST_SIZE: usize = 32 * 128;
(0..GGML_TEST_SIZE)
.map(|i| 0.1 + 2.0 * (i as f32 + offset).cos())
.collect()
}
/// Very simple dot product implementation
fn vec_dot_reference(a: &[f32], b: &[f32]) -> f32 {
a.iter().zip(b).map(|(a, b)| a * b).sum()
}
/// Returns the error achieved by the GGML matmul unit test.
fn ggml_reference_matmul_error(dtype: GgmlDType) -> Result<f32> {
let err = match dtype {
GgmlDType::F16 => 0.000010,
GgmlDType::Q2K => 0.004086,
GgmlDType::Q3K => 0.016148,
GgmlDType::Q4K => 0.002425,
GgmlDType::Q5K => 0.000740,
GgmlDType::Q6K => 0.000952,
GgmlDType::Q4_0 => 0.001143,
GgmlDType::Q4_1 => 0.007784,
GgmlDType::Q5_0 => 0.001353,
GgmlDType::Q5_1 => 0.001363,
GgmlDType::Q8_0 => 0.000092,
// Not from the ggml repo.
GgmlDType::Q8K => 0.00065,
_ => candle::bail!("No GGML results for quantization type {dtype:?}",),
};
Ok(err)
}
/// Mirrores the GGML matmul unit test: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L76-L91
fn ggml_matmul_error_test<T: GgmlType>() -> Result<()> {
const GGML_MAX_DOT_PRODUCT_ERROR: f32 = 0.02;
let a = create_ggml_like_vector(0.0);
let b = create_ggml_like_vector(1.0);
let length = a.len();
let mut a_quant = vec![T::zeros(); length / T::BLCK_SIZE];
let mut b_quant = vec![T::VecDotType::zeros(); length / T::VecDotType::BLCK_SIZE];
T::from_float(&a, &mut a_quant);
T::VecDotType::from_float(&b, &mut b_quant);
let result = T::vec_dot(length, &a_quant, &b_quant);
let result_unopt = T::vec_dot_unopt(length, &a_quant, &b_quant);
let reference_result = vec_dot_reference(&a, &b);
if (result - result_unopt).abs() / length as f32 > 1e-6 {
candle::bail!(
"the opt and unopt vec-dot returned different values, opt {result}, unopt {result_unopt}"
)
}
let error = (result - reference_result).abs() / length as f32;
let ggml_error = ggml_reference_matmul_error(T::DTYPE)?;
if !error.is_finite() || error > GGML_MAX_DOT_PRODUCT_ERROR {
candle::bail!(
"Dot product error {} exceeds max error {}",
error,
GGML_MAX_DOT_PRODUCT_ERROR
);
}
// We diverge slightly due to different rounding behavior / f16 to f32 conversions in GGML
// => we use a slightly higher error threshold
const ERROR_LENIENCY: f32 = 0.00001;
if error - ERROR_LENIENCY > ggml_error {
candle::bail!(
"Dot product error {} exceeds ggml reference error {}",
error,
ggml_error
);
}
Ok(())
}
#[wasm_bindgen_test]
fn quantized_matmul_q40() -> Result<()> {
ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ4_0>()?;
Ok(())
}
#[wasm_bindgen_test]
fn quantized_matmul_q50() -> Result<()> {
ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ5_0>()?;
Ok(())
}
#[wasm_bindgen_test]
fn quantized_matmul_q80() -> Result<()> {
ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ8_0>()?;
Ok(())
}
#[wasm_bindgen_test]
fn quantized_matmul_q2k() -> Result<()> {
ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ2K>()?;
Ok(())
}
#[wasm_bindgen_test]
fn quantized_matmul_q3k() -> Result<()> {
ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ3K>()?;
Ok(())
}
#[wasm_bindgen_test]
fn quantized_matmul_q4k() -> Result<()> {
ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ4K>()?;
Ok(())
}
#[wasm_bindgen_test]
fn quantized_matmul_q5k() -> Result<()> {
ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ5K>()?;
Ok(())
}
#[wasm_bindgen_test]
fn quantized_matmul_q6k() -> Result<()> {
ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ6K>()?;
Ok(())
}
#[wasm_bindgen_test]
fn quantized_matmul_q8k() -> Result<()> {
ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ8K>()?;
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-book/src/simplified.rs | candle-book/src/simplified.rs | //! #A simplified example in Rust of training a neural network and then using it based on the Candle Framework by Hugging Face.
//! Author: Evgeny Igumnov 2023 igumnovnsk@gmail.com
//! This program implements a neural network to predict the winner of the second round of elections based on the results of the first round.
//!
//! ##Basic moments:
//!
//! A multilayer perceptron with two hidden layers is used. The first hidden layer has 4 neurons, the second has 2 neurons.
//! The input is a vector of 2 numbers - the percentage of votes for the first and second candidates in the first stage.
//! The output is the number 0 or 1, where 1 means that the first candidate will win in the second stage, 0 means that he will lose.
//! For training, samples with real data on the results of the first and second stages of different elections are used.
//! The model is trained by backpropagation using gradient descent and the cross-entropy loss function.
//! Model parameters (weights of neurons) are initialized randomly, then optimized during training.
//! After training, the model is tested on a deferred sample to evaluate the accuracy.
//! If the accuracy on the test set is below 100%, the model is considered underfit and the learning process is repeated.
//! Thus, this neural network learns to find hidden relationships between the results of the first and second rounds of voting in order to make predictions for new data.
#[rustfmt::skip]
mod tests {
use candle::{DType, Result, Tensor, D, Device};
use candle_nn::{loss, ops, Linear, Module, VarBuilder, VarMap, Optimizer};
// ANCHOR: book_training_simplified1
const VOTE_DIM: usize = 2;
const RESULTS: usize = 1;
const EPOCHS: usize = 10;
const LAYER1_OUT_SIZE: usize = 4;
const LAYER2_OUT_SIZE: usize = 2;
const LEARNING_RATE: f64 = 0.05;
#[derive(Clone)]
pub struct Dataset {
pub train_votes: Tensor,
pub train_results: Tensor,
pub test_votes: Tensor,
pub test_results: Tensor,
}
struct MultiLevelPerceptron {
ln1: Linear,
ln2: Linear,
ln3: Linear,
}
impl MultiLevelPerceptron {
fn new(vs: VarBuilder) -> Result<Self> {
let ln1 = candle_nn::linear(VOTE_DIM, LAYER1_OUT_SIZE, vs.pp("ln1"))?;
let ln2 = candle_nn::linear(LAYER1_OUT_SIZE, LAYER2_OUT_SIZE, vs.pp("ln2"))?;
let ln3 = candle_nn::linear(LAYER2_OUT_SIZE, RESULTS + 1, vs.pp("ln3"))?;
Ok(Self { ln1, ln2, ln3 })
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.ln1.forward(xs)?;
let xs = xs.relu()?;
let xs = self.ln2.forward(&xs)?;
let xs = xs.relu()?;
self.ln3.forward(&xs)
}
}
// ANCHOR_END: book_training_simplified1
// ANCHOR: book_training_simplified3
#[tokio::test]
async fn simplified() -> anyhow::Result<()> {
let dev = Device::cuda_if_available(0)?;
let train_votes_vec: Vec<u32> = vec![
15, 10,
10, 15,
5, 12,
30, 20,
16, 12,
13, 25,
6, 14,
31, 21,
];
let train_votes_tensor = Tensor::from_vec(train_votes_vec.clone(), (train_votes_vec.len() / VOTE_DIM, VOTE_DIM), &dev)?.to_dtype(DType::F32)?;
let train_results_vec: Vec<u32> = vec![
1,
0,
0,
1,
1,
0,
0,
1,
];
let train_results_tensor = Tensor::from_vec(train_results_vec, train_votes_vec.len() / VOTE_DIM, &dev)?;
let test_votes_vec: Vec<u32> = vec![
13, 9,
8, 14,
3, 10,
];
let test_votes_tensor = Tensor::from_vec(test_votes_vec.clone(), (test_votes_vec.len() / VOTE_DIM, VOTE_DIM), &dev)?.to_dtype(DType::F32)?;
let test_results_vec: Vec<u32> = vec![
1,
0,
0,
];
let test_results_tensor = Tensor::from_vec(test_results_vec.clone(), test_results_vec.len(), &dev)?;
let m = Dataset {
train_votes: train_votes_tensor,
train_results: train_results_tensor,
test_votes: test_votes_tensor,
test_results: test_results_tensor,
};
let trained_model: MultiLevelPerceptron;
loop {
println!("Trying to train neural network.");
match train(m.clone(), &dev) {
Ok(model) => {
trained_model = model;
break;
},
Err(e) => {
println!("Error: {}", e);
continue;
}
}
}
let real_world_votes: Vec<u32> = vec![
13, 22,
];
let tensor_test_votes = Tensor::from_vec(real_world_votes.clone(), (1, VOTE_DIM), &dev)?.to_dtype(DType::F32)?;
let final_result = trained_model.forward(&tensor_test_votes)?;
let result = final_result
.argmax(D::Minus1)?
.to_dtype(DType::F32)?
.get(0).map(|x| x.to_scalar::<f32>())??;
println!("real_life_votes: {:?}", real_world_votes);
println!("neural_network_prediction_result: {:?}", result);
Ok(())
}
// ANCHOR_END: book_training_simplified3
// ANCHOR: book_training_simplified2
fn train(m: Dataset, dev: &Device) -> anyhow::Result<MultiLevelPerceptron> {
let train_results = m.train_results.to_device(dev)?;
let train_votes = m.train_votes.to_device(dev)?;
let varmap = VarMap::new();
let vs = VarBuilder::from_varmap(&varmap, DType::F32, dev);
let model = MultiLevelPerceptron::new(vs.clone())?;
let mut sgd = candle_nn::SGD::new(varmap.all_vars(), LEARNING_RATE)?;
let test_votes = m.test_votes.to_device(dev)?;
let test_results = m.test_results.to_device(dev)?;
let mut final_accuracy: f32 = 0.0;
for epoch in 1..EPOCHS + 1 {
let logits = model.forward(&train_votes)?;
let log_sm = ops::log_softmax(&logits, D::Minus1)?;
let loss = loss::nll(&log_sm, &train_results)?;
sgd.backward_step(&loss)?;
let test_logits = model.forward(&test_votes)?;
let sum_ok = test_logits
.argmax(D::Minus1)?
.eq(&test_results)?
.to_dtype(DType::F32)?
.sum_all()?
.to_scalar::<f32>()?;
let test_accuracy = sum_ok / test_results.dims1()? as f32;
final_accuracy = 100. * test_accuracy;
println!("Epoch: {epoch:3} Train loss: {:8.5} Test accuracy: {:5.2}%",
loss.to_scalar::<f32>()?,
final_accuracy
);
if final_accuracy == 100.0 {
break;
}
}
if final_accuracy < 100.0 {
Err(anyhow::Error::msg("The model is not trained well enough."))
} else {
Ok(model)
}
}
// ANCHOR_END: book_training_simplified2
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-book/src/lib.rs | candle-book/src/lib.rs | #[cfg(test)]
pub mod simplified;
#[cfg(test)]
mod tests {
use anyhow::Result;
use candle::{DType, Device, Tensor};
use parquet::file::reader::SerializedFileReader;
// NOTE: Waiting on https://github.com/rust-lang/mdBook/pull/1856
#[rustfmt::skip]
#[tokio::test]
async fn book_hub_1() {
// ANCHOR: book_hub_1
use candle::Device;
use hf_hub::api::tokio::Api;
let api = Api::new().unwrap();
let repo = api.model("bert-base-uncased".to_string());
let weights_filename = repo.get("model.safetensors").await.unwrap();
let weights = candle::safetensors::load(weights_filename, &Device::Cpu).unwrap();
// ANCHOR_END: book_hub_1
assert_eq!(weights.len(), 206);
}
#[rustfmt::skip]
#[test]
fn book_hub_2() {
{
// ANCHOR: book_hub_2
use candle::Device;
use hf_hub::api::sync::Api;
use memmap2::Mmap;
use std::fs;
let api = Api::new().unwrap();
let repo = api.model("bert-base-uncased".to_string());
let weights_filename = repo.get("model.safetensors").unwrap();
let file = fs::File::open(weights_filename).unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let weights = candle::safetensors::load_buffer(&mmap[..], &Device::Cpu).unwrap();
// ANCHOR_END: book_hub_2
assert_eq!(weights.len(), 206);
}
// #[rustfmt::skip]
// #[test]
// fn book_hub_3() {
{
// ANCHOR: book_hub_3
use candle::{DType, Device, Tensor};
use hf_hub::api::sync::Api;
use memmap2::Mmap;
use safetensors::slice::IndexOp;
use safetensors::SafeTensors;
use std::fs;
let api = Api::new().unwrap();
let repo = api.model("bert-base-uncased".to_string());
let weights_filename = repo.get("model.safetensors").unwrap();
let file = fs::File::open(weights_filename).unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
// Use safetensors directly
let tensors = SafeTensors::deserialize(&mmap[..]).unwrap();
let view = tensors
.tensor("bert.encoder.layer.0.attention.self.query.weight")
.unwrap();
// We're going to load shard with rank 1, within a world_size of 4
// We're going to split along dimension 0 doing VIEW[start..stop, :]
let rank = 1;
let world_size = 4;
let dim = 0;
let dtype = view.dtype();
let mut tp_shape = view.shape().to_vec();
let size = tp_shape[0];
if size % world_size != 0 {
panic!("The dimension is not divisible by `world_size`");
}
let block_size = size / world_size;
let start = rank * block_size;
let stop = (rank + 1) * block_size;
// Everything is expressed in tensor dimension
// bytes offsets is handled automatically for safetensors.
let iterator = view.slice(start..stop).unwrap();
tp_shape[dim] = block_size;
// Convert safetensors Dtype to candle DType
let dtype: DType = dtype.try_into().unwrap();
// TODO: Implement from_buffer_iterator so we can skip the extra CPU alloc.
let raw: Vec<u8> = iterator.into_iter().flatten().cloned().collect();
let tp_tensor = Tensor::from_raw_buffer(&raw, dtype, &tp_shape, &Device::Cpu).unwrap();
// ANCHOR_END: book_hub_3
assert_eq!(view.shape(), &[768, 768]);
assert_eq!(tp_tensor.dims(), &[192, 768]);
}
}
#[allow(unused)]
#[rustfmt::skip]
fn book_training_1() -> Result<()>{
// ANCHOR: book_training_1
use hf_hub::{api::sync::Api, Repo, RepoType};
let dataset_id = "mnist".to_string();
let api = Api::new()?;
let repo = Repo::with_revision(
dataset_id,
RepoType::Dataset,
"refs/convert/parquet".to_string(),
);
let repo = api.repo(repo);
let test_parquet_filename = repo.get("mnist/test/0000.parquet")?;
let train_parquet_filename = repo.get("mnist/train/0000.parquet")?;
let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)?;
let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)?;
// ANCHOR_END: book_training_1
// Ignore unused
let _train = train_parquet;
// ANCHOR: book_training_2
for row in test_parquet {
for (idx, (name, field)) in row?.get_column_iter().enumerate() {
println!("Column id {idx}, name {name}, value {field}");
}
}
// ANCHOR_END: book_training_2
let test_parquet_filename = repo.get("mnist/test/0000.parquet")?;
let train_parquet_filename = repo.get("mnist/train/0000.parquet")?;
let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)?;
let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)?;
// ANCHOR: book_training_3
let test_samples = 10_000;
let mut test_buffer_images: Vec<u8> = Vec::with_capacity(test_samples * 784);
let mut test_buffer_labels: Vec<u8> = Vec::with_capacity(test_samples);
for row in test_parquet{
for (_name, field) in row?.get_column_iter() {
if let parquet::record::Field::Group(subrow) = field {
for (_name, field) in subrow.get_column_iter() {
if let parquet::record::Field::Bytes(value) = field {
let image = image::load_from_memory(value.data()).unwrap();
test_buffer_images.extend(image.to_luma8().as_raw());
}
}
}else if let parquet::record::Field::Long(label) = field {
test_buffer_labels.push(*label as u8);
}
}
}
let test_images = (Tensor::from_vec(test_buffer_images, (test_samples, 784), &Device::Cpu)?.to_dtype(DType::F32)? / 255.)?;
let test_labels = Tensor::from_vec(test_buffer_labels, (test_samples, ), &Device::Cpu)?;
let train_samples = 60_000;
let mut train_buffer_images: Vec<u8> = Vec::with_capacity(train_samples * 784);
let mut train_buffer_labels: Vec<u8> = Vec::with_capacity(train_samples);
for row in train_parquet{
for (_name, field) in row?.get_column_iter() {
if let parquet::record::Field::Group(subrow) = field {
for (_name, field) in subrow.get_column_iter() {
if let parquet::record::Field::Bytes(value) = field {
let image = image::load_from_memory(value.data()).unwrap();
train_buffer_images.extend(image.to_luma8().as_raw());
}
}
}else if let parquet::record::Field::Long(label) = field {
train_buffer_labels.push(*label as u8);
}
}
}
let train_images = (Tensor::from_vec(train_buffer_images, (train_samples, 784), &Device::Cpu)?.to_dtype(DType::F32)? / 255.)?;
let train_labels = Tensor::from_vec(train_buffer_labels, (train_samples, ), &Device::Cpu)?;
let mnist = candle_datasets::vision::Dataset {
train_images,
train_labels,
test_images,
test_labels,
labels: 10,
};
// ANCHOR_END: book_training_3
assert_eq!(mnist.test_images.dims(), &[10_000, 784]);
assert_eq!(mnist.test_labels.dims(), &[10_000]);
assert_eq!(mnist.train_images.dims(), &[60_000, 784]);
assert_eq!(mnist.train_labels.dims(), &[60_000]);
Ok(())
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-datasets/src/batcher.rs | candle-datasets/src/batcher.rs | use candle::{Result, Tensor};
pub struct Batcher<I> {
inner: I,
batch_size: usize,
return_last_incomplete_batch: bool,
}
impl<I> Batcher<I> {
fn new(inner: I) -> Self {
Self {
inner,
batch_size: 16,
return_last_incomplete_batch: false,
}
}
pub fn batch_size(mut self, batch_size: usize) -> Self {
self.batch_size = batch_size;
self
}
pub fn return_last_incomplete_batch(mut self, r: bool) -> Self {
self.return_last_incomplete_batch = r;
self
}
}
pub struct Iter1<I: Iterator<Item = Tensor>> {
inner: I,
}
pub struct Iter2<I: Iterator<Item = (Tensor, Tensor)>> {
inner: I,
}
impl<I: Iterator<Item = Tensor>> Batcher<Iter1<I>> {
pub fn new1(inner: I) -> Self {
Self::new(Iter1 { inner })
}
}
impl<I: Iterator<Item = (Tensor, Tensor)>> Batcher<Iter2<I>> {
pub fn new2(inner: I) -> Self {
Self::new(Iter2 { inner })
}
}
pub struct IterResult1<I: Iterator<Item = Result<Tensor>>> {
inner: I,
}
pub struct IterResult2<I: Iterator<Item = Result<(Tensor, Tensor)>>> {
inner: I,
}
impl<I: Iterator<Item = Result<Tensor>>> Batcher<IterResult1<I>> {
pub fn new_r1(inner: I) -> Self {
Self::new(IterResult1 { inner })
}
}
impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Batcher<IterResult2<I>> {
pub fn new_r2(inner: I) -> Self {
Self::new(IterResult2 { inner })
}
}
impl<I: Iterator<Item = Tensor>> Iterator for Batcher<Iter1<I>> {
type Item = Result<Tensor>;
fn next(&mut self) -> Option<Self::Item> {
let mut items = Vec::with_capacity(self.batch_size);
for _i in 0..self.batch_size {
// We have two levels of inner here so that we can have two implementations of the
// Iterator trait that are different for Iter1 and Iter2. If rust gets better
// specialization at some point we can get rid of this.
match self.inner.inner.next() {
Some(item) => items.push(item),
None => {
if self.return_last_incomplete_batch && !items.is_empty() {
break;
}
return None;
}
}
}
Some(Tensor::stack(&items, 0))
}
}
impl<I: Iterator<Item = (Tensor, Tensor)>> Iterator for Batcher<Iter2<I>> {
type Item = Result<(Tensor, Tensor)>;
fn next(&mut self) -> Option<Self::Item> {
let mut xs = Vec::with_capacity(self.batch_size);
let mut ys = Vec::with_capacity(self.batch_size);
for _i in 0..self.batch_size {
match self.inner.inner.next() {
Some((x, y)) => {
xs.push(x);
ys.push(y)
}
None => {
if self.return_last_incomplete_batch && !xs.is_empty() && !ys.is_empty() {
break;
}
return None;
}
}
}
let xs = Tensor::stack(&xs, 0);
let ys = Tensor::stack(&ys, 0);
Some(xs.and_then(|xs| ys.map(|ys| (xs, ys))))
}
}
impl<I: Iterator<Item = Result<Tensor>>> Iterator for Batcher<IterResult1<I>> {
type Item = Result<Tensor>;
fn next(&mut self) -> Option<Self::Item> {
let mut items = Vec::with_capacity(self.batch_size);
for _i in 0..self.batch_size {
// We have two levels of inner here so that we can have two implementations of the
// Iterator trait that are different for Iter1 and Iter2. If rust gets better
// specialization at some point we can get rid of this.
match self.inner.inner.next() {
Some(item) => items.push(item),
None => {
if self.return_last_incomplete_batch && !items.is_empty() {
break;
}
return None;
}
}
}
let items = items.into_iter().collect::<Result<Vec<Tensor>>>();
Some(items.and_then(|items| Tensor::stack(&items, 0)))
}
}
impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Iterator for Batcher<IterResult2<I>> {
    type Item = Result<(Tensor, Tensor)>;
    /// Collects up to `batch_size` `(input, target)` pairs and stacks each
    /// side along a new leading dimension.
    ///
    /// Error handling: failed items are set aside while the loop keeps
    /// draining the inner iterator; if any error occurred in the window, only
    /// the first one is returned and the successfully collected pairs are
    /// dropped. Note that a failed item still consumes one slot of the batch
    /// window, so a batch that saw errors would have been smaller anyway.
    fn next(&mut self) -> Option<Self::Item> {
        let mut xs = Vec::with_capacity(self.batch_size);
        let mut ys = Vec::with_capacity(self.batch_size);
        let mut errs = vec![];
        for _i in 0..self.batch_size {
            match self.inner.inner.next() {
                Some(Ok((x, y))) => {
                    xs.push(x);
                    ys.push(y)
                }
                // Remember the error but keep pulling items for this window.
                Some(Err(err)) => errs.push(err),
                None => {
                    if self.return_last_incomplete_batch && !xs.is_empty() && !ys.is_empty() {
                        break;
                    }
                    return None;
                }
            }
        }
        if !errs.is_empty() {
            // Report only the first error encountered for this batch.
            return Some(Err(errs.swap_remove(0)));
        }
        let xs = Tensor::stack(&xs, 0);
        let ys = Tensor::stack(&ys, 0);
        Some(xs.and_then(|xs| ys.map(|ys| (xs, ys))))
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-datasets/src/lib.rs | candle-datasets/src/lib.rs | //! Datasets & Dataloaders for Candle
pub mod batcher;
pub mod hub;
pub mod nlp;
pub mod vision;
pub use batcher::Batcher;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-datasets/src/hub.rs | candle-datasets/src/hub.rs | use hf_hub::{
api::sync::{Api, ApiRepo},
Repo, RepoType,
};
use parquet::file::reader::SerializedFileReader;
use std::fs::File;
/// Re-export of the `FileReader` trait from the `parquet` crate.
///
/// This trait provides access to Parquet file metadata and row groups:
/// - [`FileReader::metadata`]
/// - [`FileReader::num_row_groups`]
/// - [`FileReader::get_row_group`]
/// - [`FileReader::get_row_iter`]
///
/// This is re-exported so downstream users of [`from_hub`] can use these
/// methods without needing to explicitly add `parquet` as a dependency.
///
/// # Example
/// ```
/// use candle_datasets::hub::{from_hub, FileReader}; // Re-exported trait
/// let api = hf_hub::api::sync::Api::new().unwrap();
/// let files = from_hub(&api, "hf-internal-testing/dummy_image_text_data".to_string()).unwrap();
/// let num_rows = files[0].metadata().file_metadata().num_rows();
/// ```
pub use parquet::file::reader::FileReader;
/// Errors that can occur while fetching and opening parquet files from the
/// Hugging Face hub.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// Failure while talking to the hub API (download, repo info, ...).
    #[error("ApiError : {0}")]
    ApiError(#[from] hf_hub::api::sync::ApiError),
    /// Local filesystem error, e.g. opening the downloaded file.
    #[error("IoError : {0}")]
    IoError(#[from] std::io::Error),
    /// Error raised by the parquet reader.
    #[error("ParquetError : {0}")]
    ParquetError(#[from] parquet::errors::ParquetError),
}
/// Downloads `rfilename` from `repo` (or reuses the hub cache) and opens the
/// local file as a parquet reader.
fn sibling_to_parquet(
    rfilename: &str,
    repo: &ApiRepo,
) -> Result<SerializedFileReader<File>, Error> {
    let path = repo.get(rfilename)?;
    let reader = SerializedFileReader::new(File::open(path)?)?;
    Ok(reader)
}
/// Loads all `.parquet` files from a given dataset ID on the Hugging Face Hub.
///
/// This returns a list of `SerializedFileReader<File>` that can be used to read Parquet content.
///
/// # Example
/// ```
/// use candle_datasets::hub::{from_hub, FileReader};
/// let api = hf_hub::api::sync::Api::new().unwrap();
/// let readers = from_hub(&api, "hf-internal-testing/dummy_image_text_data".to_string()).unwrap();
/// let metadata = readers[0].metadata();
/// assert_eq!(metadata.file_metadata().num_rows(), 20);
/// ```
pub fn from_hub(api: &Api, dataset_id: String) -> Result<Vec<SerializedFileReader<File>>, Error> {
    // The parquet conversion of a hub dataset lives under this special revision.
    let repo = api.repo(Repo::with_revision(
        dataset_id,
        RepoType::Dataset,
        "refs/convert/parquet".to_string(),
    ));
    let info = repo.info()?;
    // Open every parquet sibling, stopping at the first failure.
    let mut readers = Vec::new();
    for sibling in info.siblings {
        if sibling.rfilename.ends_with(".parquet") {
            readers.push(sibling_to_parquet(&sibling.rfilename, &repo)?);
        }
    }
    Ok(readers)
}
#[cfg(test)]
mod tests {
    use super::*;
    // NOTE(review): this test talks to the Hugging Face hub over the network
    // and downloads a small dataset; it will fail without connectivity.
    #[test]
    fn test_dataset() {
        let api = Api::new().unwrap();
        let files = from_hub(
            &api,
            "hf-internal-testing/dummy_image_text_data".to_string(),
        )
        .unwrap();
        assert_eq!(files.len(), 1);
        assert_eq!(files[0].metadata().file_metadata().num_rows(), 20);
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-datasets/src/vision/cifar.rs | candle-datasets/src/vision/cifar.rs | //! The CIFAR-10 dataset.
//!
//! The files can be downloaded from the following page:
//! <https://www.cs.toronto.edu/~kriz/cifar.html>
//! The binary version of the dataset is used.
use crate::vision::Dataset;
use candle::{DType, Device, Error, Result, Tensor};
use hf_hub::{api::sync::Api, Repo, RepoType};
use parquet::file::reader::{FileReader, SerializedFileReader};
use std::fs::File;
use std::io::{BufReader, Read};
const W: usize = 32;
const H: usize = 32;
const C: usize = 3;
const BYTES_PER_IMAGE: usize = W * H * C + 1;
const SAMPLES_PER_FILE: usize = 10000;
/// Reads one CIFAR-10 binary batch file.
///
/// Each fixed-size record is 1 label byte followed by a 32x32x3 image in
/// channel-major order. Returns `(images, labels)` where `images` has shape
/// `(SAMPLES_PER_FILE, C, H, W)` scaled to `[0, 1]` f32 and `labels` holds
/// the raw class bytes.
fn read_file(filename: &std::path::Path) -> Result<(Tensor, Tensor)> {
    let mut buf_reader = BufReader::new(File::open(filename)?);
    let mut data = vec![0u8; SAMPLES_PER_FILE * BYTES_PER_IMAGE];
    buf_reader.read_exact(&mut data)?;
    let mut images = Vec::with_capacity(SAMPLES_PER_FILE * (BYTES_PER_IMAGE - 1));
    let mut labels = Vec::with_capacity(SAMPLES_PER_FILE);
    // Single pass over the records; this avoids the previous intermediate
    // Vec of slices and its double-copy flatten.
    for record in data.chunks_exact(BYTES_PER_IMAGE) {
        labels.push(record[0]);
        images.extend_from_slice(&record[1..]);
    }
    let labels = Tensor::from_vec(labels, SAMPLES_PER_FILE, &Device::Cpu)?;
    let images = Tensor::from_vec(images, (SAMPLES_PER_FILE, C, H, W), &Device::Cpu)?;
    let images = (images.to_dtype(DType::F32)? / 255.)?;
    Ok((images, labels))
}
pub fn load_dir<T: AsRef<std::path::Path>>(dir: T) -> Result<Dataset> {
    let dir = dir.as_ref();
    // The test split is a single file; training data is spread over 5 files.
    let (test_images, test_labels) = read_file(&dir.join("test_batch.bin"))?;
    let mut train_images = Vec::with_capacity(5);
    let mut train_labels = Vec::with_capacity(5);
    for name in [
        "data_batch_1.bin",
        "data_batch_2.bin",
        "data_batch_3.bin",
        "data_batch_4.bin",
        "data_batch_5.bin",
    ] {
        let (images, labels) = read_file(&dir.join(name))?;
        train_images.push(images);
        train_labels.push(labels);
    }
    Ok(Dataset {
        train_images: Tensor::cat(&train_images, 0)?,
        train_labels: Tensor::cat(&train_labels, 0)?,
        test_images,
        test_labels,
        labels: 10,
    })
}
/// Decodes a CIFAR-10 parquet split into `(images, labels)` tensors.
///
/// Images are decoded from their encoded bytes, converted to RGB and permuted
/// to candle's `(samples, channels, height, width)` layout, scaled to
/// `[0, 1]` f32. NOTE(review): rows that fail to decode are silently skipped
/// by the `flatten()` below, and the per-image capacity hint of 1_024 bytes
/// underestimates the actual 32 * 32 * 3 = 3_072 bytes per decoded image.
fn load_parquet(parquet: SerializedFileReader<std::fs::File>) -> Result<(Tensor, Tensor)> {
    let samples = parquet.metadata().file_metadata().num_rows() as usize;
    let mut buffer_images: Vec<u8> = Vec::with_capacity(samples * 1_024);
    let mut buffer_labels: Vec<u8> = Vec::with_capacity(samples);
    for row in parquet.into_iter().flatten() {
        for (_name, field) in row.get_column_iter() {
            if let parquet::record::Field::Group(subrow) = field {
                for (_name, field) in subrow.get_column_iter() {
                    if let parquet::record::Field::Bytes(value) = field {
                        // image-rs crate convention is to load in (width, height, channels) order
                        // See: https://docs.rs/image/latest/image/trait.ImageDecoder.html#tymethod.dimensions
                        // NOTE(review): `unwrap` panics on a corrupt image payload.
                        let image = image::load_from_memory(value.data()).unwrap();
                        buffer_images.extend(image.to_rgb8().as_raw());
                    }
                }
            } else if let parquet::record::Field::Long(label) = field {
                buffer_labels.push(*label as u8);
            }
        }
    }
    // Reorder image-rs convention (width, height, channels) to candle/pytorch convolution convention (channels, height, width)
    let images = (Tensor::from_vec(buffer_images, (samples, 32, 32, 3), &Device::Cpu)?
        .to_dtype(DType::F32)?
        .permute((0, 3, 2, 1))?
        / 255.)?;
    let labels = Tensor::from_vec(buffer_labels, (samples,), &Device::Cpu)?;
    Ok((images, labels))
}
pub fn load() -> Result<Dataset> {
let api = Api::new().map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let dataset_id = "cifar10".to_string();
let repo = Repo::with_revision(
dataset_id,
RepoType::Dataset,
"refs/convert/parquet".to_string(),
);
let repo = api.repo(repo);
let test_parquet_filename = repo
.get("plain_text/test/0000.parquet")
.map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let train_parquet_filename = repo
.get("plain_text/train/0000.parquet")
.map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)
.map_err(|e| Error::Msg(format!("Parquet error: {e}")))?;
let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)
.map_err(|e| Error::Msg(format!("Parquet error: {e}")))?;
let (test_images, test_labels) = load_parquet(test_parquet)?;
let (train_images, train_labels) = load_parquet(train_parquet)?;
Ok(crate::vision::Dataset {
train_images,
train_labels,
test_images,
test_labels,
labels: 10,
})
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-datasets/src/vision/mnist.rs | candle-datasets/src/vision/mnist.rs | //! The MNIST hand-written digit dataset.
//!
//! The files can be obtained from the following link:
//! <http://yann.lecun.com/exdb/mnist/>
use candle::{DType, Device, Error, Result, Tensor};
use hf_hub::{api::sync::Api, Repo, RepoType};
use parquet::file::reader::{FileReader, SerializedFileReader};
use std::fs::File;
use std::io::{self, BufReader, Read};
/// Reads a single big-endian `u32` from `reader` — the integer encoding used
/// by the MNIST idx file headers.
///
/// Uses `u32::from_be_bytes` from the standard library instead of pulling in
/// `byteorder` for a conversion std already provides; behavior is unchanged.
fn read_u32<T: Read>(reader: &mut T) -> std::io::Result<u32> {
    let mut bytes = [0u8; 4];
    reader.read_exact(&mut bytes)?;
    Ok(u32::from_be_bytes(bytes))
}
/// Validates the idx-file magic number at the current reader position,
/// returning an I/O error (converted into candle's error type) on mismatch.
fn check_magic_number<T: Read>(reader: &mut T, expected: u32) -> Result<()> {
    let magic_number = read_u32(reader)?;
    if magic_number == expected {
        Ok(())
    } else {
        let err = io::Error::other(format!(
            "incorrect magic number {magic_number} != {expected}"
        ));
        Err(err.into())
    }
}
/// Reads an idx1 label file: magic number, sample count, then one byte per
/// label; returns a rank-1 u8 tensor on the CPU.
fn read_labels(filename: &std::path::Path) -> Result<Tensor> {
    let mut reader = BufReader::new(File::open(filename)?);
    check_magic_number(&mut reader, 2049)?;
    let n_samples = read_u32(&mut reader)? as usize;
    let mut labels = vec![0u8; n_samples];
    reader.read_exact(&mut labels)?;
    Tensor::from_vec(labels, n_samples, &Device::Cpu)
}
/// Reads an idx3 image file (magic number, sample/row/col counts, raw pixels)
/// into a `(samples, rows * cols)` f32 tensor scaled to `[0, 1]`.
fn read_images(filename: &std::path::Path) -> Result<Tensor> {
    let mut reader = BufReader::new(File::open(filename)?);
    check_magic_number(&mut reader, 2051)?;
    let n_samples = read_u32(&mut reader)? as usize;
    let n_rows = read_u32(&mut reader)? as usize;
    let n_cols = read_u32(&mut reader)? as usize;
    let mut pixels = vec![0u8; n_samples * n_rows * n_cols];
    reader.read_exact(&mut pixels)?;
    let tensor = Tensor::from_vec(pixels, (n_samples, n_rows * n_cols), &Device::Cpu)?;
    tensor.to_dtype(DType::F32)? / 255.
}
pub fn load_dir<T: AsRef<std::path::Path>>(dir: T) -> Result<crate::vision::Dataset> {
let dir = dir.as_ref();
let train_images = read_images(&dir.join("train-images-idx3-ubyte"))?;
let train_labels = read_labels(&dir.join("train-labels-idx1-ubyte"))?;
let test_images = read_images(&dir.join("t10k-images-idx3-ubyte"))?;
let test_labels = read_labels(&dir.join("t10k-labels-idx1-ubyte"))?;
Ok(crate::vision::Dataset {
train_images,
train_labels,
test_images,
test_labels,
labels: 10,
})
}
/// Decodes an MNIST parquet split into `(images, labels)` tensors.
///
/// Each row is expected to hold an image column (a group containing encoded
/// image bytes) and an integer label column. Images are converted to 8-bit
/// grayscale and flattened to 784 pixels, scaled to `[0, 1]` f32.
/// NOTE(review): rows that fail to decode are silently skipped by `flatten()`.
fn load_parquet(parquet: SerializedFileReader<std::fs::File>) -> Result<(Tensor, Tensor)> {
    let samples = parquet.metadata().file_metadata().num_rows() as usize;
    let mut buffer_images: Vec<u8> = Vec::with_capacity(samples * 784);
    let mut buffer_labels: Vec<u8> = Vec::with_capacity(samples);
    for row in parquet.into_iter().flatten() {
        for (_name, field) in row.get_column_iter() {
            if let parquet::record::Field::Group(subrow) = field {
                for (_name, field) in subrow.get_column_iter() {
                    if let parquet::record::Field::Bytes(value) = field {
                        // NOTE(review): `unwrap` panics on a corrupt image payload.
                        let image = image::load_from_memory(value.data()).unwrap();
                        buffer_images.extend(image.to_luma8().as_raw());
                    }
                }
            } else if let parquet::record::Field::Long(label) = field {
                buffer_labels.push(*label as u8);
            }
        }
    }
    let images = (Tensor::from_vec(buffer_images, (samples, 784), &Device::Cpu)?
        .to_dtype(DType::F32)?
        / 255.)?;
    let labels = Tensor::from_vec(buffer_labels, (samples,), &Device::Cpu)?;
    Ok((images, labels))
}
pub(crate) fn load_mnist_like(
dataset_id: &str,
revision: &str,
test_filename: &str,
train_filename: &str,
) -> Result<crate::vision::Dataset> {
let api = Api::new().map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let repo = Repo::with_revision(
dataset_id.to_string(),
RepoType::Dataset,
revision.to_string(),
);
let repo = api.repo(repo);
let test_parquet_filename = repo
.get(test_filename)
.map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let train_parquet_filename = repo
.get(train_filename)
.map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)
.map_err(|e| Error::Msg(format!("Parquet error: {e}")))?;
let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)
.map_err(|e| Error::Msg(format!("Parquet error: {e}")))?;
let (test_images, test_labels) = load_parquet(test_parquet)?;
let (train_images, train_labels) = load_parquet(train_parquet)?;
Ok(crate::vision::Dataset {
train_images,
train_labels,
test_images,
test_labels,
labels: 10,
})
}
/// Loads MNIST from the hub's pre-converted parquet files.
pub fn load() -> Result<crate::vision::Dataset> {
    let dataset_id = "ylecun/mnist";
    let revision = "refs/convert/parquet";
    load_mnist_like(
        dataset_id,
        revision,
        "mnist/test/0000.parquet",
        "mnist/train/0000.parquet",
    )
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-datasets/src/vision/mod.rs | candle-datasets/src/vision/mod.rs | use candle::Tensor;
/// An in-memory vision dataset split into train and test parts.
pub struct Dataset {
    /// Training inputs; the exact layout depends on the loader (e.g.
    /// flattened pixels for MNIST, CHW for CIFAR — confirm per loader).
    pub train_images: Tensor,
    /// Class index for each training sample.
    pub train_labels: Tensor,
    /// Test inputs, same layout as `train_images`.
    pub test_images: Tensor,
    /// Class index for each test sample.
    pub test_labels: Tensor,
    /// Total number of distinct classes.
    pub labels: usize,
}
pub mod cifar;
pub mod fashion_mnist;
pub mod mnist;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-datasets/src/vision/fashion_mnist.rs | candle-datasets/src/vision/fashion_mnist.rs | //! Zalando Fashion MNIST dataset.
//! A slightly more difficult dataset that is drop-in compatible with MNIST.
//!
//! Taken from here: <https://huggingface.co/datasets/zalando-datasets/fashion_mnist>
use candle::Result;
/// Loads Fashion-MNIST from the hub's pre-converted parquet files.
///
/// Reuses the MNIST loader, so the returned dataset has the exact same
/// structure (flattened 784-pixel images, 10 classes) as the regular MNIST
/// one.
pub fn load() -> Result<crate::vision::Dataset> {
    crate::vision::mnist::load_mnist_like(
        "zalando-datasets/fashion_mnist",
        "refs/convert/parquet",
        "fashion_mnist/test/0000.parquet",
        "fashion_mnist/train/0000.parquet",
    )
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-datasets/src/nlp/tinystories.rs | candle-datasets/src/nlp/tinystories.rs | //! Helper functions for the tinystories dataset. This uses the pre-tokenized version as generated
//! by the tools from <https://github.com/karpathy/llama2.c>
use candle::{Device, Result, Tensor};
/// Pre-tokenized tinystories data: one memory-mapped token file per shard of
/// each split (see the llama2.c tooling referenced above).
pub struct Dataset {
    valid_tokens: Vec<memmap2::Mmap>,
    train_tokens: Vec<memmap2::Mmap>,
}
/// Memory-maps a token file for zero-copy random access.
///
/// NOTE(review): the mapping is created in an `unsafe` block; it is unsound
/// if the underlying file is truncated or modified concurrently by another
/// process — callers are expected to point this at immutable dataset files.
/// Also, `&std::path::Path` would be a more general parameter type than
/// `&PathBuf`, but `Dataset::new` passes the fn item directly to `map`.
fn mmap_file(p: &std::path::PathBuf) -> Result<memmap2::Mmap> {
    let file = std::fs::File::open(p)?;
    let mmap = unsafe { memmap2::MmapOptions::new().map(&file)? };
    Ok(mmap)
}
impl Dataset {
    /// Builds a dataset from a directory of `.bin` token files.
    ///
    /// Files are sorted by name; the first one becomes the validation split
    /// and all remaining files form the training split, hence at least two
    /// `.bin` files are required.
    pub fn new<P: AsRef<std::path::Path>>(dir: P) -> Result<Self> {
        let dir = dir.as_ref();
        let mut bin_files = vec![];
        for file in std::fs::read_dir(dir)?.flatten() {
            let file = file.path();
            if let Some(extension) = file.extension() {
                if extension == "bin" {
                    bin_files.push(file)
                }
            }
        }
        if bin_files.len() < 2 {
            candle::bail!("found less than two bin files in {:?}", dir)
        }
        bin_files.sort();
        let valid_tokens = mmap_file(&bin_files[0])?;
        let train_tokens = bin_files[1..]
            .iter()
            .map(mmap_file)
            .collect::<Result<Vec<_>>>()?;
        Ok(Self {
            valid_tokens: vec![valid_tokens],
            train_tokens,
        })
    }
    /// Number of memory-mapped training files — not a token count, despite
    /// the name.
    pub fn train_tokens(&self) -> usize {
        self.train_tokens.len()
    }
    /// Number of memory-mapped validation files — not a token count, despite
    /// the name.
    pub fn valid_tokens(&self) -> usize {
        self.valid_tokens.len()
    }
}
/// Iterator yielding random `(input, target)` sequence pairs from one split
/// of a tinystories [`Dataset`], reshuffling and cycling through the files
/// once exhausted.
pub struct DatasetRandomIter<'a> {
    // All files of the chosen split, used to refill `tokens` on wrap-around.
    all_tokens: &'a [memmap2::Mmap],
    // Files not yet visited in the current pass, in shuffled order.
    tokens: Vec<&'a memmap2::Mmap>,
    // The file currently being sampled from.
    current_tokens: &'a memmap2::Mmap,
    // Shuffled byte offsets of the remaining sequence starts in `current_tokens`.
    indexes_in_bytes: Vec<usize>,
    // Number of tokens per produced sequence.
    seq_len: usize,
    // Device the produced tensors are created on.
    device: Device,
}
impl<'a> DatasetRandomIter<'a> {
    /// Creates a randomized iterator over one split of `ds`.
    ///
    /// `valid` selects the validation split, otherwise the training split is
    /// used; sequences of `seq_len` tokens are materialized on `device`.
    /// NOTE(review): panics if the selected split is empty (`pop().unwrap()`)
    /// and the `len() - seq_len_in_bytes` subtraction below underflows when a
    /// file is shorter than `2 * seq_len` bytes — confirm inputs guarantee
    /// this cannot happen.
    pub fn new(ds: &'a Dataset, valid: bool, seq_len: usize, device: Device) -> Self {
        use rand::rng;
        use rand::seq::SliceRandom;
        let all_tokens = if valid {
            &ds.valid_tokens
        } else {
            &ds.train_tokens
        };
        // Visit the split's files in random order; `pop` picks the first one.
        let mut tokens = all_tokens.iter().collect::<Vec<_>>();
        tokens.shuffle(&mut rng());
        let current_tokens = tokens.pop().unwrap();
        // Tokens are stored as u16, hence two bytes per token.
        let seq_len_in_bytes = seq_len * 2;
        let mut indexes_in_bytes = (0..current_tokens.len() - seq_len_in_bytes)
            .step_by(seq_len_in_bytes)
            .collect::<Vec<_>>();
        indexes_in_bytes.shuffle(&mut rng());
        Self {
            all_tokens,
            tokens,
            current_tokens,
            indexes_in_bytes,
            seq_len,
            device,
        }
    }
}
impl Iterator for DatasetRandomIter<'_> {
    type Item = Result<(Tensor, Tensor)>;
    /// Produces the next `(inputs, targets)` pair of `seq_len` tokens, where
    /// `targets` is `inputs` shifted by one token (next-token prediction).
    ///
    /// When the current file's shuffled offsets run out, moves on to the next
    /// file; once all files of the split were visited, reshuffles them and
    /// starts over — so this iterator never returns `None` from exhaustion.
    fn next(&mut self) -> Option<Self::Item> {
        use byteorder::{LittleEndian, ReadBytesExt};
        use rand::rng;
        use rand::seq::SliceRandom;
        let seq_len = self.seq_len;
        if self.indexes_in_bytes.is_empty() {
            if self.tokens.is_empty() {
                // Whole split consumed: refill and reshuffle the file list.
                self.tokens = self.all_tokens.iter().collect();
                self.tokens.shuffle(&mut rng());
            }
            self.current_tokens = self.tokens.pop().unwrap();
            // Tokens are stored as u16, hence two bytes per token.
            let seq_len_in_bytes = self.seq_len * 2;
            self.indexes_in_bytes = (0..self.current_tokens.len() - seq_len_in_bytes)
                .step_by(seq_len_in_bytes)
                .collect::<Vec<_>>();
            self.indexes_in_bytes.shuffle(&mut rng());
        }
        let start_idx = self.indexes_in_bytes.pop().unwrap();
        // Read seq_len + 1 tokens so inputs and targets overlap by seq_len.
        let bytes = &self.current_tokens[start_idx..start_idx + 2 * (seq_len + 1)];
        let mut tokens = vec![0u16; bytes.len() / 2];
        if let Err(err) = std::io::Cursor::new(bytes).read_u16_into::<LittleEndian>(&mut tokens) {
            return Some(Err(err.into()));
        }
        let tokens = tokens.into_iter().map(|v| v as u32).collect::<Vec<_>>();
        let inputs = Tensor::new(&tokens[..seq_len], &self.device);
        let targets = Tensor::new(&tokens[1..], &self.device);
        Some(candle::error::zip(inputs, targets))
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-datasets/src/nlp/mod.rs | candle-datasets/src/nlp/mod.rs | pub mod tinystories;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-flash-attn/build.rs | candle-flash-attn/build.rs | // Build script to run nvcc and generate the C glue code for launching the flash-attention kernel.
// The cuda build time is very long so one can set the CANDLE_FLASH_ATTN_BUILD_DIR environment
// variable in order to cache the compiled artifacts and avoid recompiling too often.
use anyhow::{Context, Result};
use candle_flash_attn_build::{cutlass_include_arg, fetch_cutlass};
use std::path::PathBuf;
const CUTLASS_COMMIT: &str = "7d49e6c7e2f8896c47f586706e67e1fb215529dc";
const KERNEL_FILES: [&str; 33] = [
"kernels/flash_api.cu",
"kernels/flash_fwd_hdim128_fp16_sm80.cu",
"kernels/flash_fwd_hdim160_fp16_sm80.cu",
"kernels/flash_fwd_hdim192_fp16_sm80.cu",
"kernels/flash_fwd_hdim224_fp16_sm80.cu",
"kernels/flash_fwd_hdim256_fp16_sm80.cu",
"kernels/flash_fwd_hdim32_fp16_sm80.cu",
"kernels/flash_fwd_hdim64_fp16_sm80.cu",
"kernels/flash_fwd_hdim96_fp16_sm80.cu",
"kernels/flash_fwd_hdim128_bf16_sm80.cu",
"kernels/flash_fwd_hdim160_bf16_sm80.cu",
"kernels/flash_fwd_hdim192_bf16_sm80.cu",
"kernels/flash_fwd_hdim224_bf16_sm80.cu",
"kernels/flash_fwd_hdim256_bf16_sm80.cu",
"kernels/flash_fwd_hdim32_bf16_sm80.cu",
"kernels/flash_fwd_hdim64_bf16_sm80.cu",
"kernels/flash_fwd_hdim96_bf16_sm80.cu",
"kernels/flash_fwd_hdim128_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim160_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim192_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim224_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim256_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim32_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim64_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim96_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim128_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim160_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim192_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim224_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim256_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim32_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim64_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim96_bf16_causal_sm80.cu",
];
fn main() -> Result<()> {
println!("cargo::rerun-if-changed=build.rs");
for kernel_file in KERNEL_FILES.iter() {
println!("cargo::rerun-if-changed={kernel_file}");
}
println!("cargo::rerun-if-changed=kernels/flash_fwd_kernel.h");
println!("cargo::rerun-if-changed=kernels/flash_fwd_launch_template.h");
println!("cargo::rerun-if-changed=kernels/flash.h");
println!("cargo::rerun-if-changed=kernels/philox.cuh");
println!("cargo::rerun-if-changed=kernels/softmax.h");
println!("cargo::rerun-if-changed=kernels/utils.h");
println!("cargo::rerun-if-changed=kernels/kernel_traits.h");
println!("cargo::rerun-if-changed=kernels/block_info.h");
println!("cargo::rerun-if-changed=kernels/static_switch.h");
println!("cargo::rerun-if-changed=kernels/hardware_info.h");
let out_dir = PathBuf::from(std::env::var("OUT_DIR").context("OUT_DIR not set")?);
let build_dir = match std::env::var("CANDLE_FLASH_ATTN_BUILD_DIR") {
Err(_) =>
{
#[allow(clippy::redundant_clone)]
out_dir.clone()
}
Ok(build_dir) => {
let path = PathBuf::from(build_dir);
path.canonicalize().expect(&format!(
"Directory doesn't exists: {} (the current directory is {})",
&path.display(),
std::env::current_dir()?.display()
))
}
};
// Fetch cutlass headers on-demand
let cutlass_dir = fetch_cutlass(&out_dir, CUTLASS_COMMIT)?;
let cutlass_include: &'static str =
Box::leak(cutlass_include_arg(&cutlass_dir).into_boxed_str());
let kernels = KERNEL_FILES.iter().collect();
let mut builder = bindgen_cuda::Builder::default()
.kernel_paths(kernels)
.out_dir(build_dir.clone())
.arg("-std=c++17")
.arg("-O3")
.arg("-U__CUDA_NO_HALF_OPERATORS__")
.arg("-U__CUDA_NO_HALF_CONVERSIONS__")
.arg("-U__CUDA_NO_HALF2_OPERATORS__")
.arg("-U__CUDA_NO_BFLOAT16_CONVERSIONS__")
.arg(&cutlass_include)
.arg("--expt-relaxed-constexpr")
.arg("--expt-extended-lambda")
.arg("--use_fast_math")
.arg("--verbose");
let mut is_target_msvc = false;
if let Ok(target) = std::env::var("TARGET") {
if target.contains("msvc") {
is_target_msvc = true;
builder = builder.arg("-D_USE_MATH_DEFINES");
}
}
if !is_target_msvc {
builder = builder.arg("-Xcompiler").arg("-fPIC");
}
let out_file = build_dir.join("libflashattention.a");
builder.build_lib(out_file);
println!("cargo::rustc-link-search={}", build_dir.display());
println!("cargo::rustc-link-lib=flashattention");
println!("cargo::rustc-link-lib=dylib=cudart");
if !is_target_msvc {
println!("cargo::rustc-link-lib=dylib=stdc++");
}
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-flash-attn/src/lib.rs | candle-flash-attn/src/lib.rs | mod ffi;
use candle::backend::BackendStorage;
use candle::cuda_backend::cudarc::driver::DevicePtr;
use candle::{CpuStorage, DType, Layout, Result, Shape, Tensor};
use half::{bf16, f16};
/// Parameters for the flash-attention v2 forward custom op.
pub struct FlashAttn {
    /// Scale applied to `Q @ K^T` before the softmax.
    pub softmax_scale: f32,
    /// Optional per-head alibi slopes; must be f32 with shape `(num_heads_q)`.
    pub alibi_slopes: Option<Tensor>,
    /// Optional left attention window; `None` means unbounded.
    pub window_size_left: Option<usize>,
    /// Optional right attention window; `None` means unbounded. `Some(0)`
    /// combined with an unbounded left window yields a causal mask.
    pub window_size_right: Option<usize>,
    /// Optional soft-capping value; `None` is passed to the kernel as `0.0`.
    pub softcap: Option<f32>,
}
/// Rounds `x` up to the next multiple of `m` (returns `x` unchanged when it
/// is already a multiple). `m` must be non-zero.
fn round_multiple(x: usize, m: usize) -> usize {
    let groups = (x + m - 1) / m;
    groups * m
}
impl FlashAttn {
    /// Validates the q/k/v layouts and launches the flash-attention v2
    /// forward cuda kernel through the C++ `run_mha` entry point.
    ///
    /// `T` is the element type (f16 or bf16, matching `is_bf16`). The output
    /// has the same shape as `q`.
    fn cuda_fwd_t<
        T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr,
    >(
        &self,
        q: &candle::CudaStorage,
        q_l: &Layout,
        k: &candle::CudaStorage,
        k_l: &Layout,
        v: &candle::CudaStorage,
        v_l: &Layout,
        is_bf16: bool,
    ) -> Result<(candle::CudaStorage, Shape)> {
        // https://github.com/Dao-AILab/flash-attention/blob/b252072409e69c25f2b9d473cc534e49b24decd2/csrc/flash_attn/flash_api.cpp#L187
        let dev = q.device();
        let out_shape = q_l.shape().clone();
        let out_l = Layout::contiguous(&out_shape);
        // Skip any storage offset so the raw device pointers start at the data.
        let q = q.as_cuda_slice::<T>()?;
        let k = k.as_cuda_slice::<T>()?;
        let v = v.as_cuda_slice::<T>()?;
        let q = q.slice(q_l.start_offset()..);
        let k = k.slice(k_l.start_offset()..);
        let v = v.slice(v_l.start_offset()..);
        let q_stride = q_l.stride();
        let k_stride = k_l.stride();
        let v_stride = v_l.stride();
        let o_stride = out_l.stride();
        let q_rank = q_stride.len();
        let k_rank = k_stride.len();
        let v_rank = v_stride.len();
        let o_rank = o_stride.len();
        // The kernel requires rank-4 inputs whose innermost dim is
        // contiguous; batch/seq/head strides may be arbitrary.
        if q_rank != 4 || k_rank != 4 || v_rank != 4 {
            candle::bail!(
                "flash-attn expects input tensors of rank 4 (q: {q_rank}, k: {k_rank}, v: {v_rank}"
            )
        }
        if q_stride[q_rank - 1] != 1 {
            candle::bail!("the last dim of q must be contiguous {q_stride:?}")
        }
        if k_stride[k_rank - 1] != 1 {
            candle::bail!("the last dim of k must be contiguous {k_stride:?}")
        }
        if v_stride[v_rank - 1] != 1 {
            candle::bail!("the last dim of v must be contiguous {v_stride:?}")
        }
        let (b_sz, seqlen_q, num_heads, head_size_og) = q_l.shape().dims4()?;
        let (_b_sz, seqlen_k, num_heads_k, _head_size_og) = k_l.shape().dims4()?;
        // k and v must match q's batch and head size; their head count may be
        // smaller (multi-query / grouped-query attention) as long as it
        // divides the number of query heads.
        let expected_kv = (b_sz, seqlen_k, num_heads_k, head_size_og);
        if expected_kv != k_l.shape().dims4()? {
            candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape())
        }
        if expected_kv != v_l.shape().dims4()? {
            candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape())
        }
        if head_size_og > 256 {
            candle::bail!("only supports head dimension at most 256 (got {head_size_og})")
        }
        if head_size_og % 8 != 0 {
            // TODO: Handle head sizes that are not a multiple of 8 via some padding.
            candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})")
        }
        if num_heads % num_heads_k != 0 {
            candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}")
        }
        let stream = dev.cuda_stream();
        // Validate the optional alibi slopes (f32, one slope per query head)
        // and obtain a raw device pointer, or pass NULL when unused.
        let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes {
            if alibi_slopes.dtype() != DType::F32 {
                candle::bail!(
                    "DType mismatch alibi_slopes {:?}, expected {:?}",
                    alibi_slopes.dtype(),
                    DType::F32
                );
            }
            let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout();
            if num_heads != alibi_slopes_layout.shape().dims1()? {
                candle::bail!(
                    "shape mismatch alibi_slopes {:?}, expected {:?}",
                    alibi_slopes_layout.shape(),
                    (num_heads)
                );
            }
            let alibi_slopes = match &*alibi_slopes {
                candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?,
                _ => candle::bail!("alibi_slopes must be a cuda tensor"),
            };
            let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..);
            // Dropping the guard here doesn't seem very safe.
            let (ptr, _guard) = alibi_slopes.device_ptr(&stream);
            ptr as *const core::ffi::c_void
        } else {
            std::ptr::null()
        };
        // if window_size_left > self.max_seqlen_k or None => -1
        let mut window_size_left = self
            .window_size_left
            .filter(|v| v <= &seqlen_k)
            .map(|v| v as i32)
            .unwrap_or(-1);
        // if window_size_right > self.max_seqlen_k or None => -1
        let mut window_size_right = self
            .window_size_right
            .filter(|v| v <= &seqlen_k)
            .map(|v| v as i32)
            .unwrap_or(-1);
        // Round sizes up as required by the kernel interface.
        let head_size = round_multiple(head_size_og, 8);
        let head_size_rounded = round_multiple(head_size, 32);
        let seqlen_q_rounded = round_multiple(seqlen_q, 128);
        let seqlen_k_rounded = round_multiple(seqlen_k, 128);
        let elem_count = out_shape.elem_count();
        let dst = unsafe { dev.alloc::<T>(elem_count)? };
        // NOTE(review): the extra factor of 128 looks larger than the
        // `b_sz * num_heads * seqlen_q` one would expect for the lse buffer —
        // confirm this over-allocation is intended.
        let softmax_lse = dev.alloc_zeros::<f32>(b_sz * 128 * num_heads * seqlen_q)?;
        let is_bf16 = if is_bf16 { 1 } else { 0 };
        // Causal is the special case where window_size_right == 0 and window_size_left < 0.
        // Local is the more general case where window_size_right >= 0 or window_size_left >= 0.
        let is_causal = if window_size_left < 0 && window_size_right == 0 {
            1
        } else {
            0
        };
        // Resolve half-open windows: an unbounded side becomes seqlen_k when
        // the other side is bounded.
        if window_size_left < 0 && window_size_right >= 0 {
            window_size_left = seqlen_k as i32;
        }
        if window_size_left >= 0 && window_size_right < 0 {
            window_size_right = seqlen_k as i32;
        }
        unsafe {
            let (q_ptr, _guard) = q.device_ptr(&stream);
            let (k_ptr, _guard) = k.device_ptr(&stream);
            let (v_ptr, _guard) = v.device_ptr(&stream);
            let (dst_ptr, _guard) = dst.device_ptr(&stream);
            let (softmax_lse_ptr, _guard) = softmax_lse.device_ptr(&stream);
            ffi::run_mha(
                q_ptr as *const core::ffi::c_void,
                k_ptr as *const core::ffi::c_void,
                v_ptr as *const core::ffi::c_void,
                dst_ptr as *const core::ffi::c_void,
                softmax_lse_ptr as *const core::ffi::c_void,
                /* alibi_slopes_ptr */ alibi_slopes_ptr,
                /* cu_seqlens_q_ptr */ std::ptr::null(),
                /* cu_seqlens_k_ptr */ std::ptr::null(),
                /* q_batch_stride */ q_stride[0] as u32,
                /* k_batch_stride */ k_stride[0] as u32,
                /* v_batch_stride */ v_stride[0] as u32,
                /* o_batch_stride */ o_stride[0] as u32,
                /* alibi_slopes_batch_stride */ 0,
                /* q_row_stride */ q_stride[q_rank - 3] as u32,
                /* k_row_stride */ k_stride[k_rank - 3] as u32,
                /* v_row_stride */ v_stride[v_rank - 3] as u32,
                /* o_row_stride */ o_stride[o_rank - 3] as u32,
                /* q_head_stride */ q_stride[q_rank - 2] as u32,
                /* k_head_stride */ k_stride[k_rank - 2] as u32,
                /* v_head_stride */ v_stride[v_rank - 2] as u32,
                /* o_head_stride */ o_stride[o_rank - 2] as u32,
                /* b */ b_sz as u32,
                /* h */ num_heads as u32,
                /* h_k */ num_heads_k as u32,
                /* d */ head_size as u32,
                /* d_rounded */ head_size_rounded as u32,
                /* softmax_scale*/ self.softmax_scale,
                /* seqlen_q */ seqlen_q as u32,
                /* seqlen_k */ seqlen_k as u32,
                /* seqlen_q_rounded */ seqlen_q_rounded as u32,
                /* seqlen_k_rounded */ seqlen_k_rounded as u32,
                /* is_bf16 */ is_bf16,
                /* is_causal */ is_causal,
                /* upadded_lse */ 0,
                /* window_size_left */ window_size_left,
                /* window_size_right */ window_size_right,
                /* softcap */ self.softcap.unwrap_or(0f32),
            )
        }
        let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone());
        Ok((dst, out_shape))
    }
}
impl candle::CustomOp3 for FlashAttn {
    fn name(&self) -> &'static str {
        "flash-attn"
    }
    /// Flash-attention has no CPU implementation; this always errors.
    fn cpu_fwd(
        &self,
        _: &CpuStorage,
        _: &Layout,
        _: &CpuStorage,
        _: &Layout,
        _: &CpuStorage,
        _: &Layout,
    ) -> Result<(CpuStorage, Shape)> {
        candle::bail!("no cpu support for flash-attn")
    }
    /// Dispatches to the typed kernel launcher based on the query dtype; only
    /// f16 and bf16 are supported.
    fn cuda_fwd(
        &self,
        q: &candle::CudaStorage,
        q_l: &Layout,
        k: &candle::CudaStorage,
        k_l: &Layout,
        v: &candle::CudaStorage,
        v_l: &Layout,
    ) -> Result<(candle::CudaStorage, Shape)> {
        match q.dtype() {
            candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false),
            candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true),
            dt => candle::bail!("flash-attn is only supported for f16/bf16 ({dt:?})"),
        }
    }
}
/// Flash-attention v2 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    softmax_scale: f32,
    causal: bool,
) -> Result<Tensor> {
    // Causal masking is encoded as an unbounded left window plus a right
    // window of zero tokens.
    q.apply_op3(
        k,
        v,
        FlashAttn {
            softmax_scale,
            alibi_slopes: None,
            window_size_left: None,
            window_size_right: causal.then_some(0),
            softcap: None,
        },
    )
}
/// Flash-attention v2 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `window_size_left` - Limit left attention to value tokens.
/// * `window_size_right` - Limit right attention to value tokens.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_windowed(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    softmax_scale: f32,
    window_size_left: Option<usize>,
    window_size_right: Option<usize>,
) -> Result<Tensor> {
    // The window bounds are forwarded verbatim to the custom op; `None` means
    // unbounded attention on that side.
    q.apply_op3(
        k,
        v,
        FlashAttn {
            softmax_scale,
            alibi_slopes: None,
            window_size_left,
            window_size_right,
            softcap: None,
        },
    )
}
/// Flash-attention v2 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_alibi(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    alibi_slopes: &Tensor,
    softmax_scale: f32,
    causal: bool,
) -> Result<Tensor> {
    // Causal masking == unbounded left window + right window of 0.
    let window_size_right = causal.then_some(0);
    q.apply_op3(
        k,
        v,
        FlashAttn {
            softmax_scale,
            alibi_slopes: Some(alibi_slopes.clone()),
            window_size_left: None,
            window_size_right,
            softcap: None,
        },
    )
}
/// Flash-attention v2 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
/// * `window_size_left` - Limit left attention to value tokens.
/// * `window_size_right` - Limit right attention to value tokens.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_alibi_windowed(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    alibi_slopes: &Tensor,
    softmax_scale: f32,
    window_size_left: Option<usize>,
    window_size_right: Option<usize>,
) -> Result<Tensor> {
    // Both the alibi slopes and the attention window are forwarded to the op as-is.
    q.apply_op3(
        k,
        v,
        FlashAttn {
            softmax_scale,
            alibi_slopes: Some(alibi_slopes.clone()),
            window_size_left,
            window_size_right,
            softcap: None,
        },
    )
}
/// Flash-attention v2 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors `k` and `v` with fewer heads
/// than `q`. The number of heads in `k` and `v` must be divisible by the number of heads in `q`.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Optional alibi slopes tensor with shape `(num_heads_q)`.
/// * `softmax_scale` - Scaling factor for the softmax operation.
/// * `window_size_left` - Optional limit on left attention to value tokens.
/// * `window_size_right` - Optional limit on right attention to value tokens.
/// * `softcap` - Gemma style softcap the attention logits before the softmax.
///
/// # Causal Mask
///
/// Setting `window_size_left=None` and `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`.
///
/// # Returns
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_alibi_windowed_softcap(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    alibi_slopes: Option<&Tensor>,
    softmax_scale: f32,
    window_size_left: Option<usize>,
    window_size_right: Option<usize>,
    softcap: f32,
) -> Result<Tensor> {
    // This is the most general entry point: every knob of the op is exposed.
    q.apply_op3(
        k,
        v,
        FlashAttn {
            softmax_scale,
            alibi_slopes: alibi_slopes.cloned(),
            window_size_left,
            window_size_right,
            softcap: Some(softcap),
        },
    )
}
/// Custom op implementing the variable-length (ragged batch) flavor of flash-attention v2.
struct FlashAttnVarLen {
    // Scaling factor applied to `Q @ K^T` before the softmax.
    pub softmax_scale: f32,
    // Maximum per-sequence query length in the batch.
    pub max_seqlen_q: usize,
    // Maximum per-sequence key/value length in the batch.
    pub max_seqlen_k: usize,
    // Cumulative sequence lengths used to index into q (`batch_size + 1` entries,
    // must be a contiguous cuda tensor).
    pub seqlens_q: Tensor,
    // Cumulative sequence lengths used to index into k and v (same layout constraints).
    pub seqlens_k: Tensor,
    // Optional per-head alibi slopes, f32, shape `(num_heads_q)`.
    pub alibi_slopes: Option<Tensor>,
    // Optional attention window bounds; `None` means unbounded on that side.
    // `window_size_left=None` with `window_size_right=Some(0)` encodes a causal mask.
    pub window_size_left: Option<usize>,
    pub window_size_right: Option<usize>,
    // Optional Gemma-style softcap applied to the attention logits.
    pub softcap: Option<f32>,
}
impl FlashAttnVarLen {
    /// Launch the var-len flash-attention forward kernel for element type `T` (f16 or bf16).
    ///
    /// `q` is rank-3 `(total_q, num_heads, head_size)`, `k`/`v` are
    /// `(total_k, num_heads_k, head_size)`; the output has the same shape as `q`.
    /// Validates layouts/shapes, resolves raw device pointers and forwards everything
    /// to the C `run_mha` entry point.
    fn cuda_fwd_t<
        T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr,
    >(
        &self,
        q: &candle::CudaStorage,
        q_l: &Layout,
        k: &candle::CudaStorage,
        k_l: &Layout,
        v: &candle::CudaStorage,
        v_l: &Layout,
        is_bf16: bool,
    ) -> Result<(candle::CudaStorage, Shape)> {
        // https://github.com/Dao-AILab/flash-attention/blob/184b992dcb2a0890adaa19eb9b541c3e4f9d2a08/csrc/flash_attn/flash_api.cpp#L327
        let dev = q.device();
        let out_shape = q_l.shape().clone();
        let out_l = Layout::contiguous(&out_shape);
        // The cumulative sequence-length tensors must be contiguous cuda tensors so
        // their device pointers can be handed to the kernel directly.
        let (seqlens_q, seqlens_q_layout) = self.seqlens_q.storage_and_layout();
        let seqlens_q = match &*seqlens_q {
            candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32!
            _ => candle::bail!("seqlens_q must be a cuda tensor"),
        };
        let seqlens_q = match seqlens_q_layout.contiguous_offsets() {
            Some((o1, o2)) => seqlens_q.slice(o1..o2),
            None => candle::bail!("seqlens_q has to be contiguous"),
        };
        let (seqlens_k, seqlens_k_layout) = self.seqlens_k.storage_and_layout();
        let seqlens_k = match &*seqlens_k {
            candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32!
            _ => candle::bail!("seqlens_k must be a cuda tensor"),
        };
        let seqlens_k = match seqlens_k_layout.contiguous_offsets() {
            Some((o1, o2)) => seqlens_k.slice(o1..o2),
            None => candle::bail!("seqlens_k has to be contiguous"),
        };
        // Skip past each tensor's storage offset so the slices start at element 0.
        let q = q.as_cuda_slice::<T>()?;
        let k = k.as_cuda_slice::<T>()?;
        let v = v.as_cuda_slice::<T>()?;
        let q = q.slice(q_l.start_offset()..);
        let k = k.slice(k_l.start_offset()..);
        let v = v.slice(v_l.start_offset()..);
        let q_stride = q_l.stride();
        let k_stride = k_l.stride();
        let v_stride = v_l.stride();
        let o_stride = out_l.stride();
        let q_rank = q_stride.len();
        let k_rank = k_stride.len();
        let v_rank = v_stride.len();
        let o_rank = o_stride.len();
        // Inputs must be rank 3 with a contiguous innermost (head_size) dimension.
        if q_rank != 3 || k_rank != 3 || v_rank != 3 {
            candle::bail!(
                "flash-attn-varlen expects input tensors of rank 3 (q: {q_rank}, k: {k_rank}, v: {v_rank}"
            )
        }
        if q_stride[q_rank - 1] != 1 {
            candle::bail!("the last dim of q must be contiguous {q_stride:?}")
        }
        if k_stride[k_rank - 1] != 1 {
            candle::bail!("the last dim of k must be contiguous {k_stride:?}")
        }
        if v_stride[v_rank - 1] != 1 {
            candle::bail!("the last dim of v must be contiguous {v_stride:?}")
        }
        let (total_q, num_heads, head_size_og) = q_l.shape().dims3()?;
        let (total_k, num_heads_k, _head_size_og) = k_l.shape().dims3()?;
        // k and v must share the same (total_k, num_heads_k, head_size) shape, with
        // head_size matching q's.
        let expected_kv = (total_k, num_heads_k, head_size_og);
        if expected_kv != k_l.shape().dims3()? {
            candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape())
        }
        if expected_kv != v_l.shape().dims3()? {
            candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape())
        }
        if head_size_og > 256 {
            candle::bail!("only supports head dimension at most 256 (got {head_size_og})")
        }
        if head_size_og % 8 != 0 {
            // TODO: Handle head sizes that are not a multiple of 8 via some padding.
            candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})")
        }
        // Grouped/multi-query attention: q heads must be a multiple of k/v heads.
        if num_heads % num_heads_k != 0 {
            candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}")
        }
        // The seqlens tensors hold batch_size + 1 cumulative offsets.
        let nseqlens_q = seqlens_q_layout.shape().dims1()?;
        if nseqlens_q < 2 {
            candle::bail!("seqlens_q should have a len >= 2 {nseqlens_q}")
        }
        let nseqlens_k = seqlens_k_layout.shape().dims1()?;
        if nseqlens_k != nseqlens_q {
            candle::bail!("seqlens_q and seqlens_k should have the same number of elements {nseqlens_q} <> {nseqlens_k}")
        }
        let batch_size = nseqlens_q - 1;
        let stream = dev.cuda_stream();
        // Optional alibi slopes: validated as an f32 vector of length num_heads, then
        // turned into a raw device pointer (null when absent).
        let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes {
            if alibi_slopes.dtype() != DType::F32 {
                candle::bail!(
                    "DType mismatch alibi_slopes {:?}, expected {:?}",
                    alibi_slopes.dtype(),
                    DType::F32
                );
            }
            let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout();
            if num_heads != alibi_slopes_layout.shape().dims1()? {
                candle::bail!(
                    "shape mismatch alibi_slopes {:?}, expected {:?}",
                    alibi_slopes_layout.shape(),
                    (num_heads)
                );
            }
            let alibi_slopes = match &*alibi_slopes {
                candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?,
                _ => candle::bail!("alibi_slopes must be a cuda tensor"),
            };
            let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..);
            // Dropping the guard here doesn't seem very safe.
            // NOTE(review): the raw pointer outlives `_guard` and is used in the ffi
            // call below — confirm this cannot invalidate the mapping.
            let (ptr, _guard) = alibi_slopes.device_ptr(&stream);
            ptr as *const core::ffi::c_void
        } else {
            std::ptr::null()
        };
        // if window_size_left > self.max_seqlen_k or None => -1
        let mut window_size_left = self
            .window_size_left
            .filter(|v| v <= &self.max_seqlen_k)
            .map(|v| v as i32)
            .unwrap_or(-1);
        // if window_size_right > self.max_seqlen_k or None => -1
        let mut window_size_right = self
            .window_size_right
            .filter(|v| v <= &self.max_seqlen_k)
            .map(|v| v as i32)
            .unwrap_or(-1);
        // Round sizes up to the kernel's tile granularities.
        let head_size = round_multiple(head_size_og, 8);
        let head_size_rounded = round_multiple(head_size, 32);
        let seqlen_q_rounded = round_multiple(self.max_seqlen_q, 128);
        let seqlen_k_rounded = round_multiple(self.max_seqlen_k, 128);
        let elem_count = out_shape.elem_count();
        // SAFETY-adjacent: `dst` is uninitialized device memory; the kernel writes
        // every output element.
        let dst = unsafe { dev.alloc::<T>(elem_count)? };
        let softmax_lse = dev.alloc_zeros::<f32>(num_heads * total_q)?;
        let is_bf16 = if is_bf16 { 1 } else { 0 };
        // Causal is the special case where window_size_right == 0 and window_size_left < 0.
        // Local is the more general case where window_size_right >= 0 or window_size_left >= 0.
        let is_causal = if window_size_left < 0 && window_size_right == 0 {
            1
        } else {
            0
        };
        // Normalize half-open windows: a negative bound on one side with a
        // non-negative bound on the other is widened to the full key length.
        if window_size_left < 0 && window_size_right >= 0 {
            window_size_left = self.max_seqlen_k as i32;
        }
        if window_size_left >= 0 && window_size_right < 0 {
            window_size_right = self.max_seqlen_k as i32;
        }
        unsafe {
            // Batch strides are 0: in the var-len layout batching is expressed through
            // the cumulative seqlens rather than a batch dimension.
            let (q_ptr, _guard) = q.device_ptr(&stream);
            let (k_ptr, _guard) = k.device_ptr(&stream);
            let (v_ptr, _guard) = v.device_ptr(&stream);
            let (dst_ptr, _guard) = dst.device_ptr(&stream);
            let (softmax_lse_ptr, _guard) = softmax_lse.device_ptr(&stream);
            let (seqlens_q_ptr, _guard) = seqlens_q.device_ptr(&stream);
            let (seqlens_k_ptr, _guard) = seqlens_k.device_ptr(&stream);
            ffi::run_mha(
                q_ptr as *const core::ffi::c_void,
                k_ptr as *const core::ffi::c_void,
                v_ptr as *const core::ffi::c_void,
                dst_ptr as *const core::ffi::c_void,
                softmax_lse_ptr as *const core::ffi::c_void,
                /* alibi_slopes_ptr */ alibi_slopes_ptr as *const core::ffi::c_void,
                /* cu_seqlens_q_ptr */ seqlens_q_ptr as *const i32,
                /* cu_seqlens_k_ptr */ seqlens_k_ptr as *const i32,
                /* q_batch_stride */ 0,
                /* k_batch_stride */ 0,
                /* v_batch_stride */ 0,
                /* o_batch_stride */ 0,
                /* alibi_slopes_batch_stride */ 0,
                /* q_row_stride */ q_stride[q_rank - 3] as u32,
                /* k_row_stride */ k_stride[k_rank - 3] as u32,
                /* v_row_stride */ v_stride[v_rank - 3] as u32,
                /* o_row_stride */ o_stride[o_rank - 3] as u32,
                /* q_head_stride */ q_stride[q_rank - 2] as u32,
                /* k_head_stride */ k_stride[k_rank - 2] as u32,
                /* v_head_stride */ v_stride[v_rank - 2] as u32,
                /* o_head_stride */ o_stride[o_rank - 2] as u32,
                /* b */ batch_size as u32,
                /* h */ num_heads as u32,
                /* h_k */ num_heads_k as u32,
                /* d */ head_size as u32,
                /* d_rounded */ head_size_rounded as u32,
                /* softmax_scale*/ self.softmax_scale,
                /* seqlen_q */ self.max_seqlen_q as u32,
                /* seqlen_k */ self.max_seqlen_k as u32,
                /* seqlen_q_rounded */ seqlen_q_rounded as u32,
                /* seqlen_k_rounded */ seqlen_k_rounded as u32,
                /* is_bf16 */ is_bf16,
                /* is_causal */ is_causal,
                /* unpadded_lse */ 1,
                /* window_size_left */ window_size_left,
                /* window_size_right */ window_size_right,
                /* softcap */ self.softcap.unwrap_or(0.0),
            )
        }
        let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone());
        Ok((dst, out_shape))
    }
}
impl candle::CustomOp3 for FlashAttnVarLen {
    fn name(&self) -> &'static str {
        "flash-attn-varlen"
    }

    /// There is no CPU implementation; this op is cuda-only.
    fn cpu_fwd(
        &self,
        _: &CpuStorage,
        _: &Layout,
        _: &CpuStorage,
        _: &Layout,
        _: &CpuStorage,
        _: &Layout,
    ) -> Result<(CpuStorage, Shape)> {
        candle::bail!("no cpu support for flash-attn")
    }

    /// Dispatch on the query dtype; only f16/bf16 kernels are available.
    fn cuda_fwd(
        &self,
        q: &candle::CudaStorage,
        q_l: &Layout,
        k: &candle::CudaStorage,
        k_l: &Layout,
        v: &candle::CudaStorage,
        v_l: &Layout,
    ) -> Result<(candle::CudaStorage, Shape)> {
        match q.dtype() {
            candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true),
            candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false),
            dt => candle::bail!("flash-attn is only supported for f16/bf16 ({dt:?})"),
        }
    }
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v2 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q.
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v.
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch.
/// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch.
///
/// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`,
/// `seqlen_1 + seqlen_2`, etc.
///
/// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`.
pub fn flash_attn_varlen(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    seqlens_q: &Tensor,
    seqlens_k: &Tensor,
    max_seqlen_q: usize,
    max_seqlen_k: usize,
    softmax_scale: f32,
    causal: bool,
) -> Result<Tensor> {
    // Causal masking is expressed as an unbounded left window and a right window of 0.
    let window_size_right = causal.then_some(0);
    q.apply_op3(
        k,
        v,
        FlashAttnVarLen {
            softmax_scale,
            max_seqlen_q,
            max_seqlen_k,
            seqlens_q: seqlens_q.clone(),
            seqlens_k: seqlens_k.clone(),
            alibi_slopes: None,
            window_size_left: None,
            window_size_right,
            softcap: None,
        },
    )
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v2 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q.
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v.
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch.
/// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch.
/// * `window_size_left` - Limit left attention to value tokens.
/// * `window_size_right` - Limit right attention to value tokens.
///
/// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`,
/// `seqlen_1 + seqlen_2`, etc.
///
/// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`
pub fn flash_attn_varlen_windowed(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    seqlens_q: &Tensor,
    seqlens_k: &Tensor,
    max_seqlen_q: usize,
    max_seqlen_k: usize,
    softmax_scale: f32,
    window_size_left: Option<usize>,
    window_size_right: Option<usize>,
) -> Result<Tensor> {
    // Window bounds are forwarded verbatim; `None` means unbounded on that side.
    q.apply_op3(
        k,
        v,
        FlashAttnVarLen {
            softmax_scale,
            max_seqlen_q,
            max_seqlen_k,
            seqlens_q: seqlens_q.clone(),
            seqlens_k: seqlens_k.clone(),
            alibi_slopes: None,
            window_size_left,
            window_size_right,
            softcap: None,
        },
    )
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v2 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
/// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q.
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v.
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch.
/// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch.
///
/// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`,
/// `seqlen_1 + seqlen_2`, etc.
///
/// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`.
pub fn flash_attn_varlen_alibi(
q: &Tensor,
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-flash-attn/src/ffi.rs | candle-flash-attn/src/ffi.rs | use core::ffi::{c_int, c_void};
extern "C" {
    /// C entry point of the flash-attention v2 forward kernel.
    ///
    /// The argument list mirrors the Rust call sites in this crate: raw device pointers
    /// for q/k/v/output and the softmax log-sum-exp buffer, optional alibi slopes and
    /// cumulative sequence lengths (the var-len path passes batch strides of 0 and the
    /// seqlens pointers; the dense path is the converse), followed by the per-dimension
    /// element strides, problem sizes, dtype/causality flags and windowing parameters.
    pub(crate) fn run_mha(
        // device pointers
        q_ptr: *const c_void,
        k_ptr: *const c_void,
        v_ptr: *const c_void,
        o_ptr: *const c_void,
        softmax_lse_ptr: *const c_void,
        alibi_slopes_ptr: *const c_void,
        // cumulative sequence lengths (var-len path only)
        cu_seqlens_q_ptr: *const i32,
        cu_seqlens_k_ptr: *const i32,
        // strides, in elements
        q_batch_stride: u32,
        k_batch_stride: u32,
        v_batch_stride: u32,
        o_batch_stride: u32,
        alibi_slopes_batch_stride: u32,
        q_row_stride: u32,
        k_row_stride: u32,
        v_row_stride: u32,
        o_row_stride: u32,
        q_head_stride: u32,
        k_head_stride: u32,
        v_head_stride: u32,
        o_head_stride: u32,
        // problem sizes: batch, heads, kv heads, head dim (+ rounded variants)
        b: u32,
        h: u32,
        h_k: u32,
        d: u32,
        d_rounded: u32,
        softmax_scale: f32,
        seqlen_q: u32,
        seqlen_k: u32,
        seqlen_q_rounded: u32,
        seqlen_k_rounded: u32,
        // flags
        is_bf16: c_int,
        is_causal: c_int,
        unpadded_lse: c_int,
        // attention window; negative means unbounded on that side
        window_size_left: c_int,
        window_size_right: c_int,
        softcap: f32,
    );
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-flash-attn/tests/flash_attn_tests.rs | candle-flash-attn/tests/flash_attn_tests.rs | use anyhow::Result;
use candle::{DType, Device, IndexOp, Tensor, D};
/// Extract a rank-3 tensor as nested `Vec`s with each value rounded to `digits`
/// decimal places, to make float comparisons in the tests stable.
fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> {
    let scale = 10f32.powi(digits);
    let rounded = t
        .to_vec3::<f32>()?
        .into_iter()
        .map(|plane| {
            plane
                .into_iter()
                .map(|row| {
                    row.into_iter()
                        .map(|x| (x * scale).round() / scale)
                        .collect()
                })
                .collect()
        })
        .collect();
    Ok(rounded)
}
/// Reference (non-flash) scaled dot-product attention, computed in f32 and cast
/// back to the input dtype.
fn fa_acausal(q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32) -> Result<Tensor> {
    let in_dtype = q.dtype();
    let q = q.to_dtype(DType::F32)?;
    let k = k.to_dtype(DType::F32)?;
    let v = v.to_dtype(DType::F32)?;
    let scores = (q.matmul(&k.t()?)? * softmax_scale as f64)?;
    let probs = candle_nn::ops::softmax(&scores, D::Minus1)?;
    // Convert to contiguous as matmul doesn't support strided vs for now.
    probs.matmul(&v.contiguous()?)?.to_dtype(in_dtype)
}
/// Reference attention with a Gemma-style softcap: the logits are squashed through
/// `softcap * tanh(logits / softcap)` before the softmax. No softmax scaling is applied.
fn fa_acausal_softcap(q: &Tensor, k: &Tensor, v: &Tensor, softcap: f32) -> Result<Tensor> {
    let in_dtype = q.dtype();
    let q = q.to_dtype(DType::F32)?;
    let k = k.to_dtype(DType::F32)?;
    let v = v.to_dtype(DType::F32)?;
    let logits = q.matmul(&k.t()?)?;
    let capped = (softcap as f64 * ((logits / softcap as f64)?.tanh())?)?;
    let probs = candle_nn::ops::softmax(&capped, D::Minus1)?;
    // Convert to contiguous as matmul doesn't support strided vs for now.
    probs.matmul(&v.contiguous()?)?.to_dtype(in_dtype)
}
// Compares the flash-attn kernel against the f32 reference implementation on a
// small deterministic input, checking both exact rounded tables and the max
// absolute difference. Requires a CUDA device.
#[test]
fn flash_attn_acausal() -> Result<()> {
    let device = Device::new_cuda(0)?;
    // Deterministic input: arange reshaped to (1, 3, 2, 8), with q/k/v scaled differently.
    let q = Tensor::arange(0u32, 48, &device)?
        .to_dtype(DType::F16)?
        .reshape((1, 3, 2, 8))?;
    let k = (&q / 40.)?;
    let v = (&q / 50.)?;
    let q = (&q / 30.)?;
    // Reference result.
    let ys1 = fa_acausal(&q, &k, &v, 0.5)?;
    let ys1 = ys1.i(0)?.to_dtype(DType::F32)?;
    // Flash-attn result; the kernel expects the heads on dim 2, hence the transposes.
    let ys2 = {
        let q = q.transpose(1, 2)?;
        let k = k.transpose(1, 2)?;
        let v = v.transpose(1, 2)?;
        candle_flash_attn::flash_attn(&q, &k, &v, 0.5, false)?.transpose(1, 2)?
    };
    let ys2 = ys2.i(0)?.to_dtype(DType::F32)?;
    let diff = ys1.sub(&ys2)?.abs()?.flatten_all()?.max(0)?;
    assert_eq!(ys1.dims(), &[3, 2, 8]);
    assert_eq!(
        to_vec3_round(ys1, 4)?,
        &[
            [
                [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238],
                [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322]
            ],
            [
                [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605],
                [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684]
            ],
            [
                [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955],
                [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023]
            ]
        ]
    );
    assert_eq!(ys2.dims(), &[3, 2, 8]);
    // The kernel output must match the same rounded table...
    assert_eq!(
        to_vec3_round(ys2, 4)?,
        &[
            [
                [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238],
                [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322]
            ],
            [
                [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605],
                [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684]
            ],
            [
                [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955],
                [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023]
            ]
        ]
    );
    // ...and also agree with the reference to within a tight tolerance.
    assert!(diff.to_vec0::<f32>()?.abs() < 1e-5);
    Ok(())
}
// Checks the softcap code path of the kernel against the f32 reference
// implementation (looser tolerance: f16 kernel vs f32 reference).
// Requires a CUDA device.
#[test]
fn flash_attn_acausal_softcap() -> Result<()> {
    let device = Device::new_cuda(0)?;
    // Deterministic input: arange reshaped to (1, 3, 5, 8), with q/k/v scaled differently.
    let q = Tensor::arange(0u32, 3 * 5 * 8, &device)?
        .to_dtype(DType::F16)?
        .reshape((1, 3, 5, 8))?;
    let k = (&q / 40.)?;
    let v = (&q / 50.)?;
    let q = (&q / 30.)?;
    let softcap = 5.0f32;
    // Reference result. `f32` is `Copy`, so the value is passed directly
    // (previously used a redundant `.clone()`, clippy::clone_on_copy).
    let ys1 = fa_acausal_softcap(&q, &k, &v, softcap)?;
    let ys1 = ys1.i(0)?.to_dtype(DType::F32)?;
    // Flash-attn result; the kernel expects the heads on dim 2, hence the transposes.
    let ys2 = {
        let q = q.transpose(1, 2)?;
        let k = k.transpose(1, 2)?;
        let v = v.transpose(1, 2)?;
        candle_flash_attn::flash_attn_alibi_windowed_softcap(
            &q,
            &k,
            &v,
            None,    // alibi_slopes //
            1.0,     // softmax //
            None,    // window_size_left //
            None,    // window_size_right //
            softcap, // softcap //
        )?
        .transpose(1, 2)?
    };
    let ys2 = ys2.i(0)?.to_dtype(DType::F32)?;
    let diff = ys1.sub(&ys2)?.abs()?.flatten_all()?.max(0)?;
    assert_eq!(ys1.dims(), &[3, 5, 8]);
    assert_eq!(ys2.dims(), &[3, 5, 8]);
    assert!(diff.to_vec0::<f32>()?.abs() < 1e-3);
    Ok(())
}
// Exercises the variable-length entry point with a single sequence of length 2
// (cumulative seqlens [0, 2]) and checks the exact rounded output table.
// Requires a CUDA device.
#[test]
fn flash_attn_varlen() -> Result<()> {
    let device = Device::new_cuda(0)?;
    // Deterministic input: arange reshaped to (3, 2, 8), q/k/v scaled differently.
    let q = Tensor::arange(0u32, 48, &device)?
        .to_dtype(DType::F16)?
        .reshape((3, 2, 8))?;
    let k = (&q / 40.)?;
    let v = (&q / 50.)?;
    let q = (&q / 30.)?;
    // Cumulative offsets: one batch entry covering tokens [0, 2).
    let seqlens_q = Tensor::new(&[0u32, 2u32], &device)?;
    let seqlens_k = Tensor::new(&[0u32, 2u32], &device)?;
    let ys = {
        // The var-len API takes (total_tokens, heads, head_dim), hence the transposes.
        let q = q.transpose(0, 1)?;
        let k = k.transpose(0, 1)?;
        let v = v.transpose(0, 1)?;
        candle_flash_attn::flash_attn_varlen(
            &q, &k, &v, &seqlens_q, &seqlens_k, 32, 32, 0.5, false,
        )?
        .transpose(0, 1)?
    };
    let ys = ys.to_dtype(DType::F32)?;
    assert_eq!(ys.dims(), &[3, 2, 8]);
    // Same expected table as the dense `flash_attn_acausal` test.
    assert_eq!(
        to_vec3_round(ys, 4)?,
        &[
            [
                [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238],
                [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322]
            ],
            [
                [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605],
                [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684]
            ],
            [
                [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955],
                [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023]
            ]
        ]
    );
    Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/lib.rs | candle-transformers/src/lib.rs | pub mod fused_moe;
pub mod generation;
pub mod models;
pub mod object_detection;
pub mod pipelines;
pub mod quantized_nn;
pub mod quantized_var_builder;
pub mod utils;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/quantized_nn.rs | candle-transformers/src/quantized_nn.rs | //! Utilities for quantized network layers
//!
//! This module contains various implementations of standard neural network layers, modules and
//! utilities including embedding, linear layers, and various normalization techniques.
//! Most implementations provide quantized weights support.
use crate::models::with_tracing::QMatMul;
use crate::quantized_var_builder::VarBuilder;
use candle::quantized::QTensor;
use candle::{Module, Result, Tensor};
/// Embedding layer whose table is stored quantized and dequantized at load time.
#[derive(Debug, Clone)]
pub struct Embedding {
    inner: candle_nn::Embedding,
    span: tracing::Span,
}

impl Embedding {
    /// Load a `(d1, d2)` embedding table, dequantizing it on the var-builder's device.
    pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> {
        let weights = vb.get((d1, d2), "weight")?.dequantize(vb.device())?;
        Ok(Self {
            inner: candle_nn::Embedding::new(weights, d2),
            span: tracing::span!(tracing::Level::TRACE, "embedding"),
        })
    }

    /// Borrow the dequantized embedding matrix.
    pub fn embeddings(&self) -> &Tensor {
        self.inner.embeddings()
    }
}

impl Module for Embedding {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}
/// Linear layer backed by a quantized matmul and an optional (dequantized) bias.
#[derive(Debug, Clone)]
pub struct Linear {
    weight: QMatMul,
    bias: Option<Tensor>,
}

impl Linear {
    /// Build the layer from a shared quantized weight tensor.
    pub fn from_arc(weight: std::sync::Arc<QTensor>, bias: Option<Tensor>) -> Result<Self> {
        Ok(Self {
            weight: QMatMul::from_weights(weight)?,
            bias,
        })
    }

    /// Build the layer from an already constructed `QMatMul`.
    pub fn from_weights(weight: QMatMul, bias: Option<Tensor>) -> Self {
        Self { weight, bias }
    }
}

impl Module for Linear {
    fn forward(&self, x: &Tensor) -> candle::Result<Tensor> {
        let y = x.apply(&self.weight)?;
        match &self.bias {
            Some(bias) => y.broadcast_add(bias),
            None => Ok(y),
        }
    }
}
pub fn linear_b(in_dim: usize, out_dim: usize, bias: bool, vb: VarBuilder) -> Result<Linear> {
let bias = if bias {
Some(vb.get(out_dim, "bias")?.dequantize(vb.device())?)
} else {
None
};
let weight = QMatMul::new(in_dim, out_dim, vb)?;
Ok(Linear { weight, bias })
}
/// Create a quantized linear layer with a (dequantized) bias.
pub fn linear(in_dim: usize, out_dim: usize, vb: VarBuilder) -> Result<Linear> {
    let bias = vb.get(out_dim, "bias")?.dequantize(vb.device())?;
    Ok(Linear {
        weight: QMatMul::new(in_dim, out_dim, vb)?,
        bias: Some(bias),
    })
}
pub fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<candle_nn::LayerNorm> {
let weight = vb.get(size, "weight")?.dequantize(vb.device())?;
let bias = vb.get(size, "bias")?.dequantize(vb.device())?;
Ok(candle_nn::LayerNorm::new(weight, bias, eps))
}
/// Load a bias-free `LayerNorm` from a quantized weight vector.
pub fn layer_norm_no_bias(size: usize, eps: f64, vb: VarBuilder) -> Result<candle_nn::LayerNorm> {
    Ok(candle_nn::LayerNorm::new_no_bias(
        vb.get(size, "weight")?.dequantize(vb.device())?,
        eps,
    ))
}
/// Create a quantized linear layer without a bias term.
pub fn linear_no_bias(in_dim: usize, out_dim: usize, vb: VarBuilder) -> Result<Linear> {
    Ok(Linear {
        weight: QMatMul::new(in_dim, out_dim, vb)?,
        bias: None,
    })
}
/// RMS normalization layer whose scale weights are stored quantized.
#[derive(Debug, Clone)]
pub struct RmsNorm {
    weight: Tensor,
    eps: f64,
    span: tracing::Span,
}

impl RmsNorm {
    /// Load the scale vector of length `size` and dequantize it.
    pub fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
        let span = tracing::span!(tracing::Level::TRACE, "rms-norm");
        let weight = vb.get(size, "weight")?.dequantize(vb.device())?;
        Ok(Self { weight, eps, span })
    }

    /// Build the layer directly from a quantized tensor.
    pub fn from_qtensor(weight: QTensor, eps: f64) -> Result<Self> {
        let span = tracing::span!(tracing::Level::TRACE, "rms-norm");
        Ok(Self {
            weight: weight.dequantize(&weight.device())?,
            eps,
            span,
        })
    }
}

impl Module for RmsNorm {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        candle_nn::ops::rms_norm(x, &self.weight, self.eps as f32)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/fused_moe.rs | candle-transformers/src/fused_moe.rs | // Adapted from: https://github.com/guoqingbao/vllm.rs/blob/main/src/models/layers/moe.rs
use candle::Module;
use candle::{quantized::QTensor, DType, Result, Tensor, D};
use candle_nn::{linear_no_bias, moe, Activation, Linear, VarBuilder};
use std::sync::Arc;
/// Configuration of a fused mixture-of-experts block.
pub struct MoeCfg {
    // Model hidden size (router input / expert output dimension).
    pub hidden_size: usize,
    // Total number of experts.
    pub num_experts: usize,
    // Number of experts each token is routed to (top-k).
    pub num_experts_per_tok: usize,
    // Per-expert MLP intermediate size.
    pub moe_intermediate_size: usize,
    // Whether to renormalize the top-k routing weights so they sum to 1.
    pub norm_topk_prob: bool,
    // Activation applied to the gate projection output.
    pub act: Activation,
    // NOTE(review): not read in this file — presumably selects which decoder layers
    // use the sparse MoE path; confirm against the model definitions.
    pub decoder_sparse_step: Option<usize>,
}
/// Mixture-of-experts layer with all expert weights packed into stacked tensors so
/// the expert GEMMs can be fused (see `moe::moe_gemm`).
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct FusedMoe {
    // Router producing one logit per expert.
    gate: Linear,
    // Per-expert [gate_proj; up_proj] weights concatenated on dim 0, then stacked
    // over experts: `(num_experts, 2 * moe_intermediate_size, hidden_size)`.
    gate_up_w: Tensor,
    // Per-expert down_proj weights stacked over experts:
    // `(num_experts, hidden_size, moe_intermediate_size)`.
    down_w: Tensor,
    // Half of `gate_up_w`'s dim 1, i.e. the per-expert intermediate size.
    w_size_n: usize,
    // Activation applied to the gate half of the packed projection.
    act: Activation,
    // Renormalize top-k routing weights to sum to 1 when true.
    norm_topk_prob: bool,
    // Number of experts each token is routed to.
    num_experts_per_tok: usize,
    // world_size: usize,
    dtype: DType,
}
impl FusedMoe {
    /// Load all expert weights and pack them: each expert's gate_proj and up_proj are
    /// concatenated on dim 0, then all experts are stacked on a new leading dim, giving
    /// `gate_up_w: (num_experts, 2 * moe_intermediate_size, hidden_size)` and
    /// `down_w: (num_experts, hidden_size, moe_intermediate_size)`.
    pub fn new(cfg: &MoeCfg, vb: VarBuilder, dtype: DType) -> Result<Self> {
        let num_experts = cfg.num_experts;
        // Router producing one logit per expert.
        let gate = linear_no_bias(cfg.hidden_size, num_experts, vb.pp("gate"))?;
        let experts_vb = vb.pp("experts");
        let mut gate_up_experts = Vec::with_capacity(num_experts);
        let mut down_experts = Vec::with_capacity(num_experts);
        //pack experts
        for i in 0..num_experts {
            let experts_vb = experts_vb.pp(format!("{i}").as_str());
            let (gate_up_expert, down_expert) = {
                // n x k format
                let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL;
                let gate_expert = experts_vb.pp("gate_proj").get_with_hints(
                    (cfg.moe_intermediate_size, cfg.hidden_size),
                    "weight",
                    init_ws,
                )?;
                let up_expert = experts_vb.pp("up_proj").get_with_hints(
                    (cfg.moe_intermediate_size, cfg.hidden_size),
                    "weight",
                    init_ws,
                )?;
                let down_expert = experts_vb.pp("down_proj").get_with_hints(
                    (cfg.hidden_size, cfg.moe_intermediate_size),
                    "weight",
                    init_ws,
                )?;
                //pack gate_proj and up_proj
                let gate_up_expert = Tensor::cat(&[&gate_expert, &up_expert], 0)?;
                (gate_up_expert, down_expert)
            };
            gate_up_experts.push(gate_up_expert);
            down_experts.push(down_expert);
        }
        let gate_up_w = Tensor::stack(&gate_up_experts, 0)?;
        let down_w = Tensor::stack(&down_experts, 0)?;
        // let world_size = comm.world_size();
        // Per-expert intermediate size: half of the packed [gate; up] dimension.
        let w_size_n = gate_up_w.dim(1)? / 2;
        Ok(Self {
            gate,
            gate_up_w,
            down_w,
            w_size_n,
            act: cfg.act,
            norm_topk_prob: cfg.norm_topk_prob,
            num_experts_per_tok: cfg.num_experts_per_tok,
            // world_size,
            dtype,
        })
    }

    /// Route each token to its top-k experts and apply the fused expert MLPs.
    ///
    /// `xs` is `(batch, seq_len, hidden)`; the result has the same shape. `is_prefill`
    /// is forwarded to the fused GEMM (and reserved for a prefill-specific sort path).
    pub fn forward(&self, xs: &Tensor, is_prefill: bool) -> Result<Tensor> {
        let (batch, seq_len, hidden_dim) = xs.dims3()?;
        // Flatten the batch/seq dims: the routing operates per token.
        let xs = xs.reshape(((), hidden_dim))?;
        let (num_tokens, hidden_dim) = xs.dims2()?;
        // Softmax over the router logits (in f32), then pick the top-k experts per
        // token via a descending argsort.
        let router_logits = self.gate.forward(&xs)?;
        let routing_weights =
            candle_nn::ops::softmax_last_dim(&router_logits.to_dtype(DType::F32)?)?;
        let topk_ids = routing_weights
            .arg_sort_last_dim(false)?
            .narrow(D::Minus1, 0, self.num_experts_per_tok)?
            .contiguous()?;
        let mut topk_weights = routing_weights.gather(&topk_ids, D::Minus1)?;
        if self.norm_topk_prob {
            topk_weights = topk_weights.broadcast_div(&topk_weights.sum_keepdim(D::Minus1)?)?;
        }
        // Sort the flattened expert assignments so tokens going to the same expert
        // are grouped for the fused GEMM; sorted values are expert ids, the returned
        // indices identify the (token, slot) pairs.
        let (expert_ids, sorted_token_ids) = if is_prefill {
            // For long-context (32K+), need to use custom sort kernel
            // #[cfg(feature = "cuda")]
            // {
            //     use attention_rs::sort::ArgSortOp;
            //     topk_ids.flatten_all()?.sort(true)?
            // }
            // #[cfg(not(feature = "cuda"))]
            topk_ids.flatten_all()?.sort_last_dim(true)?
        } else {
            topk_ids.flatten_all()?.sort_last_dim(true)?
        };
        //out (M, top_k, N)
        let gate_up = moe::moe_gemm(
            &xs,
            &self.gate_up_w,
            &None,
            &sorted_token_ids,
            &expert_ids,
            self.num_experts_per_tok,
            is_prefill,
        )?;
        // Split the packed projection back into its gate and up halves.
        let gate = gate_up
            .narrow(candle::D::Minus1, 0, self.w_size_n)?
            .contiguous()?;
        let up = gate_up
            .narrow(candle::D::Minus1, self.w_size_n, self.w_size_n)?
            .contiguous()?;
        //(M * top_k, N // 2)
        let down_inputs = (up * gate.apply(&self.act)?)?.reshape(((), self.w_size_n))?;
        //view(M, top_k, K) -> sum -> (M, K)
        // The down GEMM applies the routing weights; summing over the top-k slots
        // combines the expert outputs per token.
        let ys = moe::moe_gemm(
            &down_inputs,
            &self.down_w,
            &Some(topk_weights),
            &sorted_token_ids,
            &expert_ids,
            self.num_experts_per_tok,
            is_prefill,
        )?
        .reshape((num_tokens, (), hidden_dim))?
        .sum(D::Minus2)?;
        ys.reshape((batch, seq_len, hidden_dim))
    }
}
/// Mixture-of-experts layer whose expert weights stay quantized (GGUF).
pub struct FusedMoeGGUF {
    /// Router producing per-token expert logits (dequantized, runs in f32).
    pub gate: Linear,
    /// Stacked quantized gate projections, one per expert.
    pub gate_experts: Arc<QTensor>,
    /// Stacked quantized up projections, one per expert.
    pub up_experts: Arc<QTensor>,
    /// Stacked quantized down projections, one per expert.
    pub down_experts: Arc<QTensor>,
    /// Activation applied to the gate branch.
    pub act: Activation,
    /// Whether to renormalize the top-k routing weights to sum to 1.
    pub norm_topk_prob: bool,
    /// Number of experts activated per token.
    pub num_experts_per_tok: usize,
    // all_reduce: AllReduce,
    // world_size: usize,
    /// Compute dtype handed to the quantized MoE GEMM kernels.
    pub dtype: DType,
}
impl FusedMoeGGUF {
    /// Loads the router and the stacked quantized expert weights from a GGUF
    /// checkpoint (`ffn_gate_inp`, `ffn_gate_exps`, `ffn_up_exps`, `ffn_down_exps`).
    pub fn new(
        cfg: &MoeCfg,
        vb: crate::quantized_var_builder::VarBuilder,
        dtype: DType,
    ) -> Result<Self> {
        let num_experts = cfg.num_experts;
        // The router runs in f32, so dequantize its weights up front.
        let gate_ws = vb
            .pp("ffn_gate_inp")
            .get((num_experts, cfg.hidden_size), "weight")?
            .dequantize(vb.device())?
            .to_dtype(DType::F32)?;
        let gate = Linear::new(gate_ws, None);
        // Expert weights stay quantized; shapes are (experts, out, in).
        let (gate_experts, up_experts, down_experts) = {
            (
                vb.pp("ffn_gate_exps").get(
                    (num_experts, cfg.moe_intermediate_size, cfg.hidden_size),
                    "weight",
                )?,
                vb.pp("ffn_up_exps").get(
                    (num_experts, cfg.moe_intermediate_size, cfg.hidden_size),
                    "weight",
                )?,
                vb.pp("ffn_down_exps").get(
                    (num_experts, cfg.hidden_size, cfg.moe_intermediate_size),
                    "weight",
                )?,
            )
        };
        Ok(Self {
            gate,
            gate_experts,
            up_experts,
            down_experts,
            act: cfg.act,
            norm_topk_prob: cfg.norm_topk_prob,
            num_experts_per_tok: cfg.num_experts_per_tok,
            // all_reduce: AllReduce::new(comm),
            // world_size: 1,
            dtype,
        })
    }
    /// Applies the quantized MoE layer to `xs` of shape `(batch, seq_len, hidden_dim)`.
    ///
    /// Routing runs in f32; the output is cast back to the input dtype.
    pub fn forward(&self, xs: &Tensor, is_prefill: bool) -> Result<Tensor> {
        let (batch, seq_len, hidden_dim) = xs.dims3()?;
        let xs = xs.reshape(((), hidden_dim))?;
        let (num_tokens, hidden_dim) = xs.dims2()?;
        // Remember the caller's dtype so the result can be cast back at the end.
        let original_dtype = xs.dtype();
        let xs = if xs.dtype() != DType::F32 {
            xs.to_dtype(DType::F32)?
        } else {
            xs.to_owned()
        };
        let router_logits = self.gate.forward(&xs)?;
        let routing_weights =
            candle_nn::ops::softmax_last_dim(&router_logits.to_dtype(DType::F32)?)?;
        // Top-k expert ids via a descending argsort over the routing weights.
        let topk_ids = routing_weights
            .arg_sort_last_dim(false)?
            .narrow(D::Minus1, 0, self.num_experts_per_tok)?
            .contiguous()?;
        let mut topk_weights = routing_weights.gather(&topk_ids, D::Minus1)?;
        if self.norm_topk_prob {
            // Renormalize the selected weights so they sum to 1 per token.
            topk_weights = topk_weights.broadcast_div(&topk_weights.sum_keepdim(D::Minus1)?)?;
        }
        let (expert_ids, sorted_token_ids) = if is_prefill {
            // For long-context (32K+), need to use custom sort kernel
            // #[cfg(feature = "cuda")]
            // {
            //     use attention_rs::sort::ArgSortOp;
            //     topk_ids.flatten_all()?.sort(true)?
            // }
            // #[cfg(not(feature = "cuda"))]
            topk_ids.flatten_all()?.sort_last_dim(true)?
        } else {
            topk_ids.flatten_all()?.sort_last_dim(true)?
        };
        // SwiGLU over the selected experts: down(up(x) * act(gate(x))), with the
        // top-k routing weights folded into the final GEMM.
        let ys = {
            let gate = moe::moe_gemm_gguf(
                &xs,
                &self.gate_experts,
                &None,
                &sorted_token_ids,
                &expert_ids,
                self.num_experts_per_tok,
                is_prefill,
                self.dtype,
            )?;
            let up = moe::moe_gemm_gguf(
                &xs,
                &self.up_experts,
                &None,
                &sorted_token_ids,
                &expert_ids,
                self.num_experts_per_tok,
                is_prefill,
                self.dtype,
            )?;
            let down_inputs = (up * gate.apply(&self.act)?)?;
            moe::moe_gemm_gguf(
                &down_inputs,
                &self.down_experts,
                &Some(topk_weights),
                &sorted_token_ids,
                &expert_ids,
                self.num_experts_per_tok,
                is_prefill,
                self.dtype,
            )?
        };
        // Sum the per-expert contributions, then restore dtype and shape.
        let mut ys = ys.reshape((num_tokens, (), hidden_dim))?.sum(D::Minus2)?;
        if ys.dtype() != original_dtype {
            ys = ys.to_dtype(original_dtype)?;
        }
        ys.reshape((batch, seq_len, hidden_dim))
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/object_detection.rs | candle-transformers/src/object_detection.rs | //! Bounding Boxes and Intersection
//!
//! This module provides functionality for handling bounding boxes and their manipulation,
//! particularly in the context of object detection. It includes tools for calculating
//! intersection over union (IoU) and non-maximum suppression (NMS).
/// A bounding box around an object.
#[derive(Debug, Clone)]
pub struct Bbox<D> {
    pub xmin: f32,
    pub ymin: f32,
    pub xmax: f32,
    pub ymax: f32,
    /// Detection confidence score; NMS sorts on this field.
    pub confidence: f32,
    /// Arbitrary per-box payload (e.g. keypoints or class information).
    pub data: D,
}
/// A 2D keypoint.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct KeyPoint {
    pub x: f32,
    pub y: f32,
    // NOTE(review): name suggests a visibility/confidence value — confirm
    // against the model that produces it.
    pub mask: f32,
}
/// Intersection over union of two bounding boxes.
pub fn iou<D>(b1: &Bbox<D>, b2: &Bbox<D>) -> f32 {
    // Areas and side lengths use the inclusive-pixel convention (+1 per side).
    let area = |b: &Bbox<D>| (b.xmax - b.xmin + 1.) * (b.ymax - b.ymin + 1.);
    let inter_w = (b1.xmax.min(b2.xmax) - b1.xmin.max(b2.xmin) + 1.).max(0.);
    let inter_h = (b1.ymax.min(b2.ymax) - b1.ymin.max(b2.ymin) + 1.).max(0.);
    let inter = inter_w * inter_h;
    inter / (area(b1) + area(b2) - inter)
}
/// Hard non-maximum suppression, applied independently per class.
///
/// Each inner `Vec` is sorted by descending confidence; a box is dropped when
/// its IoU with any already-kept box exceeds `threshold`. Survivors are
/// compacted to the front and the vector truncated in place.
///
/// NOTE: the sort panics if any confidence is NaN (`partial_cmp` + `unwrap`).
pub fn non_maximum_suppression<D>(bboxes: &mut [Vec<Bbox<D>>], threshold: f32) {
    // Perform non-maximum suppression.
    for bboxes_for_class in bboxes.iter_mut() {
        bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap());
        // Kept boxes occupy the prefix [0, current_index).
        let mut current_index = 0;
        for index in 0..bboxes_for_class.len() {
            let mut drop = false;
            for prev_index in 0..current_index {
                let iou = iou(&bboxes_for_class[prev_index], &bboxes_for_class[index]);
                if iou > threshold {
                    drop = true;
                    break;
                }
            }
            if !drop {
                // Keep this box by swapping it into the kept prefix.
                bboxes_for_class.swap(current_index, index);
                current_index += 1;
            }
        }
        bboxes_for_class.truncate(current_index);
    }
}
// Updates confidences starting at highest and comparing subsequent boxes.
// `bboxes_for_class` must already be sorted by descending confidence;
// `updated_confidences` is parallel to it and receives the decayed scores.
// The decay always reads the box's original confidence, so a box overlapped by
// several higher-ranked boxes keeps only the decay of the last overlap found.
fn update_confidences<D>(
    bboxes_for_class: &[Bbox<D>],
    updated_confidences: &mut [f32],
    iou_threshold: f32,
    sigma: f32,
) {
    let len = bboxes_for_class.len();
    for current_index in 0..len {
        let current_bbox = &bboxes_for_class[current_index];
        for index in (current_index + 1)..len {
            let iou_val = iou(current_bbox, &bboxes_for_class[index]);
            if iou_val > iou_threshold {
                // Decay calculation from page 4 of: https://arxiv.org/pdf/1704.04503
                let decay = (-iou_val * iou_val / sigma).exp();
                let updated_confidence = bboxes_for_class[index].confidence * decay;
                updated_confidences[index] = updated_confidence;
            }
        }
    }
}
// Sorts the bounding boxes by confidence and applies soft non-maximum suppression.
// This function is based on the algorithm described in https://arxiv.org/pdf/1704.04503
/// Defaults: `iou_threshold` 0.5, `confidence_threshold` 0.1, `sigma` 0.5.
/// Boxes are never removed; instead their confidence is decayed, and scores
/// falling below `confidence_threshold` are zeroed.
///
/// NOTE: the sort panics if any confidence is NaN (`partial_cmp` + `unwrap`).
pub fn soft_non_maximum_suppression<D>(
    bboxes: &mut [Vec<Bbox<D>>],
    iou_threshold: Option<f32>,
    confidence_threshold: Option<f32>,
    sigma: Option<f32>,
) {
    let iou_threshold = iou_threshold.unwrap_or(0.5);
    let confidence_threshold = confidence_threshold.unwrap_or(0.1);
    let sigma = sigma.unwrap_or(0.5);
    for bboxes_for_class in bboxes.iter_mut() {
        // Sort boxes by confidence in descending order
        bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap());
        let mut updated_confidences = bboxes_for_class
            .iter()
            .map(|bbox| bbox.confidence)
            .collect::<Vec<_>>();
        update_confidences(
            bboxes_for_class,
            &mut updated_confidences,
            iou_threshold,
            sigma,
        );
        // Update confidences, set to 0.0 if below threshold
        for (i, &confidence) in updated_confidences.iter().enumerate() {
            bboxes_for_class[i].confidence = if confidence < confidence_threshold {
                0.0
            } else {
                confidence
            };
        }
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/utils.rs | candle-transformers/src/utils.rs | //! Apply penalty and repeat_kv
use candle::{Result, Tensor};
/// Applies a repeat penalty to a 1D logits tensor.
///
/// Each token id appearing in `context` is penalized exactly once, no matter
/// how many times it occurs: positive logits are divided by `penalty`,
/// negative ones multiplied by it. Out-of-range ids are ignored.
pub fn apply_repeat_penalty(logits: &Tensor, penalty: f32, context: &[u32]) -> Result<Tensor> {
    let device = logits.device();
    let mut values = logits.to_dtype(candle::DType::F32)?.to_vec1::<f32>()?;
    // Deduplicate up front; each index is adjusted independently, so the
    // iteration order of the set does not matter.
    let unique: std::collections::HashSet<u32> = context.iter().copied().collect();
    for token_id in unique {
        if let Some(logit) = values.get_mut(token_id as usize) {
            *logit = if *logit >= 0. {
                *logit / penalty
            } else {
                *logit * penalty
            };
        }
    }
    let len = values.len();
    Tensor::from_vec(values, len, device)
}
/// Repeats a key or value tensor for grouped query attention
/// The input tensor should have a shape `(batch, num_kv_heads, seq_len, head_dim)`,
pub fn repeat_kv(xs: Tensor, n_rep: usize) -> Result<Tensor> {
    if n_rep == 1 {
        return Ok(xs);
    }
    let (b_sz, n_kv_head, seq_len, head_dim) = xs.dims4()?;
    // Using cat is faster than a broadcast as it avoids going through a potentially
    // strided copy.
    // https://github.com/huggingface/candle/pull/2043
    let repeated = Tensor::cat(&vec![&xs; n_rep], 2)?;
    repeated.reshape((b_sz, n_kv_head * n_rep, seq_len, head_dim))
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/quantized_var_builder.rs | candle-transformers/src/quantized_var_builder.rs | //! Varbuilder for Loading gguf files
//!
//! VarBuilder is a utility to store quantized tensors from a [GGUF model file](https://huggingface.co/docs/hub/gguf).
//! These tensors can be loaded from disk using `from_gguf` or from an in-memory
//! buffer using `from_gguf_buffer`.
use candle::quantized::QTensor;
use candle::{Device, Result, Shape};
use std::sync::Arc;
/// VarBuilder specialized for QTensors loaded from a GGUF file or buffer.
#[derive(Clone)]
pub struct VarBuilder {
    // All tensors, keyed by their full dotted name.
    data: Arc<std::collections::HashMap<String, Arc<QTensor>>>,
    // Current prefix components, joined with '.' when resolving names.
    path: Vec<String>,
    device: Device,
}
impl VarBuilder {
    /// Reads all tensors from a GGUF file on disk onto `device`.
    pub fn from_gguf<P: AsRef<std::path::Path>>(p: P, device: &Device) -> Result<Self> {
        let mut file = std::fs::File::open(p)?;
        let content = candle::quantized::gguf_file::Content::read(&mut file)?;
        let mut data = std::collections::HashMap::new();
        for tensor_name in content.tensor_infos.keys() {
            let tensor = content.tensor(&mut file, tensor_name, device)?;
            data.insert(tensor_name.to_string(), Arc::new(tensor));
        }
        Ok(Self {
            data: Arc::new(data),
            path: Vec::new(),
            device: device.clone(),
        })
    }
    /// Reads all tensors from an in-memory GGUF buffer onto `device`.
    pub fn from_gguf_buffer(buffer: &[u8], device: &Device) -> Result<Self> {
        let mut cursor = std::io::Cursor::new(buffer);
        let content = candle::quantized::gguf_file::Content::read(&mut cursor)?;
        let mut data = std::collections::HashMap::new();
        for tensor_name in content.tensor_infos.keys() {
            let tensor = content.tensor(&mut cursor, tensor_name, device)?;
            data.insert(tensor_name.to_string(), Arc::new(tensor));
        }
        Ok(Self {
            data: Arc::new(data),
            path: Vec::new(),
            device: device.clone(),
        })
    }
    /// Returns a builder scoped one level deeper ("push prefix").
    pub fn pp<S: ToString>(&self, s: S) -> Self {
        let mut path = self.path.clone();
        path.push(s.to_string());
        Self {
            data: self.data.clone(),
            path,
            device: self.device.clone(),
        }
    }
    /// Resolves `tensor_name` against the current prefix into a dotted key.
    fn path(&self, tensor_name: &str) -> String {
        if self.path.is_empty() {
            tensor_name.to_string()
        } else {
            [&self.path.join("."), tensor_name].join(".")
        }
    }
    /// Fetches the tensor at the prefixed name, checking it has shape `s`.
    pub fn get<S: Into<Shape>>(&self, s: S, name: &str) -> Result<Arc<QTensor>> {
        let path = self.path(name);
        match self.data.get(&path) {
            None => {
                candle::bail!("cannot find tensor {path}")
            }
            Some(qtensor) => {
                let shape = s.into();
                if qtensor.shape() != &shape {
                    candle::bail!(
                        "shape mismatch for {name}, got {:?}, expected {shape:?}",
                        qtensor.shape()
                    )
                }
                Ok(qtensor.clone())
            }
        }
    }
    /// Fetches the tensor at the prefixed name without a shape check.
    pub fn get_no_shape(&self, name: &str) -> Result<Arc<QTensor>> {
        let path = self.path(name);
        match self.data.get(&path) {
            None => {
                // Report the full dotted path that was actually looked up,
                // consistent with `get` (previously this printed only `name`).
                candle::bail!("cannot find tensor {path}")
            }
            Some(qtensor) => Ok(qtensor.clone()),
        }
    }
    /// The device tensors were loaded onto.
    pub fn device(&self) -> &Device {
        &self.device
    }
    /// Whether a tensor with this exact name exists.
    ///
    /// Note: unlike `get`, the current prefix is NOT applied — `key` must be
    /// the full dotted tensor name.
    pub fn contains_key(&self, key: &str) -> bool {
        self.data.contains_key(key)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mobileclip.rs | candle-transformers/src/models/mobileclip.rs | //! Mobile CLIP model, combining a lightweight vision encoder with a text encoder
//!
//! A mobile-optimized CLIP implementation that uses:
//! - FastViT as the vision encoder
//! - OpenCLIP text encoder
//! - Projection layers to align the feature spaces
//!
//! See model details at:
//! - [FastViT](https://arxiv.org/abs/2303.14189)
//! - [OpenCLIP](https://github.com/mlfoundations/open_clip)
//!
//! References:
//! - [MobileVLM](https://huggingface.co/mobileVLM)
//! - [MetaCLIP](https://arxiv.org/abs/2309.16671)
//!
use super::fastvit;
use super::openclip::text_model;
use candle::{Result, Tensor, D};
use candle_nn::{Func, VarBuilder};
/// MobileCLIP model: FastViT vision trunk + OpenCLIP text tower.
#[derive(Clone, Debug)]
pub struct MobileClipModel {
    text_model: text_model::OpenClipTextTransformer,
    // FastViT trunk wrapped as a closure-style module.
    vision_model: Func<'static>,
    // Projects text embeddings into the shared embedding space.
    text_projection: Tensor,
    // Learned log-space temperature; exponentiated in `forward`.
    logit_scale: Tensor,
}
/// Configuration pairing a FastViT vision backbone with an OpenCLIP text tower.
#[derive(Clone, Debug)]
pub struct MobileClipConfig {
    pub text_config: text_model::Config,
    pub vision_config: fastvit::Config,
    /// Square input image resolution in pixels.
    pub image_size: usize,
}
impl MobileClipConfig {
    /// Common base: ViT-B/32 text tower and 256x256 inputs; only the vision
    /// backbone differs between the published variants.
    fn with_vision(vision_config: fastvit::Config) -> Self {
        Self {
            text_config: text_model::Config::vit_base_patch32(),
            vision_config,
            image_size: 256,
        }
    }
    /// MobileCLIP-S1 configuration.
    pub fn s1() -> Self {
        Self::with_vision(fastvit::Config::mci1())
    }
    /// MobileCLIP-S2 configuration.
    pub fn s2() -> Self {
        Self::with_vision(fastvit::Config::mci2())
    }
}
impl MobileClipModel {
    /// Builds the vision trunk, text tower and projection tensors from `vs`.
    pub fn new(vs: VarBuilder, c: &MobileClipConfig) -> Result<Self> {
        let vision_model = fastvit::fastvit(&c.vision_config, 512, vs.pp("visual.trunk"))?;
        let text_model = text_model::OpenClipTextTransformer::new(vs.pp("text"), &c.text_config)?;
        let text_projection = vs.get(
            (c.text_config.embed_dim, c.text_config.projection_dim),
            "text.text_projection",
        )?;
        // Scalar, stored in log space.
        let logit_scale = vs.get(&[], "logit_scale")?;
        Ok(Self {
            text_model,
            vision_model,
            text_projection,
            logit_scale,
        })
    }
    /// Text embeddings projected into the shared CLIP space.
    pub fn get_text_features(&self, input_ids: &Tensor) -> Result<Tensor> {
        input_ids
            .apply(&self.text_model)?
            .matmul(&self.text_projection)
    }
    /// Image embeddings from the FastViT trunk.
    pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> {
        pixel_values.apply(&self.vision_model)
    }
    /// Returns `(logits_per_text, logits_per_image)`: temperature-scaled
    /// similarities between L2-normalized text and image features.
    pub fn forward(&self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<(Tensor, Tensor)> {
        let image_features = self.get_image_features(pixel_values)?;
        let text_features = self.get_text_features(input_ids)?;
        let image_features_normalized = div_l2_norm(&image_features)?;
        let text_features_normalized = div_l2_norm(&text_features)?;
        let logits_per_text = text_features_normalized.matmul(&image_features_normalized.t()?)?;
        let logit_scale = self.logit_scale.exp()?;
        let logits_per_text = logits_per_text.broadcast_mul(&logit_scale)?;
        let logits_per_image = logits_per_text.t()?;
        Ok((logits_per_text, logits_per_image))
    }
}
/// Normalizes `v` to unit L2 norm along its last dimension.
pub fn div_l2_norm(v: &Tensor) -> Result<Tensor> {
    let norm = v.sqr()?.sum_keepdim(D::Minus1)?.sqrt()?;
    v.broadcast_div(&norm)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/with_tracing.rs | candle-transformers/src/models/with_tracing.rs | use candle::{Module, Result, Tensor};
use candle_nn::VarBuilder;
/// `candle_nn::Embedding` wrapper that records a tracing span per forward call.
#[derive(Debug, Clone)]
pub struct Embedding {
    inner: candle_nn::Embedding,
    span: tracing::Span,
}
impl Embedding {
    /// Builds a `d1 x d2` embedding table from `vb`, with a tracing span attached.
    pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            inner: candle_nn::embedding(d1, d2, vb)?,
            span: tracing::span!(tracing::Level::TRACE, "embedding"),
        })
    }
    /// Wraps an existing weight tensor of shape `(in_size, out_size)`.
    pub fn from_weights(weights: Tensor) -> Result<Self> {
        let (_in_size, out_size) = weights.dims2()?;
        Ok(Self {
            inner: candle_nn::Embedding::new(weights, out_size),
            span: tracing::span!(tracing::Level::TRACE, "embedding"),
        })
    }
    /// Returns the underlying embedding weights.
    pub fn embeddings(&self) -> &Tensor {
        self.inner.embeddings()
    }
}
impl Module for Embedding {
    /// Forwards to the inner embedding while the tracing span is entered.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}
/// `candle_nn::Linear` wrapper that records a tracing span per forward call.
#[derive(Debug, Clone)]
pub struct Linear {
    inner: candle_nn::Linear,
    span: tracing::Span,
}
impl Linear {
    /// Wraps pre-existing weights (and optional bias) in a traced linear layer.
    pub fn from_weights(weights: Tensor, bias: Option<Tensor>) -> Self {
        Self {
            inner: candle_nn::Linear::new(weights, bias),
            span: tracing::span!(tracing::Level::TRACE, "linear"),
        }
    }
}
/// Traced linear layer with an optional bias (`b` selects whether one is loaded).
pub fn linear_b(d1: usize, d2: usize, b: bool, vb: VarBuilder) -> Result<Linear> {
    Ok(Linear {
        inner: candle_nn::linear_b(d1, d2, b, vb)?,
        span: tracing::span!(tracing::Level::TRACE, "linear"),
    })
}
/// Traced linear layer with a bias.
pub fn linear(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> {
    Ok(Linear {
        inner: candle_nn::linear(d1, d2, vb)?,
        span: tracing::span!(tracing::Level::TRACE, "linear"),
    })
}
/// Traced linear layer without a bias.
pub fn linear_no_bias(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> {
    Ok(Linear {
        inner: candle_nn::linear_no_bias(d1, d2, vb)?,
        span: tracing::span!(tracing::Level::TRACE, "linear"),
    })
}
impl Module for Linear {
    /// Forwards to the inner linear layer while the tracing span is entered.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}
/// Wrap the conv2d op to provide some tracing.
#[derive(Debug, Clone)]
pub struct Conv2d {
    inner: candle_nn::Conv2d,
    span: tracing::Span,
}
impl Module for Conv2d {
    /// Forwards to the inner conv2d while the tracing span is entered.
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(x)
    }
}
/// Builds a traced 2D convolution from `vs`.
pub fn conv2d(
    in_channels: usize,
    out_channels: usize,
    kernel_size: usize,
    cfg: candle_nn::Conv2dConfig,
    vs: candle_nn::VarBuilder,
) -> Result<Conv2d> {
    Ok(Conv2d {
        inner: candle_nn::conv2d(in_channels, out_channels, kernel_size, cfg, vs)?,
        span: tracing::span!(tracing::Level::TRACE, "conv2d"),
    })
}
/// QMatMul wrapper adding some tracing.
#[derive(Clone)]
pub struct QMatMul {
    inner: candle::quantized::QMatMul,
    span: tracing::Span,
}
impl QMatMul {
    /// Loads a quantized `(in_dim, out_dim)` weight from `vb` and wraps it.
    pub fn new(
        out_dim: usize,
        in_dim: usize,
        vb: crate::quantized_var_builder::VarBuilder,
    ) -> Result<Self> {
        let ws = vb.get((in_dim, out_dim), "weight")?;
        Self::from_weights(ws)
    }
    /// Wraps an already-loaded quantized weight tensor.
    pub fn from_weights(ws: std::sync::Arc<candle::quantized::QTensor>) -> Result<Self> {
        Ok(Self {
            inner: candle::quantized::QMatMul::from_arc(ws)?,
            span: tracing::span!(tracing::Level::TRACE, "qmatmul"),
        })
    }
}
impl Module for QMatMul {
    /// Forwards to the inner quantized matmul while the tracing span is entered.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}
impl std::fmt::Debug for QMatMul {
    /// Opaque debug output: the quantized weights are not worth dumping.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("QMatMul")
    }
}
/// `candle_nn::LayerNorm` wrapper that records a tracing span per forward call.
#[derive(Clone, Debug)]
pub struct LayerNorm {
    inner: candle_nn::LayerNorm,
    span: tracing::Span,
}
impl LayerNorm {
    /// Builds a traced layer norm from explicit weight/bias tensors.
    pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self {
        Self {
            inner: candle_nn::LayerNorm::new(weight, bias, eps),
            span: tracing::span!(tracing::Level::TRACE, "layer-norm"),
        }
    }
}
impl Module for LayerNorm {
    /// Forwards to the inner layer norm while the tracing span is entered.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}
/// Loads a traced layer norm of dimension `size` from `vb`.
pub fn layer_norm<C: Into<candle_nn::LayerNormConfig>>(
    size: usize,
    c: C,
    vb: VarBuilder,
) -> Result<LayerNorm> {
    Ok(LayerNorm {
        inner: candle_nn::layer_norm(size, c, vb)?,
        span: tracing::span!(tracing::Level::TRACE, "layer-norm"),
    })
}
/// `candle_nn::RmsNorm` wrapper that records a tracing span per forward call.
#[derive(Debug, Clone)]
pub struct RmsNorm {
    inner: candle_nn::RmsNorm,
    span: tracing::Span,
}
impl RmsNorm {
    /// Loads a traced RMS norm of dimension `size` from `vb`.
    pub fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            inner: candle_nn::rms_norm(size, eps, vb)?,
            span: tracing::span!(tracing::Level::TRACE, "rms-norm"),
        })
    }
    /// Traced variant of `candle_nn::RmsNorm::forward_diff`.
    pub fn forward_diff(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward_diff(x)
    }
}
impl Module for RmsNorm {
    /// Forwards to the inner RMS norm while the tracing span is entered.
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(x)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/llama.rs | candle-transformers/src/models/llama.rs | //! Llama inference implementation.
//!
//! See ["LLaMA: Open and Efficient Foundation Language Models"](https://arxiv.org/abs/2302.13971)
//!
//! Implementation based on Hugging Face's [transformers](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py)
use super::with_tracing::{linear_no_bias as linear, Linear, RmsNorm};
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{embedding, Embedding, Module, VarBuilder};
use std::{collections::HashMap, f32::consts::PI};
pub const DEFAULT_MAX_SEQ_LEN: usize = 4096;
/// RoPE frequency-scaling scheme selector, deserialized from `config.json`.
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub enum Llama3RopeType {
    /// Llama 3 style frequency scaling (see the match in `Cache::new`).
    #[serde(rename = "llama3")]
    Llama3,
    /// Plain RoPE without frequency scaling.
    #[default]
    #[serde(rename = "default")]
    Default,
}
/// Parameters for Llama 3 RoPE frequency scaling, as found in `config.json`.
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub struct Llama3RopeConfig {
    /// Divisor applied to low-frequency components.
    pub factor: f32,
    pub low_freq_factor: f32,
    pub high_freq_factor: f32,
    /// Context length the base frequencies were trained for.
    pub original_max_position_embeddings: usize,
    pub rope_type: Llama3RopeType,
}
/// End-of-sequence token(s): some checkpoints declare a single id, others a list.
#[derive(Debug, Clone, serde::Deserialize)]
#[serde(untagged)]
pub enum LlamaEosToks {
    Single(u32),
    Multiple(Vec<u32>),
}
/// Raw model configuration as deserialized from a checkpoint's `config.json`.
/// Converted into the resolved runtime [`Config`] via `into_config`.
#[derive(Debug, Clone, serde::Deserialize)]
pub struct LlamaConfig {
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub vocab_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    /// Absent for pre-GQA checkpoints; defaults to `num_attention_heads`.
    pub num_key_value_heads: Option<usize>,
    pub rms_norm_eps: f64,
    // Defaults to 10_000.0 when the field is missing.
    #[serde(default = "default_rope")]
    pub rope_theta: f32,
    pub bos_token_id: Option<u32>,
    pub eos_token_id: Option<LlamaEosToks>,
    pub rope_scaling: Option<Llama3RopeConfig>,
    pub max_position_embeddings: usize,
    pub tie_word_embeddings: Option<bool>,
}
impl LlamaConfig {
    /// Number of KV heads, defaulting to the attention head count (plain MHA)
    /// when the checkpoint does not specify grouped-query attention.
    pub fn num_key_value_heads(&self) -> usize {
        self.num_key_value_heads.unwrap_or(self.num_attention_heads)
    }
}
/// Serde default for `rope_theta` when absent from `config.json`.
fn default_rope() -> f32 {
    10_000.0
}
impl LlamaConfig {
    /// Converts the raw deserialized config into the runtime [`Config`],
    /// resolving optional fields and recording the flash-attention choice.
    pub fn into_config(self, use_flash_attn: bool) -> Config {
        Config {
            hidden_size: self.hidden_size,
            intermediate_size: self.intermediate_size,
            vocab_size: self.vocab_size,
            num_hidden_layers: self.num_hidden_layers,
            num_attention_heads: self.num_attention_heads,
            num_key_value_heads: self.num_key_value_heads(),
            rms_norm_eps: self.rms_norm_eps,
            rope_theta: self.rope_theta,
            use_flash_attn,
            bos_token_id: self.bos_token_id,
            eos_token_id: self.eos_token_id,
            rope_scaling: self.rope_scaling,
            max_position_embeddings: self.max_position_embeddings,
            tie_word_embeddings: self.tie_word_embeddings.unwrap_or(false),
        }
    }
}
/// Resolved runtime configuration for the Llama model.
#[derive(Debug, Clone)]
pub struct Config {
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub vocab_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub num_key_value_heads: usize,
    pub use_flash_attn: bool,
    pub rms_norm_eps: f64,
    pub rope_theta: f32,
    pub bos_token_id: Option<u32>,
    pub eos_token_id: Option<LlamaEosToks>,
    /// Llama 3 style RoPE scaling; `None` (or `Default`) means plain RoPE.
    pub rope_scaling: Option<Llama3RopeConfig>,
    pub max_position_embeddings: usize,
    pub tie_word_embeddings: bool,
}
impl Config {
    /// Shared 7B geometry; v1 and v2 differ only in the RMS-norm epsilon.
    fn config_7b(use_flash_attn: bool, rms_norm_eps: f64) -> Self {
        Self {
            hidden_size: 4096,
            intermediate_size: 11008,
            vocab_size: 32000,
            num_hidden_layers: 32,
            num_attention_heads: 32,
            num_key_value_heads: 32,
            use_flash_attn,
            rms_norm_eps,
            rope_theta: 10_000.0,
            bos_token_id: None,
            eos_token_id: None,
            rope_scaling: None,
            max_position_embeddings: DEFAULT_MAX_SEQ_LEN,
            tie_word_embeddings: false,
        }
    }
    /// LLaMA v1 7B configuration.
    pub fn config_7b_v1(use_flash_attn: bool) -> Self {
        Self::config_7b(use_flash_attn, 1e-6)
    }
    /// Llama 2 7B configuration.
    pub fn config_7b_v2(use_flash_attn: bool) -> Self {
        Self::config_7b(use_flash_attn, 1e-5)
    }
}
/// Shared inference state: RoPE tables, per-layer KV caches and mask memo.
#[derive(Debug, Clone)]
pub struct Cache {
    // Causal masks memoized by sequence length.
    masks: HashMap<usize, Tensor>,
    pub use_kv_cache: bool,
    // One (key, value) slot per transformer layer.
    kvs: Vec<Option<(Tensor, Tensor)>>,
    // Precomputed RoPE tables, shape (max_position_embeddings, head_dim / 2).
    cos: Tensor,
    sin: Tensor,
    device: Device,
}
/// Unscaled RoPE inverse frequencies: `theta^(-2k/head_dim)` for each even `2k`.
fn calculate_default_inv_freq(cfg: &Config) -> Vec<f32> {
    let head_dim = cfg.hidden_size / cfg.num_attention_heads;
    let mut inv_freqs = Vec::with_capacity(head_dim / 2 + 1);
    let mut i = 0usize;
    while i < head_dim {
        inv_freqs.push(1f32 / cfg.rope_theta.powf(i as f32 / head_dim as f32));
        i += 2;
    }
    inv_freqs
}
impl Cache {
    /// Builds the shared cache: precomputed RoPE cos/sin tables, one empty KV
    /// slot per layer, and an (initially empty) causal-mask memo.
    pub fn new(use_kv_cache: bool, dtype: DType, config: &Config, device: &Device) -> Result<Self> {
        // precompute freqs_cis
        let theta = match &config.rope_scaling {
            // No scaling requested: plain inverse frequencies.
            None
            | Some(Llama3RopeConfig {
                rope_type: Llama3RopeType::Default,
                ..
            }) => calculate_default_inv_freq(config),
            // Llama 3 scaling: keep high frequencies, divide low frequencies by
            // `factor`, and smoothly interpolate in between.
            Some(rope_scaling) => {
                let low_freq_wavelen = rope_scaling.original_max_position_embeddings as f32
                    / rope_scaling.low_freq_factor;
                let high_freq_wavelen = rope_scaling.original_max_position_embeddings as f32
                    / rope_scaling.high_freq_factor;
                calculate_default_inv_freq(config)
                    .into_iter()
                    .map(|freq| {
                        let wavelen = 2. * PI / freq;
                        if wavelen < high_freq_wavelen {
                            freq
                        } else if wavelen > low_freq_wavelen {
                            freq / rope_scaling.factor
                        } else {
                            let smooth = (rope_scaling.original_max_position_embeddings as f32
                                / wavelen
                                - rope_scaling.low_freq_factor)
                                / (rope_scaling.high_freq_factor - rope_scaling.low_freq_factor);
                            (1. - smooth) * freq / rope_scaling.factor + smooth * freq
                        }
                    })
                    .collect::<Vec<_>>()
            }
        };
        let theta = Tensor::new(theta, device)?;
        // Outer product positions x inv_freq -> (max_position_embeddings, head_dim/2).
        let idx_theta = Tensor::arange(0, config.max_position_embeddings as u32, device)?
            .to_dtype(DType::F32)?
            .reshape((config.max_position_embeddings, 1))?
            .matmul(&theta.reshape((1, theta.elem_count()))?)?;
        // This is different from the paper, see:
        // https://github.com/huggingface/transformers/blob/6112b1c6442aaf7affd2b0676a1cd4eee30c45cf/src/transformers/models/llama/modeling_llama.py#L112
        let cos = idx_theta.cos()?.to_dtype(dtype)?;
        let sin = idx_theta.sin()?.to_dtype(dtype)?;
        Ok(Self {
            masks: HashMap::new(),
            use_kv_cache,
            kvs: vec![None; config.num_hidden_layers],
            device: device.clone(),
            cos,
            sin,
        })
    }
    /// Returns (and memoizes) the `t x t` causal mask: 1 above the diagonal
    /// (future positions to be masked out), 0 elsewhere.
    fn mask(&mut self, t: usize) -> Result<Tensor> {
        if let Some(mask) = self.masks.get(&t) {
            Ok(mask.clone())
        } else {
            let mask: Vec<_> = (0..t)
                .flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
                .collect();
            let mask = Tensor::from_slice(&mask, (t, t), &self.device)?;
            self.masks.insert(t, mask.clone());
            Ok(mask)
        }
    }
}
/// Multi-head causal self-attention with rotary embeddings and optional
/// grouped-query attention (fewer KV heads than query heads).
#[derive(Debug, Clone)]
struct CausalSelfAttention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_attention_heads: usize,
    num_key_value_heads: usize,
    head_dim: usize,
    use_flash_attn: bool,
    span: tracing::Span,
    span_rot: tracing::Span,
    // Upper bound on the cached KV sequence length.
    max_position_embeddings: usize,
}
/// Thin wrapper over the flash-attn kernel (enabled by the `flash-attn` feature).
#[cfg(feature = "flash-attn")]
fn flash_attn(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    softmax_scale: f32,
    causal: bool,
) -> Result<Tensor> {
    candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
/// Stub used when the crate is built without the `flash-attn` feature; panics
/// if reached (guarded by `Config::use_flash_attn`).
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
    unimplemented!("compile with '--features flash-attn'")
}
impl CausalSelfAttention {
    /// Applies rotary position embeddings to `x` using the cached cos/sin
    /// tables, starting at absolute position `index_pos`.
    fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> {
        let _enter = self.span_rot.enter();
        let (_b_sz, _, seq_len, _hidden_size) = x.dims4()?;
        let cos = cache.cos.narrow(0, index_pos, seq_len)?;
        let sin = cache.sin.narrow(0, index_pos, seq_len)?;
        candle_nn::rotary_emb::rope(x, &cos, &sin)
    }
    /// Causal self-attention over `x` of shape `(batch, seq_len, hidden_size)`.
    ///
    /// With the KV cache enabled, previously cached keys/values for this layer
    /// are concatenated along the sequence dimension and trimmed to the last
    /// `max_position_embeddings` positions.
    fn forward(
        &self,
        x: &Tensor,
        index_pos: usize,
        block_idx: usize,
        cache: &mut Cache,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b_sz, seq_len, hidden_size) = x.dims3()?;
        let q = self.q_proj.forward(x)?;
        let k = self.k_proj.forward(x)?;
        let v = self.v_proj.forward(x)?;
        // Reshape to (b_sz, n_heads, seq_len, head_dim).
        let q = q
            .reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let k = k
            .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let mut v = v
            .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
            .transpose(1, 2)?;
        let q = self.apply_rotary_emb(&q, index_pos, cache)?;
        let mut k = self.apply_rotary_emb(&k, index_pos, cache)?;
        if cache.use_kv_cache {
            if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] {
                // k/v are (b_sz, num_kv_heads, total_seq_len, head_dim); dim 2
                // is the sequence dimension.
                k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?;
                v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?;
                // Keep only the most recent `max_position_embeddings` positions.
                // Fix: this previously read `dims()[1]` (the KV head count) and
                // narrowed the last dim (head_dim), which would have truncated
                // head_dim had the condition ever triggered; trim along the
                // sequence dimension (2) instead, with the same threshold for
                // both k and v (v used an inconsistent `2 *` threshold).
                let k_seq_len = k.dims()[2];
                if k_seq_len > self.max_position_embeddings {
                    k = k
                        .narrow(
                            2,
                            k_seq_len - self.max_position_embeddings,
                            self.max_position_embeddings,
                        )?
                        .contiguous()?
                }
                let v_seq_len = v.dims()[2];
                if v_seq_len > self.max_position_embeddings {
                    v = v
                        .narrow(
                            2,
                            v_seq_len - self.max_position_embeddings,
                            self.max_position_embeddings,
                        )?
                        .contiguous()?
                }
            }
            cache.kvs[block_idx] = Some((k.clone(), v.clone()))
        }
        // Expand KV heads to match the query head count (GQA).
        let k = self.repeat_kv(k)?;
        let v = self.repeat_kv(v)?;
        let y = if self.use_flash_attn {
            // flash-attn expects (b_sz, seq_len, nheads, head_dim)
            let q = q.transpose(1, 2)?;
            let k = k.transpose(1, 2)?;
            let v = v.transpose(1, 2)?;
            let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
            flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)?.transpose(1, 2)?
        } else {
            // Attention math runs in f32 for stability, then casts back.
            let in_dtype = q.dtype();
            let q = q.to_dtype(DType::F32)?;
            let k = k.to_dtype(DType::F32)?;
            let v = v.to_dtype(DType::F32)?;
            let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
            let att = if seq_len == 1 {
                // Single-token decode: nothing in the future to mask.
                att
            } else {
                let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?;
                masked_fill(&att, &mask, f32::NEG_INFINITY)?
            };
            let att = candle_nn::ops::softmax_last_dim(&att)?;
            // Convert to contiguous as matmul doesn't support strided vs for now.
            att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?
        };
        let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, hidden_size])?;
        let y = self.o_proj.forward(&y)?;
        Ok(y)
    }
    /// Duplicates KV heads to match the query head count (no-op when equal).
    fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
        crate::utils::repeat_kv(x, self.num_attention_heads / self.num_key_value_heads)
    }
    /// Loads the q/k/v/o projections from `vb`.
    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let span = tracing::span!(tracing::Level::TRACE, "attn");
        let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
        let size_in = cfg.hidden_size;
        let size_q = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_attention_heads;
        let size_kv = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_key_value_heads;
        let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?;
        let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?;
        let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?;
        let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_attention_heads: cfg.num_attention_heads,
            num_key_value_heads: cfg.num_key_value_heads,
            head_dim: cfg.hidden_size / cfg.num_attention_heads,
            use_flash_attn: cfg.use_flash_attn,
            span,
            span_rot,
            max_position_embeddings: cfg.max_position_embeddings,
        })
    }
}
/// Where `mask` is non-zero, substitutes the scalar `on_true`; elsewhere keeps
/// the corresponding value from `on_false`.
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let fill = Tensor::new(on_true, on_false.device())?.broadcast_as(mask.shape().dims())?;
    mask.where_cond(&fill, on_false)
}
/// Gated feed-forward block (SwiGLU): gate, up and down projections.
#[derive(Debug, Clone)]
struct Mlp {
    // gate_proj
    c_fc1: Linear,
    // up_proj
    c_fc2: Linear,
    // down_proj
    c_proj: Linear,
    span: tracing::Span,
}
impl Mlp {
    /// SwiGLU feed-forward: `down(silu(gate(x)) * up(x))`.
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let x = (candle_nn::ops::silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?;
        self.c_proj.forward(&x)
    }
    /// Loads the gate/up/down projections from `vb`.
    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let span = tracing::span!(tracing::Level::TRACE, "mlp");
        let h_size = cfg.hidden_size;
        let i_size = cfg.intermediate_size;
        let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?;
        let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?;
        let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?;
        Ok(Self {
            c_fc1,
            c_fc2,
            c_proj,
            span,
        })
    }
}
/// One transformer layer: pre-norm attention and pre-norm MLP, each with a
/// residual connection.
#[derive(Debug, Clone)]
struct Block {
    rms_1: RmsNorm,
    attn: CausalSelfAttention,
    rms_2: RmsNorm,
    mlp: Mlp,
    span: tracing::Span,
}
impl Block {
    /// Forward one layer. `index_pos` is the RoPE/KV-cache position offset,
    /// `block_idx` selects this layer's KV-cache slot.
    fn forward(
        &self,
        x: &Tensor,
        index_pos: usize,
        block_idx: usize,
        cache: &mut Cache,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let residual = x;
        let x = self.rms_1.forward(x)?;
        let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?;
        let residual = &x;
        let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?;
        Ok(x)
    }
    /// Load this layer's weights from the layer's var-builder prefix.
    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let span = tracing::span!(tracing::Level::TRACE, "block");
        let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?;
        let mlp = Mlp::load(vb.pp("mlp"), cfg)?;
        let rms_1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
        let rms_2 = RmsNorm::new(
            cfg.hidden_size,
            cfg.rms_norm_eps,
            vb.pp("post_attention_layernorm"),
        )?;
        Ok(Self {
            rms_1,
            attn,
            rms_2,
            mlp,
            span,
        })
    }
}
/// Llama-style decoder: token embeddings, a stack of blocks, final RmsNorm
/// and a (possibly tied) LM head.
#[derive(Debug, Clone)]
pub struct Llama {
    wte: Embedding,
    blocks: Vec<Block>,
    ln_f: RmsNorm,
    lm_head: Linear,
}
impl Llama {
    // required by LLaVA
    /// Token-id -> embedding lookup (LLaVA splices image features in between
    /// these embeddings before calling `forward_input_embed`).
    pub fn embed(&self, x: &Tensor) -> Result<Tensor> {
        self.wte.forward(x)
    }
    // required by LLaVA
    /// Run the transformer on pre-computed input embeddings instead of token
    /// ids; returns last-position logits converted to f32.
    pub fn forward_input_embed(
        &self,
        input_embed: &Tensor,
        index_pos: usize,
        cache: &mut Cache,
    ) -> Result<Tensor> {
        let (_, seq_len, _) = input_embed.dims3()?;
        let mut x = input_embed.clone();
        for (block_idx, block) in self.blocks.iter().enumerate() {
            x = block.forward(&x, index_pos, block_idx, cache)?;
        }
        let x = self.ln_f.forward(&x)?;
        // Only the last position is needed to sample the next token.
        let x = x.i((.., seq_len - 1, ..))?.contiguous()?;
        let logits = self.lm_head.forward(&x)?;
        logits.to_dtype(DType::F32)
    }
    /// Standard forward over token ids; returns last-position logits as f32.
    pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> {
        let (_b_sz, seq_len) = x.dims2()?;
        let mut x = self.wte.forward(x)?;
        for (block_idx, block) in self.blocks.iter().enumerate() {
            x = block.forward(&x, index_pos, block_idx, cache)?;
        }
        let x = self.ln_f.forward(&x)?;
        let x = x.i((.., seq_len - 1, ..))?.contiguous()?;
        let logits = self.lm_head.forward(&x)?;
        logits.to_dtype(DType::F32)
    }
    /// Load all weights from `vb`. When `tie_word_embeddings` is set, the LM
    /// head re-uses the token-embedding matrix.
    pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let wte = embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?;
        let lm_head = if cfg.tie_word_embeddings {
            Linear::from_weights(wte.embeddings().clone(), None)
        } else {
            linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
        };
        let ln_f = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?;
        // Propagate per-layer loading failures instead of panicking via
        // `unwrap` inside the iterator.
        let blocks = (0..cfg.num_hidden_layers)
            .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), cfg))
            .collect::<Result<Vec<_>>>()?;
        Ok(Self {
            wte,
            blocks,
            ln_f,
            lm_head,
        })
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/granitemoehybrid.rs | candle-transformers/src/models/granitemoehybrid.rs | //! GraniteMoeHybrid is a Long Context Transformer Language Model.
//!
//! A high performance transformer model optimized for efficient processing
//! of very long context sequences
use super::with_tracing::{linear_no_bias as linear, Linear, RmsNorm};
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{embedding, Embedding, Module, VarBuilder};
use std::iter::repeat_n;
use std::{collections::HashMap, f32::consts::PI};
/// Default maximum sequence length when none is configured.
pub const DEFAULT_MAX_SEQ_LEN: usize = 4096;
/// RoPE scaling variant declared in the checkpoint config.
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub enum GraniteMoeHybridRopeType {
    #[serde(rename = "granite")]
    Granite,
    #[default]
    #[serde(rename = "default")]
    Default,
}
/// Llama3-style RoPE frequency-scaling parameters.
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub struct GraniteMoeHybridRopeConfig {
    pub factor: f32,
    pub low_freq_factor: f32,
    pub high_freq_factor: f32,
    pub original_max_position_embeddings: usize,
    pub rope_type: GraniteMoeHybridRopeType,
}
/// Raw model configuration as deserialized from `config.json`.
#[derive(Debug, Clone, serde::Deserialize)]
pub struct GraniteMoeHybridConfig {
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub vocab_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    // Absent => plain multi-head attention (K/V head count == Q head count).
    pub num_key_value_heads: Option<usize>,
    pub rms_norm_eps: f64,
    #[serde(default = "default_rope")]
    pub rope_theta: f32,
    pub bos_token_id: Option<u32>,
    pub eos_token_id: Option<u32>,
    pub rope_scaling: Option<GraniteMoeHybridRopeConfig>,
    pub max_position_embeddings: usize,
    // Per-layer kind; an empty list means all-attention (see `into_config`).
    #[serde(default)]
    pub layer_types: Vec<GraniteMoeHybridLayerType>,
    // Granite-specific scaling multipliers, all defaulting to 1.0.
    #[serde(default = "default_one")]
    pub attention_multiplier: f32,
    #[serde(default = "default_one")]
    pub embedding_multiplier: f32,
    #[serde(default = "default_one")]
    pub residual_multiplier: f32,
    #[serde(default = "default_one")]
    pub logits_scaling: f32,
    #[serde(default)]
    pub shared_intermediate_size: Option<usize>,
}
impl GraniteMoeHybridConfig {
    /// Number of K/V heads, defaulting to the attention-head count (MHA).
    pub fn num_key_value_heads(&self) -> usize {
        self.num_key_value_heads.unwrap_or(self.num_attention_heads)
    }
}
/// Serde fallback for `rope_theta` when absent from the checkpoint config.
fn default_rope() -> f32 {
    1e4
}
/// Serde fallback for the various Granite scaling multipliers.
fn default_one() -> f32 {
    1f32
}
/// Kind of a layer in the hybrid stack.
#[derive(Debug, Clone, serde::Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum GraniteMoeHybridLayerType {
    #[default]
    Attention,
    // Declared in configs but rejected at load time (see `GraniteMoeHybrid::load`).
    Mamba,
}
impl GraniteMoeHybridConfig {
    /// Convert the raw serde configuration into the internal form used at
    /// inference time, resolving optional fields to concrete defaults.
    pub fn into_config(self, use_flash_attn: bool) -> GraniteMoeHybridInternalConfig {
        // Resolve before moving any field out of `self`.
        let num_key_value_heads = self.num_key_value_heads();
        // An empty `layer_types` means "all attention layers". Since `self`
        // is consumed, the vector can be moved instead of cloned.
        let layer_types = if self.layer_types.is_empty() {
            vec![GraniteMoeHybridLayerType::Attention; self.num_hidden_layers]
        } else {
            self.layer_types
        };
        // The shared-expert MLP width defaults to the dense intermediate size.
        let shared_intermediate_size = self
            .shared_intermediate_size
            .unwrap_or(self.intermediate_size);
        GraniteMoeHybridInternalConfig {
            hidden_size: self.hidden_size,
            intermediate_size: self.intermediate_size,
            shared_intermediate_size,
            vocab_size: self.vocab_size,
            num_hidden_layers: self.num_hidden_layers,
            num_attention_heads: self.num_attention_heads,
            num_key_value_heads,
            use_flash_attn,
            rms_norm_eps: self.rms_norm_eps,
            rope_theta: self.rope_theta,
            bos_token_id: self.bos_token_id,
            eos_token_id: self.eos_token_id,
            rope_scaling: self.rope_scaling,
            max_position_embeddings: self.max_position_embeddings,
            layer_types,
            attention_multiplier: self.attention_multiplier,
            embedding_multiplier: self.embedding_multiplier,
            residual_multiplier: self.residual_multiplier,
            logits_scaling: self.logits_scaling,
        }
    }
}
/// Fully-resolved configuration used by the model at inference time.
#[derive(Debug, Clone)]
pub struct GraniteMoeHybridInternalConfig {
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub shared_intermediate_size: usize,
    pub vocab_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub num_key_value_heads: usize,
    pub use_flash_attn: bool,
    pub rms_norm_eps: f64,
    pub rope_theta: f32,
    pub bos_token_id: Option<u32>,
    pub eos_token_id: Option<u32>,
    pub rope_scaling: Option<GraniteMoeHybridRopeConfig>,
    pub max_position_embeddings: usize,
    pub layer_types: Vec<GraniteMoeHybridLayerType>,
    pub attention_multiplier: f32,
    pub embedding_multiplier: f32,
    pub residual_multiplier: f32,
    pub logits_scaling: f32,
}
/// Shared inference state: per-layer KV caches, memoized causal masks and
/// the precomputed RoPE cos/sin tables.
#[derive(Debug, Clone)]
pub struct GraniteMoeHybridCache {
    // Causal masks memoized by sequence length.
    masks: HashMap<usize, Tensor>,
    pub use_kv_cache: bool,
    // One (k, v) pair per layer; `None` until the layer runs once.
    kvs: Vec<Option<(Tensor, Tensor)>>,
    cos: Tensor,
    sin: Tensor,
    device: Device,
}
/// Standard RoPE inverse-frequency spectrum: `theta^(-i/head_dim)` for even `i`.
fn calculate_default_inv_freq(cfg: &GraniteMoeHybridInternalConfig) -> Vec<f32> {
    let head_dim = cfg.hidden_size / cfg.num_attention_heads;
    let mut inv_freqs = Vec::with_capacity(head_dim.div_ceil(2));
    let mut i = 0;
    while i < head_dim {
        inv_freqs.push(1f32 / cfg.rope_theta.powf(i as f32 / head_dim as f32));
        i += 2;
    }
    inv_freqs
}
impl GraniteMoeHybridCache {
    /// Build the cache, precomputing the RoPE cos/sin tables (with optional
    /// Llama3-style frequency scaling) for every position up to
    /// `max_position_embeddings`.
    pub fn new(
        use_kv_cache: bool,
        dtype: DType,
        config: &GraniteMoeHybridInternalConfig,
        device: &Device,
    ) -> Result<Self> {
        // precompute freqs_cis
        let theta = match &config.rope_scaling {
            // No scaling (or explicit "default"): plain inverse-frequency spectrum.
            None
            | Some(GraniteMoeHybridRopeConfig {
                rope_type: GraniteMoeHybridRopeType::Default,
                ..
            }) => calculate_default_inv_freq(config),
            Some(rope_scaling) => {
                // Llama3-style scaling: high frequencies are kept, low
                // frequencies are divided by `factor`, with a smooth
                // interpolation for wavelengths in between.
                let low_freq_wavelen = rope_scaling.original_max_position_embeddings as f32
                    / rope_scaling.low_freq_factor;
                let high_freq_wavelen = rope_scaling.original_max_position_embeddings as f32
                    / rope_scaling.high_freq_factor;
                calculate_default_inv_freq(config)
                    .into_iter()
                    .map(|freq| {
                        let wavelen = 2. * PI / freq;
                        if wavelen < high_freq_wavelen {
                            freq
                        } else if wavelen > low_freq_wavelen {
                            freq / rope_scaling.factor
                        } else {
                            let smooth = (rope_scaling.original_max_position_embeddings as f32
                                / wavelen
                                - rope_scaling.low_freq_factor)
                                / (rope_scaling.high_freq_factor - rope_scaling.low_freq_factor);
                            (1. - smooth) * freq / rope_scaling.factor + smooth * freq
                        }
                    })
                    .collect::<Vec<_>>()
            }
        };
        let theta = Tensor::new(theta, device)?;
        // Outer product (positions x inverse frequencies) -> per-position angles.
        let idx_theta = Tensor::arange(0, config.max_position_embeddings as u32, device)?
            .to_dtype(DType::F32)?
            .reshape((config.max_position_embeddings, 1))?
            .matmul(&theta.reshape((1, theta.elem_count()))?)?;
        let cos = idx_theta.cos()?.to_dtype(dtype)?;
        let sin = idx_theta.sin()?.to_dtype(dtype)?;
        Ok(Self {
            masks: HashMap::new(),
            use_kv_cache,
            kvs: vec![None; config.num_hidden_layers],
            device: device.clone(),
            cos,
            sin,
        })
    }
    /// Return (and memoize) the `t x t` causal mask; a 1 marks a future
    /// position to be masked out.
    fn mask(&mut self, t: usize) -> Result<Tensor> {
        if let Some(mask) = self.masks.get(&t) {
            Ok(mask.clone())
        } else {
            let mut mask: Vec<u8> = Vec::with_capacity(t * t);
            (0..t).for_each(|i| {
                mask.extend(repeat_n(0, i + 1));
                mask.extend(repeat_n(1, t - i - 1));
            });
            let mask = Tensor::from_slice(&mask, (t, t), &self.device)?;
            self.masks.insert(t, mask.clone());
            Ok(mask)
        }
    }
}
/// Multi-head causal self-attention with rotary embeddings and optional
/// grouped-query attention (fewer K/V heads than Q heads).
#[derive(Debug, Clone)]
struct CausalSelfAttention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_attention_heads: usize,
    num_key_value_heads: usize,
    head_dim: usize,
    use_flash_attn: bool,
    span: tracing::Span,
    span_rot: tracing::Span,
    max_position_embeddings: usize,
    // Granite scales attention scores by this configured value instead of
    // the usual 1/sqrt(head_dim).
    attention_multiplier: f32,
}
/// Flash-attention kernel wrapper (expects (b, seq, heads, head_dim) inputs).
#[cfg(feature = "flash-attn")]
fn flash_attn(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    softmax_scale: f32,
    causal: bool,
) -> Result<Tensor> {
    candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
/// Stub used when the `flash-attn` feature is disabled.
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
    unimplemented!("compile with '--features flash-attn'")
}
impl CausalSelfAttention {
    /// Apply rotary embeddings to `x` (B x H x L x D) using the cos/sin
    /// tables precomputed in `cache`, starting at `index_pos`.
    fn apply_rotary_emb(
        &self,
        x: &Tensor,
        index_pos: usize,
        cache: &GraniteMoeHybridCache,
    ) -> Result<Tensor> {
        let _enter = self.span_rot.enter();
        let (_b_sz, _, seq_len, _hidden_size) = x.dims4()?;
        let cos = cache.cos.narrow(0, index_pos, seq_len)?;
        let sin = cache.sin.narrow(0, index_pos, seq_len)?;
        candle_nn::rotary_emb::rope(x, &cos, &sin)
    }
    /// Causal self-attention over `x` (B x L x hidden); updates this layer's
    /// KV cache when enabled.
    fn forward(
        &self,
        x: &Tensor,
        index_pos: usize,
        block_idx: usize,
        cache: &mut GraniteMoeHybridCache,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b_sz, seq_len, hidden_size) = x.dims3()?;
        let q = self.q_proj.forward(x)?;
        let k = self.k_proj.forward(x)?;
        let v = self.v_proj.forward(x)?;
        // Reshape to (b, heads, seq, head_dim).
        let q = q
            .reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let k = k
            .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let mut v = v
            .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
            .transpose(1, 2)?;
        let q = self.apply_rotary_emb(&q, index_pos, cache)?;
        let mut k = self.apply_rotary_emb(&k, index_pos, cache)?;
        if cache.use_kv_cache {
            if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] {
                // K/V are (b, kv_heads, seq, head_dim): concatenate along the
                // sequence axis (dim 2) and cap it at the RoPE table size.
                // Fix: the previous code read `dims()[1]` (the head-count
                // axis) as the sequence length and narrowed `D::Minus1` (the
                // head-dim axis), so the cap never applied correctly; k and v
                // also used inconsistent thresholds.
                k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?;
                v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?;
                let k_seq_len = k.dims()[2];
                if k_seq_len > self.max_position_embeddings {
                    k = k
                        .narrow(
                            2,
                            k_seq_len - self.max_position_embeddings,
                            self.max_position_embeddings,
                        )?
                        .contiguous()?
                }
                let v_seq_len = v.dims()[2];
                if v_seq_len > self.max_position_embeddings {
                    v = v
                        .narrow(
                            2,
                            v_seq_len - self.max_position_embeddings,
                            self.max_position_embeddings,
                        )?
                        .contiguous()?
                }
            }
            cache.kvs[block_idx] = Some((k.clone(), v.clone()))
        }
        // Expand K/V heads to the query head count (GQA).
        let k = self.repeat_kv(k)?;
        let v = self.repeat_kv(v)?;
        let y = if self.use_flash_attn {
            // flash-attn expects (b_sz, seq_len, nheads, head_dim)
            let q = q.transpose(1, 2)?;
            let k = k.transpose(1, 2)?;
            let v = v.transpose(1, 2)?;
            flash_attn(&q, &k, &v, self.attention_multiplier, seq_len > 1)?.transpose(1, 2)?
        } else {
            let in_dtype = q.dtype();
            let q = q.to_dtype(DType::F32)?;
            let k = k.to_dtype(DType::F32)?;
            let v = v.to_dtype(DType::F32)?;
            // Scores are scaled by the configured attention multiplier
            // instead of 1/sqrt(head_dim).
            let att = q
                .matmul(&k.t()?)?
                .affine(self.attention_multiplier as f64, 0.)?;
            let att = if seq_len == 1 {
                // Single query position: nothing in the future to mask.
                att
            } else {
                let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?;
                masked_fill(&att, &mask, f32::NEG_INFINITY)?
            };
            let att = candle_nn::ops::softmax(&att, D::Minus1)?;
            // Convert to contiguous as matmul doesn't support strided vs for now.
            att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?
        };
        let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, hidden_size])?;
        let y = self.o_proj.forward(&y)?;
        Ok(y)
    }
    /// Repeat K/V heads so the head count matches the query heads (GQA).
    fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
        crate::utils::repeat_kv(x, self.num_attention_heads / self.num_key_value_heads)
    }
    /// Load the attention projections; widths are rounded to whole heads.
    fn load(vb: VarBuilder, cfg: &GraniteMoeHybridInternalConfig) -> Result<Self> {
        let span = tracing::span!(tracing::Level::TRACE, "attn");
        let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
        let size_in = cfg.hidden_size;
        let size_q = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_attention_heads;
        let size_kv = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_key_value_heads;
        let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?;
        let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?;
        let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?;
        let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_attention_heads: cfg.num_attention_heads,
            num_key_value_heads: cfg.num_key_value_heads,
            head_dim: cfg.hidden_size / cfg.num_attention_heads,
            use_flash_attn: cfg.use_flash_attn,
            span,
            span_rot,
            max_position_embeddings: cfg.max_position_embeddings,
            attention_multiplier: cfg.attention_multiplier,
        })
    }
}
/// Utility function: replace the entries of `on_false` selected by `mask`
/// (non-zero) with `on_true`.
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let fill = Tensor::new(on_true, on_false.device())?.broadcast_as(mask.shape().dims())?;
    mask.where_cond(&fill, on_false)
}
// Shared-expert feed-forward with a gated activation (SwiGLU-style): one
// fused input projection produces both the gate and the value halves, adding
// non-linearity and capacity to learn complex patterns.
#[derive(Debug, Clone)]
struct MultiLayerPercepton {
    input_linear: Linear,
    output_linear: Linear,
    span: tracing::Span,
}
impl MultiLayerPercepton {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        // Split the fused projection into its gate/value halves.
        let halves = self.input_linear.forward(x)?.chunk(2, D::Minus1)?;
        let gated = (candle_nn::ops::silu(&halves[0])? * &halves[1])?;
        self.output_linear.forward(&gated)
    }
    fn load(vb: VarBuilder, cfg: &GraniteMoeHybridInternalConfig) -> Result<Self> {
        let span = tracing::span!(tracing::Level::TRACE, "mlp");
        let (h_size, inter_size) = (cfg.hidden_size, cfg.shared_intermediate_size);
        Ok(Self {
            input_linear: linear(h_size, inter_size * 2, vb.pp("shared_mlp.input_linear"))?,
            output_linear: linear(inter_size, h_size, vb.pp("shared_mlp.output_linear"))?,
            span,
        })
    }
}
// A Block is actually a Transformer layer: pre-norm self-attention followed
// by a pre-norm shared MLP, with Granite's residual multiplier applied to
// both sub-layer outputs before the residual additions.
#[derive(Debug, Clone)]
struct Block {
    rms_1: RmsNorm,
    attn: CausalSelfAttention,
    rms_2: RmsNorm,
    multi_layer_percepton: MultiLayerPercepton,
    span: tracing::Span,
    // `residual_multiplier` from the config.
    residual_scale: f32,
}
impl Block {
    fn forward(
        &self,
        x: &Tensor,
        index_pos: usize,
        block_idx: usize,
        cache: &mut GraniteMoeHybridCache,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let residual = x;
        let x = self.rms_1.forward(x)?;
        let attn = self.attn.forward(&x, index_pos, block_idx, cache)?;
        // Scale the sub-layer output before adding it back to the residual.
        let attn = scale_tensor(attn, self.residual_scale)?;
        let x = (attn + residual)?;
        let residual = &x;
        let multi_layer_percepton_out = self
            .multi_layer_percepton
            .forward(&self.rms_2.forward(&x)?)?;
        let multi_layer_percepton_out =
            scale_tensor(multi_layer_percepton_out, self.residual_scale)?;
        let x = (multi_layer_percepton_out + residual)?;
        Ok(x)
    }
    fn load(vb: VarBuilder, cfg: &GraniteMoeHybridInternalConfig) -> Result<Self> {
        let span = tracing::span!(tracing::Level::TRACE, "block");
        let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?;
        // The MLP weights live under `shared_mlp.*` inside this layer's
        // prefix, hence `vb.clone()` rather than a sub-prefix here.
        let multi_layer_percepton = MultiLayerPercepton::load(vb.clone(), cfg)?;
        let rms_1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
        let rms_2 = RmsNorm::new(
            cfg.hidden_size,
            cfg.rms_norm_eps,
            vb.pp("post_attention_layernorm"),
        )?;
        Ok(Self {
            rms_1,
            attn,
            rms_2,
            multi_layer_percepton,
            span,
            residual_scale: cfg.residual_multiplier,
        })
    }
}
/// The GraniteMoeHybrid model: scaled token embeddings, a stack of layers,
/// final RmsNorm and an output head tied to the input embeddings.
#[derive(Debug, Clone)]
pub struct GraniteMoeHybrid {
    word_token_embedding: Embedding,
    blocks: Vec<Block>,
    ln_f: RmsNorm,
    // Precomputed 1 / logits_scaling (1.0 when scaling is zero/disabled).
    logits_scale: f32,
    embedding_scale: f32,
}
impl GraniteMoeHybrid {
    /// Forward over token ids; returns f32 logits for the last position only.
    pub fn forward(
        &self,
        x: &Tensor,
        index_pos: usize,
        cache: &mut GraniteMoeHybridCache,
    ) -> Result<Tensor> {
        let (_b_sz, seq_len) = x.dims2()?;
        let x = self.word_token_embedding.forward(x)?;
        // Granite scales the embeddings by a configured multiplier.
        let x = scale_tensor(x, self.embedding_scale)?;
        let x = self
            .blocks
            .iter()
            .enumerate()
            .try_fold(x, |x, (block_idx, block)| {
                block.forward(&x, index_pos, block_idx, cache)
            })?;
        // Final normalization
        let x = self.ln_f.forward(&x)?;
        let x = x.i((.., seq_len - 1, ..))?.contiguous()?;
        // Project to vocabulary size (output head tied to the embeddings).
        let logits = x.matmul(&self.word_token_embedding.embeddings().t()?)?;
        let logits = logits.to_dtype(DType::F32)?;
        // Scale the logits if needed (that's also different from Granite 1)
        let scaled_logits = if (self.logits_scale - 1.0).abs() < f32::EPSILON {
            logits
        } else {
            logits.affine(self.logits_scale as f64, 0.)?
        };
        Ok(scaled_logits)
    }
    /// Load all weights; fails if `layer_types` disagrees with the layer
    /// count or contains (unsupported) Mamba layers.
    pub fn load(vb: VarBuilder, cfg: &GraniteMoeHybridInternalConfig) -> Result<Self> {
        let wte = embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?;
        let ln_f = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?;
        if cfg.layer_types.len() != cfg.num_hidden_layers {
            candle::bail!(
                "layer_types length {} does not match num_hidden_layers {}",
                cfg.layer_types.len(),
                cfg.num_hidden_layers
            );
        }
        let blocks = cfg
            .layer_types
            .iter()
            .enumerate()
            .map(|(idx, layer_ty)| match layer_ty {
                GraniteMoeHybridLayerType::Attention => {
                    Block::load(vb.pp(format!("model.layers.{idx}")), cfg)
                }
                GraniteMoeHybridLayerType::Mamba => {
                    // TODO: Not supporting Mamba layers (blocks) for now,
                    // so we only iterate over attention layers.
                    candle::bail!(
                        "mamba layers are not yet supported in GraniteMoeHybrid inference"
                    )
                }
            })
            .collect::<Result<Vec<_>>>()?;
        Ok(Self {
            word_token_embedding: wte,
            blocks,
            ln_f,
            // Guard against division by zero when scaling is disabled.
            logits_scale: if cfg.logits_scaling == 0.0 {
                1.0
            } else {
                1.0 / cfg.logits_scaling
            },
            embedding_scale: cfg.embedding_multiplier,
        })
    }
}
/// Multiply `tensor` by `scale`, skipping the op entirely when the scale is 1.
fn scale_tensor(tensor: Tensor, scale: f32) -> Result<Tensor> {
    match (scale - 1.0).abs() < f32::EPSILON {
        true => Ok(tensor),
        false => tensor.affine(f64::from(scale), 0.),
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/jina_bert.rs | candle-transformers/src/models/jina_bert.rs | //! # JinaBERT inference implementation
//!
//! Based on implementation from huggingface for Jina BERT and its variants
//!
//! See: [Jina Embeddings on HuggingFace](https://huggingface.co/jinaai/jina-embeddings-v2-base-en)
use super::with_tracing::{linear, linear_no_bias, Embedding, Linear};
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, Module, VarBuilder};
use serde::Deserialize;
/// All computations run in f32.
pub const DTYPE: DType = DType::F32;
/// Position-embedding flavour from the checkpoint config; only `alibi` is
/// supported by this implementation (see `BertEncoder::new`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
    Absolute,
    Alibi,
}
// https://huggingface.co/jinaai/jina-bert-implementation/blob/main/configuration_bert.py
/// Jina-BERT configuration, mirroring the upstream `configuration_bert.py`.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub intermediate_size: usize,
    pub hidden_act: candle_nn::Activation,
    pub max_position_embeddings: usize,
    pub type_vocab_size: usize,
    pub initializer_range: f64,
    pub layer_norm_eps: f64,
    pub pad_token_id: usize,
    pub position_embedding_type: PositionEmbeddingType,
}
impl Config {
    /// Configuration of `jinaai/jina-embeddings-v2-base-en`.
    pub fn v2_base() -> Self {
        // https://huggingface.co/jinaai/jina-embeddings-v2-base-en/blob/main/config.json
        Self {
            vocab_size: 30528,
            hidden_size: 768,
            num_hidden_layers: 12,
            num_attention_heads: 12,
            intermediate_size: 3072,
            hidden_act: candle_nn::Activation::Gelu,
            max_position_embeddings: 8192,
            type_vocab_size: 2,
            initializer_range: 0.02,
            layer_norm_eps: 1e-12,
            pad_token_id: 0,
            position_embedding_type: PositionEmbeddingType::Alibi,
        }
    }
    /// Field-by-field constructor mirroring the Python configuration.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        vocab_size: usize,
        hidden_size: usize,
        num_hidden_layers: usize,
        num_attention_heads: usize,
        intermediate_size: usize,
        hidden_act: candle_nn::Activation,
        max_position_embeddings: usize,
        type_vocab_size: usize,
        initializer_range: f64,
        layer_norm_eps: f64,
        pad_token_id: usize,
        position_embedding_type: PositionEmbeddingType,
    ) -> Self {
        Config {
            vocab_size,
            hidden_size,
            num_hidden_layers,
            num_attention_heads,
            intermediate_size,
            hidden_act,
            max_position_embeddings,
            type_vocab_size,
            initializer_range,
            layer_norm_eps,
            pad_token_id,
            position_embedding_type,
        }
    }
}
/// Token + token-type embeddings followed by LayerNorm. There are no learned
/// position embeddings: positions are handled by the ALiBi bias in the encoder.
#[derive(Clone, Debug)]
struct BertEmbeddings {
    word_embeddings: Embedding,
    // no position_embeddings as we only support alibi.
    token_type_embeddings: Embedding,
    layer_norm: LayerNorm,
    span: tracing::Span,
}
impl BertEmbeddings {
    fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let word_embeddings =
            Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("word_embeddings"))?;
        let token_type_embeddings = Embedding::new(
            cfg.type_vocab_size,
            cfg.hidden_size,
            vb.pp("token_type_embeddings"),
        )?;
        let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
        Ok(Self {
            word_embeddings,
            token_type_embeddings,
            layer_norm,
            span: tracing::span!(tracing::Level::TRACE, "embeddings"),
        })
    }
}
impl Module for BertEmbeddings {
    fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b_size, seq_len) = input_ids.dims2()?;
        let input_embeddings = self.word_embeddings.forward(input_ids)?;
        // Every token uses token-type 0 (single-segment input).
        let token_type_embeddings = Tensor::zeros(seq_len, DType::U32, input_ids.device())?
            .broadcast_left(b_size)?
            .apply(&self.token_type_embeddings)?;
        let embeddings = (&input_embeddings + token_type_embeddings)?;
        let embeddings = self.layer_norm.forward(&embeddings)?;
        Ok(embeddings)
    }
}
/// Multi-head self-attention (Q/K/V projections only; the output projection
/// lives in `BertSelfOutput`).
#[derive(Clone, Debug)]
struct BertSelfAttention {
    query: Linear,
    key: Linear,
    value: Linear,
    num_attention_heads: usize,
    attention_head_size: usize,
    span: tracing::Span,
    span_softmax: tracing::Span,
}
impl BertSelfAttention {
    fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let attention_head_size = cfg.hidden_size / cfg.num_attention_heads;
        let all_head_size = cfg.num_attention_heads * attention_head_size;
        let hidden_size = cfg.hidden_size;
        let query = linear(hidden_size, all_head_size, vb.pp("query"))?;
        let value = linear(hidden_size, all_head_size, vb.pp("value"))?;
        let key = linear(hidden_size, all_head_size, vb.pp("key"))?;
        Ok(Self {
            query,
            key,
            value,
            num_attention_heads: cfg.num_attention_heads,
            attention_head_size,
            span: tracing::span!(tracing::Level::TRACE, "self-attn"),
            span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"),
        })
    }
    /// Reshape (.., seq, all_head_size) -> (.., heads, seq, head_size).
    fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
        let mut x_shape = xs.dims().to_vec();
        x_shape.pop();
        x_shape.push(self.num_attention_heads);
        x_shape.push(self.attention_head_size);
        xs.reshape(x_shape)?.transpose(1, 2)?.contiguous()
    }
    /// Scaled dot-product attention with the broadcast ALiBi `bias` added to
    /// the scores; no causal mask (bidirectional encoder).
    fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let query_layer = self.query.forward(xs)?;
        let key_layer = self.key.forward(xs)?;
        let value_layer = self.value.forward(xs)?;
        let query_layer = self.transpose_for_scores(&query_layer)?;
        let key_layer = self.transpose_for_scores(&key_layer)?;
        let value_layer = self.transpose_for_scores(&value_layer)?;
        let attention_scores = query_layer.matmul(&key_layer.t()?)?;
        let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?;
        let attention_scores = attention_scores.broadcast_add(bias)?;
        let attention_probs = {
            let _enter_sm = self.span_softmax.enter();
            candle_nn::ops::softmax_last_dim(&attention_scores)?
        };
        let context_layer = attention_probs.matmul(&value_layer)?;
        // Back to (.., seq, heads * head_size).
        let context_layer = context_layer.transpose(1, 2)?.contiguous()?;
        let context_layer = context_layer.flatten_from(D::Minus2)?;
        Ok(context_layer)
    }
}
/// Attention output projection followed by residual add and LayerNorm.
#[derive(Clone, Debug)]
struct BertSelfOutput {
    dense: Linear,
    layer_norm: LayerNorm,
    span: tracing::Span,
}
impl BertSelfOutput {
    fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        Ok(Self {
            dense: linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?,
            layer_norm: layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?,
            span: tracing::span!(tracing::Level::TRACE, "self-out"),
        })
    }
    fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let projected = self.dense.forward(xs)?;
        self.layer_norm.forward(&(projected + input_tensor)?)
    }
}
/// Full attention sub-layer: self-attention plus its output projection,
/// residual and LayerNorm.
#[derive(Clone, Debug)]
struct BertAttention {
    self_attention: BertSelfAttention,
    self_output: BertSelfOutput,
    span: tracing::Span,
}
impl BertAttention {
    fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        Ok(Self {
            self_attention: BertSelfAttention::new(vb.pp("self"), cfg)?,
            self_output: BertSelfOutput::new(vb.pp("output"), cfg)?,
            span: tracing::span!(tracing::Level::TRACE, "attn"),
        })
    }
    fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let attended = self.self_attention.forward(xs, bias)?;
        self.self_output.forward(&attended, xs)
    }
}
/// Gated-linear-unit MLP (GeGLU): one fused projection yields the gate and
/// value halves; the output is residual-added and layer-normed.
#[derive(Clone, Debug)]
struct BertGLUMLP {
    gated_layers: Linear,
    act: candle_nn::Activation,
    wo: Linear,
    layernorm: LayerNorm,
    // Width of each half of the fused projection.
    intermediate_size: usize,
}
impl BertGLUMLP {
    fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let gated_layers = linear_no_bias(
            cfg.hidden_size,
            cfg.intermediate_size * 2,
            vb.pp("gated_layers"),
        )?;
        let act = candle_nn::Activation::Gelu; // geglu
        let wo = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("wo"))?;
        let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("layernorm"))?;
        Ok(Self {
            gated_layers,
            act,
            wo,
            layernorm,
            intermediate_size: cfg.intermediate_size,
        })
    }
}
impl Module for BertGLUMLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let residual = xs;
        let xs = xs.apply(&self.gated_layers)?;
        // First half is the gate, second half the value.
        let gated = xs.narrow(D::Minus1, 0, self.intermediate_size)?;
        let non_gated = xs.narrow(D::Minus1, self.intermediate_size, self.intermediate_size)?;
        let xs = (gated.apply(&self.act) * non_gated)?.apply(&self.wo);
        (xs + residual)?.apply(&self.layernorm)
    }
}
/// One encoder layer: attention block followed by the gated-GELU MLP.
#[derive(Clone, Debug)]
struct BertLayer {
    attention: BertAttention,
    mlp: BertGLUMLP,
    span: tracing::Span,
}
impl BertLayer {
    fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        Ok(Self {
            attention: BertAttention::new(vb.pp("attention"), cfg)?,
            mlp: BertGLUMLP::new(vb.pp("mlp"), cfg)?,
            span: tracing::span!(tracing::Level::TRACE, "layer"),
        })
    }
    fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let attended = self.attention.forward(xs, bias)?;
        attended.apply(&self.mlp)
    }
}
/// Precompute the ALiBi attention bias: per-head (negative) slopes multiplied
/// by the absolute distance |i - j|; shape (1, n_heads, seq, seq) on CPU.
fn build_alibi_bias(cfg: &Config) -> Result<Tensor> {
    let n_heads = cfg.num_attention_heads;
    let seq_len = cfg.max_position_embeddings;
    let alibi_bias = Tensor::arange(0, seq_len as i64, &Device::Cpu)?.to_dtype(DType::F32)?;
    // |i - j| distance matrix, broadcast over heads.
    let alibi_bias = {
        let a1 = alibi_bias.reshape((1, seq_len))?;
        let a2 = alibi_bias.reshape((seq_len, 1))?;
        a1.broadcast_sub(&a2)?.abs()?.broadcast_left(n_heads)?
    };
    // Slopes are computed for the next power-of-two head count ...
    let mut n_heads2 = 1;
    while n_heads2 < n_heads {
        n_heads2 *= 2
    }
    let slopes = (1..=n_heads2)
        .map(|v| -1f32 / 2f32.powf((v * 8) as f32 / n_heads2 as f32))
        .collect::<Vec<_>>();
    // ... and, for non-power-of-two head counts, the odd-indexed slopes are
    // interleaved ahead of the even-indexed ones and truncated to `n_heads`.
    let slopes = if n_heads2 == n_heads {
        slopes
    } else {
        slopes
            .iter()
            .skip(1)
            .step_by(2)
            .chain(slopes.iter().step_by(2))
            .take(n_heads)
            .cloned()
            .collect::<Vec<f32>>()
    };
    // Reshape to (1, n_heads, 1, 1) so the slopes broadcast over distances.
    let slopes = Tensor::new(slopes, &Device::Cpu)?.reshape((1, (), 1, 1))?;
    alibi_bias.to_dtype(DType::F32)?.broadcast_mul(&slopes)
}
/// The encoder stack; all layers share a single precomputed ALiBi bias.
#[derive(Clone, Debug)]
struct BertEncoder {
    // Full-size ALiBi bias, sliced per forward to the actual sequence length.
    alibi: Tensor,
    layers: Vec<BertLayer>,
    span: tracing::Span,
}
impl BertEncoder {
    fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        if cfg.position_embedding_type != PositionEmbeddingType::Alibi {
            candle::bail!("only alibi is supported as a position-embedding-type")
        }
        let layers = (0..cfg.num_hidden_layers)
            .map(|index| BertLayer::new(vb.pp(format!("layer.{index}")), cfg))
            .collect::<Result<Vec<_>>>()?;
        let span = tracing::span!(tracing::Level::TRACE, "encoder");
        let alibi = build_alibi_bias(cfg)?.to_device(vb.device())?;
        Ok(Self {
            alibi,
            layers,
            span,
        })
    }
}
impl Module for BertEncoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let seq_len = xs.dim(1)?;
        // Slice the cached bias down to the current sequence length.
        let alibi_bias = self.alibi.i((.., .., ..seq_len, ..seq_len))?;
        let mut xs = xs.clone();
        for layer in self.layers.iter() {
            xs = layer.forward(&xs, &alibi_bias)?
        }
        Ok(xs)
    }
}
/// Jina-BERT: embeddings followed by the ALiBi encoder stack.
#[derive(Clone, Debug)]
pub struct BertModel {
    embeddings: BertEmbeddings,
    encoder: BertEncoder,
    pub device: Device,
    span: tracing::Span,
}
impl BertModel {
    pub fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        Ok(Self {
            embeddings: BertEmbeddings::new(vb.pp("embeddings"), cfg)?,
            encoder: BertEncoder::new(vb.pp("encoder"), cfg)?,
            device: vb.device().clone(),
            span: tracing::span!(tracing::Level::TRACE, "model"),
        })
    }
}
impl Module for BertModel {
    fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        // Embed the token ids, then run the full encoder stack.
        input_ids.apply(&self.embeddings)?.apply(&self.encoder)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/qwen3.rs | candle-transformers/src/models/qwen3.rs | use crate::{
models::with_tracing::{linear_b, linear_no_bias, Linear, RmsNorm},
utils::repeat_kv,
};
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{kv_cache::ConcatKvCache, Activation, VarBuilder};
use std::sync::Arc;
/// Qwen3 model configuration as deserialized from `config.json`.
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    // Explicit head dimension (not necessarily hidden_size / num_heads).
    pub head_dim: usize,
    pub attention_bias: bool,
    pub num_key_value_heads: usize,
    pub max_position_embeddings: usize,
    pub sliding_window: Option<usize>,
    pub max_window_layers: usize,
    pub tie_word_embeddings: bool,
    pub rope_theta: f64,
    pub rms_norm_eps: f64,
    // Rejected at load time: sliding-window attention is unsupported here.
    pub use_sliding_window: bool,
    pub hidden_act: Activation,
}
/// Precomputed rotary-embedding tables (cos/sin per position and frequency).
#[derive(Debug, Clone)]
pub(crate) struct Qwen3RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
impl Qwen3RotaryEmbedding {
    /// Precompute the tables for all positions up to `max_position_embeddings`.
    pub(crate) fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let dim = cfg.head_dim;
        let max_seq_len = cfg.max_position_embeddings;
        // Standard RoPE inverse-frequency spectrum: theta^(-i/dim) for even i.
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(DType::F32)?;
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(DType::F32)?
            .reshape((max_seq_len, 1))?;
        // Outer product (positions x inverse frequencies) -> angles.
        let freqs = t.matmul(&inv_freq)?;
        Ok(Self {
            sin: freqs.sin()?.to_dtype(dtype)?,
            cos: freqs.cos()?.to_dtype(dtype)?,
        })
    }
    /// Apply RoPE (q, k shape: B x H x L x D)
    pub(crate) fn apply(&self, q: &Tensor, k: &Tensor, offset: usize) -> Result<(Tensor, Tensor)> {
        let (_, _, seq_len, _) = q.dims4()?;
        // Slice the tables at the current KV-cache offset.
        let cos = self.cos.narrow(0, offset, seq_len)?;
        let sin = self.sin.narrow(0, offset, seq_len)?;
        let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
        let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
        Ok((q_embed, k_embed))
    }
}
/// Gated feed-forward block: down(act(gate(x)) * up(x)).
#[derive(Debug, Clone)]
pub(crate) struct Qwen3MLP {
    gate_proj: Linear,
    up_proj: Linear,
    down_proj: Linear,
    act_fn: Activation,
}
impl Qwen3MLP {
    /// Load the three projections from `vb` ("gate_proj"/"up_proj"/"down_proj").
    pub(crate) fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let (h, inner) = (cfg.hidden_size, cfg.intermediate_size);
        let gate_proj = linear_no_bias(h, inner, vb.pp("gate_proj"))?;
        let up_proj = linear_no_bias(h, inner, vb.pp("up_proj"))?;
        let down_proj = linear_no_bias(inner, h, vb.pp("down_proj"))?;
        Ok(Self {
            gate_proj,
            up_proj,
            down_proj,
            act_fn: cfg.hidden_act,
        })
    }
}
impl Module for Qwen3MLP {
    /// SwiGLU-style gating: act(gate(x)) * up(x), projected back down.
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let gated = self.gate_proj.forward(x)?.apply(&self.act_fn)?;
        let up = self.up_proj.forward(x)?;
        self.down_proj.forward(&(gated * up)?)
    }
}
/// Multi-head attention with per-head q/k RmsNorm, RoPE and a concat KV cache.
#[derive(Debug, Clone)]
pub(crate) struct Qwen3Attention {
    // projections
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    // norms (applied per head over the head_dim axis)
    q_norm: RmsNorm,
    k_norm: RmsNorm,
    // hyper params
    num_heads: usize,
    num_kv_heads: usize,
    // num_heads / num_kv_heads — how often each KV head is repeated for GQA.
    num_kv_groups: usize,
    head_dim: usize,
    // num_heads * head_dim (may differ from cfg.hidden_size, see new()).
    hidden_size: usize,
    // utils
    rotary_emb: Arc<Qwen3RotaryEmbedding>,
    kv_cache: ConcatKvCache,
}
impl Qwen3Attention {
    /// Build the attention projections and per-head norms from `vb`.
    /// Fails if the config enables sliding-window attention (unsupported).
    pub(crate) fn new(
        cfg: &Config,
        rotary_emb: Arc<Qwen3RotaryEmbedding>,
        vb: VarBuilder,
    ) -> Result<Self> {
        if cfg.use_sliding_window {
            candle::bail!("sliding window is not supported")
        }

        let head_dim = cfg.head_dim;
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        let num_kv_groups = num_heads / num_kv_heads;

        let q_proj = linear_b(
            cfg.hidden_size,
            num_heads * head_dim,
            cfg.attention_bias,
            vb.pp("q_proj"),
        )?;
        let k_proj = linear_b(
            cfg.hidden_size,
            num_kv_heads * head_dim,
            cfg.attention_bias,
            vb.pp("k_proj"),
        )?;
        let v_proj = linear_b(
            cfg.hidden_size,
            num_kv_heads * head_dim,
            cfg.attention_bias,
            vb.pp("v_proj"),
        )?;
        let o_proj = linear_b(
            num_heads * head_dim,
            cfg.hidden_size,
            cfg.attention_bias,
            vb.pp("o_proj"),
        )?;

        // Per-head norms operate on vectors of length head_dim.
        let q_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("q_norm"))?;
        let k_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("k_norm"))?;

        // Necessary because the hidden_size in the config isn't always accurate
        let hidden_size = head_dim * cfg.num_attention_heads;

        // dim=2 because we concatenate along the sequence dimension
        // For tensors of shape [batch, heads, seq, head_dim]
        let kv_cache = ConcatKvCache::new(2);

        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            q_norm,
            k_norm,
            num_heads,
            num_kv_heads,
            num_kv_groups,
            head_dim,
            hidden_size,
            rotary_emb,
            kv_cache,
        })
    }

    /// Attention over `x` (B, L, hidden); `offset` is the number of tokens
    /// already in the KV cache, `attn_mask` an optional additive mask
    /// broadcastable to (B, H, L, L + offset).
    pub(crate) fn forward(
        &mut self,
        x: &Tensor,
        attn_mask: Option<&Tensor>,
        offset: usize,
    ) -> Result<Tensor> {
        let (b, l, _) = x.dims3()?;

        // 1. Proj
        let q = self.q_proj.forward(x)?;
        let k = self.k_proj.forward(x)?;
        let v = self.v_proj.forward(x)?;

        // 2. Reshape: (B, L, H, D) -> (B, H, L, D)
        let q = q
            .reshape((b, l, self.num_heads, self.head_dim))?
            .transpose(1, 2)?;
        let k = k
            .reshape((b, l, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let v = v
            .reshape((b, l, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;

        // 3. Per-head RMSNorm over the head_dim axis.
        // flatten(0, 2) folds (B, H, L, D) into (B*H*L, D) so the norm sees
        // one head vector per row.
        let q_flat = q.flatten(0, 2)?;
        let k_flat = k.flatten(0, 2)?;
        let q_flat = self.q_norm.forward(&q_flat)?;
        let k_flat = self.k_norm.forward(&k_flat)?;
        let q = q_flat.reshape((b, self.num_heads, l, self.head_dim))?;
        let k = k_flat.reshape((b, self.num_kv_heads, l, self.head_dim))?;

        // 4. RoPE
        let (q, k) = self.rotary_emb.apply(&q, &k, offset)?;

        // 5. Accumulate KV cache (k/v grow along the sequence dim).
        let (k, v) = self.kv_cache.append(&k, &v)?;

        // 6. GQA repeat_kv: expand KV heads to match the query head count.
        let k = repeat_kv(k, self.num_kv_groups)?.contiguous()?;
        let v = repeat_kv(v, self.num_kv_groups)?.contiguous()?;

        // 7. Attention score
        let scale = 1.0 / (self.head_dim as f64).sqrt();
        let mut scores = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
        if let Some(m) = attn_mask {
            scores = scores.broadcast_add(m)?;
        }
        let probs = candle_nn::ops::softmax_last_dim(&scores)?;
        let ctx = probs.matmul(&v)?; // (B, H, L, D)

        // 8. Output proj
        ctx.transpose(1, 2)?
            .reshape((b, l, self.hidden_size))?
            .apply(&self.o_proj)
    }

    /// Drop all cached keys/values (start of a new sequence).
    pub(crate) fn clear_kv_cache(&mut self) {
        self.kv_cache.reset();
    }
}
/// One pre-norm transformer block: attention + MLP, each with a residual.
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Qwen3Attention,
    mlp: Qwen3MLP,
    // "input_layernorm": applied before attention.
    ln1: RmsNorm,
    // "post_attention_layernorm": applied before the MLP.
    ln2: RmsNorm,
}
impl DecoderLayer {
    /// Load one decoder layer from `vb`.
    fn new(cfg: &Config, rotary: Arc<Qwen3RotaryEmbedding>, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            self_attn: Qwen3Attention::new(cfg, rotary, vb.pp("self_attn"))?,
            mlp: Qwen3MLP::new(cfg, vb.pp("mlp"))?,
            ln1: RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?,
            ln2: RmsNorm::new(
                cfg.hidden_size,
                cfg.rms_norm_eps,
                vb.pp("post_attention_layernorm"),
            )?,
        })
    }

    /// Pre-norm block: x + attn(ln1(x)), then that + mlp(ln2(that)).
    fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, offset: usize) -> Result<Tensor> {
        let attn_out = self.self_attn.forward(&self.ln1.forward(x)?, mask, offset)?;
        let residual = (x + attn_out)?;
        let mlp_out = self.ln2.forward(&residual)?.apply(&self.mlp)?;
        residual + mlp_out
    }

    fn clear_kv_cache(&mut self) {
        self.self_attn.clear_kv_cache();
    }
}
/// The decoder stack without the LM head (see ModelForCausalLM for logits).
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: candle_nn::Embedding,
    layers: Vec<DecoderLayer>,
    // Final RmsNorm applied after the last layer.
    norm: RmsNorm,
    // Kept so masks can be created on the right device / in the right dtype.
    device: Device,
    dtype: DType,
}
impl Model {
    /// Build the embedding layer, decoder stack and final norm from `vb`.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let embed_tokens =
            candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?;
        // A single RoPE table is shared by every layer.
        let rotary = Arc::new(Qwen3RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb.pp("model.layers");
        for i in 0..cfg.num_hidden_layers {
            layers.push(DecoderLayer::new(cfg, rotary.clone(), vb_l.pp(i))?);
        }
        Ok(Self {
            embed_tokens,
            layers,
            norm: RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?,
            device: vb.device().clone(),
            dtype: vb.dtype(),
        })
    }

    /// Reset every layer's KV cache (call between unrelated generations).
    fn clear_kv_cache(&mut self) {
        for l in &mut self.layers {
            l.clear_kv_cache();
        }
    }

    /// Additive attention mask of shape (b, 1, tgt, tgt + offset):
    /// 0 where attention is allowed, -inf where it is disallowed.
    fn causal_mask(
        &self,
        b: usize,
        tgt: usize,
        offset: usize,
        sw: Option<usize>,
    ) -> Result<Tensor> {
        let minf = f32::NEG_INFINITY;
        let mask: Vec<_> = (0..tgt)
            .flat_map(|i| {
                (0..(tgt + offset)).map(move |j| {
                    // Query i sits at absolute position i + offset; it may only
                    // attend to keys at absolute positions <= i + offset.
                    let past_ok = j <= i + offset;
                    // Optional sliding-window limit on how far back to attend.
                    let sw_ok = match sw {
                        Some(w) => (i + offset) as i64 - j as i64 <= w as i64,
                        None => true,
                    };
                    if past_ok && sw_ok {
                        0.
                    } else {
                        minf
                    }
                })
            })
            .collect();
        // The mask is identical for every batch element; repeat the data so its
        // length matches the (b, 1, tgt, tgt + offset) shape. Previously only a
        // single batch worth of data was produced, so from_slice failed with a
        // shape/length mismatch whenever b > 1.
        let mask = mask.repeat(b);
        Tensor::from_slice(&mask, (b, 1, tgt, tgt + offset), &self.device)?.to_dtype(self.dtype)
    }

    /// Run the decoder stack; `offset` is the number of tokens already cached.
    /// Returns the hidden states after the final norm, shape (b, l, hidden).
    pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> {
        let (b, l) = input.dims2()?;
        let mut h = self.embed_tokens.forward(input)?;

        // A single query token may attend to everything cached: no mask needed.
        let causal = if l == 1 {
            None
        } else {
            Some(self.causal_mask(b, l, offset, None)?)
        };

        for layer in &mut self.layers {
            h = layer.forward(&h, causal.as_ref(), offset)?;
        }
        self.norm.forward(&h)
    }
}
/// The decoder stack plus an LM head producing next-token logits.
#[derive(Debug, Clone)]
pub struct ModelForCausalLM {
    base: Model,
    lm_head: Linear,
}
impl ModelForCausalLM {
    /// Build the base model and the LM head from `vb`.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let base = Model::new(cfg, vb.clone())?;
        // With tied embeddings the output head reuses the token-embedding matrix.
        let lm_head = if cfg.tie_word_embeddings {
            Linear::from_weights(base.embed_tokens.embeddings().clone(), None)
        } else {
            linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
        };
        Ok(Self { base, lm_head })
    }

    /// Run the model and return logits for the last position only,
    /// shape (b, 1, vocab_size).
    pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> {
        let (_b, seq_len) = input.dims2()?;
        let hidden = self.base.forward(input, offset)?;
        hidden.narrow(1, seq_len - 1, 1)?.apply(&self.lm_head)
    }

    pub fn clear_kv_cache(&mut self) {
        self.base.clear_kv_cache();
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/depth_anything_v2.rs | candle-transformers/src/models/depth_anything_v2.rs | //! Implementation of the Depth Anything model from FAIR.
//!
//! See:
//! - ["Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data"](https://github.com/LiheYoung/Depth-Anything)
//!
use std::sync::Arc;
use candle::D::Minus1;
use candle::{Module, Result, Tensor};
use candle_nn::ops::Identity;
use candle_nn::{
batch_norm, conv2d, conv2d_no_bias, conv_transpose2d, linear, seq, Activation, BatchNorm,
BatchNormConfig, Conv2d, Conv2dConfig, ConvTranspose2dConfig, Sequential, VarBuilder,
};
use crate::models::dinov2::DinoVisionTransformer;
/// Configuration tying a DINOv2 backbone variant to the DPT depth head.
pub struct DepthAnythingV2Config {
    // Channel count of each of the four decoder stages.
    out_channel_sizes: [usize; 4],
    in_channel_size: usize, // embed_dim in the Dino model
    // Common channel width inside the Scratch fusion network.
    num_features: usize,
    use_batch_norm: bool,
    use_class_token: bool,
    // Indices of the backbone layers whose activations are tapped.
    layer_ids_vits: Vec<usize>,
    input_image_size: usize,
    // Spatial size of the patch grid (input_image_size / patch size).
    target_patch_size: usize,
}
impl DepthAnythingV2Config {
    /// Fully parameterized constructor; the `vit_*` presets delegate to it.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        out_channel_sizes: [usize; 4],
        in_channel_size: usize,
        num_features: usize,
        use_batch_norm: bool,
        use_class_token: bool,
        layer_ids_vits: Vec<usize>,
        input_image_size: usize,
        target_patch_size: usize,
    ) -> Self {
        Self {
            out_channel_sizes,
            in_channel_size,
            num_features,
            use_batch_norm,
            use_class_token,
            layer_ids_vits,
            input_image_size,
            target_patch_size,
        }
    }

    /// Preset matching the ViT-S backbone.
    pub fn vit_small() -> Self {
        Self::new(
            [48, 96, 192, 384],
            384,
            64,
            false,
            false,
            vec![2, 5, 8, 11],
            518,
            518 / 14,
        )
    }

    /// Preset matching the ViT-B backbone.
    pub fn vit_base() -> Self {
        Self::new(
            [96, 192, 384, 768],
            768,
            128,
            false,
            false,
            vec![2, 5, 8, 11],
            518,
            518 / 14,
        )
    }

    /// Preset matching the ViT-L backbone.
    pub fn vit_large() -> Self {
        Self::new(
            [256, 512, 1024, 1024],
            1024,
            256,
            false,
            false,
            vec![4, 11, 17, 23],
            518,
            518 / 14,
        )
    }

    /// Preset matching the ViT-G backbone.
    pub fn vit_giant() -> Self {
        Self::new(
            [1536, 1536, 1536, 1536],
            1536,
            384,
            false,
            false,
            vec![9, 19, 29, 39],
            518,
            518 / 14,
        )
    }
}
/// Residual unit: act -> conv -> (bn) -> act -> conv -> (bn), plus skip.
pub struct ResidualConvUnit {
    activation: Activation,
    conv1: Conv2d,
    conv2: Conv2d,
    // Present only when the config enables batch norm.
    batch_norm1: Option<BatchNorm>,
    batch_norm2: Option<BatchNorm>,
}
impl ResidualConvUnit {
    /// Load the two 3x3 convolutions ("conv1"/"conv2") and, when enabled,
    /// their batch norms ("bn1"/"bn2"); channel count stays num_features.
    pub fn new(
        conf: &DepthAnythingV2Config,
        activation: Activation,
        vb: VarBuilder,
    ) -> Result<Self> {
        const KERNEL_SIZE: usize = 3;
        let conv_cfg = Conv2dConfig {
            padding: 1,
            stride: 1,
            dilation: 1,
            groups: 1,
            cudnn_fwd_algo: None,
        };
        let n = conf.num_features;
        let conv1 = conv2d(n, n, KERNEL_SIZE, conv_cfg, vb.pp("conv1"))?;
        let conv2 = conv2d(n, n, KERNEL_SIZE, conv_cfg, vb.pp("conv2"))?;
        let (batch_norm1, batch_norm2) = if conf.use_batch_norm {
            let batch_norm_cfg = BatchNormConfig {
                eps: 1e-05,
                remove_mean: false,
                affine: true,
                momentum: 0.1,
            };
            (
                Some(batch_norm(n, batch_norm_cfg, vb.pp("bn1"))?),
                Some(batch_norm(n, batch_norm_cfg, vb.pp("bn2"))?),
            )
        } else {
            (None, None)
        };
        Ok(Self {
            activation,
            conv1,
            conv2,
            batch_norm1,
            batch_norm2,
        })
    }
}
impl Module for ResidualConvUnit {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // act -> conv1 -> (bn1) -> act -> conv2 -> (bn2), then the skip.
        let mut out = self.conv1.forward(&self.activation.forward(xs)?)?;
        if let Some(bn) = &self.batch_norm1 {
            // NOTE(review): forward_train is used even at inference time —
            // presumably to match the exported weights' statistics; confirm.
            out = bn.forward_train(&out)?;
        }
        out = self.conv2.forward(&self.activation.forward(&out)?)?;
        if let Some(bn) = &self.batch_norm2 {
            out = bn.forward_train(&out)?;
        }
        out + xs
    }
}
/// Fusion stage of the DPT decoder: refine, upsample, project.
pub struct FeatureFusionBlock {
    // Applied externally by DPTHead::forward when fusing a skip connection.
    res_conv_unit1: ResidualConvUnit,
    res_conv_unit2: ResidualConvUnit,
    output_conv: Conv2d,
    // Spatial size the block upsamples to.
    target_patch_size: usize,
}
impl FeatureFusionBlock {
    /// Load two residual conv units ("resConfUnit1"/"resConfUnit2") and a
    /// 1x1 output convolution ("out_conv") from `vb`.
    pub fn new(
        conf: &DepthAnythingV2Config,
        target_patch_size: usize,
        activation: Activation,
        vb: VarBuilder,
    ) -> Result<Self> {
        const KERNEL_SIZE: usize = 1;
        let conv_cfg = Conv2dConfig {
            padding: 0,
            stride: 1,
            dilation: 1,
            groups: 1,
            cudnn_fwd_algo: None,
        };
        Ok(Self {
            res_conv_unit1: ResidualConvUnit::new(conf, activation, vb.pp("resConfUnit1"))?,
            res_conv_unit2: ResidualConvUnit::new(conf, activation, vb.pp("resConfUnit2"))?,
            output_conv: conv2d(
                conf.num_features,
                conf.num_features,
                KERNEL_SIZE,
                conv_cfg,
                vb.pp("out_conv"),
            )?,
            target_patch_size,
        })
    }
}
impl Module for FeatureFusionBlock {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // res_conv_unit1 is invoked separately by DPTHead when a skip
        // connection is fused; this path is refine -> upsample -> 1x1 conv.
        let refined = self.res_conv_unit2.forward(xs)?;
        let upsampled = refined.interpolate2d(self.target_patch_size, self.target_patch_size)?;
        self.output_conv.forward(&upsampled)
    }
}
/// The DPT fusion network: per-stage channel adapters, four fusion blocks
/// and the final depth regression convolutions.
pub struct Scratch {
    // 3x3 convs mapping each backbone stage to num_features channels.
    layer1_rn: Conv2d,
    layer2_rn: Conv2d,
    layer3_rn: Conv2d,
    layer4_rn: Conv2d,
    // Fusion blocks, coarse (refine_net4) to fine (refine_net1).
    refine_net1: FeatureFusionBlock,
    refine_net2: FeatureFusionBlock,
    refine_net3: FeatureFusionBlock,
    refine_net4: FeatureFusionBlock,
    // Depth regression head: conv, then conv-relu-conv-relu to 1 channel.
    output_conv1: Conv2d,
    output_conv2: Sequential,
}
impl Scratch {
    /// Load the whole fusion network from `vb`.
    pub fn new(conf: &DepthAnythingV2Config, vb: VarBuilder) -> Result<Self> {
        const KERNEL_SIZE: usize = 3;
        let conv_cfg = Conv2dConfig {
            padding: 1,
            stride: 1,
            dilation: 1,
            groups: 1,
            cudnn_fwd_algo: None,
        };

        // Channel adapters: stage-specific width -> num_features.
        let layer1_rn = conv2d_no_bias(
            conf.out_channel_sizes[0],
            conf.num_features,
            KERNEL_SIZE,
            conv_cfg,
            vb.pp("layer1_rn"),
        )?;
        let layer2_rn = conv2d_no_bias(
            conf.out_channel_sizes[1],
            conf.num_features,
            KERNEL_SIZE,
            conv_cfg,
            vb.pp("layer2_rn"),
        )?;
        let layer3_rn = conv2d_no_bias(
            conf.out_channel_sizes[2],
            conf.num_features,
            KERNEL_SIZE,
            conv_cfg,
            vb.pp("layer3_rn"),
        )?;
        let layer4_rn = conv2d_no_bias(
            conf.out_channel_sizes[3],
            conf.num_features,
            KERNEL_SIZE,
            conv_cfg,
            vb.pp("layer4_rn"),
        )?;

        // Each fusion block upsamples to a successively larger patch grid
        // (x1 at the coarsest stage up to x8 at the finest).
        let refine_net1 = FeatureFusionBlock::new(
            conf,
            conf.target_patch_size * 8,
            Activation::Relu,
            vb.pp("refinenet1"),
        )?;
        let refine_net2 = FeatureFusionBlock::new(
            conf,
            conf.target_patch_size * 4,
            Activation::Relu,
            vb.pp("refinenet2"),
        )?;
        let refine_net3 = FeatureFusionBlock::new(
            conf,
            conf.target_patch_size * 2,
            Activation::Relu,
            vb.pp("refinenet3"),
        )?;
        let refine_net4 = FeatureFusionBlock::new(
            conf,
            conf.target_patch_size,
            Activation::Relu,
            vb.pp("refinenet4"),
        )?;

        let conv_cfg = Conv2dConfig {
            padding: 1,
            stride: 1,
            dilation: 1,
            groups: 1,
            cudnn_fwd_algo: None,
        };
        let output_conv1 = conv2d(
            conf.num_features,
            conf.num_features / 2,
            KERNEL_SIZE,
            conv_cfg,
            vb.pp("output_conv1"),
        )?;

        // Final regression: conv3x3 -> ReLU -> conv1x1 -> ReLU, ending at a
        // single depth channel.
        let output_conv2 = seq();
        const HEAD_FEATURES_2: usize = 32;
        const OUT_CHANNELS_2: usize = 1;
        const KERNEL_SIZE_2: usize = 1;
        let output_conv2 = output_conv2.add(conv2d(
            conf.num_features / 2,
            HEAD_FEATURES_2,
            KERNEL_SIZE,
            conv_cfg,
            vb.pp("output_conv2").pp("0"),
        )?);
        let output_conv2 = output_conv2
            .add(Activation::Relu)
            .add(conv2d(
                HEAD_FEATURES_2,
                OUT_CHANNELS_2,
                KERNEL_SIZE_2,
                conv_cfg,
                vb.pp("output_conv2").pp("2"),
            )?)
            .add(Activation::Relu);

        Ok(Self {
            layer1_rn,
            layer2_rn,
            layer3_rn,
            layer4_rn,
            refine_net1,
            refine_net2,
            refine_net3,
            refine_net4,
            output_conv1,
            output_conv2,
        })
    }
}
// Number of backbone feature maps consumed by the DPT head.
const NUM_CHANNELS: usize = 4;

/// The DPT decoder head: projects, resizes and fuses the tapped backbone
/// features into a dense depth map.
pub struct DPTHead {
    // One 1x1 projection per tapped backbone layer.
    projections: Vec<Conv2d>,
    // Per-stage spatial resizing (transposed conv / identity / strided conv).
    resize_layers: Vec<Box<dyn Module>>,
    // Only populated when the class token is used; see DPTHead::new.
    readout_projections: Vec<Sequential>,
    scratch: Scratch,
    use_class_token: bool,
    input_image_size: usize,
    target_patch_size: usize,
}
impl DPTHead {
    /// Build the DPT head from `vb` ("projects", "resize_layers",
    /// optionally "readout_projects", and "scratch").
    pub fn new(conf: &DepthAnythingV2Config, vb: VarBuilder) -> Result<Self> {
        // One 1x1 projection per tapped backbone layer, mapping the ViT embed
        // dim to the per-stage channel count.
        let mut projections: Vec<Conv2d> = Vec::with_capacity(conf.out_channel_sizes.len());
        for (conv_index, out_channel_size) in conf.out_channel_sizes.iter().enumerate() {
            projections.push(conv2d(
                conf.in_channel_size,
                *out_channel_size,
                1,
                Default::default(),
                vb.pp("projects").pp(conv_index.to_string()),
            )?);
        }

        // Bring the four stages to different spatial scales:
        // 4x up, 2x up, identity, 2x down.
        let resize_layers: Vec<Box<dyn Module>> = vec![
            Box::new(conv_transpose2d(
                conf.out_channel_sizes[0],
                conf.out_channel_sizes[0],
                4,
                ConvTranspose2dConfig {
                    padding: 0,
                    stride: 4,
                    dilation: 1,
                    output_padding: 0,
                },
                vb.pp("resize_layers").pp("0"),
            )?),
            Box::new(conv_transpose2d(
                conf.out_channel_sizes[1],
                conf.out_channel_sizes[1],
                2,
                ConvTranspose2dConfig {
                    padding: 0,
                    stride: 2,
                    dilation: 1,
                    output_padding: 0,
                },
                vb.pp("resize_layers").pp("1"),
            )?),
            Box::new(Identity::new()),
            Box::new(conv2d(
                conf.out_channel_sizes[3],
                conf.out_channel_sizes[3],
                3,
                Conv2dConfig {
                    padding: 1,
                    stride: 2,
                    dilation: 1,
                    groups: 1,
                    cudnn_fwd_algo: None,
                },
                vb.pp("resize_layers").pp("3"),
            )?),
        ];

        let readout_projections = if conf.use_class_token {
            // Bug fix: previously the built Sequential was dropped on the
            // floor and `rop` was returned empty, so indexing
            // `readout_projections[i]` in `forward` panicked whenever
            // `use_class_token` was set. Push each projection into the vector.
            let mut rop = Vec::with_capacity(NUM_CHANNELS);
            for rop_index in 0..NUM_CHANNELS {
                rop.push(
                    seq()
                        .add(linear(
                            2 * conf.in_channel_size,
                            conf.in_channel_size,
                            vb.pp("readout_projects").pp(rop_index.to_string()),
                        )?)
                        .add(Activation::Gelu),
                );
            }
            rop
        } else {
            vec![]
        };

        let scratch = Scratch::new(conf, vb.pp("scratch"))?;

        Ok(Self {
            projections,
            resize_layers,
            readout_projections,
            scratch,
            use_class_token: conf.use_class_token,
            input_image_size: conf.input_image_size,
            target_patch_size: conf.target_patch_size,
        })
    }
}
impl Module for DPTHead {
    /// `xs` stacks the NUM_CHANNELS tapped backbone outputs along dim 0;
    /// returns a single-channel map at input_image_size resolution.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut out: Vec<Tensor> = Vec::with_capacity(NUM_CHANNELS);
        for i in 0..NUM_CHANNELS {
            let x = if self.use_class_token {
                // xs.get(i) holds (patch tokens, class token); concat the
                // broadcast class token onto every patch and project back.
                let x = xs.get(i)?.get(0)?;
                let class_token = xs.get(i)?.get(1)?;
                let readout = class_token.unsqueeze(1)?.expand(x.shape())?;
                let to_cat = [x, readout];
                let cat = Tensor::cat(&to_cat, Minus1)?;
                self.readout_projections[i].forward(&cat)?
            } else {
                xs.get(i)?
            };
            let x_dims = x.dims();

            // Token sequence -> spatial map: (b, n_tokens, c) ->
            // (b, c, target_patch_size, target_patch_size).
            let x = x.permute((0, 2, 1))?.reshape((
                x_dims[0],
                x_dims[x_dims.len() - 1],
                self.target_patch_size,
                self.target_patch_size,
            ))?;
            let x = self.projections[i].forward(&x)?;

            let x = self.resize_layers[i].forward(&x)?;
            out.push(x);
        }

        // Adapt every stage to the common num_features width.
        let layer_1_rn = self.scratch.layer1_rn.forward(&out[0])?;
        let layer_2_rn = self.scratch.layer2_rn.forward(&out[1])?;
        let layer_3_rn = self.scratch.layer3_rn.forward(&out[2])?;
        let layer_4_rn = self.scratch.layer4_rn.forward(&out[3])?;

        // Coarse-to-fine fusion: each finer stage is first refined by the
        // block's res_conv_unit1, added to the upsampled coarser path, then
        // passed through the block itself.
        let path4 = self.scratch.refine_net4.forward(&layer_4_rn)?;

        let res3_out = self
            .scratch
            .refine_net3
            .res_conv_unit1
            .forward(&layer_3_rn)?;
        let res3_out = path4.add(&res3_out)?;
        let path3 = self.scratch.refine_net3.forward(&res3_out)?;

        let res2_out = self
            .scratch
            .refine_net2
            .res_conv_unit1
            .forward(&layer_2_rn)?;
        let res2_out = path3.add(&res2_out)?;
        let path2 = self.scratch.refine_net2.forward(&res2_out)?;

        let res1_out = self
            .scratch
            .refine_net1
            .res_conv_unit1
            .forward(&layer_1_rn)?;
        let res1_out = path2.add(&res1_out)?;
        let path1 = self.scratch.refine_net1.forward(&res1_out)?;

        // Regression head at full input resolution.
        let out = self.scratch.output_conv1.forward(&path1)?;

        let out = out.interpolate2d(self.input_image_size, self.input_image_size)?;

        self.scratch.output_conv2.forward(&out)
    }
}
/// Depth Anything V2: a frozen DINOv2 backbone plus a DPT depth head.
pub struct DepthAnythingV2 {
    pretrained: Arc<DinoVisionTransformer>,
    depth_head: DPTHead,
    conf: DepthAnythingV2Config,
}
impl DepthAnythingV2 {
    /// Wrap an already-loaded DINOv2 backbone with a depth head loaded from
    /// `vb` under "depth_head".
    pub fn new(
        pretrained: Arc<DinoVisionTransformer>,
        conf: DepthAnythingV2Config,
        vb: VarBuilder,
    ) -> Result<Self> {
        Ok(Self {
            depth_head: DPTHead::new(&conf, vb.pp("depth_head"))?,
            pretrained,
            conf,
        })
    }
}
impl Module for DepthAnythingV2 {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // Tap the configured backbone layers, decode them into a depth map,
        // and clamp to non-negative values.
        let features = self.pretrained.get_intermediate_layers(
            xs,
            &self.conf.layer_ids_vits,
            false,
            false,
            true,
        )?;
        self.depth_head.forward(&features)?.relu()
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/metavoice.rs | candle-transformers/src/models/metavoice.rs | //! MetaVoice Studio ML Models
//!
//! See MetaVoice's TTS and voice cloning models:
//! - [GitHub](https://github.com/metavoiceio/metavoice-src)
//! - [Website](https://studio.metavoice.ai/)
use candle::{DType, Device, Error as E, IndexOp, Module, Result, Tensor, D};
use candle_nn::{embedding, linear_b, rms_norm, Embedding, Linear, RmsNorm, VarBuilder};
// Equivalent to torch.repeat_interleave
pub(crate) fn repeat_interleave(img: &Tensor, repeats: usize, dim: usize) -> Result<Tensor> {
    // Insert a fresh axis after `dim`, broadcast it to `repeats`, then merge
    // the two axes back together.
    let expanded = img.unsqueeze(dim + 1)?;
    let mut target = expanded.dims().to_vec();
    target[dim + 1] = repeats;
    expanded.broadcast_as(target)?.flatten(dim, dim + 1)
}
pub mod speaker_encoder {
use super::*;
    /// Speaker-encoder hyper-parameters (LSTM stack over mel frames).
    #[derive(Debug, Clone, serde::Deserialize)]
    pub struct Config {
        pub sampling_rate: usize,
        // Number of mel frames in one partial utterance.
        pub partial_n_frames: usize,
        pub model_hidden_size: usize,
        pub model_embedding_size: usize,
        pub model_num_layers: usize,
        // Mel window length / hop, in milliseconds.
        pub mel_window_length: usize,
        pub mel_window_step: usize,
        pub mel_n_channels: usize,
    }
    impl Config {
        /// Default configuration used by the MetaVoice speaker encoder.
        pub fn cfg() -> Self {
            Self {
                sampling_rate: 16_000,
                partial_n_frames: 160,
                model_hidden_size: 256,
                model_embedding_size: 256,
                model_num_layers: 3,
                mel_window_length: 25,
                mel_window_step: 10,
                mel_n_channels: 40,
            }
        }
    }
    /// The speaker encoder: a stack of LSTMs followed by a linear projection.
    pub struct Model {
        lstms: Vec<candle_nn::LSTM>,
        linear: Linear,
        cfg: Config,
    }

    // Half-open (start, end) index range into a waveform or mel sequence.
    type Slice = (usize, usize);
impl Model {
pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> {
let mut lstms = Vec::with_capacity(cfg.model_num_layers);
let vb_l = vb.pp("lstm");
for layer_idx in 0..cfg.model_num_layers {
let c = candle_nn::LSTMConfig {
layer_idx,
..Default::default()
};
let lstm = candle_nn::lstm(
cfg.mel_n_channels,
cfg.model_hidden_size,
c,
vb_l.pp(layer_idx),
)?;
lstms.push(lstm)
}
let linear = linear_b(
cfg.model_hidden_size,
cfg.model_embedding_size,
true,
vb.pp("linear"),
)?;
Ok(Self { lstms, linear, cfg })
}
fn compute_partial_slices(
&self,
n_samples: usize,
rate: f64,
min_coverage: f64,
) -> (Vec<Slice>, Vec<Slice>) {
let c = &self.cfg;
// Compute how many frames separate two partial utterances
let samples_per_frame = c.sampling_rate * c.mel_window_step / 1000;
let n_frames = n_samples / samples_per_frame + 1;
let frame_step =
(c.sampling_rate as f64 / rate / samples_per_frame as f64).round() as usize;
let steps = (n_frames + frame_step).saturating_sub(c.partial_n_frames) + 1;
// Compute the slices.
let mut wav_slices = vec![];
let mut mel_slices = vec![];
for i in (0..steps).step_by(frame_step) {
let mel_range = (i, i + c.partial_n_frames);
let wav_range = (
i * samples_per_frame,
(i + c.partial_n_frames) * samples_per_frame,
);
mel_slices.push(mel_range);
wav_slices.push(wav_range);
}
// Evaluate whether extra padding is warranted or not.
let last_wav_range = match wav_slices.last() {
None => return (wav_slices, mel_slices),
Some(l) => *l,
};
let coverage = (n_samples - last_wav_range.0) as f64
/ (last_wav_range.1 - last_wav_range.0) as f64;
if coverage > min_coverage && mel_slices.len() > 1 {
mel_slices.pop();
wav_slices.pop();
}
(wav_slices, mel_slices)
}
pub fn embed_utterance(
&self,
wav: &[f32],
mel_filters: &[f32],
rate: f64,
min_c: f64,
device: &Device,
) -> Result<Tensor> {
let (wav_slices, mel_slices) = self.compute_partial_slices(wav.len(), rate, min_c);
let max_wave_length = match wav_slices.last() {
Some(v) => v.1,
None => candle::bail!("empty wav slices"),
};
let wav = if max_wave_length > wav.len() {
let mut wav = wav.to_vec();
wav.resize(max_wave_length - wav.len(), 0.0);
std::borrow::Cow::Owned(wav)
} else {
std::borrow::Cow::Borrowed(wav)
};
let mel = crate::models::whisper::audio::log_mel_spectrogram_(
wav.as_ref(),
mel_filters,
/* fft_size */ self.cfg.mel_window_length,
/* fft_step */ self.cfg.mel_window_step,
self.cfg.mel_n_channels,
false,
);
let mels = mel_slices
.iter()
.flat_map(|s| [mel[s.0], mel[s.1]])
.collect::<Vec<_>>();
let mels = Tensor::from_vec(mels, (mel_slices.len(), 2), device)?;
let partial_embeds = self.forward(&mels)?;
let raw_embed = partial_embeds.mean(0)?;
let norm = raw_embed.sqr()?.sum_all()?.sqrt()?;
raw_embed.broadcast_div(&norm)
}
}
    impl Module for Model {
        /// Run the LSTM stack over `xs` and return L2-normalized embeddings.
        fn forward(&self, xs: &Tensor) -> Result<Tensor> {
            use candle_nn::RNN;

            // This is different from the Python transformers version as candle LSTM is batch first.
            let xs = xs.t()?;
            let mut xs = xs.clone();
            for layer in self.lstms.iter() {
                let states = layer.seq(&xs)?;
                xs = layer.states_to_tensor(&states)?;
            }
            let xs = xs.t()?;
            let embeds_raw = xs.apply(&self.linear)?.relu()?;
            // Normalize each row to unit L2 norm.
            let norm = embeds_raw.sqr()?.sum_keepdim(1)?.sqrt()?;
            embeds_raw.broadcast_div(&norm)
        }
    }
}
type Rank = u32;
pub mod tokenizers {
use super::*;
use std::collections::HashMap;
    /// A tiktoken-style byte-pair encoder loaded from a JSON description.
    pub struct BPE {
        // Pre-tokenization split pattern ("pat_str" in the JSON).
        pub re: fancy_regex::Regex,
        pub end_of_text: usize,
        // Added to every rank when emitting token ids.
        pub offset: usize,
        // Merge table: byte sequence -> rank (lower rank merges first).
        pub ranks: HashMap<Vec<u8>, Rank>,
        span: tracing::Span,
    }
    impl BPE {
        /// Parse a tokenizer description with "pat_str", "offset" and
        /// "mergeable_ranks" fields; ranks 0..=255 are seeded with the raw
        /// single-byte tokens.
        pub fn from_json(json: &serde_json::Value, end_of_text: usize) -> Result<Self> {
            let json = match json.as_object() {
                None => candle::bail!("json value is not an object"),
                Some(json) => json,
            };
            let re = match json.get("pat_str") {
                None => candle::bail!("json object has no pat_str field"),
                Some(pat_str) => match pat_str.as_str() {
                    None => candle::bail!("pat_str field is not a string"),
                    Some(pat_str) => fancy_regex::Regex::new(pat_str).map_err(E::wrap)?,
                },
            };
            let offset = match json.get("offset") {
                None => candle::bail!("json object has no offset field"),
                Some(offset) => match offset.as_u64() {
                    None => candle::bail!("offset field is not a positive int"),
                    Some(offset) => offset as usize,
                },
            };
            let mut ranks = HashMap::new();
            for id in 0u8..=255 {
                ranks.insert(vec![id], id as u32);
            }
            let mergeable_ranks = match json.get("mergeable_ranks") {
                None => candle::bail!("json object has no mergeable_ranks field"),
                Some(mr) => match mr.as_object() {
                    None => candle::bail!("mergeable_ranks is not an object"),
                    Some(mr) => mr,
                },
            };
            for (key, value) in mergeable_ranks.iter() {
                let value = match value.as_u64() {
                    None => candle::bail!("mergeable_ranks '{key}' is not a u64"),
                    Some(value) => value as u32,
                };
                // Single-byte entries were already seeded above.
                if value < 256 {
                    continue;
                }
                // No escaping for other keys.
                let key = key.as_bytes().to_vec();
                ranks.insert(key, value);
            }
            Ok(Self {
                re,
                end_of_text,
                offset,
                ranks,
                span: tracing::span!(tracing::Level::TRACE, "bpe"),
            })
        }

        // Taken from:
        // https://github.com/openai/tiktoken/blob/1b9faf2779855124f05174adf1383e53689ed94b/src/lib.rs#L16C1-L82C2
        // Greedily merges the lowest-rank adjacent pair until none remains;
        // returns the surviving (start, rank) boundaries.
        fn _byte_pair_merge(&self, piece: &[u8]) -> Vec<(usize, Rank)> {
            // This is a vector of (start, rank).
            // The rank is of the pair starting at position start.
            let mut parts = Vec::with_capacity(piece.len() + 1);

            // Note that we hash bytes when indexing into `ranks`, not token pairs. As long as we train BPE
            // the way we currently do, this is equivalent. An easy way to break this would be to decouple
            // merge priority from token index or to prevent specific token merges.
            let mut min_rank: (Rank, usize) = (Rank::MAX, usize::MAX);
            for i in 0..piece.len() - 1 {
                let rank = *self.ranks.get(&piece[i..i + 2]).unwrap_or(&Rank::MAX);
                if rank < min_rank.0 {
                    min_rank = (rank, i);
                }
                parts.push((i, rank));
            }
            parts.push((piece.len() - 1, Rank::MAX));
            parts.push((piece.len(), Rank::MAX));

            let get_rank = {
                #[inline(always)]
                |parts: &Vec<(usize, Rank)>, i: usize| {
                    if (i + 3) < parts.len() {
                        // Similar to `piece[i..i + 2]` above. The +3 is because we haven't yet deleted
                        // parts[i + 1], see comment in the main loop.
                        *self
                            .ranks
                            .get(&piece[parts[i].0..parts[i + 3].0])
                            .unwrap_or(&Rank::MAX)
                    } else {
                        Rank::MAX
                    }
                }
            };

            // If you have n parts and m merges, this does O(mn) work.
            // We could do something with a heap and do O(m log n) work.
            // n is often very small so considerations like cache-locality outweigh the algorithmic
            // complexity downsides of the `parts` vector.
            while min_rank.0 != Rank::MAX {
                let i = min_rank.1;
                // Update parts[i] and parts[i - 1] before removing parts[i + 1], since
                // `parts.remove(i + 1)` will thrash the cache.
                if i > 0 {
                    parts[i - 1].1 = get_rank(&parts, i - 1);
                }
                parts[i].1 = get_rank(&parts, i);
                parts.remove(i + 1);

                min_rank = (Rank::MAX, usize::MAX);
                for (i, &(_, rank)) in parts[..parts.len() - 1].iter().enumerate() {
                    if rank < min_rank.0 {
                        min_rank = (rank, i);
                    }
                }
            }
            parts
        }

        /// Encode one pre-tokenized piece into BPE ranks (no offset applied).
        pub fn byte_pair_encode(&self, piece: &[u8]) -> Vec<Rank> {
            if piece.is_empty() {
                return Vec::new();
            }
            if piece.len() == 1 {
                return vec![self.ranks[piece]];
            }
            assert!(piece.len() > 1);
            self._byte_pair_merge(piece)
                .windows(2)
                .map(|part| self.ranks[&piece[part[0].0..part[1].0]])
                .collect()
        }

        /// Encode `text` into token ids (rank + offset), terminated by the
        /// end-of-text token.
        pub fn encode(&self, text: &str) -> Result<Vec<u32>> {
            let _enter = self.span.enter();
            let mut bpe_tokens: Vec<u32> = Vec::new();
            for word in self.re.find_iter(text) {
                let word = word.map_err(E::wrap)?;
                let word_tokens = self.byte_pair_encode(word.as_str().as_bytes());
                for &token in word_tokens.iter() {
                    bpe_tokens.push(token + self.offset as u32)
                }
            }
            bpe_tokens.push((self.end_of_text + self.offset) as u32);
            Ok(bpe_tokens)
        }
    }
}
pub mod gpt {
use super::*;
    /// Normalization layer used inside the transformer blocks.
    #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
    pub enum NormType {
        LayerNorm,
        RMSNorm,
    }

    /// Attention kernel flavor; only TorchAttn is accepted (see SelfAttention::new).
    #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
    pub enum AttnKernelType {
        Fa2,
        TorchAttn,
        Hand,
    }

    /// MLP non-linearity flavor.
    #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
    pub enum NonLinearityType {
        Gelu,
        Swiglu,
    }

    // Runtime dispatch between the two normalization implementations.
    enum Norm {
        RMSNorm(candle_nn::RmsNorm),
        LayerNorm(candle_nn::LayerNorm),
    }
    // https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/model.py#L27
    /// GPT hyper-parameters mirroring the upstream MetaVoice model config.
    #[derive(Debug, Clone)]
    pub struct Config {
        pub block_size: usize,
        // One entry per input token stream (multi-codebook input).
        pub vocab_sizes: Vec<usize>,
        // One entry per output head.
        pub target_vocab_sizes: Vec<usize>,
        pub n_layer: usize,
        pub n_head: usize,
        pub n_embd: usize,
        // Whether linear/norm layers carry biases.
        pub bias: bool,
        pub causal: bool,
        pub spk_emb_on_text: bool,
        pub norm_type: NormType,
        pub rmsnorm_eps: f64,
        pub nonlinearity_type: NonLinearityType,
        // Rounds the SwiGLU hidden width up to a multiple of this value.
        pub swiglu_multiple_of: Option<usize>,
        pub attn_kernel_type: AttnKernelType,
        pub kv_cache_enabled: bool,
    }
    impl Config {
        /// Configuration matching the released 1B v0.1 checkpoint.
        pub fn cfg1b_v0_1() -> Self {
            Self {
                n_layer: 6,
                n_head: 6,
                n_embd: 384,
                block_size: 1024,
                bias: false,
                vocab_sizes: vec![1538, 1025],
                causal: false,
                target_vocab_sizes: vec![1025, 1025, 1025, 1025, 1025, 1025],
                swiglu_multiple_of: Some(256),
                norm_type: NormType::LayerNorm,
                kv_cache_enabled: false,
                attn_kernel_type: AttnKernelType::TorchAttn,
                spk_emb_on_text: true,
                nonlinearity_type: NonLinearityType::Gelu,
                rmsnorm_eps: 1e-5,
            }
        }
    }
impl Norm {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
match cfg.norm_type {
NormType::RMSNorm => {
let rms_norm = candle_nn::rms_norm(cfg.n_embd, cfg.rmsnorm_eps, vb)?;
Ok(Self::RMSNorm(rms_norm))
}
NormType::LayerNorm => {
let ln_cfg = candle_nn::LayerNormConfig {
affine: cfg.bias,
..Default::default()
};
let layer_norm = candle_nn::layer_norm(cfg.n_embd, ln_cfg, vb)?;
Ok(Self::LayerNorm(layer_norm))
}
}
}
}
impl Module for Norm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::RMSNorm(m) => m.forward(xs),
Self::LayerNorm(m) => m.forward(xs),
}
}
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/attn.py#L18
struct SelfAttention {
c_attn: Linear,
c_proj: Linear,
n_head: usize,
span: tracing::Span,
}
impl SelfAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
// The different attention variants are likely to be identical but still we only accept
// TorchAttn for now.
if cfg.attn_kernel_type != AttnKernelType::TorchAttn {
candle::bail!("only TorchAttn is supported")
}
if cfg.kv_cache_enabled {
candle::bail!("kv_cache_enabled=true is not supported")
}
let c_attn = linear_b(cfg.n_embd, cfg.n_embd * 3, cfg.bias, vb.pp("c_attn"))?;
let c_proj = linear_b(cfg.n_embd, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?;
Ok(Self {
c_attn,
c_proj,
n_head: cfg.n_head,
span: tracing::span!(tracing::Level::TRACE, "self-attn"),
})
}
}
    impl Module for SelfAttention {
        /// Full attention over the whole sequence (no causal mask, no cache).
        fn forward(&self, xs: &Tensor) -> Result<Tensor> {
            let _enter = self.span.enter();
            let (b, t, c) = xs.dims3()?;
            // Fused QKV projection: (b, t, c) -> (b, t, 3, n_head, c / n_head).
            let c_x = xs
                .apply(&self.c_attn)?
                .reshape((b, t, 3, self.n_head, c / self.n_head))?;
            let q = c_x.i((.., .., 0))?;
            let k = c_x.i((.., .., 1))?;
            let v = c_x.i((.., .., 2))?;
            // (b, t, h, d) -> (b, h, t, d)
            let q = q.transpose(1, 2)?.contiguous()?;
            let k = k.transpose(1, 2)?.contiguous()?;
            let v = v.transpose(1, 2)?.contiguous()?;
            // Scaled dot-product attention; scores are (b, h, t, t).
            let att = (q.matmul(&k.t()?)? / (k.dim(D::Minus1)? as f64).sqrt())?;
            // TODO: causal mask
            let att = candle_nn::ops::softmax_last_dim(&att)?;
            let att = att.matmul(&v)?.transpose(1, 2)?;
            att.reshape((b, t, c))?.apply(&self.c_proj)
        }
    }
    // https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/layers.py#L43
    /// Feed-forward block; the variant is selected by `Config::nonlinearity_type`.
    #[allow(clippy::upper_case_acronyms)]
    enum MLP {
        /// Classic GELU MLP: `c_proj(gelu(c_fc(x)))`.
        Gelu {
            c_fc: Linear,
            c_proj: Linear,
            span: tracing::Span,
        },
        /// SwiGLU MLP: `c_proj(silu(w1(x)) * w3(x))`.
        Swiglu {
            w1: Linear,
            w3: Linear,
            c_proj: Linear,
            span: tracing::Span,
        },
    }
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_dim = 4 * cfg.n_embd;
let slf = match cfg.nonlinearity_type {
NonLinearityType::Gelu => {
let c_fc = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("c_fc"))?;
let c_proj = linear_b(hidden_dim, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?;
Self::Gelu {
c_fc,
c_proj,
span: tracing::span!(tracing::Level::TRACE, "mlp-gelu"),
}
}
NonLinearityType::Swiglu => {
let hidden_dim = (2 * hidden_dim) / 3;
let swiglu_multiple_of = match cfg.swiglu_multiple_of {
None => candle::bail!("swiglu-multiple-of has to be set"),
Some(smo) => smo,
};
let hidden_dim = swiglu_multiple_of * (hidden_dim + swiglu_multiple_of - 1)
/ swiglu_multiple_of;
let w1 = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("w1"))?;
let w3 = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("w3"))?;
let c_proj = linear_b(hidden_dim, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?;
Self::Swiglu {
w1,
w3,
c_proj,
span: tracing::span!(tracing::Level::TRACE, "mlp-swiglu"),
}
}
};
Ok(slf)
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::Gelu { c_fc, c_proj, span } => {
let _enter = span.enter();
xs.apply(c_fc)?.gelu()?.apply(c_proj)
}
Self::Swiglu {
w1,
w3,
c_proj,
span,
} => {
let _enter = span.enter();
let w1 = xs.apply(w1)?;
let w3 = xs.apply(w3)?;
(w1.silu()? * w3)?.apply(c_proj)
}
}
}
}
    // https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/combined.py#L7
    /// Pre-norm transformer block: `x + attn(ln_1(x))` followed by
    /// `x + mlp(ln_2(x))`.
    struct Block {
        ln_1: Norm,
        ln_2: Norm,
        attn: SelfAttention,
        mlp: MLP,
        span: tracing::Span,
    }
impl Block {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln_1 = Norm::new(cfg, vb.pp("ln_1"))?;
let ln_2 = Norm::new(cfg, vb.pp("ln_2"))?;
let attn = SelfAttention::new(cfg, vb.pp("attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
Ok(Block {
ln_1,
ln_2,
attn,
mlp,
span: tracing::span!(tracing::Level::TRACE, "gpt-block"),
})
}
}
impl Module for Block {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = (xs + xs.apply(&self.ln_1)?.apply(&self.attn))?;
let xs = (&xs + xs.apply(&self.ln_2)?.apply(&self.mlp))?;
Ok(xs)
}
}
    // https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/model.py#L79
    /// GPT-style model with one token-embedding table per input hierarchy and
    /// one lm-head per target vocabulary.
    #[allow(clippy::upper_case_acronyms)]
    pub struct Model {
        wtes: Vec<candle_nn::Embedding>, // one token embedding per input hierarchy
        wpe: candle_nn::Embedding,       // absolute position embedding table
        h: Vec<Block>,                   // transformer blocks
        ln_f: Norm,                      // final normalization
        lm_heads: Vec<Linear>,           // one output head per target vocabulary
        cfg: Config,
        dtype: DType,
        span: tracing::Span,
    }
impl Model {
pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> {
let vb_t = vb.pp("transformer");
let ln_f = Norm::new(&cfg, vb_t.pp("ln_f"))?;
let mut wtes = Vec::with_capacity(cfg.vocab_sizes.len());
let vb_w = vb_t.pp("wtes");
for (idx, vocab_size) in cfg.vocab_sizes.iter().enumerate() {
let wte = candle_nn::embedding(*vocab_size, cfg.n_embd, vb_w.pp(idx))?;
wtes.push(wte)
}
let wpe = candle_nn::embedding(cfg.block_size, cfg.n_embd, vb_t.pp("wpe"))?;
let mut h = Vec::with_capacity(cfg.n_layer);
let vb_h = vb_t.pp("h");
for idx in 0..cfg.n_layer {
let block = Block::new(&cfg, vb_h.pp(idx))?;
h.push(block)
}
let mut lm_heads = Vec::with_capacity(cfg.target_vocab_sizes.len());
let vb_l = vb.pp("lm_heads");
for (idx, vocab_size) in cfg.target_vocab_sizes.iter().enumerate() {
let head = linear_b(cfg.n_embd, *vocab_size, false, vb_l.pp(idx))?;
lm_heads.push(head)
}
Ok(Self {
wtes,
wpe,
h,
ln_f,
lm_heads,
cfg,
dtype: vb.dtype(),
span: tracing::span!(tracing::Level::TRACE, "gpt"),
})
}
pub fn config(&self) -> &Config {
&self.cfg
}
pub fn forward(&self, idx: &Tensor) -> Result<Vec<Tensor>> {
let _enter = self.span.enter();
let device = idx.device();
let (b, _num_hierarchies, t) = idx.dims3()?;
let pos = Tensor::arange(0u32, t as u32, device)?;
let pos_emb = pos.apply(&self.wpe)?;
let mut tok_emb = Tensor::zeros((b, t, self.cfg.n_embd), self.dtype, device)?;
for (wte_idx, wte) in self.wtes.iter().enumerate() {
let emb = idx.i((.., wte_idx, ..))?.apply(wte)?;
tok_emb = (tok_emb + emb)?;
}
// TODO: speaker embs.
let spk_emb = 0f64;
let mut xs = (pos_emb.broadcast_add(&tok_emb)? + spk_emb)?;
for block in self.h.iter() {
xs = xs.apply(block)?
}
let xs = xs.apply(&self.ln_f)?;
let mut logits = Vec::with_capacity(self.lm_heads.len());
for lm_head in self.lm_heads.iter() {
// non-causal mode only.
let ys = xs.apply(lm_head)?;
logits.push(ys)
}
Ok(logits)
}
}
}
pub mod transformer {
use super::*;
    /// Configuration for the transformer model in this module.
    #[derive(Debug, Clone, serde::Deserialize)]
    pub struct Config {
        pub block_size: usize, // size of the positional embedding table
        pub vocab_size: usize,
        pub n_layer: usize,
        pub n_head: usize,
        pub dim: usize, // model embedding dimension
        pub speaker_emb_dim: usize, // width of the incoming speaker embedding
        pub intermediate_size: Option<usize>, // see `Config::intermediate_size`
        pub n_local_heads: Option<usize>, // defaults to `n_head` when `None`
        pub norm_eps: f64,
    }
impl Config {
pub fn cfg1b_v0_1() -> Self {
Self {
n_layer: 24,
n_head: 16,
dim: 2048,
vocab_size: 2562,
speaker_emb_dim: 256,
block_size: 2048,
intermediate_size: None,
n_local_heads: None,
norm_eps: 1e-5,
}
}
pub(crate) fn n_local_heads(&self) -> usize {
self.n_local_heads.unwrap_or(self.n_head)
}
pub(crate) fn head_dim(&self) -> usize {
self.dim / self.n_head
}
pub(crate) fn intermediate_size(&self) -> usize {
match self.intermediate_size {
Some(intermediate_size) => intermediate_size,
None => {
let hidden_dim = self.dim * 4;
let n_hidden = ((2 * hidden_dim) as f64 / 3.) as usize;
n_hidden.div_ceil(256) * 256
}
}
}
}
    /// SwiGLU feed-forward network: `w2(silu(w1(x)) * w3(x))`.
    #[derive(Debug, Clone)]
    struct FeedForward {
        w1: Linear, // gate projection (dim -> intermediate)
        w2: Linear, // down projection (intermediate -> dim)
        w3: Linear, // up projection (dim -> intermediate)
        span: tracing::Span,
    }
impl FeedForward {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let i_size = cfg.intermediate_size();
let w1 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w1"))?;
let w2 = linear_b(i_size, cfg.dim, false, vb.pp("w2"))?;
let w3 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w3"))?;
Ok(Self {
w1,
w2,
w3,
span: tracing::span!(tracing::Level::TRACE, "feed-forward"),
})
}
}
impl Module for FeedForward {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let swiglu = (candle_nn::ops::silu(&xs.apply(&self.w1)?)? * xs.apply(&self.w3))?;
swiglu.apply(&self.w2)
}
}
    /// Multi-head attention with grouped key/value heads and a kv-cache that
    /// grows along the sequence dimension on every forward call.
    #[derive(Debug, Clone)]
    struct Attention {
        wqkv: Linear, // fused q/k/v projection
        wo: Linear,   // output projection
        dim: usize,   // model dim; also the width of the query part of `wqkv`
        kv_size: usize, // n_local_heads * head_dim: width of each of k and v
        n_local_heads: usize, // number of key/value heads
        head_dim: usize,
        n_head: usize, // number of query heads
        kv_cache: Option<(Tensor, Tensor)>, // (k, v) accumulated so far
        span: tracing::Span,
    }
impl Attention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let n_local_heads = cfg.n_local_heads();
let head_dim = cfg.head_dim();
let total_head_dim = (cfg.n_head + 2 * n_local_heads) * head_dim;
let wqkv = linear_b(cfg.dim, total_head_dim, false, vb.pp("wqkv"))?;
let wo = linear_b(cfg.dim, cfg.dim, false, vb.pp("wo"))?;
Ok(Self {
wqkv,
wo,
dim: cfg.dim,
kv_size: n_local_heads * head_dim,
n_local_heads,
head_dim,
n_head: cfg.n_head,
kv_cache: None,
span: tracing::span!(tracing::Level::TRACE, "feed-forward"),
})
}
fn forward(&mut self, xs: &Tensor, _pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_sz, seqlen, _) = xs.dims3()?;
let qkv = xs.apply(&self.wqkv)?;
let q = qkv.narrow(D::Minus1, 0, self.dim)?;
let k = qkv.narrow(D::Minus1, self.dim, self.kv_size)?;
let v = qkv.narrow(D::Minus1, self.dim + self.kv_size, self.kv_size)?;
let q = q
.reshape((b_sz, seqlen, self.n_head, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 2)?;
let v = Tensor::cat(&[prev_v, &v], 2)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
let k = repeat_interleave(&k, self.n_head / self.n_local_heads, 1)?;
let v = repeat_interleave(&v, self.n_head / self.n_local_heads, 1)?;
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
let attn_weights = attn_weights.broadcast_add(mask)?;
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&v)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, seqlen, self.dim))?
.apply(&self.wo)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
    /// Pre-norm transformer block: attention and feed-forward, each preceded
    /// by an RMS-norm and wrapped in a residual connection.
    #[derive(Debug, Clone)]
    struct Block {
        attention: Attention,
        feed_forward: FeedForward,
        ffn_norm: RmsNorm,
        attention_norm: RmsNorm,
        span: tracing::Span,
    }
impl Block {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = Attention::new(cfg, vb.pp("attention"))?;
let feed_forward = FeedForward::new(cfg, vb.pp("feed_forward"))?;
let ffn_norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("ffn_norm"))?;
let attention_norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("attention_norm"))?;
Ok(Self {
attention,
feed_forward,
ffn_norm,
attention_norm,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hs = xs.apply(&self.attention_norm)?;
let hs = (xs + self.attention.forward(&hs, pos, mask))?;
&hs + hs.apply(&self.ffn_norm)?.apply(&self.feed_forward)
}
fn clear_kv_cache(&mut self) {
self.attention.clear_kv_cache()
}
}
    /// Transformer language model with kv-cached attention blocks and a
    /// speaker-conditioning projection.
    #[derive(Debug, Clone)]
    pub struct Model {
        tok_embeddings: Embedding,
        pos_embeddings: Embedding,
        speaker_cond_pos: Linear, // projects speaker embeddings (speaker_emb_dim -> dim)
        layers: Vec<Block>,
        norm: RmsNorm, // final RMS normalization
        output: Linear, // lm head (dim -> vocab_size)
        // NOTE(review): built via Tensor::cat in `new`; its use is in `forward`,
        // outside this view — confirm semantics there.
        spk_cond_mask: Tensor,
        span: tracing::Span,
    }
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let tok_embeddings = embedding(cfg.vocab_size, cfg.dim, vb.pp("tok_embeddings"))?;
let pos_embeddings = embedding(cfg.block_size, cfg.dim, vb.pp("pos_embeddings"))?;
let speaker_cond_pos = linear_b(
cfg.speaker_emb_dim,
cfg.dim,
false,
vb.pp("speaker_cond_pos"),
)?;
let mut layers = Vec::with_capacity(cfg.n_layer);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.n_layer {
let layer = Block::new(cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("norm"))?;
let output = linear_b(cfg.dim, cfg.vocab_size, false, vb.pp("output"))?;
let dtype = vb.dtype();
let spk_cond_mask = Tensor::cat(
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/resnet.rs | candle-transformers/src/models/resnet.rs | //! # ResNet Implementation
//!
//! Implementation of ResNet architectures as described in the paper:
//!
//! ## Reference
//!
//! [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
//! He et al. (2015)
//!
//! This paper introduced ResNet, a deep neural network architecture that utilizes
//! skip connections ("residual connections") to enable training of very deep networks.
use candle::{Result, D};
use candle_nn::{batch_norm, Conv2d, Func, VarBuilder};
fn conv2d(
c_in: usize,
c_out: usize,
ksize: usize,
padding: usize,
stride: usize,
vb: VarBuilder,
) -> Result<Conv2d> {
let conv2d_cfg = candle_nn::Conv2dConfig {
stride,
padding,
..Default::default()
};
candle_nn::conv2d_no_bias(c_in, c_out, ksize, conv2d_cfg, vb)
}
fn downsample(c_in: usize, c_out: usize, stride: usize, vb: VarBuilder) -> Result<Func> {
if stride != 1 || c_in != c_out {
let conv = conv2d(c_in, c_out, 1, 0, stride, vb.pp(0))?;
let bn = batch_norm(c_out, 1e-5, vb.pp(1))?;
Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false)))
} else {
Ok(Func::new(|xs| Ok(xs.clone())))
}
}
fn basic_block(c_in: usize, c_out: usize, stride: usize, vb: VarBuilder) -> Result<Func> {
let conv1 = conv2d(c_in, c_out, 3, 1, stride, vb.pp("conv1"))?;
let bn1 = batch_norm(c_out, 1e-5, vb.pp("bn1"))?;
let conv2 = conv2d(c_out, c_out, 3, 1, 1, vb.pp("conv2"))?;
let bn2 = batch_norm(c_out, 1e-5, vb.pp("bn2"))?;
let downsample = downsample(c_in, c_out, stride, vb.pp("downsample"))?;
Ok(Func::new(move |xs| {
let ys = xs
.apply(&conv1)?
.apply_t(&bn1, false)?
.relu()?
.apply(&conv2)?
.apply_t(&bn2, false)?;
(xs.apply(&downsample)? + ys)?.relu()
}))
}
fn basic_layer(
c_in: usize,
c_out: usize,
stride: usize,
cnt: usize,
vb: VarBuilder,
) -> Result<Func> {
let mut layers = Vec::with_capacity(cnt);
for index in 0..cnt {
let l_in = if index == 0 { c_in } else { c_out };
let stride = if index == 0 { stride } else { 1 };
layers.push(basic_block(l_in, c_out, stride, vb.pp(index))?)
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for layer in layers.iter() {
xs = xs.apply(layer)?
}
Ok(xs)
}))
}
fn resnet(
nclasses: Option<usize>,
c1: usize,
c2: usize,
c3: usize,
c4: usize,
vb: VarBuilder,
) -> Result<Func> {
let conv1 = conv2d(3, 64, 7, 3, 2, vb.pp("conv1"))?;
let bn1 = batch_norm(64, 1e-5, vb.pp("bn1"))?;
let layer1 = basic_layer(64, 64, 1, c1, vb.pp("layer1"))?;
let layer2 = basic_layer(64, 128, 2, c2, vb.pp("layer2"))?;
let layer3 = basic_layer(128, 256, 2, c3, vb.pp("layer3"))?;
let layer4 = basic_layer(256, 512, 2, c4, vb.pp("layer4"))?;
let fc = match nclasses {
None => None,
Some(nclasses) => {
let linear = candle_nn::linear(512, nclasses, vb.pp("fc"))?;
Some(linear)
}
};
Ok(Func::new(move |xs| {
let xs = xs
.apply(&conv1)?
.apply_t(&bn1, false)?
.relu()?
.pad_with_same(D::Minus1, 1, 1)?
.pad_with_same(D::Minus2, 1, 1)?
.max_pool2d_with_stride(3, 2)?
.apply(&layer1)?
.apply(&layer2)?
.apply(&layer3)?
.apply(&layer4)?
.mean(D::Minus1)?
.mean(D::Minus1)?;
match &fc {
None => Ok(xs),
Some(fc) => xs.apply(fc),
}
}))
}
/// Creates a ResNet-18 model.
pub fn resnet18(num_classes: usize, vb: VarBuilder) -> Result<Func> {
resnet(Some(num_classes), 2, 2, 2, 2, vb)
}
pub fn resnet18_no_final_layer(vb: VarBuilder) -> Result<Func> {
resnet(None, 2, 2, 2, 2, vb)
}
/// Creates a ResNet-34 model.
pub fn resnet34(num_classes: usize, vb: VarBuilder) -> Result<Func> {
resnet(Some(num_classes), 3, 4, 6, 3, vb)
}
pub fn resnet34_no_final_layer(vb: VarBuilder) -> Result<Func> {
resnet(None, 3, 4, 6, 3, vb)
}
// Bottleneck versions for ResNet 50, 101, and 152.
fn bottleneck_block(
c_in: usize,
c_out: usize,
stride: usize,
e: usize,
vb: VarBuilder,
) -> Result<Func> {
let e_dim = e * c_out;
let conv1 = conv2d(c_in, c_out, 1, 0, 1, vb.pp("conv1"))?;
let bn1 = batch_norm(c_out, 1e-5, vb.pp("bn1"))?;
let conv2 = conv2d(c_out, c_out, 3, 1, stride, vb.pp("conv2"))?;
let bn2 = batch_norm(c_out, 1e-5, vb.pp("bn2"))?;
let conv3 = conv2d(c_out, e_dim, 1, 0, 1, vb.pp("conv3"))?;
let bn3 = batch_norm(e_dim, 1e-5, vb.pp("bn3"))?;
let downsample = downsample(c_in, e_dim, stride, vb.pp("downsample"))?;
Ok(Func::new(move |xs| {
let ys = xs
.apply(&conv1)?
.apply_t(&bn1, false)?
.relu()?
.apply(&conv2)?
.apply_t(&bn2, false)?
.relu()?
.apply(&conv3)?
.apply_t(&bn3, false)?;
(xs.apply(&downsample)? + ys)?.relu()
}))
}
fn bottleneck_layer(
c_in: usize,
c_out: usize,
stride: usize,
cnt: usize,
vb: VarBuilder,
) -> Result<Func> {
let mut layers = Vec::with_capacity(cnt);
for index in 0..cnt {
let l_in = if index == 0 { c_in } else { 4 * c_out };
let stride = if index == 0 { stride } else { 1 };
layers.push(bottleneck_block(l_in, c_out, stride, 4, vb.pp(index))?)
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for layer in layers.iter() {
xs = xs.apply(layer)?
}
Ok(xs)
}))
}
fn bottleneck_resnet(
nclasses: Option<usize>,
c1: usize,
c2: usize,
c3: usize,
c4: usize,
vb: VarBuilder,
) -> Result<Func> {
let conv1 = conv2d(3, 64, 7, 3, 2, vb.pp("conv1"))?;
let bn1 = batch_norm(64, 1e-5, vb.pp("bn1"))?;
let layer1 = bottleneck_layer(64, 64, 1, c1, vb.pp("layer1"))?;
let layer2 = bottleneck_layer(4 * 64, 128, 2, c2, vb.pp("layer2"))?;
let layer3 = bottleneck_layer(4 * 128, 256, 2, c3, vb.pp("layer3"))?;
let layer4 = bottleneck_layer(4 * 256, 512, 2, c4, vb.pp("layer4"))?;
let fc = match nclasses {
None => None,
Some(nclasses) => {
let linear = candle_nn::linear(4 * 512, nclasses, vb.pp("fc"))?;
Some(linear)
}
};
Ok(Func::new(move |xs| {
let xs = xs
.apply(&conv1)?
.apply_t(&bn1, false)?
.relu()?
.pad_with_same(D::Minus1, 1, 1)?
.pad_with_same(D::Minus2, 1, 1)?
.max_pool2d_with_stride(3, 2)?
.apply(&layer1)?
.apply(&layer2)?
.apply(&layer3)?
.apply(&layer4)?
.mean(D::Minus1)?
.mean(D::Minus1)?;
match &fc {
None => Ok(xs),
Some(fc) => xs.apply(fc),
}
}))
}
pub fn resnet50(num_classes: usize, vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(Some(num_classes), 3, 4, 6, 3, vb)
}
pub fn resnet50_no_final_layer(vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(None, 3, 4, 6, 3, vb)
}
pub fn resnet101(num_classes: usize, vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(Some(num_classes), 3, 4, 23, 3, vb)
}
pub fn resnet101_no_final_layer(vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(None, 3, 4, 23, 3, vb)
}
pub fn resnet152(num_classes: usize, vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(Some(num_classes), 3, 8, 36, 3, vb)
}
pub fn resnet152_no_final_layer(vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(None, 3, 8, 36, 3, vb)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/bert.rs | candle-transformers/src/models/bert.rs | //! BERT (Bidirectional Encoder Representations from Transformers)
//!
//! Bert is a general large language model that can be used for various language tasks:
//! - Compute sentence embeddings for a prompt.
//! - Compute similarities between a set of sentences.
//! - [Arxiv](https://arxiv.org/abs/1810.04805) "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"
//! - Upstream [GitHub repo](https://github.com/google-research/bert).
//! - See bert in [candle-examples](https://github.com/huggingface/candle/tree/main/candle-examples/) for runnable code
//!
use super::with_tracing::{layer_norm, linear, LayerNorm, Linear};
use candle::{DType, Device, Result, Tensor};
use candle_nn::{embedding, Embedding, Module, VarBuilder};
use serde::Deserialize;
pub const DTYPE: DType = DType::F32;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HiddenAct {
Gelu,
GeluApproximate,
Relu,
}
#[derive(Clone)]
struct HiddenActLayer {
act: HiddenAct,
span: tracing::Span,
}
impl HiddenActLayer {
fn new(act: HiddenAct) -> Self {
let span = tracing::span!(tracing::Level::TRACE, "hidden-act");
Self { act, span }
}
fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> {
let _enter = self.span.enter();
match self.act {
// https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213
HiddenAct::Gelu => xs.gelu_erf(),
HiddenAct::GeluApproximate => xs.gelu(),
HiddenAct::Relu => xs.relu(),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
#[default]
Absolute,
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/configuration_bert.py#L1
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub intermediate_size: usize,
pub hidden_act: HiddenAct,
pub hidden_dropout_prob: f64,
pub max_position_embeddings: usize,
pub type_vocab_size: usize,
pub initializer_range: f64,
pub layer_norm_eps: f64,
pub pad_token_id: usize,
#[serde(default)]
pub position_embedding_type: PositionEmbeddingType,
#[serde(default)]
pub use_cache: bool,
pub classifier_dropout: Option<f64>,
pub model_type: Option<String>,
}
impl Default for Config {
fn default() -> Self {
Self {
vocab_size: 30522,
hidden_size: 768,
num_hidden_layers: 12,
num_attention_heads: 12,
intermediate_size: 3072,
hidden_act: HiddenAct::Gelu,
hidden_dropout_prob: 0.1,
max_position_embeddings: 512,
type_vocab_size: 2,
initializer_range: 0.02,
layer_norm_eps: 1e-12,
pad_token_id: 0,
position_embedding_type: PositionEmbeddingType::Absolute,
use_cache: true,
classifier_dropout: None,
model_type: Some("bert".to_string()),
}
}
}
impl Config {
fn _all_mini_lm_l6_v2() -> Self {
// https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/blob/main/config.json
Self {
vocab_size: 30522,
hidden_size: 384,
num_hidden_layers: 6,
num_attention_heads: 12,
intermediate_size: 1536,
hidden_act: HiddenAct::Gelu,
hidden_dropout_prob: 0.1,
max_position_embeddings: 512,
type_vocab_size: 2,
initializer_range: 0.02,
layer_norm_eps: 1e-12,
pad_token_id: 0,
position_embedding_type: PositionEmbeddingType::Absolute,
use_cache: true,
classifier_dropout: None,
model_type: Some("bert".to_string()),
}
}
}
#[derive(Clone)]
struct Dropout {
#[allow(dead_code)]
pr: f64,
}
impl Dropout {
fn new(pr: f64) -> Self {
Self { pr }
}
}
impl Module for Dropout {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
// TODO
Ok(x.clone())
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L180
struct BertEmbeddings {
word_embeddings: Embedding,
position_embeddings: Option<Embedding>,
token_type_embeddings: Embedding,
layer_norm: LayerNorm,
dropout: Dropout,
span: tracing::Span,
}
impl BertEmbeddings {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let word_embeddings = embedding(
config.vocab_size,
config.hidden_size,
vb.pp("word_embeddings"),
)?;
let position_embeddings = embedding(
config.max_position_embeddings,
config.hidden_size,
vb.pp("position_embeddings"),
)?;
let token_type_embeddings = embedding(
config.type_vocab_size,
config.hidden_size,
vb.pp("token_type_embeddings"),
)?;
let layer_norm = layer_norm(
config.hidden_size,
config.layer_norm_eps,
vb.pp("LayerNorm"),
)?;
Ok(Self {
word_embeddings,
position_embeddings: Some(position_embeddings),
token_type_embeddings,
layer_norm,
dropout: Dropout::new(config.hidden_dropout_prob),
span: tracing::span!(tracing::Level::TRACE, "embeddings"),
})
}
fn forward(&self, input_ids: &Tensor, token_type_ids: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (_bsize, seq_len) = input_ids.dims2()?;
let input_embeddings = self.word_embeddings.forward(input_ids)?;
let token_type_embeddings = self.token_type_embeddings.forward(token_type_ids)?;
let mut embeddings = (&input_embeddings + token_type_embeddings)?;
if let Some(position_embeddings) = &self.position_embeddings {
// TODO: Proper absolute positions?
let position_ids = (0..seq_len as u32).collect::<Vec<_>>();
let position_ids = Tensor::new(&position_ids[..], input_ids.device())?;
embeddings = embeddings.broadcast_add(&position_embeddings.forward(&position_ids)?)?
}
let embeddings = self.layer_norm.forward(&embeddings)?;
let embeddings = self.dropout.forward(&embeddings)?;
Ok(embeddings)
}
}
#[derive(Clone)]
struct BertSelfAttention {
query: Linear,
key: Linear,
value: Linear,
dropout: Dropout,
num_attention_heads: usize,
attention_head_size: usize,
span: tracing::Span,
span_softmax: tracing::Span,
}
impl BertSelfAttention {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let attention_head_size = config.hidden_size / config.num_attention_heads;
let all_head_size = config.num_attention_heads * attention_head_size;
let dropout = Dropout::new(config.hidden_dropout_prob);
let hidden_size = config.hidden_size;
let query = linear(hidden_size, all_head_size, vb.pp("query"))?;
let value = linear(hidden_size, all_head_size, vb.pp("value"))?;
let key = linear(hidden_size, all_head_size, vb.pp("key"))?;
Ok(Self {
query,
key,
value,
dropout,
num_attention_heads: config.num_attention_heads,
attention_head_size,
span: tracing::span!(tracing::Level::TRACE, "self-attn"),
span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"),
})
}
fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
let mut new_x_shape = xs.dims().to_vec();
new_x_shape.pop();
new_x_shape.push(self.num_attention_heads);
new_x_shape.push(self.attention_head_size);
let xs = xs.reshape(new_x_shape.as_slice())?.transpose(1, 2)?;
xs.contiguous()
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let query_layer = self.query.forward(hidden_states)?;
let key_layer = self.key.forward(hidden_states)?;
let value_layer = self.value.forward(hidden_states)?;
let query_layer = self.transpose_for_scores(&query_layer)?;
let key_layer = self.transpose_for_scores(&key_layer)?;
let value_layer = self.transpose_for_scores(&value_layer)?;
let attention_scores = query_layer.matmul(&key_layer.t()?)?;
let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?;
let attention_scores = attention_scores.broadcast_add(attention_mask)?;
let attention_probs = {
let _enter_sm = self.span_softmax.enter();
candle_nn::ops::softmax(&attention_scores, candle::D::Minus1)?
};
let attention_probs = self.dropout.forward(&attention_probs)?;
let context_layer = attention_probs.matmul(&value_layer)?;
let context_layer = context_layer.transpose(1, 2)?.contiguous()?;
let context_layer = context_layer.flatten_from(candle::D::Minus2)?;
Ok(context_layer)
}
}
#[derive(Clone)]
struct BertSelfOutput {
dense: Linear,
layer_norm: LayerNorm,
dropout: Dropout,
span: tracing::Span,
}
impl BertSelfOutput {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let dense = linear(config.hidden_size, config.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(
config.hidden_size,
config.layer_norm_eps,
vb.pp("LayerNorm"),
)?;
let dropout = Dropout::new(config.hidden_dropout_prob);
Ok(Self {
dense,
layer_norm,
dropout,
span: tracing::span!(tracing::Level::TRACE, "self-out"),
})
}
fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = self.dropout.forward(&hidden_states)?;
self.layer_norm.forward(&(hidden_states + input_tensor)?)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L392
#[derive(Clone)]
struct BertAttention {
self_attention: BertSelfAttention,
self_output: BertSelfOutput,
span: tracing::Span,
}
impl BertAttention {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let self_attention = BertSelfAttention::load(vb.pp("self"), config)?;
let self_output = BertSelfOutput::load(vb.pp("output"), config)?;
Ok(Self {
self_attention,
self_output,
span: tracing::span!(tracing::Level::TRACE, "attn"),
})
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let self_outputs = self.self_attention.forward(hidden_states, attention_mask)?;
let attention_output = self.self_output.forward(&self_outputs, hidden_states)?;
Ok(attention_output)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L441
#[derive(Clone)]
struct BertIntermediate {
dense: Linear,
intermediate_act: HiddenActLayer,
span: tracing::Span,
}
impl BertIntermediate {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let dense = linear(config.hidden_size, config.intermediate_size, vb.pp("dense"))?;
Ok(Self {
dense,
intermediate_act: HiddenActLayer::new(config.hidden_act),
span: tracing::span!(tracing::Level::TRACE, "inter"),
})
}
}
impl Module for BertIntermediate {
fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_states = self.dense.forward(hidden_states)?;
let ys = self.intermediate_act.forward(&hidden_states)?;
Ok(ys)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L456
#[derive(Clone)]
struct BertOutput {
dense: Linear,
layer_norm: LayerNorm,
dropout: Dropout,
span: tracing::Span,
}
impl BertOutput {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let dense = linear(config.intermediate_size, config.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(
config.hidden_size,
config.layer_norm_eps,
vb.pp("LayerNorm"),
)?;
let dropout = Dropout::new(config.hidden_dropout_prob);
Ok(Self {
dense,
layer_norm,
dropout,
span: tracing::span!(tracing::Level::TRACE, "out"),
})
}
fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = self.dropout.forward(&hidden_states)?;
self.layer_norm.forward(&(hidden_states + input_tensor)?)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L470
#[derive(Clone)]
pub struct BertLayer {
attention: BertAttention,
intermediate: BertIntermediate,
output: BertOutput,
span: tracing::Span,
}
impl BertLayer {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let attention = BertAttention::load(vb.pp("attention"), config)?;
let intermediate = BertIntermediate::load(vb.pp("intermediate"), config)?;
let output = BertOutput::load(vb.pp("output"), config)?;
Ok(Self {
attention,
intermediate,
output,
span: tracing::span!(tracing::Level::TRACE, "layer"),
})
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let attention_output = self.attention.forward(hidden_states, attention_mask)?;
// TODO: Support cross-attention?
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523
// TODO: Support something similar to `apply_chunking_to_forward`?
let intermediate_output = self.intermediate.forward(&attention_output)?;
let layer_output = self
.output
.forward(&intermediate_output, &attention_output)?;
Ok(layer_output)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L556
#[derive(Clone)]
pub struct BertEncoder {
pub layers: Vec<BertLayer>,
span: tracing::Span,
}
impl BertEncoder {
pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let layers = (0..config.num_hidden_layers)
.map(|index| BertLayer::load(vb.pp(format!("layer.{index}")), config))
.collect::<Result<Vec<_>>>()?;
let span = tracing::span!(tracing::Level::TRACE, "encoder");
Ok(BertEncoder { layers, span })
}
pub fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut hidden_states = hidden_states.clone();
// Use a loop rather than a fold as it's easier to modify when adding debug/...
for layer in self.layers.iter() {
hidden_states = layer.forward(&hidden_states, attention_mask)?
}
Ok(hidden_states)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L874
/// BERT backbone: embeddings followed by the transformer encoder.
pub struct BertModel {
    embeddings: BertEmbeddings,
    encoder: BertEncoder,
    pub device: Device,
    span: tracing::Span,
}
impl BertModel {
    /// Load the weights, first from the root prefix and, if that fails and the
    /// config carries a `model_type`, from the `{model_type}.` prefix used by
    /// some checkpoints. If both attempts fail, the first error is returned.
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let (embeddings, encoder) = match (
            BertEmbeddings::load(vb.pp("embeddings"), config),
            BertEncoder::load(vb.pp("encoder"), config),
        ) {
            (Ok(embeddings), Ok(encoder)) => (embeddings, encoder),
            (Err(err), _) | (_, Err(err)) => {
                // Fallback: retry under e.g. "bert.embeddings"/"bert.encoder".
                if let Some(model_type) = &config.model_type {
                    if let (Ok(embeddings), Ok(encoder)) = (
                        BertEmbeddings::load(vb.pp(format!("{model_type}.embeddings")), config),
                        BertEncoder::load(vb.pp(format!("{model_type}.encoder")), config),
                    ) {
                        (embeddings, encoder)
                    } else {
                        return Err(err);
                    }
                } else {
                    return Err(err);
                }
            }
        };
        Ok(Self {
            embeddings,
            encoder,
            device: vb.device().clone(),
            span: tracing::span!(tracing::Level::TRACE, "model"),
        })
    }
    /// Run the encoder. When `attention_mask` is `None`, all positions are
    /// attended to (a mask of ones shaped like `input_ids`).
    pub fn forward(
        &self,
        input_ids: &Tensor,
        token_type_ids: &Tensor,
        attention_mask: Option<&Tensor>,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let embedding_output = self.embeddings.forward(input_ids, token_type_ids)?;
        let attention_mask = match attention_mask {
            Some(attention_mask) => attention_mask.clone(),
            None => input_ids.ones_like()?,
        };
        let dtype = embedding_output.dtype();
        // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L995
        let attention_mask = get_extended_attention_mask(&attention_mask, dtype)?;
        let sequence_output = self.encoder.forward(&embedding_output, &attention_mask)?;
        Ok(sequence_output)
    }
}
/// Broadcast a 2d `(b, src)` or 3d `(b, tgt, src)` attention mask to the 4d
/// additive form used by the attention layers: allowed positions become 0 and
/// masked positions become a very large negative value.
fn get_extended_attention_mask(attention_mask: &Tensor, dtype: DType) -> Result<Tensor> {
    let attention_mask = match attention_mask.rank() {
        // (b, tgt, src) -> (b, 1, tgt, src)
        3 => attention_mask.unsqueeze(1)?,
        // (b, src) -> (b, 1, 1, src)
        2 => attention_mask.unsqueeze(1)?.unsqueeze(1)?,
        _ => candle::bail!("Wrong shape for input_ids or attention_mask"),
    };
    let attention_mask = attention_mask.to_dtype(dtype)?;
    // torch.finfo(dtype).min
    // NOTE(review): `f32::MIN` is cast to `dtype` below; for reduced-precision
    // dtypes (e.g. f16) that conversion can overflow towards -inf, and
    // `0 * -inf` would produce NaN on unmasked positions — confirm the dtypes
    // this is used with.
    (attention_mask.ones_like()? - &attention_mask)?.broadcast_mul(
        &Tensor::try_from(f32::MIN)?
            .to_device(attention_mask.device())?
            .to_dtype(dtype)?,
    )
}
//https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L752-L766
/// Dense -> activation -> layer-norm transform applied before the MLM decoder.
struct BertPredictionHeadTransform {
    dense: Linear,
    activation: HiddenActLayer,
    layer_norm: LayerNorm,
}
impl BertPredictionHeadTransform {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            dense: linear(config.hidden_size, config.hidden_size, vb.pp("dense"))?,
            activation: HiddenActLayer::new(config.hidden_act),
            layer_norm: layer_norm(
                config.hidden_size,
                config.layer_norm_eps,
                vb.pp("LayerNorm"),
            )?,
        })
    }
}
impl Module for BertPredictionHeadTransform {
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let hidden_states = self.dense.forward(hidden_states)?;
        let hidden_states = self.activation.forward(&hidden_states)?;
        self.layer_norm.forward(&hidden_states)
    }
}
// https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L769C1-L790C1
/// MLM prediction head: the transform followed by the vocabulary decoder.
pub struct BertLMPredictionHead {
    transform: BertPredictionHeadTransform,
    decoder: Linear,
}
impl BertLMPredictionHead {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            transform: BertPredictionHeadTransform::load(vb.pp("transform"), config)?,
            decoder: linear(config.hidden_size, config.vocab_size, vb.pp("decoder"))?,
        })
    }
}
impl Module for BertLMPredictionHead {
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let transformed = self.transform.forward(hidden_states)?;
        self.decoder.forward(&transformed)
    }
}
// https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L792
/// Thin wrapper matching the `cls.predictions` naming of the checkpoint.
pub struct BertOnlyMLMHead {
    predictions: BertLMPredictionHead,
}
impl BertOnlyMLMHead {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            predictions: BertLMPredictionHead::load(vb.pp("predictions"), config)?,
        })
    }
}
impl Module for BertOnlyMLMHead {
    fn forward(&self, sequence_output: &Tensor) -> Result<Tensor> {
        self.predictions.forward(sequence_output)
    }
}
/// BERT with the masked-language-modeling head on top.
pub struct BertForMaskedLM {
    bert: BertModel,
    cls: BertOnlyMLMHead,
}
impl BertForMaskedLM {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            bert: BertModel::load(vb.pp("bert"), config)?,
            cls: BertOnlyMLMHead::load(vb.pp("cls"), config)?,
        })
    }
    /// Run the encoder and project every position onto the vocabulary.
    pub fn forward(
        &self,
        input_ids: &Tensor,
        token_type_ids: &Tensor,
        attention_mask: Option<&Tensor>,
    ) -> Result<Tensor> {
        let hidden = self
            .bert
            .forward(input_ids, token_type_ids, attention_mask)?;
        self.cls.forward(&hidden)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/vit.rs | candle-transformers/src/models/vit.rs | //! Vision Transformer (ViT) implementation.
//!
//! Vision Transformer applies transformer architecture to image classification
//! by splitting images into patches and processing them as a sequence.
//!
//! Key characteristics:
//! - Image patches as sequence tokens
//! - Self-attention between patches
//! - Position embeddings
//! - CLS token for classification
//! - Layer normalization
//!
//! References:
//! - [ViT Paper](https://arxiv.org/abs/2010.11929)
//! - [Model Card](https://huggingface.co/google/vit-base-patch16-224)
//!
use crate::models::with_tracing::{conv2d, linear, linear_no_bias, Conv2d, Linear};
use candle::{IndexOp, Module, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, VarBuilder};
// https://github.com/huggingface/transformers/blob/main/src/transformers/models/vit/configuration_vit.py
/// ViT hyper-parameters, deserializable from an upstream `config.json`.
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    pub hidden_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub intermediate_size: usize,
    pub hidden_act: candle_nn::Activation,
    pub layer_norm_eps: f64,
    pub image_size: usize,
    pub patch_size: usize,
    pub num_channels: usize,
    // Whether the q/k/v projections carry a bias term.
    pub qkv_bias: bool,
}
impl Config {
    // https://huggingface.co/google/vit-base-patch16-224/blob/main/config.json
    /// Configuration matching the `google/vit-base-patch16-224` checkpoint.
    pub fn vit_base_patch16_224() -> Self {
        Self {
            hidden_size: 768,
            num_hidden_layers: 12,
            num_attention_heads: 12,
            intermediate_size: 3072,
            hidden_act: candle_nn::Activation::Gelu,
            layer_norm_eps: 1e-12,
            image_size: 224,
            patch_size: 16,
            num_channels: 3,
            qkv_bias: true,
        }
    }
    /// Configuration used by the vision encoder of
    /// `microsoft/trocr-base-handwritten` (larger input, no qkv bias).
    pub fn microsoft_trocr_base_handwritten() -> Self {
        Self {
            hidden_size: 768,
            num_hidden_layers: 12,
            num_attention_heads: 12,
            intermediate_size: 3072,
            hidden_act: candle_nn::Activation::Gelu,
            layer_norm_eps: 1e-12,
            image_size: 384,
            patch_size: 16,
            num_channels: 3,
            qkv_bias: false,
        }
    }
}
/// Splits the input image into non-overlapping patches and projects each patch
/// to the hidden size with a strided convolution.
#[derive(Debug, Clone)]
struct PatchEmbeddings {
    num_patches: usize,
    projection: Conv2d,
}
impl PatchEmbeddings {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        // Number of patches along one side of the (square) image.
        let n_side = cfg.image_size / cfg.patch_size;
        let conv_cfg = candle_nn::Conv2dConfig {
            stride: cfg.patch_size,
            ..Default::default()
        };
        let projection = conv2d(
            cfg.num_channels,
            cfg.hidden_size,
            cfg.patch_size,
            conv_cfg,
            vb.pp("projection"),
        )?;
        Ok(Self {
            num_patches: n_side * n_side,
            projection,
        })
    }
}
impl Module for PatchEmbeddings {
    fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> {
        // Expects a 4d (b, c, h, w) image tensor.
        let (_b_size, _num_channels, _height, _width) = pixel_values.dims4()?;
        let patches = self.projection.forward(pixel_values)?;
        // (b, hidden, h', w') -> (b, h'*w', hidden)
        patches.flatten_from(2)?.transpose(1, 2)
    }
}
/// Patch embeddings plus the learned CLS token and position embeddings,
/// with optional masked-patch replacement for masked-image-modeling.
#[derive(Debug, Clone)]
pub struct Embeddings {
    cls_token: Tensor,
    mask_token: Option<Tensor>,
    patch_embeddings: PatchEmbeddings,
    position_embeddings: Tensor,
    hidden_size: usize,
}
impl Embeddings {
    /// Load the embedding weights; `use_mask_token` additionally loads the
    /// learned mask token used to replace masked patches.
    pub fn new(cfg: &Config, use_mask_token: bool, vb: VarBuilder) -> Result<Self> {
        let hidden_size = cfg.hidden_size;
        let cls_token = vb.get((1, 1, hidden_size), "cls_token")?;
        let mask_token = if use_mask_token {
            Some(vb.get((1, 1, hidden_size), "mask_token")?)
        } else {
            None
        };
        let patch_embeddings = PatchEmbeddings::new(cfg, vb.pp("patch_embeddings"))?;
        let num_patches = patch_embeddings.num_patches;
        // +1 for the CLS token prepended in `forward`.
        let position_embeddings =
            vb.get((1, num_patches + 1, hidden_size), "position_embeddings")?;
        Ok(Self {
            cls_token,
            mask_token,
            patch_embeddings,
            position_embeddings,
            hidden_size,
        })
    }
    // Not implemented yet: calling `forward` with
    // `interpolate_pos_encoding = true` panics via `todo!()`.
    fn interpolate_pos_encoding(
        &self,
        _embeddings: &Tensor,
        _height: usize,
        _width: usize,
    ) -> Result<Tensor> {
        todo!()
    }
    pub fn forward(
        &self,
        pixel_values: &Tensor,
        bool_masked_pos: Option<&Tensor>,
        interpolate_pos_encoding: bool,
    ) -> Result<Tensor> {
        let (b_size, _num_channels, height, width) = pixel_values.dims4()?;
        let embeddings = self.patch_embeddings.forward(pixel_values)?;
        let embeddings = match (bool_masked_pos, &self.mask_token) {
            (None, _) => embeddings,
            (Some(_), None) => candle::bail!("bool_masked_pos set without mask_token"),
            (Some(bool_masked_pos), Some(mask_tokens)) => {
                let seq_len = embeddings.dim(1)?;
                let mask_tokens = mask_tokens.broadcast_as((b_size, seq_len, self.hidden_size))?;
                let mask = bool_masked_pos
                    .unsqueeze(D::Minus1)?
                    .to_dtype(mask_tokens.dtype())?;
                // mask_tokens * mask + embeddings * (1 - mask): masked patches
                // take the mask token, the others keep their patch embedding.
                ((mask_tokens * &mask)? - (embeddings * (mask - 1.)?)?)?
            }
        };
        // Prepend the CLS token, then add the position embeddings.
        let cls_tokens = self.cls_token.broadcast_as((b_size, 1, self.hidden_size))?;
        let embeddings = Tensor::cat(&[&cls_tokens, &embeddings], 1)?;
        if interpolate_pos_encoding {
            let pos = self.interpolate_pos_encoding(&embeddings, height, width)?;
            embeddings.broadcast_add(&pos)
        } else {
            embeddings.broadcast_add(&self.position_embeddings)
        }
    }
}
/// Multi-head self-attention over the patch sequence.
#[derive(Debug, Clone)]
struct SelfAttention {
    query: Linear,
    key: Linear,
    value: Linear,
    num_attention_heads: usize,
    attention_head_size: usize,
}
impl SelfAttention {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let num_attention_heads = cfg.num_attention_heads;
        let attention_head_size = cfg.hidden_size / num_attention_heads;
        let all_head_size = num_attention_heads * attention_head_size;
        // The q/k/v projections carry a bias depending on the checkpoint.
        let mk_linear = |name: &str| {
            if cfg.qkv_bias {
                linear(cfg.hidden_size, all_head_size, vb.pp(name))
            } else {
                linear_no_bias(cfg.hidden_size, all_head_size, vb.pp(name))
            }
        };
        Ok(Self {
            query: mk_linear("query")?,
            key: mk_linear("key")?,
            value: mk_linear("value")?,
            num_attention_heads,
            attention_head_size,
        })
    }
    /// (b, seq, hidden) -> (b, heads, seq, head_dim)
    fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
        let (b_size, seq_len, _) = xs.dims3()?;
        let split_shape = (
            b_size,
            seq_len,
            self.num_attention_heads,
            self.attention_head_size,
        );
        xs.reshape(split_shape)?.permute((0, 2, 1, 3))
    }
}
impl Module for SelfAttention {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let q = self
            .transpose_for_scores(&self.query.forward(xs)?)?
            .contiguous()?;
        let k = self
            .transpose_for_scores(&self.key.forward(xs)?)?
            .contiguous()?;
        let v = self
            .transpose_for_scores(&self.value.forward(xs)?)?
            .contiguous()?;
        // Scaled dot-product attention.
        let scale = f64::sqrt(self.attention_head_size as f64);
        let scores = (q.matmul(&k.t()?)? / scale)?;
        let probs = candle_nn::ops::softmax_last_dim(&scores)?;
        // Merge the heads back: (b, heads, seq, head_dim) -> (b, seq, hidden).
        probs
            .matmul(&v)?
            .permute((0, 2, 1, 3))?
            .contiguous()?
            .flatten_from(D::Minus2)
    }
}
/// Output projection applied after self-attention. The residual connection is
/// handled in `Layer`, not here.
#[derive(Debug, Clone)]
struct SelfOutput {
    dense: Linear,
}
impl SelfOutput {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            dense: linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?,
        })
    }
}
impl Module for SelfOutput {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        self.dense.forward(xs)
    }
}
/// Self-attention followed by its output projection.
#[derive(Debug, Clone)]
struct Attention {
    attention: SelfAttention,
    output: SelfOutput,
}
impl Attention {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            attention: SelfAttention::new(cfg, vb.pp("attention"))?,
            output: SelfOutput::new(cfg, vb.pp("output"))?,
        })
    }
}
impl Module for Attention {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let attended = self.attention.forward(xs)?;
        self.output.forward(&attended)
    }
}
/// Feed-forward expansion with the configured activation.
#[derive(Debug, Clone)]
struct Intermediate {
    dense: Linear,
    intermediate_act_fn: candle_nn::Activation,
}
impl Intermediate {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            dense: linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("dense"))?,
            intermediate_act_fn: cfg.hidden_act,
        })
    }
}
impl Module for Intermediate {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let expanded = self.dense.forward(xs)?;
        self.intermediate_act_fn.forward(&expanded)
    }
}
/// Feed-forward contraction; adds the residual passed as `input_tensor`.
#[derive(Debug, Clone)]
struct Output {
    dense: Linear,
}
impl Output {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            dense: linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("dense"))?,
        })
    }
    fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
        self.dense.forward(xs)? + input_tensor
    }
}
/// One pre-norm ViT encoder block:
/// `x = x + attn(ln1(x)); x = x + mlp(ln2(x))`.
#[derive(Debug, Clone)]
struct Layer {
    attention: Attention,
    intermediate: Intermediate,
    output: Output,
    layernorm_before: LayerNorm,
    layernorm_after: LayerNorm,
}
impl Layer {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let eps = cfg.layer_norm_eps;
        Ok(Self {
            attention: Attention::new(cfg, vb.pp("attention"))?,
            intermediate: Intermediate::new(cfg, vb.pp("intermediate"))?,
            output: Output::new(cfg, vb.pp("output"))?,
            layernorm_before: layer_norm(cfg.hidden_size, eps, vb.pp("layernorm_before"))?,
            layernorm_after: layer_norm(cfg.hidden_size, eps, vb.pp("layernorm_after"))?,
        })
    }
}
impl Module for Layer {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // Attention sub-block with residual.
        let normed = self.layernorm_before.forward(xs)?;
        let xs = (self.attention.forward(&normed)? + xs)?;
        // MLP sub-block; `Output::forward` adds the residual.
        let normed = self.layernorm_after.forward(&xs)?;
        let expanded = self.intermediate.forward(&normed)?;
        self.output.forward(&expanded, &xs)
    }
}
/// Sequential stack of ViT layers.
#[derive(Debug, Clone)]
pub struct Encoder {
    layers: Vec<Layer>,
}
impl Encoder {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb = vb.pp("layer");
        let layers = (0..cfg.num_hidden_layers)
            .map(|i| Layer::new(cfg, vb.pp(i)))
            .collect::<Result<Vec<_>>>()?;
        Ok(Self { layers })
    }
}
impl Module for Encoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        self.layers
            .iter()
            .try_fold(xs.clone(), |xs, layer| layer.forward(&xs))
    }
}
/// ViT image-classification model: embeddings, encoder, final layer-norm and a
/// linear classifier applied to the CLS token.
#[derive(Debug, Clone)]
pub struct Model {
    embeddings: Embeddings,
    encoder: Encoder,
    layernorm: LayerNorm,
    // no need for pooling layer for image classification
    classifier: Linear,
}
impl Model {
    pub fn new(cfg: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> {
        // The backbone lives under the "vit" prefix; the classifier sits at
        // the root of the checkpoint.
        let vb_v = vb.pp("vit");
        Ok(Self {
            embeddings: Embeddings::new(cfg, false, vb_v.pp("embeddings"))?,
            encoder: Encoder::new(cfg, vb_v.pp("encoder"))?,
            layernorm: layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_v.pp("layernorm"))?,
            classifier: linear(cfg.hidden_size, num_labels, vb.pp("classifier"))?,
        })
    }
    /// Returns classification logits computed from the CLS token.
    pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let embedded = self.embeddings.forward(xs, None, false)?;
        let encoded = self.encoder.forward(&embedded)?;
        // Keep only the CLS token (sequence position 0).
        let cls = encoded.i((.., 0, ..))?;
        self.classifier.forward(&self.layernorm.forward(&cls)?)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/qwen2_moe.rs | candle-transformers/src/models/qwen2_moe.rs | //! Qwen2 model implementation with Mixture of Experts support.
//!
//! Qwen2 is a large language model using sparse Mixture of Experts (MoE).
//! This implementation provides support for sparsely activated MoE layers.
//!
//! Key characteristics:
//! - Mixture of Experts architecture
//! - Sparse expert activation
//! - Shared expert routing mechanism
//! - Grouped query attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//!
//! References:
//! - [Qwen2 Paper](https://arxiv.org/abs/2401.08985)
//! - [Model Card](https://huggingface.co/Qwen/Qwen2-7B-beta)
//!
use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
/// Qwen2-MoE hyper-parameters, deserializable from an upstream `config.json`.
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub num_key_value_heads: usize,
    pub max_position_embeddings: usize,
    pub sliding_window: usize,
    pub max_window_layers: usize,
    pub tie_word_embeddings: bool,
    pub rope_theta: f64,
    pub rms_norm_eps: f64,
    pub use_sliding_window: bool,
    pub hidden_act: Activation,
    // Every `decoder_sparse_step`-th layer uses the sparse MoE block.
    pub decoder_sparse_step: usize,
    pub moe_intermediate_size: usize,
    pub shared_expert_intermediate_size: usize,
    // Number of experts routed per token (top-k).
    pub num_experts_per_tok: usize,
    pub num_experts: usize,
    // Renormalize the top-k routing weights so they sum to one.
    pub norm_topk_prob: bool,
}
/// Precomputed RoPE sin/cos tables of shape (max_position_embeddings, dim/2),
/// where `dim` is the per-head dimension.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
impl RotaryEmbedding {
    fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let dim = cfg.hidden_size / cfg.num_attention_heads;
        let max_seq_len = cfg.max_position_embeddings;
        // inv_freq[i] = 1 / theta^(2i/dim), one entry per pair of channels.
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        // Outer product of positions and inverse frequencies.
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }
    /// Apply RoPE to q and k, offset by `seqlen_offset` positions (used when
    /// decoding with a KV cache).
    fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        seqlen_offset: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
        let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
        let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
        let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
        Ok((q_embed, k_embed))
    }
}
/// Gated feed-forward block: `down(act(gate(x)) * up(x))`.
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
    gate_proj: Linear,
    up_proj: Linear,
    down_proj: Linear,
    act_fn: Activation,
}
impl MLP {
    fn new(intermediate_sz: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let h = cfg.hidden_size;
        Ok(Self {
            gate_proj: linear_no_bias(h, intermediate_sz, vb.pp("gate_proj"))?,
            up_proj: linear_no_bias(h, intermediate_sz, vb.pp("up_proj"))?,
            down_proj: linear_no_bias(intermediate_sz, h, vb.pp("down_proj"))?,
            act_fn: cfg.hidden_act,
        })
    }
}
impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let gated = self.act_fn.forward(&self.gate_proj.forward(xs)?)?;
        let up = self.up_proj.forward(xs)?;
        self.down_proj.forward(&(gated * up)?)
    }
}
/// Grouped-query attention with RoPE and a per-layer KV cache.
#[derive(Debug, Clone)]
struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_heads: usize,
    num_kv_heads: usize,
    // num_heads / num_kv_heads: how many query heads share each kv head.
    num_kv_groups: usize,
    head_dim: usize,
    hidden_size: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    // (keys, values) accumulated across decoding steps, concatenated on dim 2.
    kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        let num_kv_groups = num_heads / num_kv_heads;
        let head_dim = hidden_sz / num_heads;
        // q/k/v carry biases; the output projection does not.
        let q_proj = linear(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
        let k_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
        let v_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
        let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_heads,
            num_kv_heads,
            num_kv_groups,
            head_dim,
            hidden_size: hidden_sz,
            rotary_emb,
            kv_cache: None,
        })
    }
    /// `attention_mask` is an additive mask broadcast onto the attention
    /// scores; `seqlen_offset` is the number of positions already cached.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let (b_sz, q_len, _) = xs.dims3()?;
        let query_states = self.q_proj.forward(xs)?;
        let key_states = self.k_proj.forward(xs)?;
        let value_states = self.v_proj.forward(xs)?;
        // Split into heads: (b, q_len, h*d) -> (b, h, q_len, d).
        let query_states = query_states
            .reshape((b_sz, q_len, self.num_heads, self.head_dim))?
            .transpose(1, 2)?;
        let key_states = key_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let value_states = value_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let (query_states, key_states) =
            self.rotary_emb
                .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
        // Extend the KV cache with this step's keys/values.
        let (key_states, value_states) = match &self.kv_cache {
            None => (key_states, value_states),
            Some((prev_k, prev_v)) => {
                let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
                let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
                (key_states, value_states)
            }
        };
        self.kv_cache = Some((key_states.clone(), value_states.clone()));
        // Expand kv heads so each group of query heads sees its kv head.
        let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
        let value_states =
            crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
        let attn_output = {
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
            let attn_weights = match attention_mask {
                None => attn_weights,
                Some(mask) => attn_weights.broadcast_add(mask)?,
            };
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&value_states)?
        };
        // Merge heads back and project to the hidden size.
        attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.hidden_size))?
            .apply(&self.o_proj)
    }
    fn clear_kv_cache(&mut self) {
        self.kv_cache = None
    }
}
// https://github.com/huggingface/transformers/blob/536ea2aca234fb48c5c69769431d643b0d93b233/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py#L800
/// Sparse MoE block: a router picks `num_experts_per_tok` experts per token,
/// and a gated shared expert contribution is added on top.
#[derive(Debug, Clone)]
struct SparseMoeBlock {
    gate: Linear,
    experts: Vec<MLP>,
    shared_expert: MLP,
    shared_expert_gate: Linear,
    norm_topk_prob: bool,
    num_experts_per_tok: usize,
}
impl SparseMoeBlock {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let gate = linear_no_bias(cfg.hidden_size, cfg.num_experts, vb.pp("gate"))?;
        let mut experts = Vec::with_capacity(cfg.num_experts);
        let vb_e = vb.pp("experts");
        for idx in 0..cfg.num_experts {
            let expert = MLP::new(cfg.moe_intermediate_size, cfg, vb_e.pp(idx))?;
            experts.push(expert)
        }
        let shared_expert = MLP::new(
            cfg.shared_expert_intermediate_size,
            cfg,
            vb.pp("shared_expert"),
        )?;
        let shared_expert_gate = linear_no_bias(cfg.hidden_size, 1, vb.pp("shared_expert_gate"))?;
        Ok(Self {
            gate,
            experts,
            shared_expert,
            shared_expert_gate,
            norm_topk_prob: cfg.norm_topk_prob,
            num_experts_per_tok: cfg.num_experts_per_tok,
        })
    }
}
impl Module for SparseMoeBlock {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b_size, seq_len, hidden_dim) = xs.dims3()?;
        // Flatten to (b*seq, hidden) so tokens can be routed independently.
        let xs = xs.reshape(((), hidden_dim))?;
        let router_logits = xs.apply(&self.gate)?;
        let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?;
        // In order to extract topk, we extract the data from the tensor and manipulate it
        // directly. Maybe we will want to use some custom ops instead at some point.
        let experts_per_tok = routing_weights
            .arg_sort_last_dim(false)?
            .narrow(D::Minus1, 0, self.num_experts_per_tok)?
            .contiguous()?;
        let routing_weights = routing_weights.gather(&experts_per_tok, D::Minus1)?;
        // routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
        // top_x contains the row indexes to evaluate for each expert.
        let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?;
        let experts_per_tok = experts_per_tok.to_vec2::<u32>()?;
        // Invert the routing: for each expert, collect the token rows it must
        // process (`top_x`) and the corresponding routing weights.
        let mut top_x = vec![vec![]; self.experts.len()];
        let mut selected_experts = vec![vec![]; self.experts.len()];
        for (row_idx, (rw, expert_idxs)) in routing_weights
            .iter()
            .zip(experts_per_tok.iter())
            .enumerate()
        {
            let sum_rw = rw.iter().sum::<f32>();
            for (&rw, &expert_idx) in rw.iter().zip(expert_idxs.iter()) {
                top_x[expert_idx as usize].push(row_idx as u32);
                // Optionally renormalize the top-k weights to sum to one.
                let rw = if self.norm_topk_prob { rw / sum_rw } else { rw };
                selected_experts[expert_idx as usize].push(rw)
            }
        }
        let mut ys = xs.zeros_like()?;
        for (expert_idx, expert_layer) in self.experts.iter().enumerate() {
            let top_x = &top_x[expert_idx];
            if top_x.is_empty() {
                continue;
            }
            let top_x = Tensor::new(top_x.as_slice(), xs.device())?;
            let selected_experts =
                Tensor::new(selected_experts[expert_idx].as_slice(), xs.device())?
                    .reshape(((), 1))?
                    .to_dtype(xs.dtype())?;
            // Index the correct hidden states and compute the expert hidden state for
            // the current expert. We need to make sure to multiply the output hidden
            // states by `routing_weights` on the corresponding tokens (top-1 and top-2)
            let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?;
            // current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None])
            let current_hidden_states = expert_layer.forward(&current_state)?;
            let current_hidden_states = current_hidden_states.broadcast_mul(&selected_experts)?;
            ys = ys.index_add(&top_x, &current_hidden_states, 0)?;
        }
        // Shared expert, scaled by a learned sigmoid gate.
        let shared_expert_output = xs.apply(&self.shared_expert)?;
        let shared_expert_output = shared_expert_output.broadcast_mul(&candle_nn::ops::sigmoid(
            &xs.apply(&self.shared_expert_gate)?,
        )?)?;
        let ys = (ys + shared_expert_output)?;
        let ys = ys.reshape((b_size, seq_len, hidden_dim))?;
        Ok(ys)
    }
}
/// A decoder MLP slot that is either a dense MLP or a sparse MoE block.
#[derive(Debug, Clone)]
enum MlpOrMoeBlock {
    Mlp(MLP),
    MoeBlock(SparseMoeBlock),
}
impl Module for MlpOrMoeBlock {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        match self {
            Self::Mlp(mlp) => mlp.forward(xs),
            Self::MoeBlock(moe) => moe.forward(xs),
        }
    }
}
/// One decoder layer: pre-norm self-attention and pre-norm MLP/MoE, each
/// wrapped in a residual connection.
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Attention,
    mlp: MlpOrMoeBlock,
    input_layernorm: RmsNorm,
    post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
    fn new(
        layer_idx: usize,
        rotary_emb: Arc<RotaryEmbedding>,
        cfg: &Config,
        vb: VarBuilder,
    ) -> Result<Self> {
        // Every `decoder_sparse_step`-th layer (1-indexed) gets the sparse MoE
        // block, the others a dense MLP.
        let mlp = if cfg.num_experts > 0 && (layer_idx + 1).is_multiple_of(cfg.decoder_sparse_step)
        {
            MlpOrMoeBlock::MoeBlock(SparseMoeBlock::new(cfg, vb.pp("mlp"))?)
        } else {
            MlpOrMoeBlock::Mlp(MLP::new(cfg.intermediate_size, cfg, vb.pp("mlp"))?)
        };
        Ok(Self {
            self_attn: Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?,
            mlp,
            input_layernorm: RmsNorm::new(
                cfg.hidden_size,
                cfg.rms_norm_eps,
                vb.pp("input_layernorm"),
            )?,
            post_attention_layernorm: RmsNorm::new(
                cfg.hidden_size,
                cfg.rms_norm_eps,
                vb.pp("post_attention_layernorm"),
            )?,
        })
    }
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        // Attention sub-block with residual.
        let normed = self.input_layernorm.forward(xs)?;
        let attn = self.self_attn.forward(&normed, attention_mask, seqlen_offset)?;
        let xs = (attn + xs)?;
        // MLP/MoE sub-block with residual.
        let normed = self.post_attention_layernorm.forward(&xs)?;
        self.mlp.forward(&normed)? + xs
    }
    fn clear_kv_cache(&mut self) {
        self.self_attn.clear_kv_cache()
    }
}
/// Full Qwen2-MoE causal language model with per-layer KV caches.
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: candle_nn::Embedding,
    layers: Vec<DecoderLayer>,
    norm: RmsNorm,
    lm_head: Linear,
    sliding_window: usize,
    device: Device,
    dtype: DType,
}
impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = vb.pp("model");
        let embed_tokens =
            candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
        // The RoPE tables are shared by every layer.
        let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb_m.pp("layers");
        for layer_idx in 0..cfg.num_hidden_layers {
            let layer = DecoderLayer::new(layer_idx, rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
            layers.push(layer)
        }
        let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
        let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
        Ok(Self {
            embed_tokens,
            layers,
            norm,
            lm_head,
            sliding_window: cfg.sliding_window,
            device: vb.device().clone(),
            dtype: vb.dtype(),
        })
    }
    /// Build the additive causal mask: 0 where position j is visible from i,
    /// -inf where it is in the future or beyond the sliding window. Columns
    /// for already-cached positions (`seqlen_offset`) are all-visible zeros.
    fn prepare_decoder_attention_mask(
        &self,
        b_size: usize,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        // Sliding window mask?
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| {
                (0..tgt_len).map(move |j| {
                    if i < j || j + self.sliding_window < i {
                        f32::NEG_INFINITY
                    } else {
                        0.
                    }
                })
            })
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(self.dtype)
    }
    /// Returns the logits for the last position only, shape (b, 1, vocab).
    pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (b_size, seq_len) = input_ids.dims2()?;
        // A single-token decode step needs no mask.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
            Some(mask)
        };
        let mut xs = self.embed_tokens.forward(input_ids)?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
        }
        xs.narrow(1, seq_len - 1, 1)?
            .apply(&self.norm)?
            .apply(&self.lm_head)
    }
    pub fn clear_kv_cache(&mut self) {
        for layer in self.layers.iter_mut() {
            layer.clear_kv_cache()
        }
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_qwen3_moe.rs | candle-transformers/src/models/quantized_qwen3_moe.rs | use super::quantized_qwen3::{Gguf, RotaryEmbedding};
use super::with_tracing::QMatMul;
use crate::fused_moe::{FusedMoeGGUF, MoeCfg};
use crate::quantized_nn::RmsNorm;
use crate::utils::repeat_kv;
use candle::quantized::gguf_file;
use candle::{DType, Device, Result, Tensor};
use candle_nn::kv_cache::ConcatKvCache;
use candle_nn::Linear;
use candle_nn::{Embedding, Module};
use std::sync::Arc;
/// Dense gated feed-forward used on the non-MoE path:
/// `w2(silu(w1(x)) * w3(x))`.
#[derive(Debug, Clone)]
struct Mlp {
    feed_forward_w1: QMatMul,
    feed_forward_w2: QMatMul,
    feed_forward_w3: QMatMul,
}
impl Module for Mlp {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let gate = candle_nn::ops::silu(&self.feed_forward_w1.forward(xs)?)?;
        let up = self.feed_forward_w3.forward(xs)?;
        self.feed_forward_w2.forward(&(gate * up)?)
    }
}
/// Either a fused MoE block or a dense MLP for a decoder layer.
enum MoeOrMlp {
    FusedMoe(FusedMoeGGUF),
    Mlp(Mlp),
}
impl MoeOrMlp {
    fn forward(&self, xs: &Tensor, is_prefill: bool) -> Result<Tensor> {
        match self {
            Self::FusedMoe(moe) => moe.forward(xs, is_prefill),
            Self::Mlp(mlp) => mlp.forward(xs),
        }
    }
}
/// GGUF-quantized attention layer with optional q/k/v biases and per-head
/// q/k RMSNorm (qwen3 style), backed by a concatenation KV cache.
pub struct QuantizedAttention {
    attention_wq: QMatMul,
    attention_wk: QMatMul,
    attention_wv: QMatMul,
    // Optional dequantized biases (f32) for the q/k/v projections.
    attention_bq: Option<Tensor>,
    attention_bk: Option<Tensor>,
    attention_bv: Option<Tensor>,
    attention_wo: QMatMul,
    q_norm: Option<RmsNorm>,
    k_norm: Option<RmsNorm>,
    n_head: usize,
    n_kv_head: usize,
    head_dim: usize,
    // n_head / n_kv_head: query heads sharing each kv head.
    num_kv_groups: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    // Compute dtype used for the attention matmuls.
    dtype: DType,
    kv_cache: ConcatKvCache,
}
impl QuantizedAttention {
    /// Loads the attention weights of layer `prefix` from the GGUF file.
    ///
    /// Q/K/V/O projections stay quantized (`QMatMul`); the optional biases
    /// are dequantized to f32. The per-head Q/K RMSNorm weights are required
    /// (qwen3-style checkpoints), so loading fails if they are absent.
    #[allow(clippy::too_many_arguments)]
    pub fn new<R: std::io::Seek + std::io::Read>(
        gg: &mut Gguf<R>,
        prefix: &str,
        dtype: DType,
        num_heads: usize,
        num_kv_heads: usize,
        head_dim: usize,
        rms_norm_eps: f64,
        device: &Device,
        rotary_emb: Arc<RotaryEmbedding>,
    ) -> Result<Self> {
        // GQA: each KV head is shared by `num_kv_groups` query heads.
        let num_kv_groups = num_heads / num_kv_heads;
        let attention_wq = gg.qmatmul(&format!("{prefix}.attn_q.weight"))?;
        let attention_wk = gg.qmatmul(&format!("{prefix}.attn_k.weight"))?;
        let attention_wv = gg.qmatmul(&format!("{prefix}.attn_v.weight"))?;
        // Projection biases are optional in the GGUF; dequantize when present.
        let attention_bq = match gg.tensor(&format!("{prefix}.attn_q.bias")) {
            Ok(t) => Some(t.dequantize(device)?.to_dtype(DType::F32)?),
            Err(_) => None,
        };
        let attention_bk = match gg.tensor(&format!("{prefix}.attn_k.bias")) {
            Ok(t) => Some(t.dequantize(device)?.to_dtype(DType::F32)?),
            Err(_) => None,
        };
        let attention_bv = match gg.tensor(&format!("{prefix}.attn_v.bias")) {
            Ok(t) => Some(t.dequantize(device)?.to_dtype(DType::F32)?),
            Err(_) => None,
        };
        let attention_wo = gg.qmatmul(&format!("{prefix}.attn_output.weight"))?;
        // Per-head Q/K norms are mandatory for this loader.
        let q_norm = Some(gg.rms_norm(&format!("{prefix}.attn_q_norm.weight"), rms_norm_eps)?);
        let k_norm = Some(gg.rms_norm(&format!("{prefix}.attn_k_norm.weight"), rms_norm_eps)?);
        // The cache concatenates along dim 2, the sequence axis of (B, H, L, D).
        let kv_cache = ConcatKvCache::new(2);
        Ok(QuantizedAttention {
            attention_wq,
            attention_wk,
            attention_wv,
            attention_bq,
            attention_bk,
            attention_bv,
            attention_wo,
            q_norm,
            k_norm,
            n_head: num_heads,
            n_kv_head: num_kv_heads,
            head_dim,
            num_kv_groups,
            rotary_emb,
            dtype,
            kv_cache,
        })
    }
    /// Attention forward pass.
    ///
    /// `x` has shape `(b, seq_len, hidden)`, `mask` is an optional additive
    /// attention mask, and `input_pos` is the KV-cache position of the first
    /// token of `x`. Returns a tensor of the same shape and dtype as `x`.
    pub fn forward(
        &mut self,
        x: &Tensor,
        mask: Option<&Tensor>,
        input_pos: usize,
    ) -> Result<Tensor> {
        let (b, seq_len, _) = x.dims3()?;
        let in_dtype = x.dtype();
        let q = self.attention_wq.forward(x)?;
        let k = self.attention_wk.forward(x)?;
        let v = self.attention_wv.forward(x)?;
        let q = match &self.attention_bq {
            Some(bias) => q.broadcast_add(bias)?,
            None => q,
        };
        let k = match &self.attention_bk {
            Some(bias) => k.broadcast_add(bias)?,
            None => k,
        };
        let v = match &self.attention_bv {
            Some(bias) => v.broadcast_add(bias)?,
            None => v,
        };
        // (B, L, H*D) -> (B, H, L, D). Using `b` instead of a hard-coded 1
        // keeps this correct for batched inputs (identical when b == 1).
        let q = q
            .reshape((b, seq_len, self.n_head, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let k = k
            .reshape((b, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let v = v
            .reshape((b, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let (q, k) = if let (Some(q_norm), Some(k_norm)) = (&self.q_norm, &self.k_norm) {
            // Per-head RMSNorm (qwen3): each (batch, head, position) row is
            // normalized over the head dim, so flatten (B, H, L, D) to
            // (B*H*L, D) first. The norm weights are stored in f32 in qwen3
            // GGUFs.
            let q = q_norm
                .forward(&q.flatten(0, 2)?)?
                .reshape((b, self.n_head, seq_len, self.head_dim))?;
            let k = k_norm
                .forward(&k.flatten(0, 2)?)?
                .reshape((b, self.n_kv_head, seq_len, self.head_dim))?;
            (q, k)
        } else {
            (q, k)
        };
        // Switch to the compute dtype before RoPE and the attention matmuls.
        let (q, k, v) = (
            q.to_dtype(self.dtype)?,
            k.to_dtype(self.dtype)?,
            v.to_dtype(self.dtype)?,
        );
        let (q, k) = self.rotary_emb.apply(&q, &k, input_pos)?;
        let (k, v) = self.kv_cache.append(&k, &v)?;
        // Replicate the KV heads so they line up with the query heads (GQA).
        let k = repeat_kv(k, self.num_kv_groups)?.contiguous()?;
        let v = repeat_kv(v, self.num_kv_groups)?.contiguous()?;
        let scale = 1.0 / (self.head_dim as f64).sqrt();
        let mut scores = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
        if let Some(m) = mask {
            // The mask may be stored in a different dtype than the scores.
            let m = if m.dtype() != scores.dtype() {
                m.to_dtype(scores.dtype())?
            } else {
                m.clone()
            };
            scores = scores.broadcast_add(&m)?;
        }
        let probs = candle_nn::ops::softmax_last_dim(&scores)?;
        let ctx = probs.matmul(&v)?; // (B, H, L, D)
        let reshaped_ctx =
            ctx.transpose(1, 2)?
                .reshape((b, seq_len, self.n_head * self.head_dim))?;
        self.attention_wo.forward(&reshaped_ctx.to_dtype(in_dtype)?)
    }
}
/// One decoder block: attention and feed-forward, each with its own pre-norm.
struct LayerWeights {
    self_attn: QuantizedAttention,
    // RMSNorm applied before the attention sub-layer.
    attention_norm: RmsNorm,
    mlp: MoeOrMlp,
    // RMSNorm applied before the feed-forward sub-layer.
    ffn_norm: RmsNorm,
}
impl LayerWeights {
    /// Runs the self-attention sub-layer; norms and residual connections are
    /// handled by the caller.
    fn forward_attn(&mut self, x: &Tensor, mask: Option<&Tensor>, offset: usize) -> Result<Tensor> {
        let attn = &mut self.self_attn;
        attn.forward(x, mask, offset)
    }
}
/// Qwen mixture-of-experts model loaded from a GGUF file.
pub struct GGUFQWenMoE {
    tok_embeddings: Embedding,
    layers: Vec<LayerWeights>,
    // Final RMSNorm before the output projection.
    norm: RmsNorm,
    // LM head (falls back to the tied token embeddings when absent).
    output: QMatMul,
    // Compute dtype used for attention/FFN math.
    dtype: DType,
    device: Device,
}
impl GGUFQWenMoE {
    /// Loads a Qwen-MoE model from a GGUF file.
    ///
    /// Dimensions and MoE parameters are read from the GGUF metadata under
    /// the `general.architecture` prefix; tensors are fetched through `Gguf`.
    pub fn from_gguf<R: std::io::Seek + std::io::Read>(
        ct: gguf_file::Content,
        reader: &mut R,
        device: &Device,
        dtype: DType,
    ) -> Result<Self> {
        let mut gg = Gguf::new(ct, reader, device.clone());
        // Required-metadata helper. All `md_get` uses must come before the
        // first mutable use of `gg` (tensor loading) since the closure
        // borrows `gg` immutably.
        let md_get = |s: &str| match gg.metadata().get(s) {
            None => candle::bail!("cannot find {s} in metadata"),
            Some(v) => Ok(v),
        };
        let arch = md_get("general.architecture")?.to_string()?;
        let head_count =
            md_get(format!("{arch}.attention.head_count").as_str())?.to_u32()? as usize;
        let head_count_kv =
            md_get(format!("{arch}.attention.head_count_kv").as_str())?.to_u32()? as usize;
        let embedding_length =
            md_get(format!("{arch}.embedding_length").as_str())?.to_u32()? as usize;
        // `key_length` is optional; fall back to the usual hidden/heads split.
        let head_dim = match md_get(format!("{arch}.attention.key_length").as_str()) {
            Ok(v) => v.to_u32()? as usize,
            Err(_) => embedding_length / head_count,
        };
        let context_length = md_get(format!("{arch}.context_length").as_str())?.to_u32()? as usize;
        let block_count = md_get(format!("{arch}.block_count").as_str())?.to_u32()? as usize;
        let rms_norm_eps =
            md_get(format!("{arch}.attention.layer_norm_rms_epsilon").as_str())?.to_f32()? as f64;
        let rope_freq_base = md_get(format!("{arch}.rope.freq_base").as_str())
            .and_then(|m| m.to_f32())
            .unwrap_or(10000f32);
        // A non-zero shared-expert width marks checkpoints that do not
        // renormalize the top-k routing weights (see `norm_topk_prob` below).
        let shared_expert_intermediate_size =
            match md_get(format!("{arch}.expert_shared_feed_forward_length").as_str()) {
                Ok(v) => match v.to_u32()? {
                    0 => None,
                    n => Some(n as usize),
                },
                Err(_) => None,
            };
        let moe_cfg = MoeCfg {
            moe_intermediate_size: md_get(format!("{arch}.expert_feed_forward_length").as_str())?
                .to_u32()? as usize,
            num_experts: md_get(format!("{arch}.expert_count").as_str())?.to_u32()? as usize,
            norm_topk_prob: shared_expert_intermediate_size.is_none(),
            num_experts_per_tok: md_get(format!("{arch}.expert_used_count").as_str())?.to_u32()?
                as usize,
            // NOTE(review): this stores the head dim rather than the embedding
            // width; it does not appear to be read below — confirm against the
            // other users of `MoeCfg`.
            hidden_size: head_dim,
            act: candle_nn::Activation::Silu,
            decoder_sparse_step: None,
        };
        let tok_embeddings = gg.tensor("token_embd.weight")?;
        let tok_embeddings = tok_embeddings.dequantize(device)?;
        let norm = gg.rms_norm("output_norm.weight", rms_norm_eps)?;
        // Fall back to tied word embeddings when no output head is stored.
        let output = match gg.qmatmul("output.weight") {
            Ok(v) => v,
            _ => gg.qmatmul("token_embd.weight")?,
        };
        let rotary_emb = Arc::new(RotaryEmbedding::new(
            dtype,
            head_dim,
            context_length,
            rope_freq_base as f64,
            device,
        )?);
        let mut layers = Vec::with_capacity(block_count);
        for layer_idx in 0..block_count {
            let prefix = format!("blk.{layer_idx}");
            // A layer is sparse (MoE) every `decoder_sparse_step` blocks; the
            // default step of 1 makes every block a MoE block.
            let mlp = if moe_cfg.num_experts > 0
                && (layer_idx + 1) % moe_cfg.decoder_sparse_step.unwrap_or(1) == 0
            {
                // Router weights are kept dequantized in f32 for the routing
                // softmax/top-k.
                let gate_ws = gg
                    .tensor(&format!("{prefix}.ffn_gate_inp.weight"))?
                    .dequantize(device)?
                    .to_dtype(DType::F32)?;
                let gate = Linear::new(gate_ws, None);
                let gate_experts = Arc::new(gg.tensor(&format!("{prefix}.ffn_gate_exps.weight"))?);
                let up_experts = Arc::new(gg.tensor(&format!("{prefix}.ffn_up_exps.weight"))?);
                let down_experts = Arc::new(gg.tensor(&format!("{prefix}.ffn_down_exps.weight"))?);
                MoeOrMlp::FusedMoe(FusedMoeGGUF {
                    gate,
                    gate_experts,
                    up_experts,
                    down_experts,
                    act: candle_nn::Activation::Silu,
                    norm_topk_prob: moe_cfg.norm_topk_prob,
                    num_experts_per_tok: moe_cfg.num_experts_per_tok,
                    dtype,
                })
            } else {
                // Dense fallback: a plain gated MLP.
                MoeOrMlp::Mlp(Mlp {
                    feed_forward_w1: gg.qmatmul(&format!("{prefix}.ffn_gate.weight"))?,
                    feed_forward_w2: gg.qmatmul(&format!("{prefix}.ffn_down.weight"))?,
                    feed_forward_w3: gg.qmatmul(&format!("{prefix}.ffn_up.weight"))?,
                })
            };
            let attention_norm =
                gg.rms_norm(&format!("{prefix}.attn_norm.weight"), rms_norm_eps)?;
            let ffn_norm = gg.rms_norm(&format!("{prefix}.ffn_norm.weight"), rms_norm_eps)?;
            let self_attn = QuantizedAttention::new(
                &mut gg,
                &prefix,
                dtype,
                head_count,
                head_count_kv,
                head_dim,
                rms_norm_eps,
                device,
                rotary_emb.clone(),
            )?;
            layers.push(LayerWeights {
                self_attn,
                attention_norm,
                mlp,
                ffn_norm,
            });
        }
        Ok(Self {
            tok_embeddings: Embedding::new(tok_embeddings, embedding_length),
            layers,
            norm,
            output,
            dtype,
            device: device.clone(),
        })
    }
    /// Builds an additive causal mask of shape `(b, 1, tgt, tgt + offset)`:
    /// 0 where attention is allowed, -inf where it is masked, with an
    /// optional sliding-window limit `sw`.
    fn causal_mask(
        &self,
        b: usize,
        tgt: usize,
        offset: usize,
        sw: Option<usize>,
    ) -> Result<Tensor> {
        let minf = f32::NEG_INFINITY;
        let mask: Vec<_> = (0..tgt)
            .flat_map(|i| {
                (0..(tgt + offset)).map(move |j| {
                    // No attending to future positions...
                    let past_ok = j <= i + offset;
                    // ...nor beyond the sliding window, when one is set.
                    let sw_ok = match sw {
                        Some(w) => (i + offset) as i64 - j as i64 <= w as i64,
                        None => true,
                    };
                    if past_ok && sw_ok {
                        0.
                    } else {
                        minf
                    }
                })
            })
            .collect();
        // The pattern is identical for every batch element, so build it once
        // and broadcast over the batch dimension. (Passing `(b, 1, ...)`
        // straight to `from_slice` only worked for b == 1 since the buffer
        // holds tgt * (tgt + offset) elements.)
        Tensor::from_slice(&mask, (tgt, tgt + offset), &self.device)?
            .to_dtype(self.dtype)?
            .reshape((1, 1, tgt, tgt + offset))?
            .expand((b, 1, tgt, tgt + offset))
    }
    /// Runs the model over token ids `x` of shape `(b, l)` at KV-cache
    /// position `offset` and returns f32 logits for the last position only.
    pub fn forward(&mut self, x: &Tensor, offset: usize) -> Result<Tensor> {
        let (b, l) = x.dims2()?;
        let mut xs = self.tok_embeddings.forward(x)?;
        // Single-token decode steps attend to the whole cache: no mask.
        let causal_mask = if l == 1 {
            None
        } else {
            Some(self.causal_mask(b, l, offset, None)?)
        };
        for layer in self.layers.iter_mut() {
            // Attention sub-layer: pre-norm, attention, residual.
            let residual = xs;
            let h = layer.attention_norm.forward(&residual)?;
            let h = layer.forward_attn(&h, causal_mask.as_ref(), offset)?;
            let h = (h + &residual)?;
            // Feed-forward sub-layer (MoE or MLP): pre-norm, ffn, residual.
            let residual = h;
            let h = layer.ffn_norm.forward(&residual)?;
            let h = layer.mlp.forward(&h, causal_mask.is_some())?;
            xs = (h + residual)?;
        }
        // Only the last position is needed for sampling the next token.
        let xs = xs.narrow(1, l - 1, 1)?;
        let xs = self.norm.forward(&xs)?;
        self.output.forward(&xs)?.to_dtype(DType::F32)?.squeeze(1)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/glm4_new.rs | candle-transformers/src/models/glm4_new.rs | use crate::models::glm4::EosTokenId;
use crate::{
models::with_tracing::{linear_b, linear_no_bias, Linear, RmsNorm},
utils::repeat_kv,
};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{kv_cache::KvCache, Activation, VarBuilder};
use std::sync::Arc;
/// GLM-4 model configuration, deserialized from the checkpoint's
/// `config.json`.
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    // Per-head dim; defaults to hidden_size / num_attention_heads when None.
    pub head_dim: Option<usize>,
    // Fraction of each head that receives rotary embeddings (partial RoPE).
    pub partial_rotary_factor: Option<f32>,
    // Whether Q/K/V projections carry a bias (the O projection never does).
    pub attention_bias: Option<bool>,
    pub num_key_value_heads: usize,
    pub max_position_embeddings: usize,
    // NOTE(review): declared but not read by this implementation — confirm.
    pub sliding_window: Option<usize>,
    pub tie_word_embeddings: bool,
    pub rope_theta: f64,
    pub rms_norm_eps: f64,
    pub hidden_act: Activation,
    pub eos_token_id: Option<EosTokenId>,
}
/// Precomputed rotary-embedding tables with partial-RoPE support: only the
/// first `rotary_dim` channels of each head get rotated.
#[derive(Debug, Clone)]
pub(crate) struct RotaryEmbedding {
    // Sin/cos tables of shape (max_position_embeddings, rotary_dim / 2).
    sin: Tensor,
    cos: Tensor,
    rotary_dim: usize,
}
impl RotaryEmbedding {
    /// Precomputes the sin/cos tables for rotary position embeddings.
    ///
    /// When `partial_rotary_factor` is set, only the first `rotary_dim`
    /// channels of each head are rotated and the remainder passes through
    /// unchanged.
    pub(crate) fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let dim = cfg
            .head_dim
            .unwrap_or(cfg.hidden_size / cfg.num_attention_heads);
        // Number of rotated channels; defaults to the full head dim.
        let rotary_dim = cfg
            .partial_rotary_factor
            .map_or(dim, |f| (f * dim as f32) as usize);
        let max_seq_len = cfg.max_position_embeddings;
        let inv_freq: Vec<_> = (0..rotary_dim)
            .step_by(2)
            .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / rotary_dim as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        // Outer product: one row of angles per position.
        let freqs = t.matmul(&inv_freq)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
            rotary_dim,
        })
    }
    /// Applies rotary embeddings to `xs` of shape `(B, H, L, D)` starting at
    /// position `offset`, rotating only the first `rotary_dim` channels.
    pub(crate) fn apply(&self, xs: &Tensor, offset: usize) -> Result<Tensor> {
        let (_b, _h, seq_len, dim) = xs.dims4()?;
        let (s, e) = (offset, offset + seq_len);
        let cos = self.cos.i((s..e, ..))?.contiguous()?;
        let sin = self.sin.i((s..e, ..))?.contiguous()?;
        // Split the head dim into rotated and pass-through parts. `narrow`
        // keeps the batch dimension intact (the previous version indexed
        // batch 0 and only worked for batch size 1).
        let xs_rot = xs.narrow(D::Minus1, 0, self.rotary_dim)?.contiguous()?;
        let xs_pass = xs.narrow(D::Minus1, self.rotary_dim, dim - self.rotary_dim)?;
        // Propagate errors instead of panicking inside a Result-returning fn.
        let xs_rot = candle_nn::rotary_emb::rope_i(&xs_rot, &cos, &sin)?;
        Tensor::cat(&[&xs_rot, &xs_pass], D::Minus1)?.contiguous()
    }
}
/// Gated MLP with a fused gate/up projection (SwiGLU-style).
#[derive(Debug, Clone)]
pub(crate) struct Mlp {
    // Projects to 2 * intermediate_size: first half gate, second half up.
    gate_up_proj: Linear,
    down_proj: Linear,
    act_fn: Activation,
}
impl Mlp {
    /// Builds the fused gate/up projection and the down projection.
    pub(crate) fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden = cfg.hidden_size;
        let inter = cfg.intermediate_size;
        // The gate and up matrices are stored fused as one weight.
        let gate_up_proj = linear_no_bias(hidden, inter * 2, vb.pp("gate_up_proj"))?;
        let down_proj = linear_no_bias(inter, hidden, vb.pp("down_proj"))?;
        Ok(Self {
            gate_up_proj,
            down_proj,
            act_fn: cfg.hidden_act,
        })
    }
}
impl Module for Mlp {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let w = self.gate_up_proj.forward(x)?;
let dim = w.dims().len() - 1;
let gate = w.narrow(dim, 0, w.dim(dim)? / 2)?.contiguous()?;
let gate = gate.apply(&self.act_fn)?;
let up_states = w
.narrow(dim, w.dim(dim)? / 2, w.dim(dim)? / 2)?
.contiguous()?;
self.down_proj.forward(&(gate * up_states)?)
}
}
/// Multi-head attention with grouped-query KV heads, partial RoPE and a
/// growable KV cache.
#[derive(Debug, Clone)]
pub(crate) struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_heads: usize,
    num_kv_heads: usize,
    // num_heads / num_kv_heads: GQA replication factor.
    num_kv_groups: usize,
    head_dim: usize,
    // num_heads * head_dim; recomputed because cfg.hidden_size may differ.
    hidden_size: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    kv_cache: KvCache,
}
impl Attention {
    /// Builds the Q/K/V/O projections and the KV cache for one layer.
    pub(crate) fn new(
        cfg: &Config,
        rotary_emb: Arc<RotaryEmbedding>,
        vb: VarBuilder,
    ) -> Result<Self> {
        let head_dim = cfg
            .head_dim
            .unwrap_or(cfg.hidden_size / cfg.num_attention_heads);
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        // GQA: each KV head is shared by `num_kv_groups` query heads.
        let num_kv_groups = num_heads / num_kv_heads;
        // Q/K/V biases are controlled by the config; O never has one.
        let q_proj = linear_b(
            cfg.hidden_size,
            num_heads * head_dim,
            cfg.attention_bias.unwrap_or(false),
            vb.pp("q_proj"),
        )?;
        let k_proj = linear_b(
            cfg.hidden_size,
            num_kv_heads * head_dim,
            cfg.attention_bias.unwrap_or(false),
            vb.pp("k_proj"),
        )?;
        let v_proj = linear_b(
            cfg.hidden_size,
            num_kv_heads * head_dim,
            cfg.attention_bias.unwrap_or(false),
            vb.pp("v_proj"),
        )?;
        let o_proj = linear_b(
            num_heads * head_dim,
            cfg.hidden_size,
            false,
            vb.pp("o_proj"),
        )?;
        // Necessary because the hidden_size in the config isn't always accurate
        let hidden_size = head_dim * cfg.num_attention_heads;
        // Initialize KV cache with 512 tokens capacity to reduce initial memory allocation.
        // The cache will grow in chunks of 512 tokens when needed.
        let kv_cache = KvCache::new(2, 512);
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_heads,
            num_kv_heads,
            num_kv_groups,
            head_dim,
            hidden_size,
            rotary_emb,
            kv_cache,
        })
    }
    /// Attention forward pass for `x` of shape `(b, l, hidden)`; `attn_mask`
    /// is an optional additive mask and `offset` the KV-cache position of
    /// the first token.
    pub(crate) fn forward(
        &mut self,
        x: &Tensor,
        attn_mask: Option<&Tensor>,
        offset: usize,
    ) -> Result<Tensor> {
        let (b, l, _) = x.dims3()?;
        let q = self.q_proj.forward(x)?;
        let k = self.k_proj.forward(x)?;
        let v = self.v_proj.forward(x)?;
        // (B, L, H*D) -> (B, H, L, D).
        let q = q
            .reshape((b, l, self.num_heads, self.head_dim))?
            .transpose(1, 2)?;
        let k = k
            .reshape((b, l, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let v = v
            .reshape((b, l, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        // Rotary embeddings (possibly partial), then extend the KV cache.
        let q = self.rotary_emb.apply(&q, offset)?;
        let k = self.rotary_emb.apply(&k, offset)?;
        let (k, v) = self.kv_cache.append(&k.contiguous()?, &v.contiguous()?)?;
        // Replicate the KV heads so they line up with the query heads (GQA).
        let k = repeat_kv(k, self.num_kv_groups)?;
        let v = repeat_kv(v, self.num_kv_groups)?;
        // Scaled dot-product attention with an additive mask.
        let scale = 1.0 / (self.head_dim as f64).sqrt();
        let mut scores = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
        if let Some(m) = attn_mask {
            scores = scores.broadcast_add(m)?;
        }
        let probs = candle_nn::ops::softmax_last_dim(&scores)?;
        let ctx = probs.matmul(&v)?;
        ctx.transpose(1, 2)?
            .reshape((b, l, self.hidden_size))?
            .apply(&self.o_proj)
    }
    /// Drops all cached keys/values (start of a new sequence).
    pub(crate) fn clear_kv_cache(&mut self) {
        self.kv_cache.reset();
    }
}
/// One GLM-4 decoder block with "sandwich" norms: pre- and post-norms
/// around both the attention and MLP sub-layers.
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Attention,
    mlp: Mlp,
    // Pre-norm before attention.
    input_layernorm: RmsNorm,
    // Pre-norm before the MLP.
    post_attention_layernorm: RmsNorm,
    // Post-norm on the MLP output.
    post_mlp_layernorm: RmsNorm,
    // Post-norm on the attention output.
    post_self_attn_layernorm: RmsNorm,
}
impl DecoderLayer {
    /// Builds one decoder block; all four norms share the same size and eps.
    fn new(cfg: &Config, rotary: Arc<RotaryEmbedding>, vb: VarBuilder) -> Result<Self> {
        let norm = |name: &str| RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp(name));
        Ok(Self {
            self_attn: Attention::new(cfg, rotary, vb.pp("self_attn"))?,
            mlp: Mlp::new(cfg, vb.pp("mlp"))?,
            input_layernorm: norm("input_layernorm")?,
            post_attention_layernorm: norm("post_attention_layernorm")?,
            post_self_attn_layernorm: norm("post_self_attn_layernorm")?,
            post_mlp_layernorm: norm("post_mlp_layernorm")?,
        })
    }
    /// Sandwich-norm forward: norm -> attn -> norm -> residual, then
    /// norm -> mlp -> norm -> residual.
    fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>, offset: usize) -> Result<Tensor> {
        // Attention sub-layer.
        let h = self.input_layernorm.forward(xs)?;
        let h = self.self_attn.forward(&h, mask, offset)?;
        let h = self.post_self_attn_layernorm.forward(&h)?;
        let h = (xs + h)?;
        // Feed-forward sub-layer.
        let m = self.post_attention_layernorm.forward(&h)?;
        let m = self.mlp.forward(&m)?;
        let m = self.post_mlp_layernorm.forward(&m)?;
        &h + m
    }
    /// Clears this layer's attention KV cache.
    fn clear_kv_cache(&mut self) {
        self.self_attn.clear_kv_cache();
    }
}
/// GLM-4 transformer trunk: embeddings, decoder layers and the final norm
/// (no LM head — see `ModelForCausalLM`).
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: candle_nn::Embedding,
    layers: Vec<DecoderLayer>,
    norm: RmsNorm,
    // Kept for building masks on the right device/dtype.
    device: Device,
    dtype: DType,
}
impl Model {
    /// Builds the trunk: embeddings, decoder layers and the final norm.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let embed_tokens =
            candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?;
        // One rotary-embedding table is shared by every layer.
        let rotary = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb.pp("model.layers");
        for i in 0..cfg.num_hidden_layers {
            layers.push(DecoderLayer::new(cfg, rotary.clone(), vb_l.pp(i))?);
        }
        Ok(Self {
            embed_tokens,
            layers,
            norm: RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?,
            device: vb.device().clone(),
            dtype: vb.dtype(),
        })
    }
    /// Clears every layer's KV cache (call before decoding a new prompt).
    fn clear_kv_cache(&mut self) {
        for l in &mut self.layers {
            l.clear_kv_cache();
        }
    }
    /// Builds an additive causal mask of shape `(b, 1, tgt, tgt + offset)`:
    /// 0 where attention is allowed, -inf where it is masked, with an
    /// optional sliding-window limit `sw`.
    fn causal_mask(
        &self,
        b: usize,
        tgt: usize,
        offset: usize,
        sw: Option<usize>,
    ) -> Result<Tensor> {
        let minf = f32::NEG_INFINITY;
        let mask: Vec<_> = (0..tgt)
            .flat_map(|i| {
                (0..(tgt + offset)).map(move |j| {
                    // No attending to future positions...
                    let past_ok = j <= i + offset;
                    // ...nor beyond the sliding window, when one is set.
                    let sw_ok = match sw {
                        Some(w) => (i + offset) as i64 - j as i64 <= w as i64,
                        None => true,
                    };
                    if past_ok && sw_ok {
                        0.
                    } else {
                        minf
                    }
                })
            })
            .collect();
        // The pattern is batch-independent: build it once and broadcast it.
        // (Passing `(b, 1, ...)` straight to `from_slice` only worked for
        // b == 1 since the buffer holds tgt * (tgt + offset) elements.)
        Tensor::from_slice(&mask, (tgt, tgt + offset), &self.device)?
            .to_dtype(self.dtype)?
            .reshape((1, 1, tgt, tgt + offset))?
            .expand((b, 1, tgt, tgt + offset))
    }
    /// Runs the trunk over token ids `input` of shape `(b, l)` starting at
    /// KV-cache position `offset`; returns the normed hidden states.
    pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> {
        let (b, l) = input.dims2()?;
        let mut h = self.embed_tokens.forward(input)?;
        // A single decode token can attend to everything cached: no mask.
        let causal = if l == 1 {
            None
        } else {
            Some(self.causal_mask(b, l, offset, None)?)
        };
        for layer in &mut self.layers {
            h = layer.forward(&h, causal.as_ref(), offset)?;
        }
        self.norm.forward(&h)
    }
}
/// GLM-4 with a language-modeling head (possibly tied to the embeddings).
#[derive(Debug, Clone)]
pub struct ModelForCausalLM {
    base: Model,
    lm_head: Linear,
}
impl ModelForCausalLM {
    /// Wraps the trunk with an LM head, reusing the embedding matrix as the
    /// output projection when the config ties the weights.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let base = Model::new(cfg, vb.clone())?;
        let lm_head = if cfg.tie_word_embeddings {
            Linear::from_weights(base.embed_tokens.embeddings().clone(), None)
        } else {
            linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
        };
        Ok(Self { base, lm_head })
    }
    /// Returns logits for the last input position only.
    pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> {
        let (_, seq_len) = input.dims2()?;
        let hidden = self.base.forward(input, offset)?;
        let last = hidden.narrow(1, seq_len - 1, 1)?;
        last.apply(&self.lm_head)
    }
    /// Resets all KV caches of the underlying model.
    pub fn clear_kv_cache(&mut self) {
        self.base.clear_kv_cache();
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mobilenetv4.rs | candle-transformers/src/models/mobilenetv4.rs | //! # MobileNet-v4
//!
//! MobileNet-v4 inference implementation based on timm.
//!
//! ## Paper
//!
//! ["MobileNetV4 - Universal Models for the Mobile Ecosystem"](https://arxiv.org/abs/2404.10518)
//!
//! ## References
//!
//! - [PyTorch Implementation](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/mobilenetv3.py)
use candle::{Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d_no_bias, linear, ops::softmax, Activation, Conv2dConfig, Func, VarBuilder,
};
/// The building blocks a MobileNetV4 stage is assembled from.
#[derive(Clone, Debug)]
enum BlockType {
    // Plain convolution + batch norm.
    Convolutional {
        out_channels: usize,
        kernel: usize,
        stride: usize,
    },
    // Universal inverted bottleneck: optional leading depthwise conv,
    // pointwise expansion, optional mid depthwise conv, pointwise
    // projection. A kernel size of 0 disables the corresponding depthwise
    // stage (its construction fails and is skipped by the builder).
    UniversalBottleneck {
        out_channels: usize,
        start_kernel: usize,
        mid_kernel: usize,
        stride: usize,
        expand: usize,
    },
    // Edge-residual block with an `expand`-times channel expansion.
    EdgeResidual {
        out_channels: usize,
        kernel: usize,
        stride: usize,
        expand: usize,
    },
    // Mobile attention block (used only by the hybrid variants); `kv_stride`
    // downsamples keys/values relative to the queries.
    Attention {
        out_channels: usize,
        heads: usize,
        kernel: usize,
        stride: usize,
        kv_dim: usize,
        kv_stride: usize,
    },
}
/// MobileNetV4 architecture description: stem width, activation, and the
/// block layout of the five stages.
#[derive(Clone, Debug)]
pub struct Config {
    // Output channels of the stem convolution.
    stem_dim: usize,
    activation: Activation,
    // Block layout for each of the five stages.
    stages: [Vec<BlockType>; 5],
}
// The stage tables below are hand-formatted, one block per line; keep
// rustfmt from reflowing them.
#[rustfmt::skip]
impl Config {
    /// MobileNetV4-Conv-Small configuration.
    pub fn small() -> Self {
        Self {
            stem_dim: 32,
            activation: Activation::Relu,
            stages: [
                vec![
                    BlockType::Convolutional { out_channels: 32, kernel: 3, stride: 2},
                    BlockType::Convolutional { out_channels: 32, kernel: 1, stride: 1},
                ],
                vec![
                    BlockType::Convolutional { out_channels: 96, kernel: 3, stride: 2},
                    BlockType::Convolutional { out_channels: 64, kernel: 1, stride: 1},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 3},
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 3, mid_kernel: 3, stride: 2, expand: 6},
                    BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 5, stride: 1, expand: 3},
                    BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
                ],
            ],
        }
    }
    /// MobileNetV4-Conv-Medium configuration.
    pub fn medium() -> Self {
        Self {
            stem_dim: 32,
            activation: Activation::Relu,
            stages: [
                vec![
                    BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 2},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 6},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 6},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 2},
                ],
                vec![
                    BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
                ],
            ],
        }
    }
    /// MobileNetV4-Hybrid-Medium: the medium conv layout plus mobile
    /// attention blocks in the last two feature stages.
    pub fn hybrid_medium() -> Self {
        Self {
            stem_dim: 32,
            activation: Activation::Relu,
            stages: [
                vec![
                    BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 2},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 6},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 6},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
                ],
            ],
        }
    }
    /// MobileNetV4-Conv-Large configuration.
    pub fn large() -> Self {
        Self {
            stem_dim: 24,
            activation: Activation::Relu,
            stages: [
                vec![
                    BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
                ],
            ],
        }
    }
    /// MobileNetV4-Hybrid-Large: the large conv layout plus mobile attention
    /// blocks, using GELU instead of ReLU.
    pub fn hybrid_large() -> Self {
        Self {
            stem_dim: 24,
            activation: Activation::Gelu,
            stages: [
                vec![
                    BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48},
                    BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                    BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
                    BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
                ],
                vec![
                    BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
                ],
            ],
        }
    }
}
/// Depthwise convolution (one filter group per channel) followed by batch
/// norm in inference mode; the channel count is preserved.
fn depthwise_conv(
    channels: usize,
    kernel: usize,
    stride: usize,
    padding: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    // groups == channels is what makes the convolution depthwise.
    let cfg = Conv2dConfig {
        stride,
        padding,
        groups: channels,
        ..Default::default()
    };
    let conv = conv2d_no_bias(channels, channels, kernel, cfg, vb.pp("conv"))?;
    let bn = batch_norm(channels, 1e-5, vb.pp("bn"))?;
    Ok(Func::new(move |xs| {
        let ys = xs.apply(&conv)?;
        ys.apply_t(&bn, false)
    }))
}
/// 1x1 (pointwise) convolution followed by batch norm in inference mode,
/// mapping `in_channels` to `out_channels`.
fn pointwise_conv(
    in_channels: usize,
    out_channels: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let conv = conv2d_no_bias(
        in_channels,
        out_channels,
        1,
        Conv2dConfig {
            ..Default::default()
        },
        vb.pp("conv"),
    )?;
    let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?;
    Ok(Func::new(move |xs| {
        let ys = xs.apply(&conv)?;
        ys.apply_t(&bn, false)
    }))
}
//Universal block that uses two pointwise convolutions and all combinations of two depthwise convolutions.
// Layout: [optional dw_start] -> pw expand -> act -> [optional dw_mid] -> act
// -> pw project -> [optional layer scale] -> [optional residual add].
// The two depthwise convs and the layer-scale gamma are kept as `Result`s and
// simply skipped when their weights are absent (e.g. a kernel size of 0 in
// the block configuration means the corresponding conv does not exist).
#[allow(clippy::too_many_arguments)]
fn universal_inverted_bottleneck_block(
    cfg: &Config,
    in_channels: usize,
    out_channels: usize,
    expand: usize,
    start_kernel: usize,
    mid_kernel: usize,
    stride: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let act = cfg.activation;
    // Residual add is only valid when shape is unchanged.
    let skip_connection = (in_channels == out_channels) && (stride == 1);
    // If a mid depthwise conv exists it carries the stride; otherwise the
    // start conv does.
    let dw_start_stride = if mid_kernel > 0 { 1 } else { stride };
    let dw_start = depthwise_conv(
        in_channels,
        start_kernel,
        dw_start_stride,
        start_kernel / 2,
        vb.pp("dw_start"),
    );
    let pw_exp = pointwise_conv(in_channels, in_channels * expand, vb.pp("pw_exp"))?;
    let dw_mid = depthwise_conv(
        in_channels * expand,
        mid_kernel,
        stride,
        mid_kernel / 2,
        vb.pp("dw_mid"),
    );
    let pw_proj = pointwise_conv(in_channels * expand, out_channels, vb.pp("pw_proj"))?;
    // Optional per-channel layer-scale multiplier.
    let gamma = vb.get(out_channels, "layer_scale.gamma");
    Ok(Func::new(move |xs| {
        let residual = xs.clone();
        let mut xs = xs.clone();
        if let Ok(f) = &dw_start {
            xs = xs.apply(f)?;
        }
        xs = xs.apply(&pw_exp)?.apply(&act)?;
        if let Ok(f) = &dw_mid {
            xs = xs.apply(f)?.apply(&act)?;
        }
        xs = xs.apply(&pw_proj)?;
        if let Ok(g) = &gamma {
            // Broadcast gamma over (batch, h, w).
            xs = xs.broadcast_mul(&g.reshape((1, (), 1, 1))?)?;
        };
        if skip_connection {
            xs = (xs + residual)?;
        }
        Ok(xs)
    }))
}
// Plain convolutional block: conv -> batch norm (inference) -> activation.
fn conv_block(
    cfg: &Config,
    in_channels: usize,
    out_channels: usize,
    kernel: usize,
    stride: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let act = cfg.activation;
    // "same"-style padding for odd kernels.
    let conv = conv2d_no_bias(
        in_channels,
        out_channels,
        kernel,
        Conv2dConfig {
            stride,
            padding: kernel / 2,
            ..Default::default()
        },
        vb.pp("conv"),
    )?;
    let bn = batch_norm(out_channels, 1e-5, vb.pp("bn1"))?;
    Ok(Func::new(move |xs| {
        let ys = xs.apply(&conv)?.apply_t(&bn, false)?;
        ys.apply(&act)
    }))
}
// Edge-residual (fused-IB) block: expansion conv -> bn -> act -> pointwise
// linear conv -> bn. Note that, as in the original, no residual add is
// performed here.
fn edge_residual_block(
    cfg: &Config,
    in_channels: usize,
    out_channels: usize,
    kernel: usize,
    stride: usize,
    expand: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let act = cfg.activation;
    let mid_channels = in_channels * expand;
    // Expansion conv carries the stride and "same"-style padding.
    let conv_exp = conv2d_no_bias(
        in_channels,
        mid_channels,
        kernel,
        Conv2dConfig {
            stride,
            padding: kernel / 2,
            ..Default::default()
        },
        vb.pp("conv_exp"),
    )?;
    let bn1 = batch_norm(mid_channels, 1e-5, vb.pp("bn1"))?;
    // Pointwise linear projection back down to out_channels.
    let conv_pwl = conv2d_no_bias(
        mid_channels,
        out_channels,
        1,
        Conv2dConfig {
            ..Default::default()
        },
        vb.pp("conv_pwl"),
    )?;
    let bn2 = batch_norm(out_channels, 1e-5, vb.pp("bn2"))?;
    Ok(Func::new(move |xs| {
        let expanded = xs.apply(&conv_exp)?.apply_t(&bn1, false)?.apply(&act)?;
        expanded.apply(&conv_pwl)?.apply_t(&bn2, false)
    }))
}
// Flatten the spatial dims of a (b, c, h, w) tensor and move channels last,
// adding a singleton head axis: result is (b, 1, h*w, c).
fn reshape_kv(t: &Tensor) -> Result<Tensor> {
    let (b, c, _h, _w) = t.dims4()?;
    t.reshape((b, c, ()))?
        .transpose(1, 2)?
        .unsqueeze(1)?
        .contiguous()
}
// Split the channel dim of a (b, heads * kv_dim, h, w) tensor into heads and
// flatten the spatial dims: result is (b, heads, h*w, kv_dim).
fn reshape_query(t: &Tensor, heads: usize, kv_dim: usize) -> Result<Tensor> {
    let (b, _c, _h, _w) = t.dims4()?;
    t.reshape((b, heads, kv_dim, ()))?
        .transpose(D::Minus1, D::Minus2)?
        .contiguous()
}
// Undo the attention reshaping: turn (b, heads, h*w, head_dim) back into a
// (b, head_dim * heads, h, w) feature map.
fn reshape_output(t: &Tensor, heads: usize, h: usize, w: usize) -> Result<Tensor> {
    let (b, _, _, head_dim) = t.dims4()?;
    t.transpose(1, 2)?
        .reshape((b, h, w, head_dim * heads))?
        .permute((0, 3, 1, 2))?
        .contiguous()
}
// Mobile multi-query attention
// Multi-query attention over a feature map: queries have `heads` heads while
// keys/values are shared (single head, `kv_dim` channels). The key/value
// down-convs are kept as `Result`s and skipped when their weights are absent
// (presumably when kv_stride == 1 — confirm against the checkpoints).
// NOTE(review): `norm` is sized with out_channels but applied to the input;
// this assumes in_channels == out_channels, which holds for every Attention
// entry in the visible configuration.
#[allow(clippy::too_many_arguments)]
fn mqa_block(
    in_channels: usize,
    out_channels: usize,
    heads: usize,
    kernel: usize,
    stride: usize,
    kv_dim: usize,
    kv_stride: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    // Depthwise conv used to spatially downsample keys/values.
    let down_conv2d_cfg = Conv2dConfig {
        stride: kv_stride,
        padding: kernel / 2,
        groups: in_channels,
        ..Default::default()
    };
    let proj_conv2d_cfg = Conv2dConfig {
        stride,
        ..Default::default()
    };
    let skip_connection = (in_channels == out_channels) && (stride == 1);
    // Optional per-channel layer-scale multiplier.
    let gamma = vb.get(out_channels, "layer_scale.gamma");
    let norm = batch_norm(out_channels, 1e-5, vb.pp("norm"))?;
    // Standard 1/sqrt(d) attention scaling.
    let scale = (kv_dim as f64).powf(-0.5);
    let vb = vb.pp("attn");
    let query_proj = conv2d_no_bias(
        out_channels,
        kv_dim * heads,
        1,
        proj_conv2d_cfg,
        vb.pp("query.proj"),
    )?;
    let key_down_conv = conv2d_no_bias(
        in_channels,
        out_channels,
        kernel,
        down_conv2d_cfg,
        vb.pp("key.down_conv"),
    );
    let key_norm = batch_norm(out_channels, 1e-5, vb.pp("key.norm"));
    let key_proj = conv2d_no_bias(out_channels, kv_dim, 1, proj_conv2d_cfg, vb.pp("key.proj"))?;
    let value_down_conv = conv2d_no_bias(
        in_channels,
        out_channels,
        kernel,
        down_conv2d_cfg,
        vb.pp("value.down_conv"),
    );
    let value_norm = batch_norm(out_channels, 1e-5, vb.pp("value.norm"));
    let value_proj = conv2d_no_bias(
        out_channels,
        kv_dim,
        1,
        proj_conv2d_cfg,
        vb.pp("value.proj"),
    )?;
    let output_proj = conv2d_no_bias(
        kv_dim * heads,
        out_channels,
        1,
        proj_conv2d_cfg,
        vb.pp("output.proj"),
    )?;
    Ok(Func::new(move |xs| {
        let (_, _, h, w) = xs.dims4()?;
        let residual = xs.clone();
        // Pre-norm before the attention computation.
        let xs = xs.apply_t(&norm, false)?;
        // Query
        let q = xs.apply(&query_proj)?;
        let q = reshape_query(&q, heads, kv_dim)?;
        let q = (q * scale)?;
        // Keys
        let mut k = xs.clone();
        if let (Ok(kd), Ok(n)) = (&key_down_conv, &key_norm) {
            k = k.apply(kd)?.apply_t(n, false)?;
        }
        let k = k.apply(&key_proj)?;
        let k = reshape_kv(&k)?;
        // Value
        let mut v = xs.clone();
        if let (Ok(vd), Ok(n)) = (&value_down_conv, &value_norm) {
            v = v.apply(vd)?;
            v = v.apply_t(n, false)?;
        }
        let v = v.apply(&value_proj)?;
        let v = reshape_kv(&v)?;
        // Attention: the single K/V head broadcasts across the query heads.
        let attn = q.broadcast_matmul(&(k.transpose(D::Minus2, D::Minus1)?))?;
        let attn = softmax(&attn, D::Minus1)?;
        let o = attn.broadcast_matmul(&v)?;
        let o = reshape_output(&o, heads, h, w)?;
        let mut xs = o.apply(&output_proj)?;
        // Layer scale
        if let Ok(g) = &gamma {
            xs = xs.broadcast_mul(&g.reshape((1, (), 1, 1))?)?;
        };
        if skip_connection {
            xs = (xs + residual)?;
        }
        Ok(xs)
    }))
}
// Stem: a stride-2 3x3 conv from RGB into `cfg.stem_dim` channels, followed
// by batch norm (inference) and the configured activation.
fn mobilenetv4_stem(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
    let act = cfg.activation;
    let width = cfg.stem_dim;
    let conv = conv2d_no_bias(
        3,
        width,
        3,
        Conv2dConfig {
            stride: 2,
            padding: 1,
            ..Default::default()
        },
        vb.pp("conv_stem"),
    )?;
    let bn = batch_norm(width, 1e-5, vb.pp("bn1"))?;
    Ok(Func::new(move |xs| {
        xs.apply(&conv)?.apply_t(&bn, false)?.apply(&act)
    }))
}
// The blocks in all the 5 stages of the model.
// Each stage's block list comes from `cfg.stages`; `in_channels` is threaded
// through so every block's input width matches the previous block's output.
// Weights are looked up under the "{stage}.{block}" path.
fn mobilenetv4_blocks(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
    let mut in_channels = cfg.stem_dim;
    let mut blocks = Vec::new();
    for stage in 0..5 {
        let nblocks = cfg.stages[stage].len();
        for block in 0..nblocks {
            // Dispatch on the block type declared in the configuration.
            match cfg.stages[stage][block] {
                BlockType::Convolutional {
                    out_channels,
                    kernel,
                    stride,
                } => {
                    blocks.push(conv_block(
                        cfg,
                        in_channels,
                        out_channels,
                        kernel,
                        stride,
                        vb.pp(format!("{stage}.{block}")),
                    )?);
                    in_channels = out_channels;
                }
                BlockType::EdgeResidual {
                    out_channels,
                    kernel,
                    stride,
                    expand,
                } => {
                    blocks.push(edge_residual_block(
                        cfg,
                        in_channels,
                        out_channels,
                        kernel,
                        stride,
                        expand,
                        vb.pp(format!("{stage}.{block}")),
                    )?);
                    in_channels = out_channels;
                }
                BlockType::UniversalBottleneck {
                    out_channels,
                    start_kernel,
                    mid_kernel,
                    stride,
                    expand,
                } => {
                    blocks.push(universal_inverted_bottleneck_block(
                        cfg,
                        in_channels,
                        out_channels,
                        expand,
                        start_kernel,
                        mid_kernel,
                        stride,
                        vb.pp(format!("{stage}.{block}")),
                    )?);
                    in_channels = out_channels;
                }
                BlockType::Attention {
                    out_channels,
                    heads,
                    kernel,
                    stride,
                    kv_dim,
                    kv_stride,
                } => {
                    blocks.push(mqa_block(
                        in_channels,
                        out_channels,
                        heads,
                        kernel,
                        stride,
                        kv_dim,
                        kv_stride,
                        vb.pp(format!("{stage}.{block}")),
                    )?);
                    in_channels = out_channels;
                }
            }
        }
    }
    // Run all blocks sequentially.
    Ok(Func::new(move |xs| {
        let mut xs = xs.clone();
        for block in blocks.iter() {
            xs = xs.apply(block)?
        }
        Ok(xs)
    }))
}
// Classification head: 1x1 conv from the final 960-channel feature map (the
// width produced by the last stage) to `outputs`, then norm + activation,
// flatten, and a linear classifier.
fn mobilenetv4_head(
    cfg: &Config,
    outputs: usize,
    nclasses: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let act = cfg.activation;
    let conv = conv2d_no_bias(
        960,
        outputs,
        1,
        Conv2dConfig {
            ..Default::default()
        },
        vb.pp("conv_head"),
    )?;
    let norm = batch_norm(outputs, 1e-5, vb.pp("norm_head"))?;
    let cls = linear(outputs, nclasses, vb.pp("classifier"))?;
    Ok(Func::new(move |xs| {
        xs.apply(&conv)?
            .apply_t(&norm, false)?
            .apply(&act)?
            .flatten_from(1)?
            .apply(&cls)
    }))
}
// Build a mobilenetv4 model for a given configuration.
fn mobilenetv4_model(
cfg: &Config,
nclasses: Option<usize>,
vb: VarBuilder,
) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let outputs = 1280;
let head = mobilenetv4_head(cfg, outputs, nclasses, vb.clone())?;
Some(head)
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_gemma3.rs | candle-transformers/src/models/quantized_gemma3.rs | //! Gemma 3 model implementation with quantization support.
//!
//! Gemma 3 is a family of multimodal language models developed by Google.
//! This implementation provides quantization for reduced memory usage and faster inference.
//!
//! Key characteristics:
//! - Group-Query Attention (GQA) with specialized key-value heads
//! - RMSNorm for layer normalization
//! - Specialized attention patterns with separate normalization for Q/K/V
//! - Feed-forward network with SwiGLU activation
//! - Support for 2/3/4/8-bit quantization
//!
//! References:
//! - [Gemma 3 Models](https://blog.google/technology/developers/gemma-3/)
//!
use crate::quantized_nn::RmsNorm;
use candle::quantized::gguf_file;
use candle::quantized::QTensor;
use candle::D;
use candle::{DType, Device, IndexOp, Result, Tensor};
use candle_nn::{Embedding, Module};
pub const MAX_SEQ_LEN: usize = 131072; // Gemma 3 supports 128K context window
pub const DEFAULT_SLIDING_WINDOW_TYPE: usize = 6;
pub const DEFAULT_ROPE_FREQUENCY: f32 = 1_000_000.;
pub const DEFAULT_ROPE_FREQUENCY_SLIDING: f32 = 10_000.;
pub const DEFAULT_ROPE_FREQUENCY_SCALE_FACTOR: f32 = 1.;
/// Thin wrapper around `candle::quantized::QMatMul` that records a tracing
/// span around every matmul.
#[derive(Debug, Clone)]
struct QMatMul {
    inner: candle::quantized::QMatMul,
    span: tracing::Span,
}
impl QMatMul {
    /// Wrap a quantized weight tensor as a matmul operator.
    fn from_qtensor(qtensor: QTensor) -> Result<Self> {
        let inner = candle::quantized::QMatMul::from_qtensor(qtensor)?;
        let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
        Ok(Self { inner, span })
    }
    /// xs @ W^T with the quantized weights.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}
/// SwiGLU feed-forward network: down(silu(gate(x)) * up(x)).
#[derive(Debug, Clone)]
struct Mlp {
    feed_forward_gate: QMatMul, // ffn_gate in GGUF
    feed_forward_up: QMatMul,   // ffn_up in GGUF
    feed_forward_down: QMatMul, // ffn_down in GGUF
}
impl Module for Mlp {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // Gate branch goes through SiLU, then gates the up projection.
        let activated = candle_nn::ops::silu(&self.feed_forward_gate.forward(xs)?)?;
        let gated = (activated * self.feed_forward_up.forward(xs)?)?;
        self.feed_forward_down.forward(&gated)
    }
}
/// Precomputed RoPE tables for a single rope frequency base.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
impl RotaryEmbedding {
    /// Precompute sin/cos tables of shape (MAX_SEQ_LEN, head_dim / 2).
    fn new(head_dim: usize, rope_frequency: f32, device: &Device) -> Result<Self> {
        // theta_i = 1 / freq^(i / head_dim) for even i, one per dim pair.
        let theta: Vec<_> = (0..head_dim)
            .step_by(2)
            .map(|i| 1f32 / rope_frequency.powf(i as f32 / head_dim as f32))
            .collect();
        let theta = Tensor::new(theta.as_slice(), device)?;
        // Outer product of positions and frequencies.
        let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)?
            .to_dtype(DType::F32)?
            .reshape((MAX_SEQ_LEN, 1))?
            .matmul(&theta.reshape((1, theta.elem_count()))?)?;
        let cos = idx_theta.cos()?;
        let sin = idx_theta.sin()?;
        Ok(Self { sin, cos })
    }
    /// Apply rotary embeddings to q and k, offset by `index_pos` so that
    /// cached decoding keeps absolute positions consistent.
    fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        index_pos: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, index_pos, seq_len)?;
        let sin = self.sin.narrow(0, index_pos, seq_len)?;
        let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
        let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
        Ok((q_embed, k_embed))
    }
}
/// Weights and state for a single transformer layer.
#[derive(Debug, Clone)]
struct LayerWeights {
    // Attention components
    attention_wq: QMatMul,
    attention_wk: QMatMul,
    attention_wv: QMatMul,
    attention_wo: QMatMul,
    // Specialized normalization for Q and K
    attention_q_norm: RmsNorm,
    attention_k_norm: RmsNorm,
    // Layer normalization
    attention_norm: RmsNorm,      // Applied before attention
    post_attention_norm: RmsNorm, // Applied after attention
    ffn_norm: RmsNorm,            // Applied before feedforward
    post_ffn_norm: RmsNorm,       // Applied after feedforward
    // Feed-forward network
    mlp: Mlp,
    // Attention parameters
    n_head: usize,    // Number of query heads
    n_kv_head: usize, // Number of key-value heads
    head_dim: usize,  // Dimension of each head
    q_dim: usize,     // Total dimension for queries
    // Some(window) for sliding-window layers, None for global-attention ones.
    sliding_window_size: Option<usize>,
    // Per-layer RoPE tables (the frequency base differs between sliding and
    // global layers).
    rotary_embedding: RotaryEmbedding,
    // Scalar -inf used to mask out attention logits.
    neg_inf: Tensor,
    // Cache
    kv_cache: Option<(Tensor, Tensor)>,
    // Tracing
    span_attn: tracing::Span,
    span_mlp: tracing::Span,
}
impl LayerWeights {
    /// Build a (b, 1, seq_len, seq_len + index_pos) mask where 1 marks a
    /// position that may be attended to and 0 a masked one. Sliding-window
    /// layers additionally forbid looking back more than
    /// `sliding_window_size` tokens.
    fn mask(
        &self,
        b_sz: usize,
        seq_len: usize,
        index_pos: usize,
        dtype: DType,
        device: &Device,
    ) -> Result<Tensor> {
        let mask: Vec<_> = if let Some(sliding_window_size) = self.sliding_window_size {
            (0..seq_len)
                .flat_map(|i| {
                    (0..seq_len).map(move |j| {
                        // Masked when non-causal (j > i) or outside window.
                        if i < j || j + sliding_window_size < i {
                            0u32
                        } else {
                            1u32
                        }
                    })
                })
                .collect()
        } else {
            // Plain causal mask.
            (0..seq_len)
                .flat_map(|i| (0..seq_len).map(move |j| if i < j { 0u32 } else { 1u32 }))
                .collect()
        };
        let mask = Tensor::from_slice(&mask, (seq_len, seq_len), device)?;
        let mask = if index_pos > 0 {
            // NOTE(review): `mask` is U32 (built from u32s) while `mask0` is
            // F32, so this `cat` looks like it would fail on a dtype
            // mismatch; zeros here also mask out *every* cached position.
            // This branch only runs when seq_len > 1 with a non-empty cache
            // (chunked prefill) — verify against such a call.
            let mask0 = Tensor::zeros((seq_len, index_pos), DType::F32, device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        mask.expand((b_sz, 1, seq_len, seq_len + index_pos))?
            .to_dtype(dtype)
    }
    /// Self-attention with Q/K RmsNorm, RoPE, grouped-query KV repetition
    /// and an append-only KV cache.
    fn forward_attn(
        &mut self,
        x: &Tensor,
        mask: Option<&Tensor>,
        index_pos: usize,
    ) -> Result<Tensor> {
        let _enter = self.span_attn.enter();
        let (b_sz, seq_len, _) = x.dims3()?;
        let q = self.attention_wq.forward(x)?;
        let k = self.attention_wk.forward(x)?;
        let v = self.attention_wv.forward(x)?;
        // (b, seq, n_head * head_dim) -> (b, n_head, seq, head_dim)
        let q = q
            .reshape((b_sz, seq_len, self.n_head, self.head_dim))?
            .transpose(1, 2)?;
        let k = k
            .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?;
        let v = v
            .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?;
        // Gemma applies RmsNorm to queries and keys before RoPE.
        let q = self.attention_q_norm.forward(&q.contiguous()?)?;
        let k = self.attention_k_norm.forward(&k.contiguous()?)?;
        let (q, k) = self
            .rotary_embedding
            .apply_rotary_emb_qkv(&q, &k, index_pos)?;
        // index_pos == 0 starts a fresh sequence and drops the old cache.
        let (k, v) = match &self.kv_cache {
            None => (k, v),
            Some((k_cache, v_cache)) => {
                if index_pos == 0 {
                    (k, v)
                } else {
                    let k = Tensor::cat(&[k_cache, &k], 2)?; // concat on seq dim
                    let v = Tensor::cat(&[v_cache, &v], 2)?;
                    (k, v)
                }
            }
        };
        self.kv_cache = Some((k.clone(), v.clone())); // update cache
        // Repeat KV for GQA
        let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?;
        let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?;
        // Scaled Dot-Product Attention
        let scale = 1.0 / (self.head_dim as f64).sqrt();
        let mut attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
        if let Some(mask) = mask {
            // Positions where the mask is 0 get -inf before the softmax.
            let mask = mask.broadcast_as(attn_weights.shape())?;
            let neg_inf = self.neg_inf.broadcast_as(attn_weights.dims())?;
            attn_weights = mask.eq(0u32)?.where_cond(&neg_inf, &attn_weights)?;
        }
        let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
        let attn_output = attn_weights.matmul(&v)?;
        // (b, n_head, seq, head_dim) -> (b, seq, q_dim), then output proj.
        let attn_output = attn_output
            .transpose(1, 2)?
            .reshape((b_sz, seq_len, self.q_dim))?;
        self.attention_wo.forward(&attn_output)
    }
}
/// Full quantized Gemma model: embeddings, transformer layers, final norm
/// and output projection.
#[derive(Debug, Clone)]
pub struct ModelWeights {
    tok_embeddings: Embedding,
    // Hidden size; also used to scale the embeddings by sqrt(dim).
    embedding_length: usize,
    layers: Vec<LayerWeights>,
    norm: RmsNorm,
    output: QMatMul,
    // Tracing
    span: tracing::Span,
    span_output: tracing::Span,
}
impl ModelWeights {
    /// Load a quantized Gemma model from a GGUF file.
    ///
    /// The metadata key prefix ("gemma3", "gemma2", ...) is auto-detected by
    /// probing which keys are present, falling back to "gemma3".
    pub fn from_gguf<R: std::io::Seek + std::io::Read>(
        ct: gguf_file::Content,
        reader: &mut R,
        device: &Device,
    ) -> Result<Self> {
        // Detect architecture prefix by probing which keys exist in metadata.
        // This supports gemma3, gemma2, gemma, gemma-embedding, and future variants.
        let prefix = ["gemma3", "gemma2", "gemma", "gemma-embedding"]
            .iter()
            .find(|p| {
                ct.metadata
                    .contains_key(&format!("{}.attention.head_count", p))
            })
            .copied()
            .unwrap_or("gemma3");
        // Fetch a required metadata value under the detected prefix.
        let md_get = |s: &str| {
            let key = format!("{prefix}.{s}");
            match ct.metadata.get(&key) {
                None => candle::bail!("cannot find {key} in metadata"),
                Some(v) => Ok(v),
            }
        };
        let head_count = md_get("attention.head_count")?.to_u32()? as usize;
        let head_count_kv = md_get("attention.head_count_kv")?.to_u32()? as usize;
        let block_count = md_get("block_count")?.to_u32()? as usize;
        let embedding_length = md_get("embedding_length")?.to_u32()? as usize;
        let key_length = md_get("attention.key_length")?.to_u32()? as usize;
        let _value_length = md_get("attention.value_length")?.to_u32()? as usize;
        let rms_norm_eps = md_get("attention.layer_norm_rms_epsilon")?.to_f32()? as f64;
        let sliding_window_size = md_get("attention.sliding_window")?.to_u32()? as usize;
        // Optional entries fall back to the Gemma 3 defaults.
        let sliding_window_type = md_get("attention.sliding_window_type")
            .and_then(|m| Ok(m.to_u32()? as usize))
            .unwrap_or(DEFAULT_SLIDING_WINDOW_TYPE);
        let rope_freq_base = md_get("rope.freq_base")
            .and_then(|m| m.to_f32())
            .unwrap_or(DEFAULT_ROPE_FREQUENCY);
        let rope_freq_base_sliding = md_get("rope.local_freq_base")
            .and_then(|m| m.to_f32())
            .unwrap_or(DEFAULT_ROPE_FREQUENCY_SLIDING);
        // Unused in Llama.cpp so we aren't using it here.
        let _rope_freq_scaling_factor = md_get("rope.scaling.factor")
            .and_then(|m| m.to_f32())
            .unwrap_or(DEFAULT_ROPE_FREQUENCY_SCALE_FACTOR);
        // Compute the dimensions for queries, keys, and values
        // These are the total dimensions when projected across all heads
        let q_dim = head_count * key_length;
        // Shared -inf scalar for attention masking.
        let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?;
        // Load token embeddings and output projection
        let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?;
        let tok_embeddings = tok_embeddings.dequantize(device)?;
        let norm = RmsNorm::from_qtensor(
            ct.tensor(reader, "output_norm.weight", device)?,
            rms_norm_eps,
        )?;
        let output = match ct.tensor(reader, "output.weight", device) {
            Ok(tensor) => tensor,
            Err(_) => ct.tensor(reader, "token_embd.weight", device)?, // Use tied weights if output.weight doesn't exist
        };
        let mut layers = Vec::with_capacity(block_count);
        for layer_idx in 0..block_count {
            let prefix = format!("blk.{layer_idx}");
            let attention_wq = ct.tensor(reader, &format!("{prefix}.attn_q.weight"), device)?;
            let attention_wk = ct.tensor(reader, &format!("{prefix}.attn_k.weight"), device)?;
            let attention_wv = ct.tensor(reader, &format!("{prefix}.attn_v.weight"), device)?;
            let attention_wo =
                ct.tensor(reader, &format!("{prefix}.attn_output.weight"), device)?;
            let attention_q_norm = RmsNorm::from_qtensor(
                ct.tensor(reader, &format!("{prefix}.attn_q_norm.weight"), device)?,
                rms_norm_eps,
            )?;
            let attention_k_norm = RmsNorm::from_qtensor(
                ct.tensor(reader, &format!("{prefix}.attn_k_norm.weight"), device)?,
                rms_norm_eps,
            )?;
            let attention_norm = RmsNorm::from_qtensor(
                ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?,
                rms_norm_eps,
            )?;
            let post_attention_norm = RmsNorm::from_qtensor(
                ct.tensor(
                    reader,
                    &format!("{prefix}.post_attention_norm.weight"),
                    device,
                )?,
                rms_norm_eps,
            )?;
            let ffn_norm = RmsNorm::from_qtensor(
                ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?,
                rms_norm_eps,
            )?;
            let post_ffn_norm = RmsNorm::from_qtensor(
                ct.tensor(reader, &format!("{prefix}.post_ffw_norm.weight"), device)?,
                rms_norm_eps,
            )?;
            let feed_forward_gate =
                ct.tensor(reader, &format!("{prefix}.ffn_gate.weight"), device)?;
            let feed_forward_up = ct.tensor(reader, &format!("{prefix}.ffn_up.weight"), device)?;
            let feed_forward_down =
                ct.tensor(reader, &format!("{prefix}.ffn_down.weight"), device)?;
            let mlp = Mlp {
                feed_forward_gate: QMatMul::from_qtensor(feed_forward_gate)?,
                feed_forward_up: QMatMul::from_qtensor(feed_forward_up)?,
                feed_forward_down: QMatMul::from_qtensor(feed_forward_down)?,
            };
            // A layer uses sliding-window attention unless its 1-based index
            // is a multiple of `sliding_window_type` (default 6 when absent
            // from the metadata); sliding layers also use the local rope
            // frequency base.
            let is_sliding = (layer_idx + 1) % sliding_window_type > 0;
            let sliding_window_size = is_sliding.then_some(sliding_window_size);
            let layer_rope_frequency = if is_sliding {
                rope_freq_base_sliding
            } else {
                rope_freq_base
            };
            let rotary_embedding = RotaryEmbedding::new(key_length, layer_rope_frequency, device)?;
            // Tracing spans
            let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
            let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp");
            layers.push(LayerWeights {
                attention_wq: QMatMul::from_qtensor(attention_wq)?,
                attention_wk: QMatMul::from_qtensor(attention_wk)?,
                attention_wv: QMatMul::from_qtensor(attention_wv)?,
                attention_wo: QMatMul::from_qtensor(attention_wo)?,
                attention_q_norm,
                attention_k_norm,
                attention_norm,
                post_attention_norm,
                ffn_norm,
                post_ffn_norm,
                mlp,
                n_head: head_count,
                n_kv_head: head_count_kv,
                head_dim: key_length,
                q_dim,
                sliding_window_size,
                rotary_embedding,
                neg_inf: neg_inf.clone(),
                kv_cache: None,
                span_attn,
                span_mlp,
            })
        }
        let span = tracing::span!(tracing::Level::TRACE, "model");
        let span_output = tracing::span!(tracing::Level::TRACE, "output");
        Ok(Self {
            tok_embeddings: Embedding::new(tok_embeddings, embedding_length),
            embedding_length,
            layers,
            norm,
            output: QMatMul::from_qtensor(output)?,
            span,
            span_output,
        })
    }
    /// Run the model on a (batch, seq_len) batch of token ids starting at
    /// position `index_pos`, returning logits for the last position only.
    pub fn forward(&mut self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
        let (b_sz, seq_len) = x.dims2()?;
        let _enter = self.span.enter();
        let mut layer_in = self.tok_embeddings.forward(x)?;
        // Gemma scales the embeddings by sqrt(hidden_size).
        layer_in = (layer_in * (self.embedding_length as f64).sqrt())?;
        for layer in self.layers.iter_mut() {
            // Single-token decoding needs no mask: causality is implied.
            let attention_mask = if seq_len == 1 {
                None
            } else {
                Some(layer.mask(b_sz, seq_len, index_pos, x.dtype(), x.device())?)
            };
            // Attention block
            let residual = &layer_in;
            let x = layer.attention_norm.forward(&layer_in)?;
            let x = layer.forward_attn(&x, attention_mask.as_ref(), index_pos)?;
            let x = layer.post_attention_norm.forward(&x)?;
            let x = (x + residual)?;
            // Feed-forward block
            let _enter = layer.span_mlp.enter();
            let residual = &x;
            let x = layer.ffn_norm.forward(&x)?;
            let x = layer.mlp.forward(&x)?;
            let x = layer.post_ffn_norm.forward(&x)?;
            let x = (x + residual)?;
            drop(_enter);
            layer_in = x;
        }
        let _enter = self.span_output.enter();
        // Only the last position's logits are needed for generation.
        let x = layer_in.i((.., seq_len - 1, ..))?;
        let x = self.norm.forward(&x)?;
        let output = self.output.forward(&x)?;
        Ok(output)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/convmixer.rs | candle-transformers/src/models/convmixer.rs | //! ConvMixer implementation.
//!
//! See "Patches Are All You Need?" by Trockman et al. 2022
//!
//! - 📝 [Arxiv](https://arxiv.org/abs/2201.09792)
//! - 💻 [GitHub](https://github.com/locuslab/convmixer)
//!
use candle::Result;
use candle_nn::{batch_norm, Conv2dConfig, Module, VarBuilder};
#[allow(clippy::many_single_char_names)]
/// 2d convolution with TensorFlow-style "SAME" padding: the input is
/// zero-padded (right/bottom biased) so the output spatial size is
/// ceil(input / stride).
fn conv2d_same(
    i: usize,
    o: usize,
    k: usize,
    c: Conv2dConfig,
    vb: VarBuilder,
) -> Result<impl Module> {
    let conv2d = candle_nn::conv2d(i, o, k, c, vb)?;
    let s = c.stride;
    let module = candle_nn::func(move |xs| {
        let ih = xs.dim(2)?;
        let iw = xs.dim(3)?;
        // Target output sizes under "same" semantics.
        let oh = ih.div_ceil(s);
        let ow = iw.div_ceil(s);
        // saturating_sub clamps the required padding at zero. The previous
        // `usize::max((oh - 1) * s + k - ih, 0)` was a no-op (usize is never
        // negative) and the subtraction could underflow and panic whenever
        // the kernel is smaller than the stride (e.g. k=1, s=2).
        let pad_h = ((oh - 1) * s + k).saturating_sub(ih);
        let pad_w = ((ow - 1) * s + k).saturating_sub(iw);
        if pad_h > 0 || pad_w > 0 {
            xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)?
                .pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)?
                .apply(&conv2d)
        } else {
            xs.apply(&conv2d)
        }
    });
    Ok(module)
}
/// A single ConvMixer block: a residual depthwise "spatial mixing" conv
/// followed by a pointwise "channel mixing" conv, each with GELU + BatchNorm.
fn block(dim: usize, kernel_size: usize, vb: VarBuilder) -> Result<impl Module> {
    let depthwise_cfg = Conv2dConfig {
        groups: dim,
        ..Default::default()
    };
    let vb_fn = vb.pp(0).pp("fn");
    let depthwise = conv2d_same(dim, dim, kernel_size, depthwise_cfg, vb_fn.pp(0))?;
    let bn_depthwise = batch_norm(dim, 1e-5, vb_fn.pp(2))?;
    let pointwise = candle_nn::conv2d(dim, dim, 1, Default::default(), vb.pp(1))?;
    let bn_pointwise = batch_norm(dim, 1e-5, vb.pp(3))?;
    Ok(candle_nn::func(move |xs| {
        // The residual connection wraps the depthwise stage only.
        let mixed = xs
            .apply(&depthwise)?
            .gelu_erf()?
            .apply_t(&bn_depthwise, false)?;
        let xs = (xs + mixed)?;
        xs.apply(&pointwise)?.gelu_erf()?.apply_t(&bn_pointwise, false)
    }))
}
/// Build a ConvMixer: a patch-embedding stem, `depth` mixer blocks, global
/// average pooling and a linear classification head.
///
/// The `VarBuilder` indices mirror the upstream `nn.Sequential` layout:
/// 0 = stem conv, 1 = GELU, 2 = stem batch-norm, 3..3+depth = blocks, then
/// pooling and flatten, with the final linear at index `depth + 5`.
fn convmixer(
    nclasses: usize,
    dim: usize,
    depth: usize,
    kernel_size: usize,
    patch_size: usize,
    vb: VarBuilder,
) -> Result<candle_nn::Func<'static>> {
    let conv2d_cfg = Conv2dConfig {
        stride: patch_size,
        ..Default::default()
    };
    let conv1 = candle_nn::conv2d(3, dim, patch_size, conv2d_cfg, vb.pp(0))?;
    let bn1 = batch_norm(dim, 1e-5, vb.pp(2))?;
    let blocks: Vec<_> = (0..depth)
        .map(|index| block(dim, kernel_size, vb.pp(3 + index)))
        .collect::<Result<Vec<_>>>()?;
    // Previously hard-coded to 25, which is only correct for depth == 20;
    // derived from depth so other depths load correctly.
    let fc = candle_nn::linear(dim, nclasses, vb.pp(depth + 5))?;
    Ok(candle_nn::func(move |xs| {
        let mut xs = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?;
        for block in blocks.iter() {
            xs = xs.apply(block)?
        }
        // This performs the adaptive average pooling with a target size of (1, 1).
        xs.mean(3)?.mean(2)?.apply(&fc)
    }))
}
/// ConvMixer-1536/20: width 1536, depth 20, kernel size 9, patch size 7.
pub fn c1536_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> {
    convmixer(nclasses, 1536, 20, 9, 7, vb)
}
/// ConvMixer-1024/20: width 1024, depth 20, kernel size 9, patch size 14.
pub fn c1024_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> {
    convmixer(nclasses, 1024, 20, 9, 14, vb)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/segformer.rs | candle-transformers/src/models/segformer.rs | //! Segformer model implementation for semantic segmentation and image classification.
//!
//! Segformer is a transformer-based model designed for vision tasks. It uses a hierarchical
//! structure that progressively generates features at different scales.
//!
//! Key characteristics:
//! - Efficient self-attention with sequence reduction
//! - Hierarchical feature generation
//! - Mix-FFN for local and global feature interaction
//! - Lightweight all-MLP decode head
//!
//! References:
//! - [SegFormer Paper](https://arxiv.org/abs/2105.15203)
//! - [Model Card](https://huggingface.co/nvidia/mit-b0)
//!
use crate::models::with_tracing::{conv2d, linear, Conv2d, Linear};
use candle::{Context, Module, ModuleT, Result, Tensor, D};
use candle_nn::{conv2d_no_bias, layer_norm, Activation, Conv2dConfig, VarBuilder};
use serde::Deserialize;
use std::collections::HashMap;
// https://github.com/huggingface/transformers/blob/main/src/transformers/models/segformer/configuration_segformer.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    // Class-id -> label mapping; empty for pure feature extraction.
    #[serde(default)]
    pub id2label: HashMap<String, String>,
    // Number of input image channels.
    pub num_channels: usize,
    // Number of hierarchical encoder stages.
    pub num_encoder_blocks: usize,
    // Per-stage values (one entry per encoder stage):
    pub depths: Vec<usize>,              // number of layers
    pub sr_ratios: Vec<usize>,           // attention sequence-reduction ratio
    pub hidden_sizes: Vec<usize>,        // hidden dimension
    pub patch_sizes: Vec<usize>,         // patch-embedding kernel size
    pub strides: Vec<usize>,             // patch-embedding stride
    pub num_attention_heads: Vec<usize>, // attention heads
    pub mlp_ratios: Vec<usize>,          // MLP expansion factor
    pub hidden_act: candle_nn::Activation,
    pub layer_norm_eps: f64,
    // Hidden size of the all-MLP decode head.
    pub decoder_hidden_size: usize,
}
/// Overlapping patch embedding: a strided conv whose kernel is larger than
/// its stride, followed by layer norm over the channel dimension.
#[derive(Debug, Clone)]
struct SegformerOverlapPatchEmbeddings {
    projection: Conv2d,
    layer_norm: candle_nn::LayerNorm,
}
impl SegformerOverlapPatchEmbeddings {
    fn new(
        config: &Config,
        patch_size: usize,
        stride: usize,
        num_channels: usize,
        hidden_size: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        // padding = patch_size / 2 makes neighbouring patches overlap.
        let projection = conv2d(
            num_channels,
            hidden_size,
            patch_size,
            Conv2dConfig {
                stride,
                padding: patch_size / 2,
                ..Default::default()
            },
            vb.pp("proj"),
        )?;
        let layer_norm =
            candle_nn::layer_norm(hidden_size, config.layer_norm_eps, vb.pp("layer_norm"))?;
        Ok(Self {
            projection,
            layer_norm,
        })
    }
}
impl Module for SegformerOverlapPatchEmbeddings {
    /// Project, layer-norm over channels, and return a (B, C, H, W) map.
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let embeddings = self.projection.forward(x)?;
        let shape = embeddings.shape();
        // [B, C, H, W] -> [B, H * W, C]
        let embeddings = embeddings.flatten_from(2)?.transpose(1, 2)?;
        let embeddings = self.layer_norm.forward(&embeddings)?;
        // [B, H * W, C] -> [B, C, H, W]
        let embeddings = embeddings.transpose(1, 2)?.reshape(shape)?;
        Ok(embeddings)
    }
}
/// Efficient self-attention with optional sequence reduction: when the
/// reduction ratio is > 1, keys and values are computed from a spatially
/// downsampled copy of the input, shrinking the attention matrix.
#[derive(Debug, Clone)]
struct SegformerEfficientSelfAttention {
    num_attention_heads: usize,
    attention_head_size: usize,
    query: Linear,
    key: Linear,
    value: Linear,
    // Present only when sequence_reduction_ratio > 1.
    sr: Option<Conv2d>,
    layer_norm: Option<layer_norm::LayerNorm>,
}
impl SegformerEfficientSelfAttention {
    fn new(
        config: &Config,
        hidden_size: usize,
        num_attention_heads: usize,
        sequence_reduction_ratio: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        if !hidden_size.is_multiple_of(num_attention_heads) {
            candle::bail!(
                "The hidden size {} is not a multiple of the number of attention heads {}",
                hidden_size,
                num_attention_heads
            )
        }
        let attention_head_size = hidden_size / num_attention_heads;
        let all_head_size = num_attention_heads * attention_head_size;
        let query = linear(hidden_size, all_head_size, vb.pp("query"))?;
        let key = linear(hidden_size, all_head_size, vb.pp("key"))?;
        let value = linear(hidden_size, all_head_size, vb.pp("value"))?;
        // The sequence reduction is a strided conv followed by layer norm.
        let (sr, layer_norm) = if sequence_reduction_ratio > 1 {
            (
                Some(conv2d(
                    hidden_size,
                    hidden_size,
                    sequence_reduction_ratio,
                    Conv2dConfig {
                        stride: sequence_reduction_ratio,
                        ..Default::default()
                    },
                    vb.pp("sr"),
                )?),
                Some(candle_nn::layer_norm(
                    hidden_size,
                    config.layer_norm_eps,
                    vb.pp("layer_norm"),
                )?),
            )
        } else {
            (None, None)
        };
        Ok(Self {
            num_attention_heads,
            attention_head_size,
            query,
            key,
            value,
            sr,
            layer_norm,
        })
    }
    /// (B, seq, all_head) -> (B, heads, seq, head_size).
    fn transpose_for_scores(&self, hidden_states: Tensor) -> Result<Tensor> {
        let (batch, seq_length, _) = hidden_states.shape().dims3()?;
        let new_shape = &[
            batch,
            seq_length,
            self.num_attention_heads,
            self.attention_head_size,
        ];
        let hidden_states = hidden_states.reshape(new_shape)?;
        let hidden_states = hidden_states.permute((0, 2, 1, 3))?;
        Ok(hidden_states)
    }
}
impl Module for SegformerEfficientSelfAttention {
    /// Takes a (B, C, H, W) feature map, returns (B, H*W, C) context.
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        // [B, C, H, W] -> [B, H * W, C]
        let hidden_states = x.flatten_from(2)?.permute((0, 2, 1))?;
        let query = self
            .transpose_for_scores(self.query.forward(&hidden_states)?)?
            .contiguous()?;
        // Keys/values come from the (optionally) spatially-reduced input.
        let hidden_states = if let (Some(sr), Some(layer_norm)) = (&self.sr, &self.layer_norm) {
            let hidden_states = sr.forward(x)?;
            // [B, C, H, W] -> [B, H * W, C]
            let hidden_states = hidden_states.flatten_from(2)?.permute((0, 2, 1))?;
            layer_norm.forward(&hidden_states)?
        } else {
            // already [B, H * W, C]
            hidden_states
        };
        // standard self-attention
        let key = self
            .transpose_for_scores(self.key.forward(&hidden_states)?)?
            .contiguous()?;
        let value = self
            .transpose_for_scores(self.value.forward(&hidden_states)?)?
            .contiguous()?;
        let attention_scores =
            (query.matmul(&key.t()?)? / f64::sqrt(self.attention_head_size as f64))?;
        let attention_scores = candle_nn::ops::softmax_last_dim(&attention_scores)?;
        let result = attention_scores.matmul(&value)?;
        // Merge the heads back into a single channel dim.
        let result = result.permute((0, 2, 1, 3))?.contiguous()?;
        result.flatten_from(D::Minus2)
    }
}
/// Output projection applied after self-attention (a single square linear).
#[derive(Debug, Clone)]
struct SegformerSelfOutput {
    dense: Linear,
}
impl SegformerSelfOutput {
    fn new(hidden_size: usize, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            dense: linear(hidden_size, hidden_size, vb.pp("dense"))?,
        })
    }
}
impl Module for SegformerSelfOutput {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        self.dense.forward(x)
    }
}
/// Full attention module: efficient self-attention plus output projection.
#[derive(Debug, Clone)]
struct SegformerAttention {
    attention: SegformerEfficientSelfAttention,
    output: SegformerSelfOutput,
}
impl SegformerAttention {
    fn new(
        config: &Config,
        hidden_size: usize,
        num_attention_heads: usize,
        sequence_reduction_ratio: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        Ok(Self {
            attention: SegformerEfficientSelfAttention::new(
                config,
                hidden_size,
                num_attention_heads,
                sequence_reduction_ratio,
                vb.pp("self"),
            )?,
            output: SegformerSelfOutput::new(hidden_size, vb.pp("output"))?,
        })
    }
}
impl Module for SegformerAttention {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let context = self.attention.forward(x)?;
        self.output.forward(&context)
    }
}
/// Depth-wise 3x3 convolution (one filter group per channel).
#[derive(Debug, Clone)]
struct SegformerDWConv {
    dw_conv: Conv2d,
}
impl SegformerDWConv {
    /// Builds a stride-1, padding-1 depth-wise conv over `dim` channels.
    fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
        let cfg = Conv2dConfig {
            stride: 1,
            padding: 1,
            // `groups == channels` makes the convolution depth-wise.
            groups: dim,
            ..Default::default()
        };
        Ok(Self {
            dw_conv: conv2d(dim, dim, 3, cfg, vb.pp("dwconv"))?,
        })
    }
}
impl Module for SegformerDWConv {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        x.apply(&self.dw_conv)
    }
}
/// Mix-FFN block: dense expansion, depth-wise conv, activation, dense projection.
#[derive(Debug, Clone)]
struct SegformerMixFFN {
    dense1: Linear,
    dw_conv: SegformerDWConv,
    act: Activation,
    dense2: Linear,
}
impl SegformerMixFFN {
    /// Builds the FFN mapping `in_features -> hidden_features -> out_features`,
    /// with the depth-wise conv operating at `hidden_features` width.
    fn new(
        config: &Config,
        in_features: usize,
        hidden_features: usize,
        out_features: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        Ok(Self {
            dense1: linear(in_features, hidden_features, vb.pp("dense1"))?,
            dw_conv: SegformerDWConv::new(hidden_features, vb.pp("dwconv"))?,
            act: config.hidden_act,
            dense2: linear(hidden_features, out_features, vb.pp("dense2"))?,
        })
    }
}
impl Module for SegformerMixFFN {
    /// Runs the Mix-FFN on a `[B, C, H, W]` feature map and returns the same
    /// layout. Dense layers operate on `[B, H * W, C]` tokens; the depth-wise
    /// conv requires the spatial `[B, C, H, W]` layout, hence the repeated
    /// flatten/permute/reshape round-trips.
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let (batch, _, height, width) = x.shape().dims4()?;
        // [B, C, H, W] -> [B, H * W, C] for the first dense layer.
        let hidden_states = self
            .dense1
            .forward(&x.flatten_from(2)?.permute((0, 2, 1))?)?;
        let channels = hidden_states.dim(2)?;
        // Back to [B, C', H, W] so the depth-wise conv can mix local tokens.
        let hidden_states = self.dw_conv.forward(
            &hidden_states
                .permute((0, 2, 1))?
                .reshape((batch, channels, height, width))?,
        )?;
        let hidden_states = self.act.forward(&hidden_states)?;
        // Tokens again for the second dense layer.
        let hidden_states = self
            .dense2
            .forward(&hidden_states.flatten_from(2)?.permute((0, 2, 1))?)?;
        let channels = hidden_states.dim(2)?;
        // Final output in spatial layout [B, C_out, H, W].
        hidden_states
            .permute((0, 2, 1))?
            .reshape((batch, channels, height, width))
    }
}
/// One Segformer transformer block: pre-norm attention plus pre-norm Mix-FFN.
#[derive(Debug, Clone)]
struct SegformerLayer {
    layer_norm_1: candle_nn::LayerNorm,
    attention: SegformerAttention,
    layer_norm_2: candle_nn::LayerNorm,
    mlp: SegformerMixFFN,
}
impl SegformerLayer {
    /// Builds both layer norms, the attention block, and the Mix-FFN
    /// (whose hidden width is `hidden_size * mlp_ratio`).
    fn new(
        config: &Config,
        hidden_size: usize,
        num_attention_heads: usize,
        sequence_reduction_ratio: usize,
        mlp_ratio: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        let eps = config.layer_norm_eps;
        Ok(Self {
            layer_norm_1: layer_norm(hidden_size, eps, vb.pp("layer_norm_1"))?,
            attention: SegformerAttention::new(
                config,
                hidden_size,
                num_attention_heads,
                sequence_reduction_ratio,
                vb.pp("attention"),
            )?,
            layer_norm_2: layer_norm(hidden_size, eps, vb.pp("layer_norm_2"))?,
            mlp: SegformerMixFFN::new(
                config,
                hidden_size,
                hidden_size * mlp_ratio,
                hidden_size,
                vb.pp("mlp"),
            )?,
        })
    }
}
impl Module for SegformerLayer {
    /// Pre-norm transformer block on a `[B, C, H, W]` input:
    /// `x + Attn(LN1(x))`, then `+ FFN(LN2(.))`, returning `[B, C, H, W]`.
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let shape = x.shape().dims4()?;
        // [B, C, H, W] -> [B, H * W, C]
        let hidden_states = x.flatten_from(2)?.permute((0, 2, 1))?;
        let layer_norm_output = self.layer_norm_1.forward(&hidden_states)?;
        // Back to spatial layout: attention's conv-based sequence reduction
        // needs [B, C, H, W].
        let layer_norm_output = layer_norm_output.permute((0, 2, 1))?.reshape(shape)?;
        // attention takes in [B, C, H, W] in order to properly do conv2d (and output [B, H * W, C])
        let attention_output = self.attention.forward(&layer_norm_output)?;
        // First residual connection, in token layout [B, H * W, C].
        let hidden_states = (attention_output + hidden_states)?;
        let layer_norm_output = self.layer_norm_2.forward(&hidden_states)?;
        // Mix-FFN consumes and produces [B, C, H, W].
        let mlp_output = self
            .mlp
            .forward(&layer_norm_output.permute((0, 2, 1))?.reshape(shape)?)?;
        // Second residual, reshaped back to spatial layout to match the FFN output.
        hidden_states.permute((0, 2, 1))?.reshape(shape)? + mlp_output
    }
}
#[derive(Debug, Clone)]
struct SegformerEncoder {
    /// config file
    config: Config,
    /// a list of embeddings
    patch_embeddings: Vec<SegformerOverlapPatchEmbeddings>,
    /// a list of attention blocks, each consisting of layers
    blocks: Vec<Vec<SegformerLayer>>,
    /// a final list of layer norms
    layer_norms: Vec<candle_nn::LayerNorm>,
}
impl SegformerEncoder {
    /// Builds every encoder stage: an overlap patch embedding, `depths[i]`
    /// transformer layers, and a closing layer norm per stage.
    fn new(config: Config, vb: VarBuilder) -> Result<Self> {
        let num_stages = config.num_encoder_blocks;
        let mut patch_embeddings = Vec::with_capacity(num_stages);
        let mut blocks = Vec::with_capacity(num_stages);
        let mut layer_norms = Vec::with_capacity(num_stages);
        for i in 0..num_stages {
            let hidden_size = config.hidden_sizes[i];
            // Stage 0 consumes the raw image channels; later stages consume
            // the previous stage's hidden size.
            let in_channels = if i == 0 {
                config.num_channels
            } else {
                config.hidden_sizes[i - 1]
            };
            patch_embeddings.push(SegformerOverlapPatchEmbeddings::new(
                &config,
                config.patch_sizes[i],
                config.strides[i],
                in_channels,
                hidden_size,
                vb.pp(format!("patch_embeddings.{i}")),
            )?);
            let mut stage_layers = Vec::with_capacity(config.depths[i]);
            for j in 0..config.depths[i] {
                stage_layers.push(SegformerLayer::new(
                    &config,
                    hidden_size,
                    config.num_attention_heads[i],
                    config.sr_ratios[i],
                    config.mlp_ratios[i],
                    vb.pp(format!("block.{i}.{j}")),
                )?);
            }
            blocks.push(stage_layers);
            layer_norms.push(layer_norm(
                hidden_size,
                config.layer_norm_eps,
                vb.pp(format!("layer_norm.{i}")),
            )?);
        }
        Ok(Self {
            config,
            patch_embeddings,
            blocks,
            layer_norms,
        })
    }
}
impl ModuleWithHiddenStates for SegformerEncoder {
    /// Runs all encoder stages on a `[B, C, H, W]` image and returns the
    /// feature map of every stage (finest first), each in `[B, C_i, H_i, W_i]`
    /// spatial layout after a per-stage layer norm.
    fn forward(&self, x: &Tensor) -> Result<Vec<Tensor>> {
        let mut all_hidden_states = Vec::with_capacity(self.config.num_encoder_blocks);
        let mut hidden_states = x.clone();
        for i in 0..self.config.num_encoder_blocks {
            hidden_states = self.patch_embeddings[i].forward(&hidden_states)?;
            for layer in &self.blocks[i] {
                hidden_states = layer.forward(&hidden_states)?;
            }
            let shape = hidden_states.shape().dims4()?;
            // Layer norm operates over tokens ([B, H * W, C]); convert back to
            // spatial layout before handing the stage output on.
            hidden_states =
                self.layer_norms[i].forward(&hidden_states.flatten_from(2)?.permute((0, 2, 1))?)?;
            hidden_states = hidden_states.permute((0, 2, 1))?.reshape(shape)?;
            all_hidden_states.push(hidden_states.clone());
        }
        Ok(all_hidden_states)
    }
}
/// The Segformer backbone: a thin wrapper around the hierarchical encoder.
#[derive(Debug, Clone)]
struct SegformerModel {
    encoder: SegformerEncoder,
}
impl SegformerModel {
    fn new(config: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            encoder: SegformerEncoder::new(config.clone(), vb.pp("encoder"))?,
        })
    }
}
impl ModuleWithHiddenStates for SegformerModel {
    /// Returns one feature map per encoder stage.
    fn forward(&self, x: &Tensor) -> Result<Vec<Tensor>> {
        self.encoder.forward(x)
    }
}
/// Per-stage linear projection into the shared decoder width.
#[derive(Debug, Clone)]
struct SegformerMLP {
    proj: Linear,
}
impl SegformerMLP {
    fn new(config: &Config, input_dim: usize, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            proj: linear(input_dim, config.decoder_hidden_size, vb.pp("proj"))?,
        })
    }
}
impl Module for SegformerMLP {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        x.apply(&self.proj)
    }
}
/// All-MLP decode head: projects each stage's features to a common width,
/// upsamples them to the finest stage's resolution, fuses with a 1x1 conv,
/// and classifies per pixel.
#[derive(Debug, Clone)]
struct SegformerDecodeHead {
    // One projection per encoder stage, into `decoder_hidden_size`.
    linear_c: Vec<SegformerMLP>,
    // 1x1 conv fusing the concatenated per-stage features.
    linear_fuse: candle_nn::Conv2d,
    batch_norm: candle_nn::BatchNorm,
    // Final 1x1 conv producing `num_labels` logits per pixel.
    classifier: candle_nn::Conv2d,
}
impl SegformerDecodeHead {
    /// Loads the decode head weights; `num_labels` sets the classifier width.
    fn new(config: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> {
        let mut linear_c = Vec::with_capacity(config.num_encoder_blocks);
        for i in 0..config.num_encoder_blocks {
            let hidden_size = config.hidden_sizes[i];
            linear_c.push(SegformerMLP::new(
                config,
                hidden_size,
                vb.pp(format!("linear_c.{i}")),
            )?);
        }
        let linear_fuse = conv2d_no_bias(
            config.decoder_hidden_size * config.num_encoder_blocks,
            config.decoder_hidden_size,
            1,
            Conv2dConfig::default(),
            vb.pp("linear_fuse"),
        )?;
        let batch_norm = candle_nn::batch_norm(
            config.decoder_hidden_size,
            config.layer_norm_eps,
            vb.pp("batch_norm"),
        )?;
        let classifier = conv2d_no_bias(
            config.decoder_hidden_size,
            num_labels,
            1,
            Conv2dConfig::default(),
            vb.pp("classifier"),
        )?;
        Ok(Self {
            linear_c,
            linear_fuse,
            batch_norm,
            classifier,
        })
    }
    /// Fuses the per-stage encoder features into `[B, num_labels, H0, W0]`
    /// logits, where `(H0, W0)` is the finest stage's resolution.
    fn forward(&self, encoder_hidden_states: &[Tensor]) -> Result<Tensor> {
        if encoder_hidden_states.len() != self.linear_c.len() {
            candle::bail!(
                "The number of encoder hidden states {} is not equal to the number of linear layers {}",
                encoder_hidden_states.len(),
                self.linear_c.len()
            )
        }
        // most fine layer
        let (_, _, upsample_height, upsample_width) = encoder_hidden_states[0].shape().dims4()?;
        let mut hidden_states = Vec::with_capacity(self.linear_c.len());
        for (hidden_state, mlp) in encoder_hidden_states.iter().zip(&self.linear_c) {
            let (batch, _, height, width) = hidden_state.shape().dims4()?;
            // Project in token layout [B, H * W, C] then restore [B, C', H, W].
            let hidden_state = mlp.forward(&hidden_state.flatten_from(2)?.permute((0, 2, 1))?)?;
            let hidden_state = hidden_state.permute((0, 2, 1))?.reshape((
                batch,
                hidden_state.dim(2)?,
                height,
                width,
            ))?;
            // Bring every stage to the finest stage's spatial size.
            let hidden_state = hidden_state.upsample_nearest2d(upsample_height, upsample_width)?;
            hidden_states.push(hidden_state);
        }
        // Coarsest stage first in the channel concatenation (matches the
        // reference implementation's weight layout for `linear_fuse`).
        hidden_states.reverse();
        let hidden_states = Tensor::cat(&hidden_states, 1)?;
        let hidden_states = self.linear_fuse.forward(&hidden_states)?;
        // Inference-mode batch norm (running statistics, no update).
        let hidden_states = self.batch_norm.forward_t(&hidden_states, false)?;
        let hidden_states = hidden_states.relu()?;
        self.classifier.forward(&hidden_states)
    }
}
/// A module whose forward pass yields one hidden state per encoder stage
/// (rather than the single tensor of `candle::Module`).
trait ModuleWithHiddenStates {
    fn forward(&self, xs: &Tensor) -> Result<Vec<Tensor>>;
}
/// Segformer for semantic segmentation: backbone plus the all-MLP decode head.
#[derive(Debug, Clone)]
pub struct SemanticSegmentationModel {
    segformer: SegformerModel,
    decode_head: SegformerDecodeHead,
}
impl SemanticSegmentationModel {
    /// Loads the full segmentation model; `num_labels` sets the number of
    /// output classes.
    pub fn new(config: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            segformer: SegformerModel::new(config, vb.pp("segformer"))?,
            decode_head: SegformerDecodeHead::new(config, num_labels, vb.pp("decode_head"))?,
        })
    }
}
impl Module for SemanticSegmentationModel {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        // Per-pixel class logits at the finest encoder stage's resolution.
        self.decode_head.forward(&self.segformer.forward(x)?)
    }
}
/// Segformer for image classification: backbone plus a linear classifier over
/// the mean-pooled last-stage features.
#[derive(Debug, Clone)]
pub struct ImageClassificationModel {
    segformer: SegformerModel,
    classifier: Linear,
}
impl ImageClassificationModel {
    pub fn new(config: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            segformer: SegformerModel::new(config, vb.pp("segformer"))?,
            classifier: linear(config.decoder_hidden_size, num_labels, vb.pp("classifier"))?,
        })
    }
}
impl Module for ImageClassificationModel {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let all_hidden_states = self.segformer.forward(x)?;
        // Only the deepest stage is used: [B, C, H, W] -> [B, H * W, C].
        let features = all_hidden_states.last().context("no last")?;
        let features = features.flatten_from(2)?.permute((0, 2, 1))?;
        // Average over the spatial tokens, then classify.
        features.mean(1)?.apply(&self.classifier)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Ensure a real Hugging Face `config.json` (MiT-b0 classification
    // checkpoint) deserializes into `Config`; unknown keys such as
    // `architectures` or `torch_dtype` must be silently ignored.
    #[test]
    fn test_config_json_load() {
        let raw_json = r#"{
            "architectures": [
                "SegformerForImageClassification"
            ],
            "attention_probs_dropout_prob": 0.0,
            "classifier_dropout_prob": 0.1,
            "decoder_hidden_size": 256,
            "depths": [
                2,
                2,
                2,
                2
            ],
            "downsampling_rates": [
                1,
                4,
                8,
                16
            ],
            "drop_path_rate": 0.1,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.0,
            "hidden_sizes": [
                32,
                64,
                160,
                256
            ],
            "image_size": 224,
            "initializer_range": 0.02,
            "layer_norm_eps": 1e-06,
            "mlp_ratios": [
                4,
                4,
                4,
                4
            ],
            "model_type": "segformer",
            "num_attention_heads": [
                1,
                2,
                5,
                8
            ],
            "num_channels": 3,
            "num_encoder_blocks": 4,
            "patch_sizes": [
                7,
                3,
                3,
                3
            ],
            "sr_ratios": [
                8,
                4,
                2,
                1
            ],
            "strides": [
                4,
                2,
                2,
                2
            ],
            "torch_dtype": "float32",
            "transformers_version": "4.12.0.dev0"
        }"#;
        let config: Config = serde_json::from_str(raw_json).unwrap();
        assert_eq!(vec![4, 2, 2, 2], config.strides);
        assert_eq!(1e-6, config.layer_norm_eps);
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/stella_en_v5.rs | candle-transformers/src/models/stella_en_v5.rs | //! Stella v5 model implementation.
//!
//! Stella is a dense text embedding model optimized for retrieval and similarity tasks.
//! This implementation provides support for multiple embedding dimensions.
//!
//! Key characteristics:
//! - Dense text embeddings optimized for similarity search
//! - Multiple output dimension support (256 to 8192)
//! - Grouped query attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//!
//! References:
//! - [MRL Framework](https://arxiv.org/abs/2205.13147)
//! - [Model Card](https://huggingface.co/dunzhang/stella_en_1.5B_v5)
//!
use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm};
use candle::{DType, Device, Error, IndexOp, Module, Result, Tensor, D};
use candle_nn::{layer_norm, Activation, LayerNorm, VarBuilder};
use std::sync::Arc;
// internal representation for identifying which model is being used
#[derive(Debug, Default, Copy, Clone, PartialEq, serde::Deserialize)]
pub enum ModelVariant {
    #[default]
    Large, // 1.5B
    Small, // 400M
}
// Same as `qwen2` family of models with the exception being the `embed_head`
// The final `output` causal modelling head is swapped with a learned `dense` layer, `embed_head`
#[derive(Debug, Default, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
    pub variant: ModelVariant,
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub max_position_embeddings: usize,
    pub rope_theta: f64,
    // Projection from backbone width to the chosen embedding dimension.
    pub embed_head: EmbedHead,
    pub norm_eps: f64, // RMSNorm for 1.5B || LayerNorm for 400M
    pub activation_fn: Activation, // Silu for 1.5B || Gelu for 400M
    // Unique to 1.5B
    pub num_key_value_heads: usize,
    // Unique to 400M (a value of 0 for `scaling_factor` means "unscaled")
    pub type_vocab_size: usize,
    pub scaling_factor: f64,
}
// Excerpt from `stella` model card:
// `Stella_en_1.5B_v5` models have been trained on [MRL](https://arxiv.org/abs/2205.13147) enabling multiple output dimensions
// Embed head represents the config for various embedding dims supported
#[derive(Debug, Default, Clone, PartialEq, serde::Deserialize)]
pub struct EmbedHead {
    pub in_features: usize,
    pub out_features: usize,
}
/// An enum variant representing the Embedding head dimensions `stella` is trained on
/// As the [model-card](https://huggingface.co/dunzhang/stella_en_1.5B_v5#introduction) suggests, D1024 is good enough for most cases
#[derive(Debug, Default, Clone, Copy)]
pub enum EmbedDim {
    Dim256,
    Dim768,
    #[default]
    Dim1024,
    Dim2048,
    Dim4096,
    Dim6144,
    Dim8192,
}
impl EmbedDim {
    /// Builds the `EmbedHead` config for this output dimension, projecting
    /// from the backbone's `in_features`.
    pub fn config(&self, in_features: usize) -> EmbedHead {
        let out_features = match self {
            Self::Dim256 => 256,
            Self::Dim768 => 768,
            Self::Dim1024 => 1024,
            Self::Dim2048 => 2048,
            Self::Dim4096 => 4096,
            Self::Dim6144 => 6144,
            Self::Dim8192 => 8192,
        };
        EmbedHead {
            in_features,
            out_features,
        }
    }
}
// Initialize a new `stella_en` model - with 400M variant or 1.5B variant
impl Config {
    /// Initialize a new `stella_en_1.5B_v5` model with given embedding dim
    pub fn new_1_5_b_v5(embed_dim: EmbedDim) -> Self {
        // Representing config.json at https://huggingface.co/dunzhang/stella_en_1.5B_v5/blob/main/config.json
        // Removed `sliding_window` related config which is basically being carried forward from `qwen2` but not used here
        Self {
            variant: ModelVariant::Large,
            activation_fn: candle_nn::Activation::Silu,
            vocab_size: 151646,
            hidden_size: 1536,
            intermediate_size: 8960,
            num_hidden_layers: 28,
            num_attention_heads: 12,
            num_key_value_heads: 2,
            max_position_embeddings: 131072,
            rope_theta: 1000000.,
            norm_eps: 1e-06,
            embed_head: embed_dim.config(1536),
            // 400M-only fields (`type_vocab_size`, `scaling_factor`) stay at
            // their zero defaults, which the code treats as "not applicable".
            ..Default::default()
        }
    }
    /// Initialize new `stella_en_400M_v5`
    pub fn new_400_m_v5(embed_dim: EmbedDim) -> Self {
        Self {
            variant: ModelVariant::Small,
            vocab_size: 30528,
            hidden_size: 1024,
            intermediate_size: 4096,
            num_hidden_layers: 24,
            num_attention_heads: 16,
            max_position_embeddings: 8192,
            type_vocab_size: 2,
            norm_eps: 1e-12,
            scaling_factor: 2.0,
            rope_theta: 160000.0,
            activation_fn: Activation::Gelu,
            embed_head: embed_dim.config(1024),
            // `num_key_value_heads` stays 0: the 400M variant uses full MHA.
            ..Default::default()
        }
    }
}
/// Precomputed RoPE tables: `sin`/`cos` of shape `[max_seq_len, dim / 2]`.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
impl RotaryEmbedding {
    /// Builds the rotary tables for `dim = hidden_size / num_attention_heads`.
    ///
    /// For the 400M variant (`scaling_factor != 0`) both the maximum sequence
    /// length and `rope_theta` are scaled, and each inverse frequency is
    /// additionally divided by `scaling_factor^(2 / dim)` (NTK-style scaling).
    /// A `scaling_factor` of 0 (the 1.5B default) means "no scaling".
    fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let dim = cfg.hidden_size / cfg.num_attention_heads;
        // Factoring in `scaling factor` for `400M` variant
        let max_seq_len = if cfg.scaling_factor == 0. {
            cfg.max_position_embeddings
        } else {
            ((cfg.max_position_embeddings as f64) * cfg.scaling_factor) as usize
        };
        // let rot_dim = if cfg.variant == ModelVariant::Small { dim / 2 } else { dim };
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| {
                // Scaled rope_theta for 400M variant
                let rope_theta = if cfg.scaling_factor == 0. {
                    cfg.rope_theta
                } else {
                    cfg.rope_theta * cfg.scaling_factor
                };
                let mut freq = 1. / rope_theta.powf(i as f64 / dim as f64);
                if cfg.scaling_factor != 0. {
                    freq /= cfg.scaling_factor.powf(2.0 / (dim as f64))
                }
                freq as f32
            })
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        // Calculate position embeddings with scaled sequence length
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        // Outer product: freqs[p, j] = p * inv_freq[j].
        let freqs = t.matmul(&inv_freq)?;
        // if cfg.variant == ModelVariant::Small {
        //     freqs = Tensor::cat(&[&freqs, &freqs], 1)?
        // }
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }
    // TODO: re-visit this
    /// Applies RoPE to query/key tensors of shape `[b, heads, seq_len, head_dim]`,
    /// slicing the tables to the current `seq_len` (positions always start at 0 —
    /// this is an encoder-style model with no KV-cache offset).
    fn apply_rotary_emb_qkv(&self, q: &Tensor, k: &Tensor) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, 0, seq_len)?;
        let sin = self.sin.narrow(0, 0, seq_len)?;
        let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
        let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
        Ok((q_embed, k_embed))
    }
}
/// Feed-forward block shared by both variants.
///
/// For the 1.5B variant this is the usual SwiGLU (`gate_proj`/`up_proj`);
/// for the 400M variant `gate_proj` actually holds the fused `up_gate_proj`
/// weight and `up_proj` is `None` (the split happens in `forward`).
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
    variant: ModelVariant,
    gate_proj: Linear,
    up_proj: Option<Linear>, // `up_proj` only for 1.5B variant
    down_proj: Linear,
    act_fn: Activation,
}
impl MLP {
    /// Loads the feed-forward weights.
    ///
    /// The 1.5B checkpoint stores separate `gate_proj`/`up_proj` matrices,
    /// while the 400M checkpoint fuses them into a single `up_gate_proj`
    /// (kept in `gate_proj` here, with `up_proj` left as `None`).
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let h = cfg.hidden_size;
        let inter = cfg.intermediate_size;
        let (gate_proj, up_proj, down_proj) = match cfg.variant {
            ModelVariant::Large => (
                linear_no_bias(h, inter, vb.pp("gate_proj"))?,
                Some(linear_no_bias(h, inter, vb.pp("up_proj"))?),
                linear_no_bias(inter, h, vb.pp("down_proj"))?,
            ),
            ModelVariant::Small => (
                linear_no_bias(h, inter * 2, vb.pp("up_gate_proj"))?,
                None,
                linear(inter, h, vb.pp("down_proj"))?,
            ),
        };
        Ok(Self {
            variant: cfg.variant,
            gate_proj,
            up_proj,
            down_proj,
            act_fn: cfg.activation_fn,
        })
    }
}
impl Module for MLP {
    /// Gated feed-forward: `down_proj(act(gate) * up)`.
    ///
    /// Large: `gate` comes from `gate_proj(xs)`, `up` from `up_proj(xs)`.
    /// Small: `gate_proj` holds the fused `up_gate_proj`; its output is split
    /// in half along the last dim — first half is `up`, second half is gated.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let up = self.gate_proj.forward(xs)?;
        let (lhs, rhs) = match self.variant {
            ModelVariant::Large => {
                let lhs = up.apply(&self.act_fn)?;
                // `up_proj` is always `Some` for the Large variant (see `new`).
                let rhs = xs.apply(self.up_proj.as_ref().unwrap())?;
                (lhs, rhs)
            }
            ModelVariant::Small => {
                // Get the dimensions
                let (_batch_size, _seq_len, hidden_dim) = up.dims3()?;
                let split_size = hidden_dim / 2;
                // Split along the last dimension (hidden_dim)
                let up_states = up.narrow(2, 0, split_size)?;
                let gate = up.narrow(2, split_size, split_size)?.apply(&self.act_fn)?;
                (up_states, gate)
            }
        };
        (lhs * rhs)?.apply(&self.down_proj)
    }
}
/// Multi-head self-attention with a fused QKV projection.
///
/// The 1.5B variant uses grouped-query attention (fewer KV heads, repeated via
/// `repeat_kv`); the 400M variant uses standard MHA with a single fused
/// `qkv_proj` weight in the checkpoint.
#[derive(Debug, Clone)]
struct Attention {
    qkv_proj: Linear,
    o_proj: Linear,
    num_heads: usize,
    num_kv_heads: usize,
    num_kv_groups: usize,
    head_dim: usize,
    hidden_size: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    variant: ModelVariant,
}
impl Attention {
    /// Loads the attention weights.
    ///
    /// For the 1.5B variant the checkpoint's separate `q_proj`/`k_proj`/`v_proj`
    /// weights and biases are concatenated into a single fused QKV linear so
    /// both variants share one forward path.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        // `num_key_value_heads` is 0 for the 400M variant; guard the division.
        let num_kv_groups = if num_kv_heads > 0 {
            num_heads / num_kv_heads
        } else {
            0
        };
        let head_dim = hidden_sz / num_heads;
        let (qkv_proj, o_proj) = match cfg.variant {
            ModelVariant::Large => {
                // The 1.5B variant comes with separate `q, k, v` layers, let's merge it and standardize
                // Weights
                let q_w = vb
                    .pp("q_proj")
                    .get((num_heads * head_dim, hidden_sz), "weight")?;
                let k_w = vb
                    .pp("k_proj")
                    .get((num_kv_heads * head_dim, hidden_sz), "weight")?;
                let v_w = vb
                    .pp("v_proj")
                    .get((num_kv_heads * head_dim, hidden_sz), "weight")?;
                // Biases
                let q_b = vb.pp("q_proj").get(num_heads * head_dim, "bias")?;
                let k_b = vb.pp("k_proj").get(num_kv_heads * head_dim, "bias")?;
                let v_b = vb.pp("v_proj").get(num_kv_heads * head_dim, "bias")?;
                // Stack along the output dimension: rows are [Q | K | V].
                let qkv_w = Tensor::cat(&[&q_w, &k_w, &v_w], 0)?;
                let qkv_b = Tensor::cat(&[&q_b, &k_b, &v_b], 0)?;
                (
                    Linear::from_weights(qkv_w, Some(qkv_b)),
                    linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?,
                )
            }
            ModelVariant::Small => (
                linear(hidden_sz, 3 * num_heads * head_dim, vb.pp("qkv_proj"))?,
                linear(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?,
            ),
        };
        Ok(Self {
            qkv_proj,
            o_proj,
            num_heads,
            num_kv_heads,
            num_kv_groups,
            head_dim,
            hidden_size: hidden_sz,
            rotary_emb,
            variant: cfg.variant,
        })
    }
    /// Bidirectional (non-causal) attention over `[b, seq_len, hidden]`.
    ///
    /// `attention_mask`, when present, is added to the raw scores before the
    /// softmax (padding positions carry `-inf`).
    fn forward(&mut self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
        let (b_sz, q_len, _) = xs.dims3()?;
        let qkv = self.qkv_proj.forward(xs)?;
        let n_kv_heads = match self.variant {
            ModelVariant::Large => self.num_kv_heads,
            ModelVariant::Small => self.num_heads,
        };
        let (query_states, key_states, value_states) = match self.variant {
            ModelVariant::Large => {
                // Slice the fused output back into Q / K / V segments
                // (matching the concatenation order used in `new`).
                let q_sz = self.num_heads * self.head_dim;
                let kv_sz = n_kv_heads * self.head_dim;
                let q = qkv.narrow(D::Minus1, 0, q_sz)?.reshape((
                    b_sz,
                    q_len,
                    self.num_heads,
                    self.head_dim,
                ))?;
                let k = qkv.narrow(D::Minus1, q_sz, kv_sz)?.reshape((
                    b_sz,
                    q_len,
                    n_kv_heads,
                    self.head_dim,
                ))?;
                let v = qkv.narrow(D::Minus1, q_sz + kv_sz, kv_sz)?.reshape((
                    b_sz,
                    q_len,
                    n_kv_heads,
                    self.head_dim,
                ))?;
                (q, k, v)
            }
            ModelVariant::Small => {
                // Split into Q, K, V and reshape to match PyTorch shapes
                let qkv = qkv.reshape((b_sz, q_len, 3, self.num_heads, self.head_dim))?;
                (
                    qkv.i((.., .., 0, .., ..))?,
                    qkv.i((.., .., 1, .., ..))?,
                    qkv.i((.., .., 2, .., ..))?,
                )
            }
        };
        // [B, S, H, D] -> [B, H, S, D]
        let query_states = query_states.transpose(1, 2)?.contiguous()?;
        let key_states = key_states.transpose(1, 2)?.contiguous()?;
        let value_states = value_states.transpose(1, 2)?.contiguous()?;
        let (query_states, key_states) = self
            .rotary_emb
            .apply_rotary_emb_qkv(&query_states, &key_states)?;
        // The 1.5B is expected to have grouped query attention
        let (key_states, value_states) = if self.variant == ModelVariant::Large {
            (
                crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?,
                crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?,
            )
        } else {
            (key_states, value_states)
        };
        let attn_output = {
            // Scaled dot-product attention with additive mask.
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = query_states.matmul(&key_states.transpose(2, 3)?)?;
            let attn_weights = (attn_weights * scale)?;
            let attn_weights = match attention_mask {
                None => attn_weights,
                Some(mask) => attn_weights.broadcast_add(mask)?,
            };
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&value_states)?
        };
        // [B, H, S, D] -> [B, S, H * D] -> output projection.
        attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.hidden_size))?
            .apply(&self.o_proj)
    }
}
/// Normalization used by a layer: LayerNorm (400M) or RMSNorm (1.5B).
#[derive(Debug, Clone)]
enum NormType {
    Layer(LayerNorm),
    Rms(RmsNorm),
}
/// One transformer layer; the norm placement differs per variant
/// (pre-norm for 1.5B, post-norm for 400M — see `Layer::forward`).
#[derive(Debug, Clone)]
struct Layer {
    variant: ModelVariant,
    attention: Attention,
    mlp: MLP,
    // For 1.5B: this is `input_layernorm`
    // For 400M: this is `output_layernorm`
    layernorm: NormType,
    post_attention_layernorm: NormType,
}
impl Layer {
    /// Loads one transformer layer for the given variant.
    ///
    /// The two checkpoints use different module names: the 1.5B variant keeps
    /// qwen2-style names (`self_attn`, `input_layernorm`, RMSNorm) while the
    /// 400M variant uses `attention`, `mlp_ln`/`attn_ln` LayerNorms.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let attention = Attention::new(
            rotary_emb,
            cfg,
            vb.pp(if cfg.variant == ModelVariant::Large {
                "self_attn"
            } else {
                "attention"
            }),
        )?;
        let mlp = MLP::new(cfg, vb.pp("mlp"))?;
        let (layernorm, post_attention_layernorm) = match cfg.variant {
            ModelVariant::Large => (
                NormType::Rms(RmsNorm::new(
                    cfg.hidden_size,
                    cfg.norm_eps,
                    vb.pp("input_layernorm"),
                )?),
                NormType::Rms(RmsNorm::new(
                    cfg.hidden_size,
                    cfg.norm_eps,
                    vb.pp("post_attention_layernorm"),
                )?),
            ),
            ModelVariant::Small => (
                NormType::Layer(layer_norm(
                    cfg.hidden_size,
                    candle_nn::LayerNormConfig {
                        eps: cfg.norm_eps,
                        ..Default::default()
                    },
                    vb.pp("mlp_ln"),
                )?),
                NormType::Layer(layer_norm(
                    cfg.hidden_size,
                    candle_nn::LayerNormConfig {
                        eps: cfg.norm_eps,
                        ..Default::default()
                    },
                    vb.pp("attn_ln"),
                )?),
            ),
        };
        Ok(Self {
            variant: cfg.variant,
            attention,
            mlp,
            layernorm,
            post_attention_layernorm,
        })
    }
    /// Runs one transformer layer; the norm/residual ordering is variant-specific.
    fn forward(&mut self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
        // Here, the application of normalizations and activation calculations differ
        // For Large [1.5B]:
        //  residual = x
        //  state = other_layernorm(xs)
        //  state = attention(state)
        //  state += residual
        //  residual = state
        //  state = mlp(attention_layernorm(state))
        //  -> residual + state
        // For Small [400M]:
        //  residual = x;
        //  state = attention(x)
        //  state += residual
        //  state = attention_layernorm(state)
        //  residual = state
        //  state = mlp(state)
        //  state += residual
        //  -> other_layernorm(state)
        let residual = xs;
        match self.variant {
            ModelVariant::Large => {
                // Pre-norm: both norms must be RMSNorm for this variant.
                let (attn_ln, input_ln) = if let (NormType::Rms(attn_ln), NormType::Rms(input_ln)) =
                    (&self.post_attention_layernorm, &self.layernorm)
                {
                    (attn_ln, input_ln)
                } else {
                    return Err(candle::error::Error::Msg(
                        "Stella 1.5B expects RMSNorm".to_string(),
                    ));
                };
                let xs = input_ln.forward(xs)?;
                let xs = (self.attention.forward(&xs, attention_mask)? + residual)?;
                let residual = &xs;
                let xs = xs.apply(attn_ln)?.apply(&self.mlp)?;
                residual + xs
            }
            ModelVariant::Small => {
                // Post-norm: both norms must be LayerNorm for this variant.
                let (attn_ln, output_ln) =
                    if let (NormType::Layer(attn_ln), NormType::Layer(output_ln)) =
                        (&self.post_attention_layernorm, &self.layernorm)
                    {
                        (attn_ln, output_ln)
                    } else {
                        // Bug fix: the previous message claimed RMSNorm, but the
                        // 400M variant uses LayerNorm (see `Layer::new`).
                        return Err(candle::error::Error::Msg(
                            "Stella 400M expects LayerNorm".to_string(),
                        ));
                    };
                let xs = (self.attention.forward(xs, attention_mask)? + residual)?;
                let xs = attn_ln.forward(&xs)?;
                let residual = &xs;
                let xs = (self.mlp.forward(&xs)? + residual)?;
                output_ln.forward(&xs)
            }
        }
    }
}
#[derive(Debug, Clone)]
pub struct Embeddings {
    variant: ModelVariant,
    // For 1.5B: this is the `embed_tokens`
    // For 400M: this is the `word_embeddings`
    embeddings: candle_nn::Embedding,
    // following are specifically for 400M
    token_type_embeddings: Option<candle_nn::Embedding>,
    layer_norm: Option<LayerNorm>,
    position_ids: Option<Tensor>,
}
impl Embeddings {
    /// Loads the embedding tables.
    ///
    /// The 1.5B variant only needs token embeddings; the 400M (BERT-style)
    /// variant additionally loads token-type embeddings, an embedding-level
    /// LayerNorm (with const fallbacks if weights are absent), and a
    /// precomputed `position_ids` range tensor.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let (embeddings, token_type_embeddings, layer_norm, position_ids) = match cfg.variant {
            ModelVariant::Large => (
                candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?,
                None,
                None,
                None,
            ),
            ModelVariant::Small => {
                let vb = vb.pp("embeddings");
                // Fall back to identity LayerNorm params (w=1, b=0) when absent.
                let weight = vb.pp("LayerNorm").get_with_hints(
                    cfg.hidden_size,
                    "weight",
                    candle_nn::Init::Const(1.0),
                )?;
                let bias = vb.pp("LayerNorm").get_with_hints(
                    cfg.hidden_size,
                    "bias",
                    candle_nn::Init::Const(0.0),
                )?;
                let dev = bias.device().clone();
                let layer_norm = candle_nn::LayerNorm::new(weight, bias, cfg.norm_eps);
                (
                    candle_nn::embedding(
                        cfg.vocab_size,
                        cfg.hidden_size,
                        vb.pp("word_embeddings"),
                    )?,
                    Some(candle_nn::embedding(
                        cfg.type_vocab_size,
                        cfg.hidden_size,
                        vb.pp("token_type_embeddings"),
                    )?),
                    Some(layer_norm),
                    Some(Tensor::arange(
                        0u32,
                        cfg.max_position_embeddings as u32,
                        &dev,
                    )?),
                )
            }
        };
        Ok(Self {
            variant: cfg.variant,
            embeddings,
            token_type_embeddings,
            layer_norm,
            position_ids,
        })
    }
}
impl Module for Embeddings {
    /// Looks up embeddings for `[batch, seq_len]` token ids.
    ///
    /// 400M: adds token-type embeddings for an all-zero type-id tensor
    /// (the `position_ids` slice is only used as a shape template via
    /// `zeros_like`), then applies the embedding LayerNorm. Positions are
    /// not added here — RoPE is applied inside attention.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let embd = self.embeddings.forward(xs)?;
        // For 1.5B just forward the embeddings
        if self.variant == ModelVariant::Large {
            return Ok(embd);
        }
        let (token_type_embed, layer_norm, pos_ids) =
            if let (Some(token_type_embd), Some(layer_norm), Some(position_ids)) = (
                &self.token_type_embeddings,
                &self.layer_norm,
                &self.position_ids,
            ) {
                (token_type_embd, layer_norm, position_ids)
            } else {
                return Err(Error::Msg(
                    "Stella 400M requires `token_type_embeddings`, `layer_norm` and `position_ids`"
                        .to_string(),
                ));
            };
        let (batch_size, seq_length) = xs.dims2()?;
        let pos_ids = pos_ids
            .as_ref()
            .narrow(0, 0, seq_length)?
            .expand((batch_size, seq_length))?;
        // NOTE(review): `zeros_like` means every token gets type id 0 —
        // presumably single-segment input only; confirm against callers.
        layer_norm.forward(&embd.add(&token_type_embed.forward(&pos_ids.zeros_like()?)?)?)
    }
}
/// The Stella backbone: embeddings, transformer layers, and (1.5B only) a
/// final RMSNorm.
#[derive(Debug, Clone)]
pub struct Model {
    embeddings: Embeddings,
    layers: Vec<Layer>,
    // Final norm only exists for the 1.5B variant (the 400M variant ends each
    // layer with its own LayerNorm).
    norm: Option<RmsNorm>,
    device: Device,
    dtype: DType,
}
impl Model {
    /// Loads the backbone; the weight prefix differs per variant
    /// (`model` for 1.5B, `new` for 400M).
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = match cfg.variant {
            ModelVariant::Large => vb.pp("model"),
            ModelVariant::Small => vb.pp("new"),
        };
        // let embed_tokens =
        //     candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
        let embeddings = Embeddings::new(cfg, vb_m.clone())?;
        let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = match cfg.variant {
            ModelVariant::Large => vb_m.pp("layers"),
            ModelVariant::Small => vb_m.pp("encoder").pp("layer"),
        };
        for layer_idx in 0..cfg.num_hidden_layers {
            let layer = Layer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
            layers.push(layer)
        }
        let norm = match cfg.variant {
            ModelVariant::Large => Some(RmsNorm::new(
                cfg.hidden_size,
                cfg.norm_eps,
                vb_m.pp("norm"),
            )?),
            ModelVariant::Small => None,
        };
        Ok(Self {
            embeddings,
            layers,
            norm,
            device: vb.device().clone(),
            dtype: vb.dtype(),
        })
    }
    /// Expands a `[b, seq_len]` padding mask into an additive attention bias
    /// of shape `[b, 1, seq_len, seq_len]`: 0 where attended, `-inf` where
    /// masked. Non-causal — every position may attend to every unmasked one.
    fn prepare_attention_mask(&self, attn_mask: &Tensor) -> Result<Tensor> {
        let (b_sz, sql_len) = attn_mask.dims2()?;
        let mut mask: Vec<Tensor> = vec![];
        for b in 0..b_sz {
            // Broadcast the per-batch row over the query dimension.
            mask.push(attn_mask.i((b, ..))?.expand((1, 1, sql_len, sql_len))?);
        }
        let mask = Tensor::cat(&mask, 0)?;
        let on_true = mask.zeros_like()?.to_dtype(self.dtype)?;
        let on_false = Tensor::new(f32::NEG_INFINITY, &self.device)?
            .broadcast_as(mask.shape())?
            .to_dtype(self.dtype)?;
        mask.where_cond(&on_true, &on_false)
    }
    /// Encodes `[b, seq_len]` token ids into `[b, seq_len, hidden]` states,
    /// masking padded positions according to `mask`.
    pub fn forward(&mut self, input_ids: &Tensor, mask: &Tensor) -> Result<Tensor> {
        let (_, seq_len) = input_ids.dims2()?;
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            // This is not a `causal language modelling` task, we'll need to prepare a `non-causal` attention
            Some(self.prepare_attention_mask(mask)?)
        };
        let mut xs = self.embeddings.forward(input_ids)?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref())?
        }
        if let Some(n) = &self.norm {
            xs.apply(n)
        } else {
            Ok(xs)
        }
    }
}
/// Stella embedding model: backbone plus the MRL `dense` head projecting
/// pooled states to the requested embedding dimension.
#[derive(Debug)]
pub struct EmbeddingModel {
    base_model: Model,
    lm_head: Linear,
}
impl EmbeddingModel {
    /// Loads the backbone from `base_vb` and the embedding head (a single
    /// `linear` layer) from `embed_vb`.
    pub fn new(cfg: &Config, base_vb: VarBuilder, embed_vb: VarBuilder) -> Result<Self> {
        let base_model = Model::new(cfg, base_vb.clone())?;
        let lm_head = linear(
            cfg.embed_head.in_features,
            cfg.embed_head.out_features,
            embed_vb.pp("linear"),
        )?;
        Ok(Self {
            base_model,
            lm_head,
        })
    }
    /// Returns `[b, out_features]` sentence embeddings: masked mean pooling
    /// over the sequence followed by the dense head.
    pub fn forward(&mut self, input_ids: &Tensor, mask: &Tensor) -> Result<Tensor> {
        let x = self.base_model.forward(input_ids, mask)?;
        let x = self.pool(&x, mask)?;
        // No matter what keeping the final activations as F32 helps with the accuracy
        self.lm_head.forward(&x.to_dtype(DType::F32)?) // [B_sz, dim_size]
    }
    /// Same as forward pass but normalizes the output
    pub fn forward_norm(&mut self, input_ids: &Tensor, mask: &Tensor) -> Result<Tensor> {
        let x = self.forward(input_ids, mask)?;
        // Normalize: divide each row by its L2 norm along dim 1.
        x.broadcast_div(&x.sqr()?.sum_keepdim(1)?.sqrt()?)
    }
    /// Masked mean pooling: averages hidden states over non-padded positions.
    // NOTE(review): divides by the mask's token count — an all-zero mask row
    // would divide by zero; callers presumably never pass empty sequences.
    fn pool(&self, x: &Tensor, mask: &Tensor) -> Result<Tensor> {
        let mask = mask.to_dtype(x.dtype())?; // [B_Sz, Seq_len]
        let (batch_size, seq_len, hidden_dim) = x.dims3()?;
        // expanding the shape of the mask from [B_Sz, Seq_len] -> [B_Sz, Seq_len, Hidden_size]
        let mask_expanded = mask
            .unsqueeze(2)?
            .broadcast_as((batch_size, seq_len, hidden_dim))?; // [B_Sz, Seq_len, Hidden_dim]
        let x = (x * &mask_expanded)?;
        // Sum
        let sum_mask = mask
            .sum(1)?
            .unsqueeze(1)?
            .expand((batch_size, hidden_dim))?;
        x.sum(1)? / sum_mask
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/qwen3_moe.rs | candle-transformers/src/models/qwen3_moe.rs | use crate::{
fused_moe::{FusedMoe, MoeCfg},
models::{
qwen3::{Config as Qwen3Config, Qwen3Attention, Qwen3MLP, Qwen3RotaryEmbedding},
with_tracing::{linear_no_bias, Linear, RmsNorm},
},
};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub head_dim: usize,
pub attention_bias: bool,
pub num_key_value_heads: usize,
pub max_position_embeddings: usize,
pub sliding_window: Option<usize>,
pub max_window_layers: usize,
pub tie_word_embeddings: bool,
pub rope_theta: f64,
pub rms_norm_eps: f64,
pub use_sliding_window: bool,
pub hidden_act: Activation,
// MoE specific configuration
pub decoder_sparse_step: usize,
pub moe_intermediate_size: usize,
pub num_experts_per_tok: usize,
pub num_experts: usize,
pub norm_topk_prob: bool,
}
/// Projects the MoE config onto the plain Qwen3 config so the shared
/// attention/MLP building blocks from `models::qwen3` can be reused.
impl From<&Config> for Qwen3Config {
    fn from(val: &Config) -> Self {
        Qwen3Config {
            vocab_size: val.vocab_size,
            hidden_size: val.hidden_size,
            intermediate_size: val.intermediate_size,
            num_hidden_layers: val.num_hidden_layers,
            num_attention_heads: val.num_attention_heads,
            head_dim: val.head_dim,
            attention_bias: val.attention_bias,
            num_key_value_heads: val.num_key_value_heads,
            max_position_embeddings: val.max_position_embeddings,
            sliding_window: val.sliding_window,
            max_window_layers: val.max_window_layers,
            tie_word_embeddings: val.tie_word_embeddings,
            rope_theta: val.rope_theta,
            rms_norm_eps: val.rms_norm_eps,
            use_sliding_window: val.use_sliding_window,
            hidden_act: val.hidden_act,
        }
    }
}
/// A single MoE expert: a gated MLP (gate/up/down projections) sized by
/// `moe_intermediate_size` rather than the dense `intermediate_size`.
#[derive(Debug, Clone)]
struct Qwen3MLPExpert {
    gate_proj: Linear,
    up_proj: Linear,
    down_proj: Linear,
    act_fn: Activation,
}
impl Qwen3MLPExpert {
    /// Loads one expert's three projection matrices from `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            gate_proj: linear_no_bias(
                cfg.hidden_size,
                cfg.moe_intermediate_size,
                vb.pp("gate_proj"),
            )?,
            up_proj: linear_no_bias(cfg.hidden_size, cfg.moe_intermediate_size, vb.pp("up_proj"))?,
            down_proj: linear_no_bias(
                cfg.moe_intermediate_size,
                cfg.hidden_size,
                vb.pp("down_proj"),
            )?,
            act_fn: cfg.hidden_act,
        })
    }
}
impl Module for Qwen3MLPExpert {
    /// Gated MLP: `down(act(gate(x)) * up(x))`.
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let gated = self.act_fn.forward(&self.gate_proj.forward(x)?)?;
        let up = self.up_proj.forward(x)?;
        self.down_proj.forward(&(gated * up)?)
    }
}
// Qwen3 Sparse MoE Block implementation
// Qwen3 Sparse MoE Block implementation
/// CPU/naive sparse MoE: a router (`gate`) scores all experts and each token
/// is dispatched to its top-k experts.
#[derive(Debug, Clone)]
struct Qwen3SparseMoeBlock {
    // Router producing one logit per expert.
    gate: Linear,
    experts: Vec<Qwen3MLPExpert>,
    norm_topk_prob: bool,
    num_experts_per_tok: usize,
}
impl Qwen3SparseMoeBlock {
    /// Loads the router and all `num_experts` expert MLPs.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let gate = linear_no_bias(cfg.hidden_size, cfg.num_experts, vb.pp("gate"))?;
        let mut experts = Vec::with_capacity(cfg.num_experts);
        let vb_e = vb.pp("experts");
        for idx in 0..cfg.num_experts {
            let expert = Qwen3MLPExpert::new(cfg, vb_e.pp(idx))?;
            experts.push(expert)
        }
        Ok(Self {
            gate,
            experts,
            norm_topk_prob: cfg.norm_topk_prob,
            num_experts_per_tok: cfg.num_experts_per_tok,
        })
    }
}
impl Module for Qwen3SparseMoeBlock {
    /// Routes each token to its top-k experts and combines the expert outputs
    /// weighted by the (optionally renormalized) routing probabilities.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b_size, seq_len, hidden_dim) = xs.dims3()?;
        // Flatten batch and sequence: each row is one token.
        let xs = xs.reshape(((), hidden_dim))?;
        let router_logits = xs.apply(&self.gate)?;
        let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?;
        // Extract topk experts per token
        let experts_per_tok = routing_weights
            .arg_sort_last_dim(false)?
            .narrow(D::Minus1, 0, self.num_experts_per_tok)?
            .contiguous()?;
        let routing_weights = routing_weights.gather(&experts_per_tok, D::Minus1)?;
        // Extract needed data
        // Move routing info to host so dispatch lists can be built per expert.
        let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?;
        let experts_per_tok = experts_per_tok.to_vec2::<u32>()?;
        // top_x[e]: row indices of tokens routed to expert e.
        // selected_experts[e]: the matching routing weight for each such token.
        let mut top_x = vec![vec![]; self.experts.len()];
        let mut selected_experts = vec![vec![]; self.experts.len()];
        for (row_idx, (rw, expert_idxs)) in routing_weights
            .iter()
            .zip(experts_per_tok.iter())
            .enumerate()
        {
            // Sum of the token's top-k weights, used for renormalization.
            let sum_rw = rw.iter().sum::<f32>();
            for (&rw, &expert_idx) in rw.iter().zip(expert_idxs.iter()) {
                top_x[expert_idx as usize].push(row_idx as u32);
                let rw = if self.norm_topk_prob { rw / sum_rw } else { rw };
                selected_experts[expert_idx as usize].push(rw)
            }
        }
        // Process through experts
        let mut ys = xs.zeros_like()?;
        for (expert_idx, expert_layer) in self.experts.iter().enumerate() {
            let top_x = &top_x[expert_idx];
            // Skip experts with no tokens assigned this step.
            if top_x.is_empty() {
                continue;
            }
            let top_x = Tensor::new(top_x.as_slice(), xs.device())?;
            let selected_experts =
                Tensor::new(selected_experts[expert_idx].as_slice(), xs.device())?
                    .reshape(((), 1))?
                    .to_dtype(xs.dtype())?;
            // Gather the rows for this expert, run them, scale by the routing
            // weight, then scatter-add back into the output.
            let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?;
            let current_hidden_states = expert_layer.forward(&current_state)?;
            let current_hidden_states = current_hidden_states.broadcast_mul(&selected_experts)?;
            ys = ys.index_add(&top_x, &current_hidden_states, 0)?;
        }
        ys.reshape((b_size, seq_len, hidden_dim))
    }
}
// MLP or MoE decision enum
#[derive(Debug, Clone)]
enum Qwen3FeedForward {
Mlp(Qwen3MLP),
NaiveMoE(Qwen3SparseMoeBlock),
FusedMoE(FusedMoe),
}
impl Qwen3FeedForward {
fn forward(&self, xs: &Tensor, is_prefill: bool) -> Result<Tensor> {
match self {
Self::Mlp(m) => m.forward(xs),
Self::NaiveMoE(m) => m.forward(xs),
Self::FusedMoE(m) => m.forward(xs, is_prefill),
}
}
}
/// One pre-norm decoder layer: self-attention followed by either a dense MLP
/// or a sparse MoE block, each with a residual connection.
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Qwen3Attention,
    feed_forward: Qwen3FeedForward,
    // input_layernorm (before attention).
    ln1: RmsNorm,
    // post_attention_layernorm (before the feed-forward).
    ln2: RmsNorm,
}
impl DecoderLayer {
    /// Builds layer `layer_idx`; the layer index decides whether the
    /// feed-forward is MoE or dense (see `decoder_sparse_step`).
    fn new(
        layer_idx: usize,
        cfg: &Config,
        rotary: Arc<Qwen3RotaryEmbedding>,
        vb: VarBuilder,
    ) -> Result<Self> {
        let self_attn = Qwen3Attention::new(&cfg.into(), rotary, vb.pp("self_attn"))?;
        // Config for the fused MoE kernel; built unconditionally but only used
        // on the MoE branch below.
        let moe_cfg = MoeCfg {
            hidden_size: cfg.hidden_size,
            num_experts: cfg.num_experts,
            num_experts_per_tok: cfg.num_experts_per_tok,
            moe_intermediate_size: cfg.moe_intermediate_size,
            norm_topk_prob: cfg.norm_topk_prob,
            act: cfg.hidden_act,
            decoder_sparse_step: None,
        };
        // Decide whether to use MoE or regular MLP based on layer_idx and decoder_sparse_step
        let feed_forward =
            if cfg.num_experts > 0 && (layer_idx + 1).is_multiple_of(cfg.decoder_sparse_step) {
                if cfg!(feature = "cuda") {
                    // Use fused MoE kernel on CUDA
                    Qwen3FeedForward::FusedMoE(FusedMoe::new(&moe_cfg, vb.pp("mlp"), vb.dtype())?)
                } else {
                    Qwen3FeedForward::NaiveMoE(Qwen3SparseMoeBlock::new(cfg, vb.pp("mlp"))?)
                }
            } else {
                Qwen3FeedForward::Mlp(Qwen3MLP::new(&cfg.into(), vb.pp("mlp"))?)
            };
        let ln1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
        let ln2 = RmsNorm::new(
            cfg.hidden_size,
            cfg.rms_norm_eps,
            vb.pp("post_attention_layernorm"),
        )?;
        Ok(Self {
            self_attn,
            feed_forward,
            ln1,
            ln2,
        })
    }
    /// Pre-norm residual layer: x + attn(ln1(x)), then x + ffn(ln2(x)).
    /// `mask.is_some()` doubles as the prefill flag for the fused MoE path.
    fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, offset: usize) -> Result<Tensor> {
        let h = self.ln1.forward(x)?;
        let h = self.self_attn.forward(&h, mask, offset)?;
        let x = (x + h)?;
        let h2 = self.ln2.forward(&x)?;
        let h2 = self.feed_forward.forward(&h2, mask.is_some())?;
        x + h2
    }
    fn clear_kv_cache(&mut self) {
        self.self_attn.clear_kv_cache();
    }
}
/// The Qwen3-MoE transformer stack (embeddings, decoder layers, final norm)
/// without a language-modeling head.
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: candle_nn::Embedding,
    layers: Vec<DecoderLayer>,
    norm: RmsNorm,
    device: Device,
    dtype: DType,
}
impl Model {
    /// Loads the full stack from `vb` under the `model.*` weight prefix.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let embed_tokens =
            candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?;
        // Rotary tables are shared across all layers.
        let rotary = Arc::new(Qwen3RotaryEmbedding::new(
            vb.dtype(),
            &cfg.into(),
            vb.device(),
        )?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb.pp("model.layers");
        for i in 0..cfg.num_hidden_layers {
            layers.push(DecoderLayer::new(i, cfg, rotary.clone(), vb_l.pp(i))?);
        }
        Ok(Self {
            embed_tokens,
            layers,
            norm: RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?,
            device: vb.device().clone(),
            dtype: vb.dtype(),
        })
    }
    /// Resets the per-layer KV caches (call between unrelated generations).
    fn clear_kv_cache(&mut self) {
        for l in &mut self.layers {
            l.clear_kv_cache();
        }
    }
    /// Builds an additive causal mask of shape (b, 1, tgt, tgt + offset):
    /// 0 for attendable positions, -inf otherwise. `sw` optionally limits
    /// attention to a sliding window of that many positions.
    fn causal_mask(
        &self,
        b: usize,
        tgt: usize,
        offset: usize,
        sw: Option<usize>,
    ) -> Result<Tensor> {
        let minf = f32::NEG_INFINITY;
        let mask: Vec<_> = (0..tgt)
            .flat_map(|i| {
                (0..(tgt + offset)).map(move |j| {
                    // Position j is visible to query i if it is not in the future...
                    let past_ok = j <= i + offset;
                    // ...and (if windowed) not further back than the window.
                    let sw_ok = match sw {
                        Some(w) => (i + offset) as i64 - j as i64 <= w as i64,
                        None => true,
                    };
                    if past_ok && sw_ok {
                        0.
                    } else {
                        minf
                    }
                })
            })
            .collect();
        Tensor::from_slice(&mask, (b, 1, tgt, tgt + offset), &self.device)?.to_dtype(self.dtype)
    }
    /// Runs the stack on token ids `(b, l)`; `offset` is the number of tokens
    /// already in the KV cache. Returns normalized hidden states.
    pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> {
        let (b, l) = input.dims2()?;
        let mut h = self.embed_tokens.forward(input)?;
        // Single-token decode needs no mask; prefill gets a causal mask.
        // NOTE(review): the sliding-window argument is always None here even
        // though the config carries `sliding_window` — confirm intended.
        let causal = if l == 1 {
            None
        } else {
            Some(self.causal_mask(b, l, offset, None)?)
        };
        for layer in &mut self.layers {
            h = layer.forward(&h, causal.as_ref(), offset)?;
        }
        self.norm.forward(&h)
    }
}
/// The transformer stack plus a language-modeling head producing vocabulary
/// logits for the last position only.
#[derive(Debug, Clone)]
pub struct ModelForCausalLM {
    base: Model,
    lm_head: Linear,
}
impl ModelForCausalLM {
    /// Loads the base model; the head either reuses (ties) the embedding
    /// matrix or is loaded from `lm_head`.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let base = Model::new(cfg, vb.clone())?;
        let lm_head = if cfg.tie_word_embeddings {
            Linear::from_weights(base.embed_tokens.embeddings().clone(), None)
        } else {
            linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
        };
        Ok(Self { base, lm_head })
    }
    /// Returns logits of shape (b, 1, vocab) for the final input position.
    pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> {
        let (_, l) = input.dims2()?;
        self.base
            .forward(input, offset)?
            // Only the last token's hidden state is needed for next-token logits.
            .narrow(1, l - 1, 1)?
            .apply(&self.lm_head)
    }
    pub fn clear_kv_cache(&mut self) {
        self.base.clear_kv_cache();
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/dinov2reg4.rs | candle-transformers/src/models/dinov2reg4.rs | //! Implementation of the DINOv2 revision (4 regularization)
//!
//! The DINOv2-reg4 model is a variant of DINOv2 that adds 4 regularization tokens to the
//! original architecture. This implementation is specifically trained for plant species
//! classification on the PlantCLEF2024 dataset with 7,806 classes.
//!
//! - [Paper](https://arxiv.org/abs/2309.16588). DINOv2: Learning Robust Visual Features without Supervision
//! - [GH Repo](https://github.com/facebookresearch/dinov2)
//!
//! # Example
//!
//! ```bash
//! # Download classes names and a plant picture to identify
//! # see candle/examples/dinov2reg4 for full code.
//!
//! # Perform inference
//! cargo run \
//! --example dinov2reg4 \
//! --release -- \
//! --image <orchid-file>
//!
//! > Orchis simia Lam. : 45.55%
//! > Orchis × bergonii Nanteuil: 9.80%
//! > Orchis italica Poir. : 9.66%
//! > Orchis × angusticruris Franch.: 2.76%
//! > Orchis × bivonae Tod. : 2.54%
//! ```
//!
//! <div align=center>
//! <img src="https://bs.plantnet.org/image/o/bd2d3830ac3270218ba82fd24e2290becd01317c" alt="" width=320>
//! </div>
//!
use candle::{IndexOp, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder};
// Fixed input resolution expected by this DINOv2-reg4 checkpoint.
const IMG_SIZE: usize = 518;
// Side length of each square patch; 518 / 14 = 37 patches per side.
const PATCH_SIZE: usize = 14;
const NUM_CLASSES: usize = 7806; // PlantCLEF2024 DINOv2 (https://zenodo.org/records/10848263)
/// Builds a `Linear` layer from `vb`, with or without a bias term.
fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> {
    match bias {
        true => candle_nn::linear(in_dim, out_dim, vb),
        false => candle_nn::linear_no_bias(in_dim, out_dim, vb),
    }
}
/// Multi-head self-attention with a fused QKV projection.
#[derive(Debug)]
struct Attention {
    // Single projection producing Q, K and V concatenated (dim * 3).
    qkv: Linear,
    proj: Linear,
    num_heads: usize,
    // 1 / sqrt(head_dim), applied to the queries.
    scale: f64,
}
impl Attention {
    fn new(
        vb: VarBuilder,
        dim: usize,
        num_heads: usize,
        qkv_bias: bool,
        proj_bias: bool,
    ) -> Result<Self> {
        let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?;
        let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?;
        let scale = 1. / ((dim / num_heads) as f64).sqrt();
        Ok(Self {
            qkv,
            proj,
            num_heads,
            scale,
        })
    }
}
impl Module for Attention {
    /// Standard scaled-dot-product attention over (b, n, c) token embeddings.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, n, c) = xs.dims3()?;
        // Reorder (b, n, 3, heads, head_dim) -> (3, b, heads, n, head_dim)
        // via successive transposes (see the axis-permutation comments).
        let qkv = self
            .qkv
            .forward(xs)?
            .reshape((b, n, 3, self.num_heads, c / self.num_heads))?
            .transpose(1, 2)? // 02134
            .transpose(0, 1)? // 20134
            .transpose(2, 3)?; // 20314
        let q = (qkv.i(0)? * self.scale)?;
        let k = qkv.i(1)?.contiguous()?;
        let v = qkv.i(2)?.contiguous()?;
        let attn = candle_nn::ops::softmax(&q.matmul(&k.t()?)?, D::Minus1)?;
        // Merge heads back into the channel dimension and apply the output proj.
        let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?;
        self.proj.forward(&attn)
    }
}
/// Per-channel learned scaling applied to a residual branch (LayerScale).
#[derive(Debug)]
struct LayerScale {
    // Shape (dim,), broadcast over batch and sequence.
    gamma: Tensor,
}
impl LayerScale {
    fn new(vb: VarBuilder, dim: usize) -> Result<Self> {
        let gamma = vb.get(dim, "gamma")?;
        Ok(Self { gamma })
    }
}
impl Module for LayerScale {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.broadcast_mul(&self.gamma)
    }
}
/// Two-layer feed-forward block (fc1 -> GELU -> fc2); output width equals
/// input width.
#[derive(Debug)]
struct Mlp {
    fc1: Linear,
    fc2: Linear,
}
impl Mlp {
    fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> {
        let out_features = in_features;
        let fc1 = linear(vb.pp("fc1"), in_features, hidden_features, bias)?;
        let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?;
        Ok(Self { fc1, fc2 })
    }
}
impl Module for Mlp {
    /// fc1 -> GELU -> fc2.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let hidden = self.fc1.forward(xs)?;
        self.fc2.forward(&hidden.gelu()?)
    }
}
/// One ViT transformer block: pre-norm attention and MLP branches, each with
/// LayerScale and a residual connection.
#[derive(Debug)]
struct Block {
    norm1: LayerNorm,
    attn: Attention,
    ls1: LayerScale,
    norm2: LayerNorm,
    mlp: Mlp,
    ls2: LayerScale,
}
impl Block {
    fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
        let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?;
        let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true)?;
        let ls1 = LayerScale::new(vb.pp("ls1"), dim)?;
        let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?;
        // The MLP hidden dim uses the conventional 4x expansion.
        let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?;
        let ls2 = LayerScale::new(vb.pp("ls2"), dim)?;
        Ok(Self {
            norm1,
            attn,
            ls1,
            norm2,
            mlp,
            ls2,
        })
    }
}
impl Module for Block {
    /// Pre-norm residual block:
    /// x = x + ls1(attn(norm1(x))); x = x + ls2(mlp(norm2(x))).
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let attn_branch = self.attn.forward(&self.norm1.forward(xs)?)?;
        let xs = (self.ls1.forward(&attn_branch)? + xs)?;
        let mlp_branch = self.mlp.forward(&self.norm2.forward(&xs)?)?;
        self.ls2.forward(&mlp_branch)? + &xs
    }
}
/// Splits an image into non-overlapping patches via a strided conv and
/// flattens them into a sequence of patch embeddings.
#[derive(Debug)]
struct PatchEmbed {
    // Conv with kernel == stride == patch_size, i.e. one output per patch.
    proj: candle_nn::Conv2d,
    patch_size: (usize, usize),
    num_patches: usize,
}
impl PatchEmbed {
    fn new(
        vb: VarBuilder,
        img_size: usize,
        patch_size: usize,
        in_chans: usize,
        embed_dim: usize,
    ) -> Result<Self> {
        let config = candle_nn::Conv2dConfig {
            stride: patch_size,
            ..Default::default()
        };
        let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?;
        let num_patches = (img_size / patch_size) * (img_size / patch_size);
        Ok(Self {
            proj,
            patch_size: (patch_size, patch_size),
            num_patches,
        })
    }
}
impl Module for PatchEmbed {
    /// (b, c, h, w) image -> (b, num_patches, embed_dim) patch tokens.
    /// Errors if h or w is not a multiple of the patch size.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (_b, _c, h, w) = xs.dims4()?;
        let (patch_h, patch_w) = self.patch_size;
        if (h % patch_h) != 0 {
            candle::bail!("image height {h} is not a multiple of patch height {patch_h}")
        }
        if (w % patch_w) != 0 {
            candle::bail!("image width {w} is not a multiple of patch width {patch_w}")
        }
        let xs = self.proj.forward(xs)?;
        let (b, c, h, w) = xs.dims4()?;
        // flatten embeddings.
        xs.reshape((b, c, h * w))?.transpose(1, 2)
    }
}
/// DINOv2 vision transformer with 4 register tokens (the "reg4" variant) and
/// a classification head over the CLS token.
#[derive(Debug)]
pub struct DinoVisionTransformer {
    patch_embed: PatchEmbed,
    // Learned (1, 1, dim) class token.
    cls_token: Tensor,
    // Learned (1, 4, dim) register tokens prepended alongside the CLS token.
    reg_token: Tensor,
    // Positional embeddings for the patch tokens only.
    pos_embed: Tensor,
    blocks: Vec<Block>,
    norm: LayerNorm,
    head: Linear,
}
impl DinoVisionTransformer {
pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> {
let patch_embed =
PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?;
let cls_token = vb.get((1, 1, embed_dim), "cls_token")?;
let reg_token = vb.get((1, 4, embed_dim), "reg_token")?;
let pos_embed = vb.get((1, patch_embed.num_patches, embed_dim), "pos_embed")?;
let head = linear(vb.pp("head"), embed_dim, NUM_CLASSES, true)?;
let norm = layer_norm(embed_dim, 1e-6, vb.pp("norm"))?;
let vb_b = vb.pp("blocks");
let blocks = (0..depth)
.map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads))
.collect::<Result<Vec<_>>>()?;
Ok(Self {
patch_embed,
cls_token,
reg_token,
pos_embed,
blocks,
norm,
head,
})
}
fn interpolate_pos_encoding(&self, xs: &Tensor, w: usize, h: usize) -> Result<Tensor> {
let npatch = xs.dim(1)? - 1;
let n = self.pos_embed.dim(1)? - 1;
let sqrt_n = (n as f64).sqrt();
if npatch == n && w == h {
return Ok(self.pos_embed.clone());
}
let patch_pos_embed = &self.pos_embed;
let dim = xs.dim(D::Minus1)?;
let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1);
let patch_pos_embed = patch_pos_embed
.reshape((1, sqrt_n as usize, sqrt_n as usize, dim))?
.transpose(2, 3)?
.transpose(1, 2)?;
// This uses bicubic interpolation in the original implementation.
let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?;
let el_count = patch_pos_embed.shape().elem_count();
patch_pos_embed
.transpose(1, 2)?
.transpose(2, 3)?
.reshape((1, el_count / dim, dim))
}
fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> {
let (_b, _nc, w, h) = xs.dims4()?;
if (w != IMG_SIZE) || (h != IMG_SIZE) {
panic!("Error: The input tensor should have the shape: Bx3x518x518.");
}
let xs = self.patch_embed.forward(xs)?;
let xs = (&xs + &self.interpolate_pos_encoding(&xs, w, h)?)?;
let xs = Tensor::cat(&[&self.cls_token, &self.reg_token, &xs], 1)?;
Ok(xs)
}
}
impl Module for DinoVisionTransformer {
    /// Full forward pass: tokenize, run all blocks, normalize, then classify
    /// from the CLS token (index 0). Returns (b, NUM_CLASSES) logits.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = self.prepare_tokens_with_mask(xs)?;
        for blk in self.blocks.iter() {
            xs = blk.forward(&xs)?
        }
        let xs = self.norm.forward(&xs)?;
        // Token 0 is the CLS token; only it feeds the classification head.
        let xs_norm_clstoken = xs.i((.., 0))?;
        self.head.forward(&xs_norm_clstoken)
    }
}
/// ViT-Small/14 variant: 12 blocks, 384-dim embeddings, 6 heads.
pub fn vit_small(vb: VarBuilder) -> Result<DinoVisionTransformer> {
    DinoVisionTransformer::new(vb, 12, 384, 6)
}
/// ViT-Base/14 variant: 12 blocks, 768-dim embeddings, 12 heads.
pub fn vit_base(vb: VarBuilder) -> Result<DinoVisionTransformer> {
    DinoVisionTransformer::new(vb, 12, 768, 12)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/chatglm.rs | candle-transformers/src/models/chatglm.rs | //! Implementation of the ChatGLM2/3 models from THUDM.
//!
//! - 💻 [GitHub](https://github.com/THUDM/ChatGLM3) ChatGLM3: Advancing Multilingual Conversational Language Models with High-Quality Data
//! - 💻 [GitHub](https://github.com/THUDM/ChatGLM2-6B) ChatGLM2-6B.
//!
use crate::models::with_tracing::{linear_b as linear, Linear};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
/// ChatGLM model configuration (mirrors the upstream THUDM config fields).
#[derive(Debug, Clone)]
pub struct Config {
    pub num_layers: usize,
    pub padded_vocab_size: usize,
    pub hidden_size: usize,
    pub ffn_hidden_size: usize,
    // Per-head channel count; also the rotary-embedding dimension base.
    pub kv_channels: usize,
    pub num_attention_heads: usize,
    pub seq_length: usize,
    pub layernorm_epsilon: f64,
    // true: RmsNorm layers, false: classic LayerNorm.
    pub rmsnorm: bool,
    pub apply_residual_connection_post_layernorm: bool,
    pub post_layer_norm: bool,
    pub add_bias_linear: bool,
    pub add_qkv_bias: bool,
    pub bias_dropout_fusion: bool,
    // Grouped-query attention; only `true` is supported by SelfAttention.
    pub multi_query_attention: bool,
    pub multi_query_group_num: usize,
    pub apply_query_key_layer_scaling: bool,
    pub attention_softmax_in_fp32: bool,
    pub fp32_residual_connection: bool,
}
impl Config {
    /// Hyper-parameters matching the ChatGLM3-6B checkpoint.
    pub fn glm3_6b() -> Self {
        Self {
            num_layers: 28,
            padded_vocab_size: 65024,
            hidden_size: 4096,
            ffn_hidden_size: 13696,
            kv_channels: 128,
            num_attention_heads: 32,
            seq_length: 8192,
            layernorm_epsilon: 1e-5,
            rmsnorm: true,
            apply_residual_connection_post_layernorm: false,
            post_layer_norm: true,
            add_bias_linear: false,
            add_qkv_bias: true,
            bias_dropout_fusion: true,
            multi_query_attention: true,
            multi_query_group_num: 2,
            apply_query_key_layer_scaling: true,
            attention_softmax_in_fp32: true,
            fp32_residual_connection: false,
        }
    }
}
/// Rotary positional embedding with a precomputed cos/sin cache covering
/// `seq_length` positions.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    // Shape (seq_length, n_freqs, 2): [..., 0] = cos, [..., 1] = sin.
    cache: Tensor,
}
impl RotaryEmbedding {
    fn new(cfg: &Config, dtype: DType, dev: &Device) -> Result<Self> {
        let rotary_dim = cfg.kv_channels;
        let n_elem = rotary_dim / 2;
        // NOTE(review): step_by(2) over 0..n_elem gives n_elem/2 frequencies,
        // so only a quarter of the head dim gets rotated — this mirrors the
        // upstream GLM rotary layout; confirm against the reference impl.
        let inv_freq: Vec<_> = (0..n_elem)
            .step_by(2)
            .map(|i| 1f32 / 10_000f64.powf(i as f64 / n_elem as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, cfg.seq_length as u32, dev)?
            .to_dtype(dtype)?
            .reshape((cfg.seq_length, 1))?;
        // Outer product position x frequency -> rotation angles.
        let freqs = t.matmul(&inv_freq)?;
        let cache = Tensor::stack(&[&freqs.cos()?, &freqs.sin()?], D::Minus1)?;
        Ok(Self { cache })
    }
    /// Applies the rotation to the first `rot_dim` channels of `xs`
    /// (layout: seq, batch, heads, head_dim); the rest pass through.
    /// `seqlen_offset` selects the cache rows for cached decoding.
    fn apply(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (seqlen, _b, np, _hn) = xs.dims4()?;
        let cache = self.cache.narrow(0, seqlen_offset, seqlen)?;
        let rot_dim = cache.dim(D::Minus2)? * 2;
        // Split into the rotated slice and the pass-through remainder.
        let (xs, xs_pass) = (
            xs.narrow(D::Minus1, 0, rot_dim)?,
            xs.narrow(D::Minus1, rot_dim, rot_dim)?,
        );
        // View pairs of channels as (real, imag) components.
        let xshaped = xs.reshape((seqlen, (), np, rot_dim / 2, 2))?;
        let cache = cache.reshape((seqlen, (), 1, rot_dim / 2, 2))?;
        let (xshaped0, xshaped1) = (
            xshaped.i((.., .., .., .., 0))?,
            xshaped.i((.., .., .., .., 1))?,
        );
        let (cache0, cache1) = (cache.i((.., .., .., .., 0))?, cache.i((.., .., .., .., 1))?);
        // Complex rotation: (x0 + i*x1) * (cos + i*sin).
        let xs_out = Tensor::stack(
            &[
                (xshaped0.broadcast_mul(&cache0)? - xshaped1.broadcast_mul(&cache1)?)?,
                (xshaped1.broadcast_mul(&cache0)? + xshaped0.broadcast_mul(&cache1)?)?,
            ],
            D::Minus1,
        )?;
        let xs_out = xs_out.flatten_from(3)?;
        Tensor::cat(&[xs_out, xs_pass], D::Minus1)
    }
}
/// Scaled-dot-product attention core, with GLM's optional per-layer
/// query-key scaling.
#[derive(Debug, Clone)]
struct CoreAttention {
    // Layer-number scaling coefficient (Some iff apply_query_key_layer_scaling).
    coeff: Option<f64>,
    // Divisor applied to QK^T scores (sqrt(head_dim), times coeff if set).
    norm_factor: f64,
}
/// Returns a tensor equal to `on_false` except where `mask` is non-zero,
/// where the scalar `on_true` is substituted instead.
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let fill = Tensor::new(on_true, on_false.device())?.broadcast_as(mask.shape().dims())?;
    mask.where_cond(&fill, on_false)
}
impl CoreAttention {
    /// `layer_number` is 1-based; with query-key layer scaling enabled the
    /// scores are divided by an extra factor of the layer number and then
    /// multiplied back after normalization (numerical-stability trick).
    fn new(layer_number: usize, cfg: &Config) -> Result<Self> {
        let norm_factor = (cfg.kv_channels as f64).sqrt();
        let (norm_factor, coeff) = if cfg.apply_query_key_layer_scaling {
            let coeff = f64::max(1.0, layer_number as f64);
            (norm_factor * coeff, Some(coeff))
        } else {
            (norm_factor, None)
        };
        Ok(Self { coeff, norm_factor })
    }
    /// Inputs use the GLM layout (seq, batch, heads, head_dim).
    /// Returns the context in (seq, batch, heads*head_dim) layout.
    fn forward(
        &self,
        query_layer: &Tensor,
        key_layer: &Tensor,
        value_layer: &Tensor,
        attention_mask: &Option<Tensor>,
    ) -> Result<Tensor> {
        let output_size = (
            query_layer.dim(1)?, // b
            query_layer.dim(2)?, // np
            query_layer.dim(0)?, // sq
            key_layer.dim(0)?,   // sk
        );
        // Fold batch and heads together for a single batched matmul.
        let query_layer =
            query_layer.reshape((output_size.2, output_size.0 * output_size.1, ()))?;
        let key_layer = key_layer.reshape((output_size.3, output_size.0 * output_size.1, ()))?;
        let matmul_result = Tensor::matmul(
            &query_layer.transpose(0, 1)?,
            &key_layer.transpose(0, 1)?.transpose(1, 2)?,
        )?;
        let matmul_result = (matmul_result / self.norm_factor)?.reshape(output_size)?;
        // Undo the extra layer-number division applied via norm_factor.
        let matmul_result = match self.coeff {
            None => matmul_result,
            Some(coeff) => (matmul_result * coeff)?,
        };
        // Masked positions get -inf before the softmax.
        let attention_scores = match attention_mask {
            Some(mask) => masked_fill(
                &matmul_result,
                &mask.broadcast_left((matmul_result.dim(0)?, matmul_result.dim(1)?))?,
                f32::NEG_INFINITY,
            )?,
            None => matmul_result,
        };
        let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;
        let output_size = (
            value_layer.dim(1)?,
            value_layer.dim(2)?,
            query_layer.dim(0)?,
            value_layer.dim(3)?,
        );
        let value_layer =
            value_layer.reshape((value_layer.dim(0)?, output_size.0 * output_size.1, ()))?;
        let attention_probs =
            attention_probs.reshape((output_size.0 * output_size.1, output_size.2, ()))?;
        let context_layer = Tensor::matmul(&attention_probs, &value_layer.transpose(0, 1)?)?;
        let context_layer = context_layer.reshape(output_size)?;
        // Back to (sq, b, np, hn), then merge the last two dims.
        let context_layer = context_layer.permute((2, 0, 1, 3))?.contiguous()?;
        context_layer.flatten_from(D::Minus2)
    }
}
/// GLM self-attention with multi-query (grouped KV) heads and a per-layer
/// KV cache for incremental decoding.
#[derive(Debug, Clone)]
struct SelfAttention {
    // Fused projection producing Q plus the (smaller) grouped K and V.
    query_key_value: Linear,
    core_attention: CoreAttention,
    dense: Linear,
    multi_query_attention: bool,
    num_attention_heads_per_partition: usize,
    num_multi_query_groups_per_partition: usize,
    hidden_size_per_attention_head: usize,
    // (keys, values) accumulated along the sequence dim (dim 0).
    kv_cache: Option<(Tensor, Tensor)>,
}
impl SelfAttention {
    /// `layer_number` is 1-based and feeds the CoreAttention scaling.
    fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let projection_size = cfg.kv_channels * cfg.num_attention_heads;
        let hidden_size_per_attention_head = projection_size / cfg.num_attention_heads;
        // Multi-query: K and V only have multi_query_group_num heads.
        let qkv_hidden_size = if cfg.multi_query_attention {
            projection_size + 2 * hidden_size_per_attention_head * cfg.multi_query_group_num
        } else {
            3 * projection_size
        };
        let query_key_value = linear(
            cfg.hidden_size,
            qkv_hidden_size,
            cfg.add_bias_linear || cfg.add_qkv_bias,
            vb.pp("query_key_value"),
        )?;
        let core_attention = CoreAttention::new(layer_number, cfg)?;
        let dense = linear(
            cfg.hidden_size,
            cfg.hidden_size,
            cfg.add_bias_linear,
            vb.pp("dense"),
        )?;
        Ok(Self {
            query_key_value,
            core_attention,
            dense,
            multi_query_attention: cfg.multi_query_attention,
            num_attention_heads_per_partition: cfg.num_attention_heads,
            num_multi_query_groups_per_partition: cfg.multi_query_group_num,
            hidden_size_per_attention_head: cfg.kv_channels,
            kv_cache: None,
        })
    }
    /// Drops the cached keys/values (start of a fresh generation).
    fn reset_kv_cache(&mut self) {
        self.kv_cache = None
    }
    /// `xs` is in GLM layout (seq, batch, hidden). Splits the fused QKV,
    /// applies rotary embeddings, extends the KV cache, repeats the grouped
    /// KV heads up to the full head count, and runs core attention.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: &Option<Tensor>,
        rotary_emb: &RotaryEmbedding,
    ) -> Result<Tensor> {
        let mixed_x_layer = xs.apply(&self.query_key_value)?;
        if !self.multi_query_attention {
            candle::bail!("only multi_query_attention=true is supported")
        }
        let hpa = self.hidden_size_per_attention_head;
        // Slice the fused projection: [Q | K | V] along the last dim.
        let query_layer =
            mixed_x_layer.narrow(D::Minus1, 0, self.num_attention_heads_per_partition * hpa)?;
        let key_layer = mixed_x_layer.narrow(
            D::Minus1,
            self.num_attention_heads_per_partition * hpa,
            self.num_multi_query_groups_per_partition * hpa,
        )?;
        let value_layer = mixed_x_layer.narrow(
            D::Minus1,
            self.num_attention_heads_per_partition * hpa
                + self.num_multi_query_groups_per_partition * hpa,
            self.num_multi_query_groups_per_partition * hpa,
        )?;
        // Unflatten into (seq, batch, heads, head_dim).
        let query_layer = query_layer.reshape((
            query_layer.dim(0)?,
            query_layer.dim(1)?,
            self.num_attention_heads_per_partition,
            hpa,
        ))?;
        let key_layer = key_layer.reshape((
            key_layer.dim(0)?,
            key_layer.dim(1)?,
            self.num_multi_query_groups_per_partition,
            hpa,
        ))?;
        let value_layer = value_layer.reshape((
            value_layer.dim(0)?,
            value_layer.dim(1)?,
            self.num_multi_query_groups_per_partition,
            hpa,
        ))?;
        // Rotary embeddings.
        // The offset equals the number of positions already cached.
        let seqlen_offset = match &self.kv_cache {
            None => 0,
            Some((prev_k, _)) => prev_k.dim(0)?,
        };
        let query_layer = rotary_emb.apply(&query_layer, seqlen_offset)?;
        let key_layer = rotary_emb.apply(&key_layer, seqlen_offset)?;
        // KV cache.
        let (key_layer, value_layer) = match &self.kv_cache {
            None => (key_layer, value_layer),
            Some((prev_k, prev_v)) => {
                let k = Tensor::cat(&[prev_k, &key_layer], 0)?;
                let v = Tensor::cat(&[prev_v, &value_layer], 0)?;
                (k, v)
            }
        };
        self.kv_cache = Some((key_layer.clone(), value_layer.clone()));
        // Repeat KV.
        // Expand each KV group to serve `ratio` query heads.
        let ratio =
            self.num_attention_heads_per_partition / self.num_multi_query_groups_per_partition;
        let key_layer = {
            let (d0, d1, d2, d3) = key_layer.dims4()?;
            key_layer
                .unsqueeze(D::Minus2)?
                .expand((d0, d1, d2, ratio, d3))?
                .reshape((
                    d0,
                    d1,
                    self.num_attention_heads_per_partition,
                    self.hidden_size_per_attention_head,
                ))?
        };
        let value_layer = {
            let (d0, d1, d2, d3) = value_layer.dims4()?;
            value_layer
                .unsqueeze(D::Minus2)?
                .expand((d0, d1, d2, ratio, d3))?
                .reshape((
                    d0,
                    d1,
                    self.num_attention_heads_per_partition,
                    self.hidden_size_per_attention_head,
                ))?
        };
        let context_layer =
            self.core_attention
                .forward(&query_layer, &key_layer, &value_layer, attention_mask)?;
        let output = context_layer.apply(&self.dense)?;
        Ok(output)
    }
}
#[allow(clippy::upper_case_acronyms)]
/// GLM feed-forward block: the up-projection doubles the FFN width so the
/// SwiGLU activation can split it into value and gate halves.
#[derive(Debug, Clone)]
struct MLP {
    // hidden -> 2 * ffn_hidden (value and gate concatenated).
    dense_h_to_4h: Linear,
    // ffn_hidden -> hidden.
    dense_4h_to_h: Linear,
}
impl MLP {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let dense_h_to_4h = linear(
            cfg.hidden_size,
            cfg.ffn_hidden_size * 2,
            cfg.add_bias_linear,
            vb.pp("dense_h_to_4h"),
        )?;
        let dense_4h_to_h = linear(
            cfg.ffn_hidden_size,
            cfg.hidden_size,
            cfg.add_bias_linear,
            vb.pp("dense_4h_to_h"),
        )?;
        Ok(Self {
            dense_4h_to_h,
            dense_h_to_4h,
        })
    }
}
impl Module for MLP {
    /// Up-project to 2x FFN width, gate via SwiGLU (halving the width back),
    /// then project down to the hidden size.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let up = self.dense_h_to_4h.forward(xs)?;
        let gated = candle_nn::Activation::Swiglu.forward(&up)?;
        self.dense_4h_to_h.forward(&gated)
    }
}
/// One GLM transformer layer: attention and MLP sub-blocks, each preceded by
/// a norm, with configurable residual-source selection.
#[derive(Debug, Clone)]
struct Block {
    input_layernorm: candle_nn::LayerNorm,
    self_attention: SelfAttention,
    post_attention_layernorm: candle_nn::LayerNorm,
    mlp: MLP,
    // If true, residuals branch from the layernorm output instead of the input.
    apply_residual_connection_post_layernorm: bool,
}
impl Block {
    /// Builds layer `layer_number` (1-based, feeds attention score scaling).
    /// Norms are RmsNorm or LayerNorm depending on `cfg.rmsnorm`; RmsNorm is
    /// unwrapped via `into_inner` so both cases share the LayerNorm type.
    fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let input_layernorm = if cfg.rmsnorm {
            candle_nn::rms_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("input_layernorm"),
            )?
            .into_inner()
        } else {
            candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("input_layernorm"),
            )?
        };
        let post_attention_layernorm = if cfg.rmsnorm {
            candle_nn::rms_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("post_attention_layernorm"),
            )?
            .into_inner()
        } else {
            candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("post_attention_layernorm"),
            )?
        };
        let self_attention = SelfAttention::new(layer_number, cfg, vb.pp("self_attention"))?;
        let mlp = MLP::new(cfg, vb.pp("mlp"))?;
        Ok(Self {
            input_layernorm,
            self_attention,
            post_attention_layernorm,
            mlp,
            apply_residual_connection_post_layernorm: cfg.apply_residual_connection_post_layernorm,
        })
    }
    fn reset_kv_cache(&mut self) {
        self.self_attention.reset_kv_cache()
    }
    /// norm -> attention -> residual -> norm -> MLP -> residual. The residual
    /// source is either the sub-block input or its layernorm output,
    /// depending on `apply_residual_connection_post_layernorm`.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: &Option<Tensor>,
        rotary_emb: &RotaryEmbedding,
    ) -> Result<Tensor> {
        let layernorm_output = xs.apply(&self.input_layernorm)?;
        let attention_output =
            self.self_attention
                .forward(&layernorm_output, attention_mask, rotary_emb)?;
        let residual = if self.apply_residual_connection_post_layernorm {
            &layernorm_output
        } else {
            xs
        };
        let layernorm_input = (residual + attention_output)?;
        let layernorm_output = layernorm_input.apply(&self.post_attention_layernorm)?;
        let mlp_output = layernorm_output.apply(&self.mlp)?;
        let residual = if self.apply_residual_connection_post_layernorm {
            &layernorm_output
        } else {
            &layernorm_input
        };
        mlp_output + residual
    }
}
/// The decoder stack: all blocks, an optional final norm, and the shared
/// rotary-embedding cache.
#[derive(Debug, Clone)]
struct Transformer {
    layers: Vec<Block>,
    // Present iff cfg.post_layer_norm.
    final_layernorm: Option<candle_nn::LayerNorm>,
    rotary_emb: RotaryEmbedding,
}
impl Transformer {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_l = vb.pp("layers");
        let mut layers = Vec::with_capacity(cfg.num_layers);
        for layer_index in 0..cfg.num_layers {
            // Blocks are numbered from 1 (used for attention score scaling).
            let block = Block::new(layer_index + 1, cfg, vb_l.pp(layer_index))?;
            layers.push(block)
        }
        let final_layernorm = if cfg.post_layer_norm {
            let ln = if cfg.rmsnorm {
                candle_nn::rms_norm(
                    cfg.hidden_size,
                    cfg.layernorm_epsilon,
                    vb.pp("final_layernorm"),
                )?
                .into_inner()
            } else {
                candle_nn::layer_norm(
                    cfg.hidden_size,
                    cfg.layernorm_epsilon,
                    vb.pp("final_layernorm"),
                )?
            };
            Some(ln)
        } else {
            None
        };
        let rotary_emb = RotaryEmbedding::new(cfg, vb.dtype(), vb.device())?;
        Ok(Self {
            layers,
            final_layernorm,
            rotary_emb,
        })
    }
    fn reset_kv_cache(&mut self) {
        for block in self.layers.iter_mut() {
            block.reset_kv_cache()
        }
    }
    /// Runs all blocks in order, then the final norm if configured.
    fn forward(&mut self, xs: &Tensor, attention_mask: &Option<Tensor>) -> Result<Tensor> {
        let mut xs = xs.clone();
        for block in self.layers.iter_mut() {
            xs = block.forward(&xs, attention_mask, &self.rotary_emb)?
        }
        match self.final_layernorm.as_ref() {
            None => Ok(xs),
            Some(ln) => xs.apply(ln),
        }
    }
}
/// Token-embedding lookup that also converts to the GLM (seq, batch, hidden)
/// layout.
#[derive(Debug, Clone)]
struct Embedding {
    word_embeddings: candle_nn::Embedding,
    // If true, embeddings are promoted to F32 for the residual stream.
    fp32_residual_connection: bool,
}
impl Embedding {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let word_embeddings = candle_nn::embedding(
            cfg.padded_vocab_size,
            cfg.hidden_size,
            vb.pp("word_embeddings"),
        )?;
        Ok(Self {
            word_embeddings,
            fp32_residual_connection: cfg.fp32_residual_connection,
        })
    }
}
impl Module for Embedding {
    /// Looks up embeddings and transposes to the (seq, batch, hidden) layout.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let embeds = self.word_embeddings.forward(xs)?.transpose(0, 1)?; // b,s,h -> s,b,h
        match self.fp32_residual_connection {
            // Promote to F32 for a higher-precision residual stream.
            true => embeds.to_dtype(candle::DType::F32),
            // Otherwise just make the transposed view contiguous.
            false => embeds.contiguous(),
        }
    }
}
/// Full ChatGLM model: embeddings, transformer stack, and output projection
/// to the (padded) vocabulary.
#[derive(Debug, Clone)]
pub struct Model {
    embedding: Embedding,
    encoder: Transformer,
    output_layer: Linear,
}
/// Builds a (size, size) causal mask where entry (i, j) is 1 exactly when
/// position j is in the future of query i (j > i), 0 otherwise.
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
    let mut mask = Vec::with_capacity(size * size);
    for i in 0..size {
        for j in 0..size {
            mask.push(u8::from(j > i));
        }
    }
    Tensor::from_slice(&mask, (size, size), device)
}
impl Model {
    /// Loads all weights from `vb` under the `transformer.*` prefix.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb = vb.pp("transformer");
        let embedding = Embedding::new(cfg, vb.pp("embedding"))?;
        let encoder = Transformer::new(cfg, vb.pp("encoder"))?;
        let output_layer = linear(
            cfg.hidden_size,
            cfg.padded_vocab_size,
            false,
            vb.pp("output_layer"),
        )?;
        Ok(Self {
            embedding,
            encoder,
            output_layer,
        })
    }
    /// Clears all per-layer KV caches (call between unrelated generations).
    pub fn reset_kv_cache(&mut self) {
        self.encoder.reset_kv_cache()
    }
    /// Runs the model on token ids (b, s) and returns the logits for the
    /// last sequence position only.
    pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
        let (_b_size, seq_len) = xs.dims2()?;
        let input_embeds = xs.apply(&self.embedding)?;
        // Single-token decode (with cache) needs no mask.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            Some(get_mask(seq_len, xs.device())?)
        };
        let xs = self.encoder.forward(&input_embeds, &attention_mask)?;
        // Hidden states are (seq, batch, hidden): index the final position.
        let lm_logits = xs.i(seq_len - 1)?.apply(&self.output_layer)?;
        Ok(lm_logits)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_t5.rs | candle-transformers/src/models/quantized_t5.rs | //! T5 model implementation with quantization support.
//!
//! T5 is an encoder-decoder model pre-trained on a multi-task mixture of supervised
//! and unsupervised tasks. This implementation provides quantization for reduced
//! memory and compute requirements.
//!
//! Key characteristics:
//! - Encoder-decoder architecture
//! - Layer normalization
//! - Relative positional encodings
//! - Support for 8-bit quantization
//!
//! References:
//! - 📝 [T5 Paper](https://arxiv.org/abs/1910.10683)
//! - 🤗 [Model Card](https://huggingface.co/t5-base)
//! - 🤗 Original model from [T5](https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py)
use crate::models::t5::{deserialize_feed_forward_proj_activation, ActivationWithOptionalGating};
use crate::models::with_tracing::QMatMul;
use crate::quantized_nn::Embedding;
pub use crate::quantized_var_builder::VarBuilder;
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::Activation;
use serde::Deserialize;
use std::sync::Arc;
// Serde default helpers used by `Config` for fields that may be absent from
// an upstream config.json.
fn default_relative_attention_max_distance() -> usize {
    128
}
fn default_is_decoder() -> bool {
    false
}
fn default_use_cache() -> bool {
    true
}
fn default_tie_word_embeddings() -> bool {
    true
}
/// Build a (size, size) causal mask: entry (i, j) is 1 when key position j lies
/// strictly after query position i (i.e. must be masked out), 0 otherwise.
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
    let mut mask = Vec::with_capacity(size * size);
    for i in 0..size {
        for j in 0..size {
            mask.push(u8::from(j > i));
        }
    }
    Tensor::from_slice(&mask, (size, size), device)
}
/// Where `mask` is non-zero, replace the matching element of `on_false` with the
/// scalar `on_true`; elsewhere keep `on_false` unchanged.
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let fill = Tensor::new(on_true, on_false.device())?;
    let fill = fill.broadcast_as(mask.shape().dims())?;
    mask.where_cond(&fill, on_false)
}
/// T5 hyper-parameters, deserialized from a HuggingFace-style `config.json`.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    vocab_size: usize,
    /// Model (hidden) dimension.
    d_model: usize,
    /// Per-head key/value dimension.
    d_kv: usize,
    /// Feed-forward inner dimension.
    d_ff: usize,
    /// Number of encoder layers (also used for the decoder unless overridden).
    num_layers: usize,
    num_decoder_layers: Option<usize>,
    num_heads: usize,
    /// Number of buckets for the relative-position bias embedding.
    relative_attention_num_buckets: usize,
    #[serde(default = "default_relative_attention_max_distance")]
    relative_attention_max_distance: usize,
    // Not used by this inference implementation.
    dropout_rate: f64,
    layer_norm_epsilon: f64,
    // Training-time initialization scale; not used by this implementation.
    initializer_factor: f64,
    #[serde(default, deserialize_with = "deserialize_feed_forward_proj_activation")]
    pub feed_forward_proj: ActivationWithOptionalGating,
    #[serde(default = "default_tie_word_embeddings")]
    tie_word_embeddings: bool,
    #[serde(default = "default_is_decoder")]
    is_decoder: bool,
    is_encoder_decoder: bool,
    #[serde(default = "default_use_cache")]
    pub use_cache: bool,
    pub pad_token_id: usize,
    pub eos_token_id: usize,
    pub decoder_start_token_id: Option<usize>,
}
impl Default for Config {
    fn default() -> Self {
        // NOTE(review): these values appear to match the original "t5-small"
        // checkpoint (d_model=512, 6 layers, 8 heads) — verify against its config.json.
        Self {
            vocab_size: 32128,
            d_model: 512,
            d_kv: 64,
            d_ff: 2048,
            num_layers: 6,
            num_decoder_layers: None,
            num_heads: 8,
            relative_attention_num_buckets: 32,
            relative_attention_max_distance: 128,
            dropout_rate: 0.1,
            layer_norm_epsilon: 1e-6,
            initializer_factor: 1.0,
            feed_forward_proj: ActivationWithOptionalGating {
                gated: false,
                activation: Activation::Relu,
            },
            tie_word_embeddings: true,
            is_decoder: false,
            is_encoder_decoder: true,
            use_cache: true,
            pad_token_id: 0,
            eos_token_id: 1,
            decoder_start_token_id: Some(0),
        }
    }
}
/// T5-style layer norm: RMS normalization (no mean subtraction, no bias)
/// with a learned per-channel scale.
#[derive(Debug, Clone)]
struct T5LayerNorm {
    weight: Tensor,
    variance_epsilon: f64,
    span: tracing::Span,
}
impl T5LayerNorm {
    /// Load a layer norm of width `h` with epsilon `eps`.
    fn load(h: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
        // The scale is stored quantized; dequantize it once at load time since
        // the normalization arithmetic is done on float tensors anyway.
        let weight = vb.get(h, "weight")?.dequantize(vb.device())?;
        Ok(Self {
            weight,
            variance_epsilon: eps,
            span: tracing::span!(tracing::Level::TRACE, "layer-norm"),
        })
    }
}
impl Module for T5LayerNorm {
    /// RMS norm: `x / sqrt(mean(x^2) + eps) * weight`. The mean-square is
    /// computed in f32 for numerical stability and the result is cast back
    /// to the input dtype before applying the scale.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let dtype = xs.dtype();
        let xs_f32 = xs.to_dtype(DType::F32)?;
        // variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        let variance = xs_f32.sqr()?.mean_keepdim(D::Minus1)?;
        let xs = xs.broadcast_div(&(variance + self.variance_epsilon)?.sqrt()?)?;
        let xs = xs.to_dtype(dtype)?;
        let xs = xs.broadcast_mul(&self.weight)?;
        Ok(xs)
    }
}
/// Non-gated T5 feed-forward: `wo(act(wi(x)))`.
#[derive(Debug, Clone)]
struct T5DenseActDense {
    wi: QMatMul,
    wo: QMatMul,
    act: Activation,
    span: tracing::Span,
}
impl T5DenseActDense {
    /// Load the non-gated feed-forward projections `wi` (d_model -> d_ff)
    /// and `wo` (d_ff -> d_model).
    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let wi = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi"))?;
        let wo = QMatMul::new(cfg.d_ff, cfg.d_model, vb.pp("wo"))?;
        Ok(Self {
            wi,
            wo,
            // Use the configured activation rather than hard-coding ReLU; the
            // standard non-gated T5 checkpoints still resolve to ReLU (the
            // config default), but this matches the upstream transformers
            // implementation for configs that specify another activation.
            act: cfg.feed_forward_proj.activation,
            span: tracing::span!(tracing::Level::TRACE, "dense-act-dense"),
        })
    }
}
impl Module for T5DenseActDense {
    /// `wo(act(wi(xs)))`.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let hidden = self.act.forward(&self.wi.forward(xs)?)?;
        self.wo.forward(&hidden)
    }
}
/// Gated T5 feed-forward (e.g. GEGLU in FLAN-T5-style configs):
/// `wo(act(wi_0(x)) * wi_1(x))`.
#[derive(Debug, Clone)]
struct T5DenseGatedActDense {
    wi_0: QMatMul,
    wi_1: QMatMul,
    wo: QMatMul,
    act: Activation,
    span: tracing::Span,
}
impl T5DenseGatedActDense {
    /// Load the gated feed-forward projections: `wi_0` (gate branch),
    /// `wi_1` (linear branch) and `wo` (output), with the activation taken
    /// from the parsed config.
    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let wi_0 = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi_0"))?;
        let wi_1 = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi_1"))?;
        let wo = QMatMul::new(cfg.d_ff, cfg.d_model, vb.pp("wo"))?;
        Ok(Self {
            wi_0,
            wi_1,
            wo,
            act: cfg.feed_forward_proj.activation,
            span: tracing::span!(tracing::Level::TRACE, "dense-gated-act-dense"),
        })
    }
}
impl Module for T5DenseGatedActDense {
    /// Gated feed-forward: `wo(act(wi_0(x)) * wi_1(x))`.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let gate = self.act.forward(&self.wi_0.forward(xs)?)?;
        let linear = self.wi_1.forward(xs)?;
        let gated = gate.broadcast_mul(&linear)?;
        self.wo.forward(&gated)
    }
}
/// Pre-norm feed-forward sub-layer; exactly one of `dense_act` /
/// `gated_dense_act` is populated depending on the config.
#[derive(Debug, Clone)]
struct T5LayerFF {
    dense_act: Option<T5DenseActDense>,
    gated_dense_act: Option<T5DenseGatedActDense>,
    layer_norm: T5LayerNorm,
    span: tracing::Span,
}
impl T5LayerFF {
    /// Load the feed-forward sub-layer, picking the gated or non-gated variant
    /// based on the configured activation.
    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let layer_norm =
            T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
        let ff_vb = vb.pp("DenseReluDense");
        let mut dense_act = None;
        let mut gated_dense_act = None;
        if cfg.feed_forward_proj.gated {
            gated_dense_act = Some(T5DenseGatedActDense::load(ff_vb, cfg)?);
        } else {
            dense_act = Some(T5DenseActDense::load(ff_vb, cfg)?);
        }
        Ok(Self {
            dense_act,
            gated_dense_act,
            layer_norm,
            span: tracing::span!(tracing::Level::TRACE, "layer-ff"),
        })
    }
}
impl Module for T5LayerFF {
    /// Pre-norm residual feed-forward: `x + FF(LayerNorm(x))`.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let normed = self.layer_norm.forward(xs)?;
        let ff_out = if let Some(dense_act) = &self.dense_act {
            dense_act.forward(&normed)?
        } else {
            self.gated_dense_act.as_ref().unwrap().forward(&normed)?
        };
        xs + ff_out
    }
}
/// Multi-head attention used for both self- and cross-attention.
/// Only the first layer of a stack owns `relative_attention_bias`; the bias it
/// computes is passed back to the caller and reused by later layers.
#[derive(Debug, Clone)]
struct T5Attention {
    q: QMatMul,
    k: QMatMul,
    v: QMatMul,
    o: QMatMul,
    n_heads: usize,
    d_kv: usize,
    relative_attention_bias: Option<Embedding>,
    relative_attention_num_buckets: usize,
    relative_attention_max_distance: usize,
    // n_heads * d_kv.
    inner_dim: usize,
    // Only true for decoder self-attention (cfg.use_cache && decoder).
    use_cache: bool,
    kv_cache: Option<(Tensor, Tensor)>,
    span: tracing::Span,
    span_cache: tracing::Span,
    span_mm: tracing::Span,
    span_sm: tracing::Span,
}
impl T5Attention {
    /// Load the q/k/v/o projections and, when `has_relative_attention_bias` is
    /// set (first layer of a stack), the relative-position bias embedding.
    /// KV caching is only enabled for decoder self-attention.
    fn load(
        has_relative_attention_bias: bool,
        decoder: bool,
        vb: VarBuilder,
        cfg: &Config,
    ) -> Result<Self> {
        let inner_dim = cfg.num_heads * cfg.d_kv;
        let q = QMatMul::new(cfg.d_model, inner_dim, vb.pp("q"))?;
        let k = QMatMul::new(cfg.d_model, inner_dim, vb.pp("k"))?;
        let v = QMatMul::new(cfg.d_model, inner_dim, vb.pp("v"))?;
        let o = QMatMul::new(inner_dim, cfg.d_model, vb.pp("o"))?;
        let relative_attention_bias = if has_relative_attention_bias {
            let emb = Embedding::new(
                cfg.relative_attention_num_buckets,
                cfg.num_heads,
                vb.pp("relative_attention_bias"),
            )?;
            Some(emb)
        } else {
            None
        };
        Ok(Self {
            q,
            k,
            v,
            o,
            n_heads: cfg.num_heads,
            d_kv: cfg.d_kv,
            relative_attention_bias,
            relative_attention_num_buckets: cfg.relative_attention_num_buckets,
            relative_attention_max_distance: cfg.relative_attention_max_distance,
            inner_dim,
            use_cache: cfg.use_cache && decoder,
            kv_cache: None,
            span: tracing::span!(tracing::Level::TRACE, "attention"),
            span_cache: tracing::span!(tracing::Level::TRACE, "attention-cache"),
            span_mm: tracing::span!(tracing::Level::TRACE, "attention-mm"),
            span_sm: tracing::span!(tracing::Level::TRACE, "attention-sm"),
        })
    }
    /// Attention forward pass; returns the output together with the position
    /// bias so the caller can reuse it across layers. Note that, as in T5,
    /// the scores are NOT scaled by 1/sqrt(d_kv).
    fn forward(
        &mut self,
        xs: &Tensor,
        position_bias: Option<&Tensor>,
        key_value_states: Option<&Tensor>,
        mask: Option<&Tensor>,
    ) -> Result<(Tensor, Option<Tensor>)> {
        // Performs Self-attention (if key_value_states is None) or attention
        // over source sentence (provided by key_value_states).
        let _enter = self.span.enter();
        let kv_input = match key_value_states {
            None => xs,
            Some(key_value_states) => key_value_states,
        };
        let (b_sz, q_len) = (xs.dim(0)?, xs.dim(1)?);
        let kv_len = kv_input.dim(1)?;
        let q = self.q.forward(xs)?;
        let k = self.k.forward(kv_input)?;
        let v = self.v.forward(kv_input)?;
        // (b, len, inner_dim) -> (b, n_heads, len, d_kv).
        let q = q
            .reshape((b_sz, q_len, self.n_heads, self.d_kv))?
            .transpose(1, 2)?
            .contiguous()?;
        let mut k = k
            .reshape((b_sz, kv_len, self.n_heads, self.d_kv))?
            .transpose(1, 2)?;
        let mut v = v
            .reshape((b_sz, kv_len, self.n_heads, self.d_kv))?
            .transpose(1, 2)?;
        if self.use_cache && key_value_states.is_none() {
            let _enter = self.span_cache.enter();
            // Decoder self-attention: append the new keys/values to the cache
            // along the sequence dimension (dim 2).
            if let Some((kv_cache_k, kv_cache_v)) = &self.kv_cache {
                k = Tensor::cat(&[kv_cache_k, &k], 2)?;
                v = Tensor::cat(&[kv_cache_v, &v], 2)?;
            };
            self.kv_cache = Some((k.clone(), v.clone()));
        };
        let k = k.contiguous()?;
        let v = v.contiguous()?;
        // TODO: Use flash_attn.
        let scores = {
            let _enter = self.span_mm.enter();
            q.matmul(&k.t()?)?
        };
        // Apply the (causal) mask by filling masked positions with -inf so they
        // vanish after the softmax.
        let scores = match mask {
            None => scores,
            Some(mask) => masked_fill(
                &scores,
                &mask
                    .unsqueeze(0)?
                    .unsqueeze(0)?
                    .repeat((b_sz, self.n_heads))?,
                f32::NEG_INFINITY,
            )?,
        };
        let (scores, position_bias) = match position_bias {
            // Bias already computed by an earlier layer: just add it.
            Some(position_bias) => (
                scores.broadcast_add(position_bias)?,
                Some(position_bias.clone()),
            ),
            None => match &self.relative_attention_bias {
                None => (scores, None),
                Some(relative_attention_bias) => {
                    // This only handles the bidirectional case.
                    // Map each (query i, key j) offset to a relative-position
                    // bucket: half of the buckets are used for j > i and half
                    // for j <= i; within each half, small offsets get exact
                    // buckets and larger ones are binned logarithmically up to
                    // `relative_attention_max_distance`.
                    let kv_len = k.dim(2)?;
                    let (q_start, q_end) = match self.use_cache {
                        true => ((kv_len - q_len) as u32, kv_len as u32),
                        false => (0_u32, kv_len as u32),
                    };
                    let num_buckets = self.relative_attention_num_buckets as u32 / 2;
                    let max_exact = num_buckets / 2;
                    let relative_position = (q_start..q_end)
                        .map(|i| {
                            (0..kv_len as u32)
                                .map(|j| {
                                    if i < j {
                                        if j - i < max_exact {
                                            j - i + num_buckets
                                        } else {
                                            let b = f32::log(
                                                (j - i) as f32 / max_exact as f32,
                                                self.relative_attention_max_distance as f32
                                                    / max_exact as f32,
                                            ) * (num_buckets - max_exact) as f32;
                                            u32::min(
                                                max_exact + num_buckets + b as u32,
                                                self.relative_attention_num_buckets as u32 - 1,
                                            )
                                        }
                                    } else if i - j < max_exact {
                                        i - j
                                    } else {
                                        let b = f32::log(
                                            (i - j) as f32 / max_exact as f32,
                                            self.relative_attention_max_distance as f32
                                                / max_exact as f32,
                                        ) * (num_buckets - max_exact) as f32;
                                        max_exact + b as u32
                                    }
                                })
                                .collect::<Vec<u32>>()
                        })
                        .collect::<Vec<Vec<_>>>();
                    let relative_buckets = Tensor::new(relative_position, q.device())?;
                    // Look the buckets up per head: (q, kv, heads) -> (1, heads, q, kv).
                    let position_bias = relative_attention_bias
                        .forward(&relative_buckets)?
                        .permute((2, 0, 1))?
                        .unsqueeze(0)?;
                    (scores.broadcast_add(&position_bias)?, Some(position_bias))
                    // TODO: position_bias_masked?
                }
            },
        };
        let attn_weights = {
            let _enter = self.span_sm.enter();
            candle_nn::ops::softmax_last_dim(&scores)?
        };
        let attn_output = attn_weights.matmul(&v)?;
        // (b, n_heads, q_len, d_kv) -> (b, q_len, inner_dim).
        let attn_output = attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.inner_dim))?;
        let attn_output = self.o.forward(&attn_output)?;
        Ok((attn_output, position_bias))
    }
    /// Drop any cached decoder keys/values.
    fn clear_kv_cache(&mut self) {
        self.kv_cache = None
    }
}
/// Pre-norm self-attention sub-layer with residual connection.
#[derive(Debug, Clone)]
struct T5LayerSelfAttention {
    self_attention: T5Attention,
    layer_norm: T5LayerNorm,
    span: tracing::Span,
}
impl T5LayerSelfAttention {
    /// `h`: whether this layer owns the relative attention bias; `d`: decoder flag.
    fn load(h: bool, d: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let self_attention = T5Attention::load(h, d, vb.pp("SelfAttention"), cfg)?;
        let layer_norm =
            T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
        Ok(Self {
            self_attention,
            layer_norm,
            span: tracing::span!(tracing::Level::TRACE, "self-attn"),
        })
    }
    /// Pre-norm residual self-attention: `x + Attn(LayerNorm(x))`.
    fn forward(
        &mut self,
        xs: &Tensor,
        position_bias: Option<&Tensor>,
        mask: Option<&Tensor>,
    ) -> Result<(Tensor, Option<Tensor>)> {
        let _enter = self.span.enter();
        let normed_xs = self.layer_norm.forward(xs)?;
        let (ys, position_bias) =
            self.self_attention
                .forward(&normed_xs, position_bias, None, mask)?;
        let ys = (xs + ys)?;
        Ok((ys, position_bias))
    }
    fn clear_kv_cache(&mut self) {
        self.self_attention.clear_kv_cache()
    }
}
/// Pre-norm encoder-decoder (cross) attention sub-layer with residual connection.
#[derive(Debug, Clone)]
struct T5LayerCrossAttention {
    cross_attention: T5Attention,
    layer_norm: T5LayerNorm,
    span: tracing::Span,
}
impl T5LayerCrossAttention {
    fn load(decoder: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> {
        // Cross-attention never owns a relative attention bias (first arg = false).
        let cross_attention = T5Attention::load(false, decoder, vb.pp("EncDecAttention"), cfg)?;
        let layer_norm =
            T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
        Ok(Self {
            cross_attention,
            layer_norm,
            span: tracing::span!(tracing::Level::TRACE, "cross-attn"),
        })
    }
    /// Pre-norm residual cross-attention; keys/values come from
    /// `key_value_states` (the encoder output), no mask is applied.
    fn forward(
        &mut self,
        hidden_states: &Tensor,
        position_bias: Option<&Tensor>,
        key_value_states: &Tensor,
    ) -> Result<(Tensor, Option<Tensor>)> {
        let _enter = self.span.enter();
        let normed_hidden_states = self.layer_norm.forward(hidden_states)?;
        let (ys, position_bias) = self.cross_attention.forward(
            &normed_hidden_states,
            position_bias,
            Some(key_value_states),
            None,
        )?;
        let ys = (hidden_states + ys)?;
        Ok((ys, position_bias))
    }
    fn clear_kv_cache(&mut self) {
        self.cross_attention.clear_kv_cache()
    }
}
/// One transformer block: self-attention, optional cross-attention
/// (decoder only) and feed-forward.
#[derive(Debug, Clone)]
struct T5Block {
    self_attn: T5LayerSelfAttention,
    cross_attn: Option<T5LayerCrossAttention>,
    ff: T5LayerFF,
    span: tracing::Span,
}
impl T5Block {
    fn load(
        has_relative_attention_bias: bool,
        decoder: bool,
        vb: VarBuilder,
        cfg: &Config,
    ) -> Result<Self> {
        // Sub-layers live under "layer.0" (self-attn), "layer.1" (cross-attn,
        // decoder only) and the next index (feed-forward).
        let vb = vb.pp("layer");
        let self_attn =
            T5LayerSelfAttention::load(has_relative_attention_bias, decoder, vb.pp("0"), cfg)?;
        let cross_attn = if cfg.is_decoder {
            Some(T5LayerCrossAttention::load(decoder, vb.pp("1"), cfg)?)
        } else {
            None
        };
        let ff_i = if cross_attn.is_some() { 2 } else { 1 };
        let ff = T5LayerFF::load(vb.pp(ff_i), cfg)?;
        Ok(Self {
            self_attn,
            cross_attn,
            ff,
            span: tracing::span!(tracing::Level::TRACE, "block"),
        })
    }
    /// Self-attention -> (optional) cross-attention -> feed-forward, returning
    /// the updated hidden states plus the (possibly newly computed) position bias.
    fn forward(
        &mut self,
        xs: &Tensor,
        position_bias: Option<&Tensor>,
        encoder_hidden_states: Option<&Tensor>,
    ) -> Result<(Tensor, Option<Tensor>)> {
        let _enter = self.span.enter();
        // TODO: Cache masks
        // A causal mask is only needed in the decoder, signalled here by the
        // presence of a cross-attention sub-layer.
        let mask = match self.cross_attn.is_some() {
            true => {
                let mask_len = xs.dim(1)?;
                // If the input seq length is 1, no need for a mask, this is also helpful to avoid shape
                // issues when using the KV cache in the decoder.
                if mask_len <= 1 {
                    None
                } else {
                    Some(get_mask(mask_len, xs.device())?)
                }
            }
            false => None,
        };
        let (mut xs, position_bias) = self.self_attn.forward(xs, position_bias, mask.as_ref())?;
        // TODO: clamp for f16?
        if let Some(cross_attn) = &mut self.cross_attn {
            // NOTE(review): `encoder_hidden_states` is assumed to be Some for
            // decoder blocks; a missing value panics here.
            (xs, _) = cross_attn.forward(&xs, None, encoder_hidden_states.unwrap())?;
            // TODO: clamp for f16?
        }
        let xs = self.ff.forward(&xs)?;
        // TODO: clamp for f16?
        Ok((xs, position_bias))
    }
    fn clear_kv_cache(&mut self) {
        self.self_attn.clear_kv_cache();
        self.cross_attn.iter_mut().for_each(|c| c.clear_kv_cache());
    }
}
/// A stack of T5 blocks (encoder or decoder) sharing one token embedding,
/// followed by a final layer norm.
#[derive(Debug, Clone)]
struct T5Stack {
    block: Vec<T5Block>,
    shared: Arc<Embedding>,
    final_layer_norm: T5LayerNorm,
    span: tracing::Span,
}
impl T5Stack {
    /// Load `num_layers` blocks; only block 0 owns the relative attention bias
    /// (its computed bias is threaded through the later blocks at runtime).
    fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> {
        let block = (0..cfg.num_layers)
            .map(|i| T5Block::load(i == 0, decoder, vb.pp(format!("block.{i}")), cfg))
            .collect::<Result<Vec<_>>>()?;
        let final_layer_norm = T5LayerNorm::load(
            cfg.d_model,
            cfg.layer_norm_epsilon,
            vb.pp("final_layer_norm"),
        )?;
        Ok(Self {
            block,
            shared: shared.clone(),
            final_layer_norm,
            span: tracing::span!(tracing::Level::TRACE, "stack"),
        })
    }
    /// Embed `input_ids`, run every block (reusing block 0's position bias),
    /// then apply the final layer norm.
    fn forward(
        &mut self,
        input_ids: &Tensor,
        encoder_hidden_states: Option<&Tensor>,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let input_embeds = self.shared.as_ref().forward(input_ids)?;
        let mut hidden_states = input_embeds;
        let mut position_bias = None;
        for block in self.block.iter_mut() {
            (hidden_states, position_bias) = block.forward(
                &hidden_states,
                position_bias.as_ref(),
                encoder_hidden_states,
            )?
        }
        self.final_layer_norm.forward(&hidden_states)
    }
    /// Clear the KV caches of every block in the stack.
    fn clear_kv_cache(&mut self) {
        self.block.iter_mut().for_each(|b| b.clear_kv_cache())
    }
}
/// Encoder-only T5 model (e.g. for producing sentence embeddings).
#[derive(Debug, Clone)]
pub struct T5EncoderModel {
    encoder: T5Stack,
    device: Device,
    span: tracing::Span,
}
impl T5EncoderModel {
    pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        // The shared token embedding may be stored under either name depending
        // on how the weights were exported.
        let shared_vb = if vb.contains_key("shared.weight") {
            vb.pp("shared")
        } else {
            vb.pp("decoder").pp("embed_tokens")
        };
        let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?;
        let shared = Arc::new(shared);
        let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, cfg)?;
        Ok(Self {
            encoder,
            device: vb.device().clone(),
            span: tracing::span!(tracing::Level::TRACE, "encoder"),
        })
    }
    /// Encode `input_ids` into the encoder's final hidden states.
    pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.encoder.forward(input_ids, None)
    }
    pub fn device(&self) -> &Device {
        &self.device
    }
    pub fn clear_kv_cache(&mut self) {
        self.encoder.clear_kv_cache()
    }
}
/// Full encoder-decoder T5 model with an LM head (tied to the shared
/// embedding when `tie_word_embeddings` is set).
#[derive(Debug, Clone)]
pub struct T5ForConditionalGeneration {
    encoder: T5Stack,
    decoder: T5Stack,
    d_model: usize,
    tie_word_embeddings: bool,
    // None when tied: the shared embedding matrix is used instead.
    lm_head: Option<QMatMul>,
    shared: Arc<Embedding>,
    device: Device,
    span_decode: tracing::Span,
    span_decode_head: tracing::Span,
}
impl T5ForConditionalGeneration {
    /// Build the full encoder-decoder model from quantized weights.
    pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        assert!(cfg.is_encoder_decoder);
        let d_model = cfg.d_model;
        // The shared token embedding may be stored under either name depending
        // on how the weights were exported.
        let shared_vb = if vb.contains_key("shared.weight") {
            vb.pp("shared")
        } else {
            vb.pp("decoder").pp("embed_tokens")
        };
        let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?;
        let shared = Arc::new(shared);
        // The encoder runs without causal masking or KV caching.
        let mut encoder_cfg = cfg.clone();
        encoder_cfg.is_decoder = false;
        encoder_cfg.use_cache = false;
        encoder_cfg.is_encoder_decoder = false;
        let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, &encoder_cfg)?;
        let mut decoder_cfg = cfg.clone();
        decoder_cfg.is_decoder = true;
        decoder_cfg.is_encoder_decoder = false;
        decoder_cfg.num_layers = cfg.num_decoder_layers.unwrap_or(cfg.num_layers);
        let decoder = T5Stack::load(true, vb.pp("decoder"), &shared, &decoder_cfg)?;
        let tie_word_embeddings = cfg.tie_word_embeddings;
        // With tied embeddings the LM head reuses the shared embedding matrix.
        let lm_head = if tie_word_embeddings {
            None
        } else {
            Some(QMatMul::new(cfg.d_model, cfg.vocab_size, vb.pp("lm_head"))?)
        };
        Ok(Self {
            encoder,
            decoder,
            d_model,
            tie_word_embeddings,
            lm_head,
            shared,
            device: vb.device().clone(),
            span_decode: tracing::span!(tracing::Level::TRACE, "decode"),
            span_decode_head: tracing::span!(tracing::Level::TRACE, "decode-head"),
        })
    }
    /// Encode the source tokens into hidden states for the decoder to attend over.
    pub fn encode(&mut self, input_ids: &Tensor) -> Result<Tensor> {
        self.encoder.forward(input_ids, None)
    }
    /// Run the decoder over `decoder_input_ids` attending to `encoder_output`
    /// and return the logits for the LAST position only.
    pub fn decode(
        &mut self,
        decoder_input_ids: &Tensor,
        encoder_output: &Tensor,
    ) -> Result<Tensor> {
        let _enter = self.span_decode.enter();
        let decoder_output = self
            .decoder
            .forward(decoder_input_ids, Some(encoder_output))?;
        let scaling_factor = if self.tie_word_embeddings {
            // Rescale output before projecting on vocab
            // See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
            (self.d_model as f64).sqrt()
        } else {
            1.0
        };
        // Keep only the last time-step before projecting to the vocabulary.
        let sequence_output = ((decoder_output
            .narrow(1, decoder_output.dim(1)? - 1, 1)?
            .squeeze(1)?)
            * scaling_factor)?;
        let output = {
            let _enter = self.span_decode_head.enter();
            match self.lm_head {
                // Tied embeddings: project with the transposed embedding matrix.
                None => sequence_output.matmul(&self.shared.embeddings().t()?)?,
                Some(ref lm_head) => lm_head.forward(&sequence_output)?,
            }
        };
        Ok(output)
    }
    /// Convenience: encode then decode in one call.
    pub fn forward(&mut self, input_ids: &Tensor, decoder_input_ids: &Tensor) -> Result<Tensor> {
        let encoder_output = self.encode(input_ids)?;
        self.decode(decoder_input_ids, &encoder_output)
    }
    pub fn device(&self) -> &Device {
        &self.device
    }
    pub fn clear_kv_cache(&mut self) {
        self.encoder.clear_kv_cache();
        self.decoder.clear_kv_cache();
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/paligemma.rs | candle-transformers/src/models/paligemma.rs | //! Multimodal multi-purpose model combining Gemma-based language model with SigLIP image understanding
//!
//! See PaLiGemma details at:
//! - [Paper](https://arxiv.org/abs/2402.05257)
//! - [Google Blog Post](https://blog.research.google/2024/02/paligemma-scaling-language-image.html)
//!
//! The model is a multimodal combination of:
//! - SigLIP vision encoder
//! - Gemma language model
//! - Cross-projection layers
//!
//! References:
//! - [HuggingFace Implementation](https://huggingface.co/google/paligemma-3b)
//! - [Paper: PaLI-3 and Beyond: Scaling Language-Image Learning](https://arxiv.org/abs/2402.05257)
//!
use crate::models::{gemma, siglip};
use candle::{Module, Result, Tensor};
use candle_nn::{linear, Linear, VarBuilder};
/// PaLiGemma configuration: a SigLIP vision tower, a Gemma text model and the
/// dimension the image features are projected to.
#[derive(serde::Deserialize, Clone, Debug)]
pub struct Config {
    pub vision_config: siglip::VisionConfig,
    pub text_config: gemma::Config,
    pub projection_dim: usize,
}
impl Config {
    pub fn paligemma_3b_224() -> Self {
        // https://huggingface.co/google/paligemma-3b-pt-224/blob/main/config.json
        Self {
            vision_config: siglip::VisionConfig::paligemma_3b_224(),
            text_config: gemma::Config {
                hidden_size: 2048,
                intermediate_size: 16384,
                num_attention_heads: 8,
                num_hidden_layers: 18,
                num_key_value_heads: 1,
                vocab_size: 257216,
                // Default values.
                rope_theta: 10000.,
                head_dim: 256,
                hidden_act: Some(candle_nn::Activation::GeluPytorchTanh),
                hidden_activation: None,
                attention_bias: false,
                max_position_embeddings: 8192,
                rms_norm_eps: 1e-6,
            },
            projection_dim: 2048,
        }
    }
    // Same text config as the 224px variant; only the vision tower differs.
    // Presumably mirrors google/paligemma-3b-pt-448 — verify against its config.json.
    pub fn paligemma_3b_448() -> Self {
        Self {
            vision_config: siglip::VisionConfig::paligemma_3b_448(),
            text_config: gemma::Config {
                hidden_size: 2048,
                intermediate_size: 16384,
                num_attention_heads: 8,
                num_hidden_layers: 18,
                num_key_value_heads: 1,
                // Default values.
                rope_theta: 10000.,
                head_dim: 256,
                hidden_act: Some(candle_nn::Activation::GeluPytorchTanh),
                hidden_activation: None,
                attention_bias: false,
                max_position_embeddings: 8192,
                rms_norm_eps: 1e-6,
                vocab_size: 257216,
            },
            projection_dim: 2048,
        }
    }
}
/// Linear projection from the vision tower's hidden size to `projection_dim`
/// (the language model's embedding space).
#[derive(Clone, Debug)]
pub struct MultiModalProjector {
    linear: Linear,
}
impl MultiModalProjector {
    /// Load the single projection layer from `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let proj = linear(
            cfg.vision_config.hidden_size,
            cfg.projection_dim,
            vb.pp("linear"),
        )?;
        Ok(Self { linear: proj })
    }
}
impl Module for MultiModalProjector {
    /// Project image features into the language model's embedding space.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        self.linear.forward(xs)
    }
}
/// PaLiGemma: SigLIP vision tower + linear projection + Gemma language model.
#[derive(Clone, Debug)]
pub struct Model {
    // Number of positions already consumed by the language model's KV cache.
    pos: usize,
    vision_tower: siglip::VisionModel,
    multi_modal_projector: MultiModalProjector,
    language_model: gemma::Model,
}
impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vision_tower = siglip::VisionModel::new(
            &cfg.vision_config,
            false,
            vb.pp("vision_tower.vision_model"),
        )?;
        let multi_modal_projector = MultiModalProjector::new(cfg, vb.pp("multi_modal_projector"))?;
        let language_model = gemma::Model::new(false, &cfg.text_config, vb.pp("language_model"))?;
        Ok(Self {
            pos: 0,
            language_model,
            vision_tower,
            multi_modal_projector,
        })
    }
    /// Prefill: encode the image, project and L2-normalize the features,
    /// prepend them to the text embeddings and run the language model.
    /// Resets the KV cache and leaves `self.pos` at the prompt length so
    /// subsequent `forward` calls continue from there.
    pub fn setup(&mut self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<Tensor> {
        self.clear_kv_cache();
        let image_features = self
            .vision_tower
            .forward(pixel_values)?
            .apply(&self.multi_modal_projector)?;
        let image_features = crate::models::clip::div_l2_norm(&image_features)?;
        let text_features = self.language_model.embed_tokens().forward(input_ids)?;
        let input_embeds = Tensor::cat(&[image_features, text_features], 1)?;
        self.pos = input_embeds.dim(1)?;
        self.language_model.forward_embeds(&input_embeds, None, 0)
    }
    /// Decode step: run the LM at the cached position and advance it by `seq_len`.
    pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> {
        let pos = self.pos;
        let seq_len = input_ids.dim(1)?;
        self.pos = pos + seq_len;
        self.language_model.forward(input_ids, pos)
    }
    /// Text-only pass that uses the LM's "without projection" forward path.
    pub fn forward_without_projection(&mut self, input_ids: &Tensor) -> Result<Tensor> {
        self.clear_kv_cache();
        let input_embeds = self.language_model.embed_tokens().forward(input_ids)?;
        self.language_model
            .forward_embeds_without_projection(&input_embeds, None, 0)
    }
    /// Like `setup` but uses the LM's "without projection" forward path
    /// (note: does not update `self.pos`).
    pub fn setup_without_projection(
        &mut self,
        pixel_values: &Tensor,
        input_ids: &Tensor,
    ) -> Result<Tensor> {
        self.clear_kv_cache();
        let image_features = self
            .vision_tower
            .forward(pixel_values)?
            .apply(&self.multi_modal_projector)?;
        let image_features = crate::models::clip::div_l2_norm(&image_features)?;
        let text_features = self.language_model.embed_tokens().forward(input_ids)?;
        let input_embeds = Tensor::cat(&[image_features, text_features], 1)?;
        self.language_model
            .forward_embeds_without_projection(&input_embeds, None, 0)
    }
    /// Reset both the position counter and the LM's KV cache.
    pub fn clear_kv_cache(&mut self) {
        self.pos = 0;
        self.language_model.clear_kv_cache()
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/marian.rs | candle-transformers/src/models/marian.rs | //! Marian Neural Machine Translation
//!
//! See "Marian: Fast Neural Machine Translation in C++" Junczys-Dowmunt et al. 2018
//! - [ACL Anthology](https://aclanthology.org/P18-4020/)
//! - [GitHub](https://github.com/marian-nmt/marian)
//!
use super::with_tracing::{linear, Embedding, Linear};
use candle::{Result, Tensor};
use candle_nn::{layer_norm, LayerNorm, VarBuilder};
/// Marian encoder-decoder hyper-parameters, mirroring the HuggingFace
/// `config.json` of the Helsinki-NLP opus-mt checkpoints.
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub decoder_vocab_size: Option<usize>,
    pub max_position_embeddings: usize,
    pub encoder_layers: usize,
    pub encoder_ffn_dim: usize,
    pub encoder_attention_heads: usize,
    pub decoder_layers: usize,
    pub decoder_ffn_dim: usize,
    pub decoder_attention_heads: usize,
    pub use_cache: bool,
    pub is_encoder_decoder: bool,
    pub activation_function: candle_nn::Activation,
    pub d_model: usize,
    pub decoder_start_token_id: u32,
    pub scale_embedding: bool,
    pub pad_token_id: u32,
    pub eos_token_id: u32,
    pub forced_eos_token_id: u32,
    pub share_encoder_decoder_embeddings: bool,
}
// Ready-made configurations for a handful of Helsinki-NLP opus-mt checkpoints.
impl Config {
    // https://huggingface.co/Helsinki-NLP/opus-mt-tc-big-fr-en/blob/main/config.json
    pub fn opus_mt_tc_big_fr_en() -> Self {
        Self {
            activation_function: candle_nn::Activation::Relu,
            d_model: 1024,
            decoder_attention_heads: 16,
            decoder_ffn_dim: 4096,
            decoder_layers: 6,
            decoder_start_token_id: 53016,
            decoder_vocab_size: Some(53017),
            encoder_attention_heads: 16,
            encoder_ffn_dim: 4096,
            encoder_layers: 6,
            eos_token_id: 43311,
            forced_eos_token_id: 43311,
            is_encoder_decoder: true,
            max_position_embeddings: 1024,
            pad_token_id: 53016,
            scale_embedding: true,
            share_encoder_decoder_embeddings: true,
            use_cache: true,
            vocab_size: 53017,
        }
    }
    // https://huggingface.co/Helsinki-NLP/opus-mt-fr-en/blob/main/config.json
    pub fn opus_mt_fr_en() -> Self {
        Self {
            activation_function: candle_nn::Activation::Swish,
            d_model: 512,
            decoder_attention_heads: 8,
            decoder_ffn_dim: 2048,
            decoder_layers: 6,
            decoder_start_token_id: 59513,
            decoder_vocab_size: Some(59514),
            encoder_attention_heads: 8,
            encoder_ffn_dim: 2048,
            encoder_layers: 6,
            eos_token_id: 0,
            forced_eos_token_id: 0,
            is_encoder_decoder: true,
            max_position_embeddings: 512,
            pad_token_id: 59513,
            scale_embedding: true,
            share_encoder_decoder_embeddings: true,
            use_cache: true,
            vocab_size: 59514,
        }
    }
    // Presumably matches Helsinki-NLP/opus-mt-en-zh — verify against its config.json.
    pub fn opus_mt_en_zh() -> Self {
        Self {
            activation_function: candle_nn::Activation::Swish,
            d_model: 512,
            decoder_attention_heads: 8,
            decoder_ffn_dim: 2048,
            decoder_layers: 6,
            decoder_start_token_id: 65000,
            decoder_vocab_size: Some(65001),
            encoder_attention_heads: 8,
            encoder_ffn_dim: 2048,
            encoder_layers: 6,
            eos_token_id: 0,
            forced_eos_token_id: 0,
            is_encoder_decoder: true,
            max_position_embeddings: 512,
            pad_token_id: 65000,
            scale_embedding: true,
            share_encoder_decoder_embeddings: true,
            use_cache: true,
            vocab_size: 65001,
        }
    }
    // Presumably matches Helsinki-NLP/opus-mt-en-hi — verify against its config.json.
    pub fn opus_mt_en_hi() -> Self {
        Self {
            activation_function: candle_nn::Activation::Swish,
            d_model: 512,
            decoder_attention_heads: 8,
            decoder_ffn_dim: 2048,
            decoder_layers: 6,
            decoder_start_token_id: 61949,
            decoder_vocab_size: Some(61950),
            encoder_attention_heads: 8,
            encoder_ffn_dim: 2048,
            encoder_layers: 6,
            eos_token_id: 0,
            forced_eos_token_id: 0,
            is_encoder_decoder: true,
            max_position_embeddings: 512,
            pad_token_id: 61949,
            scale_embedding: true,
            share_encoder_decoder_embeddings: true,
            use_cache: true,
            vocab_size: 61950,
        }
    }
    // Presumably matches Helsinki-NLP/opus-mt-en-es — verify against its config.json.
    pub fn opus_mt_en_es() -> Self {
        Self {
            activation_function: candle_nn::Activation::Swish,
            d_model: 512,
            decoder_attention_heads: 8,
            decoder_ffn_dim: 2048,
            decoder_layers: 6,
            decoder_start_token_id: 65000,
            decoder_vocab_size: Some(65001),
            encoder_attention_heads: 8,
            encoder_ffn_dim: 2048,
            encoder_layers: 6,
            eos_token_id: 0,
            forced_eos_token_id: 0,
            is_encoder_decoder: true,
            max_position_embeddings: 512,
            pad_token_id: 65000,
            scale_embedding: true,
            share_encoder_decoder_embeddings: true,
            use_cache: true,
            vocab_size: 65001,
        }
    }
    // Presumably matches Helsinki-NLP/opus-mt-en-fr — verify against its config.json.
    pub fn opus_mt_en_fr() -> Self {
        Self {
            activation_function: candle_nn::Activation::Swish,
            d_model: 512,
            decoder_attention_heads: 8,
            decoder_ffn_dim: 2048,
            decoder_layers: 6,
            decoder_start_token_id: 59513,
            decoder_vocab_size: Some(59514),
            encoder_attention_heads: 8,
            encoder_ffn_dim: 2048,
            encoder_layers: 6,
            eos_token_id: 0,
            forced_eos_token_id: 0,
            is_encoder_decoder: true,
            max_position_embeddings: 512,
            pad_token_id: 59513,
            scale_embedding: true,
            share_encoder_decoder_embeddings: true,
            use_cache: true,
            vocab_size: 59514,
        }
    }
    // Presumably matches Helsinki-NLP/opus-mt-en-ru — verify against its config.json.
    pub fn opus_mt_en_ru() -> Self {
        Self {
            activation_function: candle_nn::Activation::Swish,
            d_model: 512,
            decoder_attention_heads: 8,
            decoder_ffn_dim: 2048,
            decoder_layers: 6,
            decoder_start_token_id: 62517,
            decoder_vocab_size: Some(62518),
            encoder_attention_heads: 8,
            encoder_ffn_dim: 2048,
            encoder_layers: 6,
            eos_token_id: 0,
            forced_eos_token_id: 0,
            is_encoder_decoder: true,
            max_position_embeddings: 512,
            pad_token_id: 62517,
            scale_embedding: true,
            share_encoder_decoder_embeddings: true,
            use_cache: true,
            vocab_size: 62518,
        }
    }
}
/// Fixed (non-learned) sinusoidal positional embeddings, precomputed into a
/// frozen embedding table indexed by absolute position.
#[derive(Debug, Clone)]
struct SinusoidalPositionalEmbedding {
    emb: Embedding,
}
impl SinusoidalPositionalEmbedding {
    /// Precompute the (max_position_embeddings, d_model) table.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let dev = vb.device();
        let dtype = vb.dtype();
        let num_positions = cfg.max_position_embeddings;
        let dim = cfg.d_model;
        // Inverse frequencies 1 / 10000^(i/dim) for every other channel.
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, num_positions as u32, dev)?
            .to_dtype(dtype)?
            .reshape((num_positions, 1))?;
        // Outer product: (positions, 1) x (1, dim/2) -> per-position phases.
        let freqs = t.matmul(&inv_freq)?;
        let sin = freqs.sin()?;
        let cos = freqs.cos()?;
        // The table stores [sin | cos] concatenated along the channel dim
        // (not interleaved).
        let weights = Tensor::cat(&[&sin, &cos], 1)?.contiguous()?;
        let emb = Embedding::from_weights(weights)?;
        Ok(Self { emb })
    }
    /// Embeddings for positions `past_kv_len .. past_kv_len + seq_len`, so that
    /// incremental decoding continues from the cached position.
    fn forward(&self, input_ids: &Tensor, past_kv_len: usize) -> Result<Tensor> {
        let seq_len = input_ids.dim(1)?;
        Tensor::arange(
            past_kv_len as u32,
            (past_kv_len + seq_len) as u32,
            input_ids.device(),
        )?
        .apply(&self.emb)
    }
}
#[derive(Debug, Clone)]
struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    out_proj: Linear,
    // 1 / sqrt(head_dim), applied to the queries before the dot product.
    scaling: f64,
    num_heads: usize,
    head_dim: usize,
    // Cached (key, value) states, shape (b, num_heads, past_len, head_dim).
    // Only populated when `is_decoder` is true.
    kv_cache: Option<(Tensor, Tensor)>,
    is_decoder: bool,
}
impl Attention {
    /// Builds a multi-head attention module. `is_decoder` selects the head
    /// count from the config and enables key/value caching for incremental
    /// decoding.
    fn new(cfg: &Config, is_decoder: bool, vb: VarBuilder) -> Result<Self> {
        let num_heads = if is_decoder {
            cfg.decoder_attention_heads
        } else {
            cfg.encoder_attention_heads
        };
        let embed_dim = cfg.d_model;
        let head_dim = embed_dim / num_heads;
        let scaling = (head_dim as f64).powf(-0.5);
        let q_proj = linear(embed_dim, embed_dim, vb.pp("q_proj"))?;
        let k_proj = linear(embed_dim, embed_dim, vb.pp("k_proj"))?;
        let v_proj = linear(embed_dim, embed_dim, vb.pp("v_proj"))?;
        let out_proj = linear(embed_dim, embed_dim, vb.pp("out_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            out_proj,
            scaling,
            num_heads,
            head_dim,
            kv_cache: None,
            is_decoder,
        })
    }
    // Reshapes (bsz, seq, embed_dim) into (bsz, num_heads, seq, head_dim).
    fn _shape(&self, tensor: &Tensor, bsz: usize) -> Result<Tensor> {
        tensor
            .reshape((bsz, (), self.num_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()
    }
    /// Scaled dot-product attention over `xs` of shape (b, tgt_len, d_model).
    ///
    /// * `kv_states` - when `Some`, keys/values are projected from these
    ///   states (cross-attention); when `None`, from `xs` itself
    ///   (self-attention).
    /// * `attn_mask` - optional additive mask broadcast onto the raw scores
    ///   (e.g. a causal mask with -inf above the diagonal).
    ///
    /// For decoder self-attention, the freshly computed keys/values are
    /// appended to `kv_cache` along the sequence axis so that subsequent
    /// single-token steps attend over the whole history.
    fn forward(
        &mut self,
        xs: &Tensor,
        kv_states: Option<&Tensor>,
        attn_mask: Option<&Tensor>,
    ) -> Result<Tensor> {
        let (b_sz, tgt_len, _) = xs.dims3()?;
        // Pre-scale the queries rather than the scores.
        let query_states = (xs.apply(&self.q_proj)? * self.scaling)?;
        let (key_states, value_states) = match kv_states {
            None => {
                // Self-attention: keys/values come from the input itself.
                let key_states = self._shape(&xs.apply(&self.k_proj)?, b_sz)?;
                let value_states = self._shape(&xs.apply(&self.v_proj)?, b_sz)?;
                if self.is_decoder {
                    // Append to the cache along the sequence dimension (dim 2).
                    let kv_states = match &self.kv_cache {
                        None => (key_states, value_states),
                        Some((p_key_states, p_value_states)) => {
                            let key_states = Tensor::cat(&[p_key_states, &key_states], 2)?;
                            let value_states = Tensor::cat(&[p_value_states, &value_states], 2)?;
                            (key_states, value_states)
                        }
                    };
                    self.kv_cache = Some(kv_states.clone());
                    kv_states
                } else {
                    (key_states, value_states)
                }
            }
            Some(kv_states) => {
                // Cross-attention: keys/values come from the encoder output.
                // Note that they are recomputed on every call (not cached).
                let key_states = self._shape(&kv_states.apply(&self.k_proj)?, b_sz)?;
                let value_states = self._shape(&kv_states.apply(&self.v_proj)?, b_sz)?;
                (key_states, value_states)
            }
        };
        // Fold heads into the batch dimension for the matmuls:
        // (b * num_heads, seq, head_dim).
        let proj_shape = (b_sz * self.num_heads, (), self.head_dim);
        let query_states = self._shape(&query_states, b_sz)?.reshape(proj_shape)?;
        let key_states = key_states.reshape(proj_shape)?;
        let value_states = value_states.reshape(proj_shape)?;
        let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?;
        let attn_weights = match attn_mask {
            None => attn_weights,
            Some(attn_mask) => attn_weights.broadcast_add(attn_mask)?,
        };
        let attn_probs = candle_nn::ops::softmax_last_dim(&attn_weights)?;
        let attn_output = attn_probs.matmul(&value_states)?;
        // Undo the head folding and project back to the model dimension.
        attn_output
            .reshape((b_sz, self.num_heads, tgt_len, self.head_dim))?
            .transpose(1, 2)?
            .reshape((b_sz, tgt_len, self.head_dim * self.num_heads))?
            .apply(&self.out_proj)
    }
    /// Clears the decoder key/value cache (call between sequences).
    fn reset_kv_cache(&mut self) {
        self.kv_cache = None
    }
}
#[derive(Debug, Clone)]
struct EncoderLayer {
    self_attn: Attention,
    self_attn_layer_norm: LayerNorm,
    activation_fn: candle_nn::Activation,
    fc1: Linear,
    fc2: Linear,
    final_layer_norm: LayerNorm,
}
impl EncoderLayer {
    /// Builds one encoder block: bidirectional self-attention then a
    /// position-wise feed-forward network, each with a residual connection
    /// followed by layer-norm (post-norm).
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        // The encoder is not a decoder: pass `is_decoder = false` so the
        // attention module uses `cfg.encoder_attention_heads` and never
        // accumulates a kv-cache. Previously `true` was passed, which made
        // the encoder concatenate stale keys/values from a previous input
        // whenever it was run again without an explicit `reset_kv_cache`.
        let self_attn = Attention::new(cfg, false, vb.pp("self_attn"))?;
        let self_attn_layer_norm = layer_norm(cfg.d_model, 1e-5, vb.pp("self_attn_layer_norm"))?;
        let fc1 = linear(cfg.d_model, cfg.encoder_ffn_dim, vb.pp("fc1"))?;
        let fc2 = linear(cfg.encoder_ffn_dim, cfg.d_model, vb.pp("fc2"))?;
        let final_layer_norm = layer_norm(cfg.d_model, 1e-5, vb.pp("final_layer_norm"))?;
        Ok(Self {
            self_attn,
            self_attn_layer_norm,
            activation_fn: cfg.activation_function,
            fc1,
            fc2,
            final_layer_norm,
        })
    }
    fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
        // Self-attention sub-layer with residual + layer-norm.
        let residual = xs;
        let xs = (self.self_attn.forward(xs, None, None)? + residual)?
            .apply(&self.self_attn_layer_norm)?;
        // Feed-forward sub-layer with residual + layer-norm.
        let residual = &xs;
        let xs = xs
            .apply(&self.fc1)?
            .apply(&self.activation_fn)?
            .apply(&self.fc2)?;
        (xs + residual)?.apply(&self.final_layer_norm)
    }
    /// Clears the attention cache. A no-op now that the encoder attention
    /// does not cache; kept for interface compatibility.
    fn reset_kv_cache(&mut self) {
        self.self_attn.reset_kv_cache()
    }
}
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Attention,
    self_attn_layer_norm: LayerNorm,
    activation_fn: candle_nn::Activation,
    encoder_attn: Attention,
    encoder_attn_layer_norm: LayerNorm,
    fc1: Linear,
    fc2: Linear,
    final_layer_norm: LayerNorm,
}
impl DecoderLayer {
    /// Builds one decoder block: masked self-attention, cross-attention over
    /// the encoder output, and a position-wise feed-forward network, each
    /// followed by a residual connection and layer-norm (post-norm).
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            self_attn: Attention::new(cfg, true, vb.pp("self_attn"))?,
            self_attn_layer_norm: layer_norm(cfg.d_model, 1e-5, vb.pp("self_attn_layer_norm"))?,
            activation_fn: cfg.activation_function,
            encoder_attn: Attention::new(cfg, true, vb.pp("encoder_attn"))?,
            encoder_attn_layer_norm: layer_norm(
                cfg.d_model,
                1e-5,
                vb.pp("encoder_attn_layer_norm"),
            )?,
            fc1: linear(cfg.d_model, cfg.decoder_ffn_dim, vb.pp("fc1"))?,
            fc2: linear(cfg.decoder_ffn_dim, cfg.d_model, vb.pp("fc2"))?,
            final_layer_norm: layer_norm(cfg.d_model, 1e-5, vb.pp("final_layer_norm"))?,
        })
    }
    fn forward(
        &mut self,
        xs: &Tensor,
        encoder_xs: Option<&Tensor>,
        attn_mask: &Tensor,
    ) -> Result<Tensor> {
        // Masked self-attention sub-layer.
        let attn_out = self.self_attn.forward(xs, None, Some(attn_mask))?;
        let mut ys = (attn_out + xs)?.apply(&self.self_attn_layer_norm)?;
        // Cross-attention over the encoder states, when provided.
        if let Some(encoder_xs) = encoder_xs {
            let cross = self.encoder_attn.forward(&ys, Some(encoder_xs), None)?;
            ys = (cross + &ys)?.apply(&self.encoder_attn_layer_norm)?;
        }
        // Feed-forward sub-layer.
        let ffn = ys
            .apply(&self.fc1)?
            .apply(&self.activation_fn)?
            .apply(&self.fc2)?;
        (ffn + ys)?.apply(&self.final_layer_norm)
    }
    fn reset_kv_cache(&mut self) {
        self.self_attn.reset_kv_cache();
        self.encoder_attn.reset_kv_cache()
    }
}
#[derive(Debug, Clone)]
pub struct Encoder {
    embed_tokens: Embedding,
    embed_positions: SinusoidalPositionalEmbedding,
    layers: Vec<EncoderLayer>,
    embed_scale: Option<f64>,
}
impl Encoder {
    fn new(cfg: &Config, embed_tokens: &Embedding, vb: VarBuilder) -> Result<Self> {
        let embed_positions = SinusoidalPositionalEmbedding::new(cfg, vb.pp("embed_positions"))?;
        let vb_l = vb.pp("layers");
        let layers = (0..cfg.encoder_layers)
            .map(|i| EncoderLayer::new(cfg, vb_l.pp(i)))
            .collect::<Result<Vec<_>>>()?;
        // Marian scales token embeddings by sqrt(d_model) when configured.
        let embed_scale = cfg.scale_embedding.then(|| (cfg.d_model as f64).sqrt());
        Ok(Self {
            embed_tokens: embed_tokens.clone(),
            embed_positions,
            layers,
            embed_scale,
        })
    }
    /// Embeds `xs`, adds sinusoidal positions starting at `past_kv_len`, and
    /// runs the stack of encoder layers.
    pub fn forward(&mut self, xs: &Tensor, past_kv_len: usize) -> Result<Tensor> {
        let mut ys = xs.apply(&self.embed_tokens)?;
        if let Some(scale) = self.embed_scale {
            ys = (ys * scale)?;
        }
        let pos = self
            .embed_positions
            .forward(&ys, past_kv_len)?
            .unsqueeze(0)?;
        ys = ys.broadcast_add(&pos)?;
        for layer in self.layers.iter_mut() {
            ys = layer.forward(&ys)?;
        }
        Ok(ys)
    }
    pub fn reset_kv_cache(&mut self) {
        self.layers.iter_mut().for_each(|l| l.reset_kv_cache());
    }
}
#[derive(Debug, Clone)]
pub struct Decoder {
    embed_tokens: Embedding,
    embed_positions: SinusoidalPositionalEmbedding,
    layers: Vec<DecoderLayer>,
    embed_scale: Option<f64>,
}
impl Decoder {
    fn new(cfg: &Config, embed_tokens: &Embedding, vb: VarBuilder) -> Result<Self> {
        let embed_positions = SinusoidalPositionalEmbedding::new(cfg, vb.pp("embed_positions"))?;
        let vb_l = vb.pp("layers");
        let layers = (0..cfg.decoder_layers)
            .map(|i| DecoderLayer::new(cfg, vb_l.pp(i)))
            .collect::<Result<Vec<_>>>()?;
        // Marian scales token embeddings by sqrt(d_model) when configured.
        let embed_scale = cfg.scale_embedding.then(|| (cfg.d_model as f64).sqrt());
        Ok(Self {
            embed_tokens: embed_tokens.clone(),
            embed_positions,
            layers,
            embed_scale,
        })
    }
    /// Embeds `xs`, adds sinusoidal positions starting at `past_kv_len`, and
    /// runs the stack of decoder layers with the given causal mask and
    /// optional encoder states for cross-attention.
    pub fn forward(
        &mut self,
        xs: &Tensor,
        encoder_xs: Option<&Tensor>,
        past_kv_len: usize,
        attn_mask: &Tensor,
    ) -> Result<Tensor> {
        let mut ys = xs.apply(&self.embed_tokens)?;
        if let Some(scale) = self.embed_scale {
            ys = (ys * scale)?;
        }
        let pos = self
            .embed_positions
            .forward(&ys, past_kv_len)?
            .unsqueeze(0)?;
        ys = ys.broadcast_add(&pos)?;
        for layer in self.layers.iter_mut() {
            ys = layer.forward(&ys, encoder_xs, attn_mask)?;
        }
        Ok(ys)
    }
    pub fn reset_kv_cache(&mut self) {
        self.layers.iter_mut().for_each(|l| l.reset_kv_cache());
    }
}
#[derive(Debug, Clone)]
struct Model {
    shared: Embedding,
    encoder: Encoder,
    decoder: Decoder,
}
impl Model {
    /// Builds the seq2seq backbone; encoder and decoder share the `shared`
    /// token-embedding table.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let shared = Embedding::new(cfg.vocab_size, cfg.d_model, vb.pp("shared"))?;
        Ok(Self {
            encoder: Encoder::new(cfg, &shared, vb.pp("encoder"))?,
            decoder: Decoder::new(cfg, &shared, vb.pp("decoder"))?,
            shared,
        })
    }
    fn reset_kv_cache(&mut self) {
        self.encoder.reset_kv_cache();
        self.decoder.reset_kv_cache();
    }
}
#[derive(Debug, Clone)]
pub struct MTModel {
    model: Model,
    lm_head: Linear,
    final_logits_bias: Tensor,
}
impl MTModel {
    /// Loads the full translation model. The LM head is tied to the shared
    /// token embeddings; a separate per-token output bias is loaded from
    /// `final_logits_bias`.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let target_vocab_size = cfg.decoder_vocab_size.unwrap_or(cfg.vocab_size);
        let final_logits_bias = vb.get((1, target_vocab_size), "final_logits_bias")?;
        let model = Model::new(cfg, vb.pp("model"))?;
        let lm_head = Linear::from_weights(model.shared.embeddings().clone(), None);
        Ok(Self {
            model,
            lm_head,
            final_logits_bias,
        })
    }
    pub fn encoder(&mut self) -> &mut Encoder {
        &mut self.model.encoder
    }
    pub fn decoder(&mut self) -> &mut Decoder {
        &mut self.model.decoder
    }
    /// Runs one decoding step over `xs` given the encoder output, applying a
    /// causal mask so each position only attends to itself and earlier ones.
    /// Returns logits over the target vocabulary.
    pub fn decode(
        &mut self,
        xs: &Tensor,
        encoder_xs: &Tensor,
        past_kv_len: usize,
    ) -> Result<Tensor> {
        let seq_len = xs.dim(1)?;
        // Additive causal mask: 0 on/below the diagonal, -inf above it.
        let mut mask = Vec::with_capacity(seq_len * seq_len);
        for i in 0..seq_len {
            for j in 0..seq_len {
                mask.push(if j > i { f32::NEG_INFINITY } else { 0f32 });
            }
        }
        let mask = Tensor::from_vec(mask, (seq_len, seq_len), xs.device())?;
        self.model
            .decoder
            .forward(xs, Some(encoder_xs), past_kv_len, &mask)?
            .apply(&self.lm_head)?
            .broadcast_add(&self.final_logits_bias)
    }
    pub fn reset_kv_cache(&mut self) {
        self.model.reset_kv_cache();
    }
}
//! Implementation of DistilBert, a distilled version of BERT.
//!
//! See:
//! - ["DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"](https://arxiv.org/abs/1910.01108)
//!
use super::with_tracing::{layer_norm, linear, LayerNorm, Linear};
use candle::{DType, Device, Result, Tensor};
use candle_nn::{Embedding, Module, VarBuilder};
use serde::Deserialize;
pub const DTYPE: DType = DType::F32;
/// Everywhere `mask` is non-zero, substitute the scalar `on_true`; elsewhere
/// keep the corresponding value from `on_false`.
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let fill = Tensor::new(on_true, on_false.device())?.broadcast_as(mask.shape().dims())?;
    mask.where_cond(&fill, on_false)
}
/// Activation used inside the feed-forward blocks, deserialized from the
/// checkpoint's `config.json` (lowercase names: "gelu" / "relu").
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HiddenAct {
    Gelu,
    Relu,
}
/// Thin wrapper around [`HiddenAct`] that adds a tracing span per call.
struct HiddenActLayer {
    act: HiddenAct,
    span: tracing::Span,
}
impl HiddenActLayer {
    fn new(act: HiddenAct) -> Self {
        Self {
            act,
            span: tracing::span!(tracing::Level::TRACE, "hidden-act"),
        }
    }
}
impl Module for HiddenActLayer {
    fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> {
        let _enter = self.span.enter();
        // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213
        match self.act {
            HiddenAct::Gelu => xs.gelu(),
            HiddenAct::Relu => xs.relu(),
        }
    }
}
/// Position-embedding flavor; only absolute (learned) position embeddings
/// are supported by this implementation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
    #[default]
    Absolute,
}
/// DistilBERT configuration, deserializable from a HF `config.json`.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    // Model (hidden state) dimension.
    pub dim: usize,
    n_layers: usize,
    n_heads: usize,
    // Inner dimension of the feed-forward blocks.
    hidden_dim: usize,
    activation: HiddenAct,
    max_position_embeddings: usize,
    // Init stddev from the original training setup; not referenced in this
    // inference implementation, kept for config compatibility.
    initializer_range: f64,
    pub pad_token_id: usize,
    #[serde(default)]
    position_embedding_type: PositionEmbeddingType,
    #[serde(default)]
    use_cache: bool,
    // e.g. Some("distilbert"); used as a weight-path prefix fallback when
    // loading (see `DistilBertModel::load`).
    model_type: Option<String>,
}
impl Default for Config {
    // Defaults matching the distilbert-base-uncased checkpoint.
    fn default() -> Self {
        Self {
            vocab_size: 30522,
            dim: 768,
            n_layers: 12,
            n_heads: 12,
            hidden_dim: 3072,
            activation: HiddenAct::Gelu,
            max_position_embeddings: 512,
            initializer_range: 0.02,
            pad_token_id: 0,
            position_embedding_type: PositionEmbeddingType::Absolute,
            use_cache: true,
            model_type: Some("distilbert".to_string()),
        }
    }
}
struct Embeddings {
    word_embeddings: Embedding,
    position_embeddings: Embedding,
    layer_norm: LayerNorm,
    span: tracing::Span,
}
impl Embeddings {
    /// Loads word/position embedding tables and the embedding layer-norm.
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let word_embeddings =
            candle_nn::embedding(config.vocab_size, config.dim, vb.pp("word_embeddings"))?;
        let position_embeddings = candle_nn::embedding(
            config.max_position_embeddings,
            config.dim,
            vb.pp("position_embeddings"),
        )?;
        let layer_norm = layer_norm(config.dim, 1e-12, vb.pp("LayerNorm"))?;
        Ok(Self {
            word_embeddings,
            position_embeddings,
            layer_norm,
            span: tracing::span!(tracing::Level::TRACE, "embeddings"),
        })
    }
    /// Sums word and absolute position embeddings and applies layer-norm.
    /// `input_ids` has shape `(batch, seq_len)`; positions are always
    /// `0..seq_len` (no past-kv offset in DistilBERT).
    fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (_bsize, seq_len) = input_ids.dims2()?;
        let input_embeddings = self.word_embeddings.forward(input_ids)?;
        // Build 0..seq_len directly on the device instead of round-tripping
        // through a host-side Vec.
        let position_ids = Tensor::arange(0u32, seq_len as u32, input_ids.device())?;
        let embeddings =
            input_embeddings.broadcast_add(&self.position_embeddings.forward(&position_ids)?)?;
        self.layer_norm.forward(&embeddings)
    }
}
struct MultiHeadSelfAttention {
    q_lin: Linear,
    k_lin: Linear,
    v_lin: Linear,
    out_lin: Linear,
    n_heads: usize,
    // dim / n_heads.
    attention_head_size: usize,
    span: tracing::Span,
}
impl MultiHeadSelfAttention {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let attention_head_size = config.dim / config.n_heads;
        let all_head_size = config.n_heads * attention_head_size;
        let dim = config.dim;
        let q_lin = linear(dim, all_head_size, vb.pp("q_lin"))?;
        let v_lin = linear(dim, all_head_size, vb.pp("v_lin"))?;
        let k_lin = linear(dim, all_head_size, vb.pp("k_lin"))?;
        let out_lin = linear(all_head_size, dim, vb.pp("out_lin"))?;
        Ok(Self {
            q_lin,
            k_lin,
            v_lin,
            out_lin,
            n_heads: config.n_heads,
            attention_head_size,
            span: tracing::span!(tracing::Level::TRACE, "attention"),
        })
    }
}
impl MultiHeadSelfAttention {
    /// Standard multi-head self-attention over `(bs, seq, dim)` states.
    ///
    /// `attention_mask` is broadcast to the score shape and used with
    /// `masked_fill`: scores where the mask is non-zero are set to -inf
    /// before the softmax. I.e. callers are expected to pass an *inverted*
    /// padding mask (non-zero = masked out) — confirm against the caller.
    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (bs, q_length, _dim) = hidden_states.dims3()?;
        let dim_per_head = self.attention_head_size;
        let q = self.q_lin.forward(hidden_states)?;
        let k = self.k_lin.forward(hidden_states)?;
        let v = self.v_lin.forward(hidden_states)?;
        // Split heads: (bs, seq, dim) -> (bs, n_heads, seq, dim_per_head).
        let q = q
            .reshape((bs, q_length, self.n_heads, dim_per_head))?
            .transpose(1, 2)?;
        let k = k
            .reshape((bs, q_length, self.n_heads, dim_per_head))?
            .transpose(1, 2)?;
        let v = v
            .reshape((bs, q_length, self.n_heads, dim_per_head))?
            .transpose(1, 2)?;
        // Scale the queries rather than the scores.
        let q: Tensor = (q / (dim_per_head as f64).sqrt())?;
        let scores = q.matmul(&k.transpose(2, 3)?.contiguous()?)?;
        let mask = attention_mask.broadcast_as(scores.shape())?;
        let scores = masked_fill(&scores.to_dtype(DType::F32)?, &mask, f32::NEG_INFINITY)?;
        let weights = candle_nn::ops::softmax(&scores, candle::D::Minus1)?;
        let context = weights.matmul(&v.contiguous()?)?;
        // Merge heads back and apply the output projection.
        let context = context
            .transpose(1, 2)?
            .reshape((bs, q_length, self.n_heads * dim_per_head))?
            .contiguous()?;
        let context = self.out_lin.forward(&context)?;
        Ok(context)
    }
}
#[allow(clippy::upper_case_acronyms)]
/// Position-wise feed-forward network: `lin2(act(lin1(x)))`.
struct FFN {
    lin1: Linear,
    lin2: Linear,
    activation: HiddenActLayer,
    span: tracing::Span,
}
impl FFN {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            lin1: linear(config.dim, config.hidden_dim, vb.pp("lin1"))?,
            lin2: linear(config.hidden_dim, config.dim, vb.pp("lin2"))?,
            activation: HiddenActLayer::new(config.activation),
            span: tracing::span!(tracing::Level::TRACE, "ffn"),
        })
    }
}
impl Module for FFN {
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let expanded = self.lin1.forward(hidden_states)?;
        let activated = self.activation.forward(&expanded)?;
        self.lin2.forward(&activated)
    }
}
/// One post-norm transformer block: self-attention and FFN sub-layers, each
/// wrapped in a residual connection followed by layer normalization.
struct TransformerBlock {
    attention: MultiHeadSelfAttention,
    sa_layer_norm: LayerNorm,
    ffn: FFN,
    output_layer_norm: LayerNorm,
    span: tracing::Span,
}
impl TransformerBlock {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            attention: MultiHeadSelfAttention::load(vb.pp("attention"), config)?,
            sa_layer_norm: layer_norm(config.dim, 1e-12, vb.pp("sa_layer_norm"))?,
            ffn: FFN::load(vb.pp("ffn"), config)?,
            output_layer_norm: layer_norm(config.dim, 1e-12, vb.pp("output_layer_norm"))?,
            span: tracing::span!(tracing::Level::TRACE, "layer"),
        })
    }
    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        // Self-attention sub-layer.
        // TODO: Support cross-attention and `apply_chunking_to_forward` as in
        // the upstream BERT implementation?
        let sa_output = self.attention.forward(hidden_states, attention_mask)?;
        let sa_output = self
            .sa_layer_norm
            .forward(&sa_output.broadcast_add(hidden_states)?)?;
        // Feed-forward sub-layer.
        let ffn_output = self.ffn.forward(&sa_output)?;
        self.output_layer_norm.forward(&(&ffn_output + sa_output)?)
    }
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L556
/// Stack of transformer blocks, loaded from `layer.{i}` weight paths.
struct Transformer {
    layers: Vec<TransformerBlock>,
    span: tracing::Span,
}
impl Transformer {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let mut layers = Vec::with_capacity(config.n_layers);
        for index in 0..config.n_layers {
            layers.push(TransformerBlock::load(vb.pp(format!("layer.{index}")), config)?);
        }
        Ok(Transformer {
            layers,
            span: tracing::span!(tracing::Level::TRACE, "encoder"),
        })
    }
}
impl Transformer {
    /// Applies every transformer block in order.
    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.layers
            .iter()
            .try_fold(hidden_states.clone(), |xs, layer| {
                layer.forward(&xs, attention_mask)
            })
    }
}
pub struct DistilBertModel {
    embeddings: Embeddings,
    transformer: Transformer,
    pub device: Device,
    span: tracing::Span,
}
impl DistilBertModel {
    /// Loads the base DistilBERT encoder from `vb`.
    ///
    /// Weights are first looked up at the root (`embeddings.*`,
    /// `transformer.*`); if that fails and `config.model_type` is set, the
    /// same paths are retried under the model-type prefix (e.g.
    /// `distilbert.embeddings.*`). If the prefixed attempt also fails, the
    /// error from the first (un-prefixed) attempt is returned.
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let (embeddings, transformer) = match (
            Embeddings::load(vb.pp("embeddings"), config),
            Transformer::load(vb.pp("transformer"), config),
        ) {
            (Ok(embeddings), Ok(encoder)) => (embeddings, encoder),
            (Err(err), _) | (_, Err(err)) => {
                if let Some(model_type) = &config.model_type {
                    if let (Ok(embeddings), Ok(encoder)) = (
                        Embeddings::load(vb.pp(format!("{model_type}.embeddings")), config),
                        Transformer::load(vb.pp(format!("{model_type}.transformer")), config),
                    ) {
                        (embeddings, encoder)
                    } else {
                        return Err(err);
                    }
                } else {
                    return Err(err);
                }
            }
        };
        Ok(Self {
            embeddings,
            transformer,
            device: vb.device().clone(),
            span: tracing::span!(tracing::Level::TRACE, "model"),
        })
    }
    /// Embeds `input_ids` and runs the transformer stack; returns the final
    /// hidden states of shape `(batch, seq_len, dim)`.
    pub fn forward(&self, input_ids: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let embedding_output = self.embeddings.forward(input_ids)?;
        let sequence_output = self
            .transformer
            .forward(&embedding_output, attention_mask)?;
        Ok(sequence_output)
    }
}
/// Dense -> activation -> layer-norm transform applied before projecting
/// onto the vocabulary.
struct DistilBertPredictionHeadTransform {
    dense: Linear,
    activation: HiddenActLayer,
    layer_norm: LayerNorm,
}
impl DistilBertPredictionHeadTransform {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            dense: linear(config.dim, config.dim, vb.pp("vocab_transform"))?,
            activation: HiddenActLayer::new(config.activation),
            layer_norm: layer_norm(config.dim, 1e-12, vb.pp("vocab_layer_norm"))?,
        })
    }
}
impl Module for DistilBertPredictionHeadTransform {
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let transformed = self.dense.forward(hidden_states)?;
        let activated = self.activation.forward(&transformed)?;
        self.layer_norm.forward(&activated)
    }
}
// https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L769C1-L790C1
/// LM head projecting hidden states onto the vocabulary. The projection
/// weight is tied to the word-embedding matrix; only the bias is a separate
/// `vocab_projector` parameter.
pub struct DistilBertLMPredictionHead {
    transform: DistilBertPredictionHeadTransform,
    decoder: Linear,
}
impl DistilBertLMPredictionHead {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let transform = DistilBertPredictionHeadTransform::load(vb.clone(), config)?;
        // distil_bert_uncased uses the word embeddings for the vocab projector weight, but has a separate vocab_projector bias
        let vocab_projector_weight_vb = vb.pp("distilbert.embeddings.word_embeddings");
        // NOTE(review): the init hints below presumably only take effect when
        // the tensor is absent from the checkpoint (fresh training) — confirm
        // against `VarBuilder::get_with_hints`.
        let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL;
        let ws = vocab_projector_weight_vb.get_with_hints(
            (config.vocab_size, config.dim),
            "weight",
            init_ws,
        )?;
        let bound = 1. / (config.dim as f64).sqrt();
        let init_bs = candle_nn::Init::Uniform {
            lo: -bound,
            up: bound,
        };
        let vocab_projector_bias_vb = vb.pp("vocab_projector");
        let bs = vocab_projector_bias_vb.get_with_hints(config.vocab_size, "bias", init_bs)?;
        let decoder = Linear::from_weights(ws, Some(bs));
        Ok(Self { transform, decoder })
    }
}
impl Module for DistilBertLMPredictionHead {
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        self.decoder
            .forward(&self.transform.forward(hidden_states)?)
    }
}
// https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L792
/// MLM-only head: a trivial wrapper delegating to the LM prediction head.
pub struct DistilBertOnlyMLMHead {
    predictions: DistilBertLMPredictionHead,
}
impl DistilBertOnlyMLMHead {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            predictions: DistilBertLMPredictionHead::load(vb.clone(), config)?,
        })
    }
}
impl Module for DistilBertOnlyMLMHead {
    fn forward(&self, sequence_output: &Tensor) -> Result<Tensor> {
        self.predictions.forward(sequence_output)
    }
}
/// DistilBERT with a masked-language-modeling head on top.
pub struct DistilBertForMaskedLM {
    pub bert: DistilBertModel,
    cls: DistilBertOnlyMLMHead,
}
impl DistilBertForMaskedLM {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            bert: DistilBertModel::load(vb.pp("distilbert"), config)?,
            cls: DistilBertOnlyMLMHead::load(vb.clone(), config)?,
        })
    }
    /// Encodes the input and projects every position onto the vocabulary.
    pub fn forward(&self, input_ids: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let hidden = self.bert.forward(input_ids, attention_mask)?;
        self.cls.forward(&hidden)
    }
}
//! Quantized llama model implementation.
//!
//! This provides a quantized implementation of the llama language model architecture.
//! The model implements parameter efficient quantization for reduced memory usage
//! while maintaining model quality.
//!
//! Key characteristics:
//! - Transformer decoder architecture
//! - Support for 2/3/4/8-bit quantization
//! - Optimized memory usage through quantization
//! - Configurable model sizes and parameter counts
//!
//! - 💻 [GH Link](https://github.com/facebookresearch/llama)
//! - 📝 [Paper](https://arxiv.org/abs/2302.13971)
//!
//! 
//!
use std::collections::HashMap;
use crate::quantized_nn::RmsNorm;
use candle::quantized::QTensor;
use candle::quantized::{ggml_file, gguf_file};
use candle::{DType, Device, IndexOp, Result, Tensor};
use candle_nn::{Embedding, Module};
pub const MAX_SEQ_LEN: usize = 4096;
/// Wrapper around `candle::quantized::QMatMul` that adds a tracing span per
/// forward call.
#[derive(Debug, Clone)]
struct QMatMul {
    inner: candle::quantized::QMatMul,
    span: tracing::Span,
}
impl QMatMul {
    fn from_qtensor(qtensor: QTensor) -> Result<Self> {
        Ok(Self {
            inner: candle::quantized::QMatMul::from_qtensor(qtensor)?,
            span: tracing::span!(tracing::Level::TRACE, "qmatmul"),
        })
    }
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}
/// SwiGLU feed-forward block: `w2(silu(w1(x)) * w3(x))`.
#[derive(Debug, Clone)]
struct Mlp {
    feed_forward_w1: QMatMul,
    feed_forward_w2: QMatMul,
    feed_forward_w3: QMatMul,
}
impl Module for Mlp {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let gate = candle_nn::ops::silu(&self.feed_forward_w1.forward(xs)?)?;
        let up = self.feed_forward_w3.forward(xs)?;
        self.feed_forward_w2.forward(&(gate * up)?)
    }
}
/// Feed-forward block: either a plain SwiGLU MLP or a Mixtral-style
/// mixture-of-experts where a router selects `n_expert_used` experts per
/// token and combines their outputs with renormalized routing weights.
#[derive(Debug, Clone)]
enum MlpOrMoe {
    Mlp(Mlp),
    MoE {
        n_expert_used: usize,
        feed_forward_gate_inp: QMatMul,
        experts: Vec<Mlp>,
    },
}
impl Module for MlpOrMoe {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        match self {
            Self::MoE {
                feed_forward_gate_inp,
                experts,
                n_expert_used,
            } => {
                let (b_size, seq_len, hidden_dim) = xs.dims3()?;
                // Flatten batch and sequence into rows: one row per token.
                let xs = xs.reshape(((), hidden_dim))?;
                let router_logits = feed_forward_gate_inp.forward(&xs)?;
                let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?;
                // In order to extract topk, we extract the data from the tensor and manipulate it
                // directly. Maybe we will want to use some custom ops instead at some point.
                let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?;
                // routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
                // top_x contains the row indexes to evaluate for each expert.
                let mut top_x = vec![vec![]; experts.len()];
                let mut selected_rws = vec![vec![]; experts.len()];
                for (row_idx, rw) in routing_weights.iter().enumerate() {
                    // Sort expert indices by decreasing routing weight.
                    let mut dst = (0..rw.len() as u32).collect::<Vec<u32>>();
                    dst.sort_by(|&i, &j| rw[j as usize].total_cmp(&rw[i as usize]));
                    let mut sum_routing_weights = 0f32;
                    for &expert_idx in dst.iter().take(*n_expert_used) {
                        let expert_idx = expert_idx as usize;
                        let routing_weight = rw[expert_idx];
                        sum_routing_weights += routing_weight;
                        top_x[expert_idx].push(row_idx as u32);
                    }
                    // Renormalize so the selected experts' weights sum to 1.
                    for &expert_idx in dst.iter().take(*n_expert_used) {
                        let expert_idx = expert_idx as usize;
                        let routing_weight = rw[expert_idx];
                        selected_rws[expert_idx].push(routing_weight / sum_routing_weights)
                    }
                }
                // routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
                // expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
                let mut ys = xs.zeros_like()?;
                for (expert_idx, expert_layer) in experts.iter().enumerate() {
                    let top_x = &top_x[expert_idx];
                    if top_x.is_empty() {
                        continue;
                    }
                    let top_x = Tensor::new(top_x.as_slice(), xs.device())?;
                    let selected_rws =
                        Tensor::new(selected_rws[expert_idx].as_slice(), xs.device())?
                            .reshape(((), 1))?;
                    // Index the correct hidden states and compute the expert hidden state for
                    // the current expert. We need to make sure to multiply the output hidden
                    // states by `routing_weights` on the corresponding tokens (top-1 and top-2)
                    let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?;
                    // current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None])
                    let current_hidden_states = expert_layer.forward(&current_state)?;
                    let current_hidden_states =
                        current_hidden_states.broadcast_mul(&selected_rws)?;
                    // Scatter-add each expert's weighted output back to its rows.
                    ys = ys.index_add(&top_x, &current_hidden_states, 0)?;
                }
                let ys = ys.reshape((b_size, seq_len, hidden_dim))?;
                Ok(ys)
            }
            Self::Mlp(mlp) => mlp.forward(xs),
        }
    }
}
/// Weights and per-layer state for one quantized llama transformer layer.
#[derive(Debug, Clone)]
struct LayerWeights {
    attention_wq: QMatMul,
    attention_wk: QMatMul,
    attention_wv: QMatMul,
    attention_wo: QMatMul,
    attention_norm: RmsNorm,
    mlp_or_moe: MlpOrMoe,
    ffn_norm: RmsNorm,
    n_head: usize,
    // Number of key/value heads; < n_head when grouped/multi-query attention
    // is used (see the repeat_kv calls in forward_attn).
    n_kv_head: usize,
    head_dim: usize,
    // Precomputed RoPE tables, shape (MAX_SEQ_LEN, head_dim / 2).
    cos: Tensor,
    sin: Tensor,
    // Scalar -inf tensor reused by masked_fill.
    neg_inf: Tensor,
    // Cached (key, value) states appended to across decoding steps.
    kv_cache: Option<(Tensor, Tensor)>,
    span_attn: tracing::Span,
    span_rot: tracing::Span,
    span_mlp: tracing::Span,
}
/// Replaces the entries of `on_false` where `mask` is non-zero by `on_true`
/// (broadcast to the mask's shape).
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> {
    mask.where_cond(&on_true.broadcast_as(mask.shape().dims())?, on_false)
}
impl LayerWeights {
    /// Applies interleaved rotary position embeddings (RoPE) to `x` of shape
    /// (b, n_head, seq_len, head_dim), using the precomputed tables starting
    /// at absolute position `index_pos`.
    fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
        let _enter = self.span_rot.enter();
        let (_b_sz, _n_head, seq_len, _n_embd) = x.dims4()?;
        let cos = self.cos.narrow(0, index_pos, seq_len)?;
        let sin = self.sin.narrow(0, index_pos, seq_len)?;
        // The call to contiguous below is only necessary when processing the prompt.
        // When the seq_len is 1 in the inference loop, this is a no-op.
        candle_nn::rotary_emb::rope_i(&x.contiguous()?, &cos, &sin)
    }
    /// Self-attention with RoPE and key/value caching.
    ///
    /// * `mask` - optional additive-free boolean-style mask, broadcast onto
    ///   the attention scores and applied via `masked_fill` with -inf.
    /// * `index_pos` - absolute position of the first token of `x`. When 0,
    ///   the kv-cache is treated as stale (new sequence) and discarded;
    ///   otherwise new keys/values are appended to it.
    fn forward_attn(
        &mut self,
        x: &Tensor,
        mask: Option<&Tensor>,
        index_pos: usize,
    ) -> Result<Tensor> {
        let _enter = self.span_attn.enter();
        let (b_sz, seq_len, n_embd) = x.dims3()?;
        let q = self.attention_wq.forward(x)?;
        let k = self.attention_wk.forward(x)?;
        let v = self.attention_wv.forward(x)?;
        // Split heads: (b, seq, n_embd) -> (b, heads, seq, head_dim).
        let q = q
            .reshape((b_sz, seq_len, self.n_head, self.head_dim))?
            .transpose(1, 2)?;
        let k = k
            .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?;
        let v = v
            .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?
            // This call to contiguous ensures that the fast kernel can be called below. It's
            // actually a no-op except when processing the initial prompt so has no significant
            // impact on performance.
            .contiguous()?;
        let q = self.apply_rotary_emb(&q, index_pos)?;
        let k = self.apply_rotary_emb(&k, index_pos)?;
        let (k, v) = match &self.kv_cache {
            None => (k, v),
            Some((k_cache, v_cache)) => {
                if index_pos == 0 {
                    // Start of a new sequence: drop the previous cache.
                    (k, v)
                } else {
                    // Append along the sequence dimension (dim 2).
                    let k = Tensor::cat(&[k_cache, &k], 2)?;
                    let v = Tensor::cat(&[v_cache, &v], 2)?;
                    (k, v)
                }
            }
        };
        self.kv_cache = Some((k.clone(), v.clone()));
        let y = if q.device().is_metal() && seq_len == 1 {
            // SDPA will do MQA for us
            candle_nn::ops::sdpa(
                &q,
                &k,
                &v,
                None,
                false,
                1. / (self.head_dim as f32).sqrt(),
                1.,
            )?
        } else {
            // Support for MQA, useful for 70B models and mistral.
            let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?;
            let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?;
            let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
            let att = match mask {
                None => att,
                Some(mask) => {
                    let mask = mask.broadcast_as(att.shape())?;
                    masked_fill(&att, &mask, &self.neg_inf)?
                }
            };
            let att = candle_nn::ops::softmax_last_dim(&att)?;
            // Convert to contiguous as matmul doesn't support strided vs for now.
            att.matmul(&v.contiguous()?)?
        };
        // Merge heads back and apply the output projection.
        let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
        let y = self.attention_wo.forward(&y)?;
        Ok(y)
    }
}
/// All weights for a quantized llama-style model.
#[derive(Debug, Clone)]
pub struct ModelWeights {
    tok_embeddings: Embedding,
    layers: Vec<LayerWeights>,
    norm: RmsNorm,
    output: QMatMul,
    // Cached causal masks — presumably keyed by sequence length; the usage
    // is outside this view, confirm against the forward implementation.
    masks: HashMap<usize, Tensor>,
    span: tracing::Span,
    span_output: tracing::Span,
}
/// Precomputes the RoPE cos/sin tables of shape `(MAX_SEQ_LEN, head_dim / 2)`
/// for the given base frequency.
fn precomput_freqs_cis(
    head_dim: usize,
    freq_base: f32,
    device: &Device,
) -> Result<(Tensor, Tensor)> {
    let inv_freqs: Vec<f32> = (0..head_dim)
        .step_by(2)
        .map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32))
        .collect();
    let n_freqs = inv_freqs.len();
    let inv_freqs = Tensor::new(inv_freqs.as_slice(), device)?;
    let positions = Tensor::arange(0, MAX_SEQ_LEN as u32, device)?
        .to_dtype(DType::F32)?
        .reshape((MAX_SEQ_LEN, 1))?;
    // Outer product of positions with the inverse frequencies.
    let angles = positions.matmul(&inv_freqs.reshape((1, n_freqs))?)?;
    Ok((angles.cos()?, angles.sin()?))
}
impl ModelWeights {
    /// Build the model from a legacy GGML checkpoint.
    ///
    /// `gqa` is the grouped-query-attention ratio, i.e. the number of query
    /// heads per key/value head (1 for plain multi-head attention, 8 for the
    /// llama-2 70B style models).
    pub fn from_ggml(mut ct: ggml_file::Content, gqa: usize) -> Result<Self> {
        let head_dim = (ct.hparams.n_embd / ct.hparams.n_head) as usize;
        // GGML files do not carry a rope frequency base; 10000 is the llama default.
        let (cos, sin) = precomput_freqs_cis(head_dim, 10000., &ct.device)?;
        let neg_inf = Tensor::new(f32::NEG_INFINITY, &ct.device)?;
        let tok_embeddings = ct.remove("tok_embeddings.weight")?;
        let tok_embeddings = tok_embeddings.dequantize(&ct.device)?;
        let norm = RmsNorm::from_qtensor(ct.remove("norm.weight")?, 1e-5)?;
        let output = ct.remove("output.weight")?;
        let mut layers = Vec::with_capacity(ct.hparams.n_layer as usize);
        for layer_idx in 0..ct.hparams.n_layer {
            let prefix = format!("layers.{layer_idx}");
            let attention_wq = ct.remove(&format!("{prefix}.attention.wq.weight"))?;
            let attention_wk = ct.remove(&format!("{prefix}.attention.wk.weight"))?;
            let attention_wv = ct.remove(&format!("{prefix}.attention.wv.weight"))?;
            let attention_wo = ct.remove(&format!("{prefix}.attention.wo.weight"))?;
            // GGML checkpoints never contain mixture-of-experts weights, so
            // this is always a plain MLP.
            let mlp_or_moe = {
                let feed_forward_w1 = ct.remove(&format!("{prefix}.feed_forward.w1.weight"))?;
                let feed_forward_w2 = ct.remove(&format!("{prefix}.feed_forward.w2.weight"))?;
                let feed_forward_w3 = ct.remove(&format!("{prefix}.feed_forward.w3.weight"))?;
                MlpOrMoe::Mlp(Mlp {
                    feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?,
                    feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?,
                    feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?,
                })
            };
            let attention_norm = ct.remove(&format!("{prefix}.attention_norm.weight"))?;
            let ffn_norm = ct.remove(&format!("{prefix}.ffn_norm.weight"))?;
            let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
            let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
            let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp");
            layers.push(LayerWeights {
                attention_wq: QMatMul::from_qtensor(attention_wq)?,
                attention_wk: QMatMul::from_qtensor(attention_wk)?,
                attention_wv: QMatMul::from_qtensor(attention_wv)?,
                attention_wo: QMatMul::from_qtensor(attention_wo)?,
                attention_norm: RmsNorm::from_qtensor(attention_norm, 1e-5)?,
                mlp_or_moe,
                ffn_norm: RmsNorm::from_qtensor(ffn_norm, 1e-5)?,
                n_head: ct.hparams.n_head as usize,
                // Key/value heads are derived from the gqa ratio here since
                // GGML headers do not store a separate kv-head count.
                n_kv_head: ct.hparams.n_head as usize / gqa,
                head_dim: (ct.hparams.n_embd / ct.hparams.n_head) as usize,
                cos: cos.clone(),
                sin: sin.clone(),
                neg_inf: neg_inf.clone(),
                kv_cache: None,
                span_attn,
                span_rot,
                span_mlp,
            })
        }
        let span = tracing::span!(tracing::Level::TRACE, "model");
        let span_output = tracing::span!(tracing::Level::TRACE, "output");
        Ok(Self {
            tok_embeddings: Embedding::new(tok_embeddings, ct.hparams.n_embd as usize),
            layers,
            norm,
            output: QMatMul::from_qtensor(output)?,
            masks: HashMap::new(),
            span,
            span_output,
        })
    }
    /// Build the model from a GGUF checkpoint. Supports mixtral-style
    /// mixture-of-experts layers when `llama.expert_count > 1`.
    pub fn from_gguf<R: std::io::Seek + std::io::Read>(
        ct: gguf_file::Content,
        reader: &mut R,
        device: &Device,
    ) -> Result<Self> {
        // Helper turning a missing metadata key into an explicit error.
        let md_get = |s: &str| match ct.metadata.get(s) {
            None => candle::bail!("cannot find {s} in metadata"),
            Some(v) => Ok(v),
        };
        // Parameter extraction from metadata.
        // Expert counts default to 0 (no MoE) when absent.
        let n_expert = md_get("llama.expert_count")
            .and_then(|v| v.to_u32())
            .unwrap_or(0) as usize;
        let n_expert_used = md_get("llama.expert_used_count")
            .and_then(|v| v.to_u32())
            .unwrap_or(0) as usize;
        let head_count = md_get("llama.attention.head_count")?.to_u32()? as usize;
        let head_count_kv = md_get("llama.attention.head_count_kv")?.to_u32()? as usize;
        let block_count = md_get("llama.block_count")?.to_u32()? as usize;
        let embedding_length = md_get("llama.embedding_length")?.to_u32()? as usize;
        let rope_dim = md_get("llama.rope.dimension_count")?.to_u32()? as usize;
        // Strangely this value is generally 1e-6 in GGUF file but used to be 1e-5 by default.
        let rms_norm_eps = md_get("llama.attention.layer_norm_rms_epsilon")?.to_f32()? as f64;
        let rope_freq_base = md_get("llama.rope.freq_base")
            .and_then(|m| m.to_f32())
            .unwrap_or(10000f32);
        let (cos, sin) = precomput_freqs_cis(rope_dim, rope_freq_base, device)?;
        let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?;
        let tok_embeddings_q = ct.tensor(reader, "token_embd.weight", device)?;
        let tok_embeddings = tok_embeddings_q.dequantize(device)?;
        let norm = RmsNorm::from_qtensor(
            ct.tensor(reader, "output_norm.weight", device)?,
            rms_norm_eps,
        )?;
        // Fall back to tied embeddings when no separate output head is stored.
        let output = match ct.tensor(reader, "output.weight", device) {
            Ok(tensor) => tensor,
            Err(_) => tok_embeddings_q,
        };
        let mut layers = Vec::with_capacity(block_count);
        for layer_idx in 0..block_count {
            let prefix = format!("blk.{layer_idx}");
            let attention_wq = ct.tensor(reader, &format!("{prefix}.attn_q.weight"), device)?;
            let attention_wk = ct.tensor(reader, &format!("{prefix}.attn_k.weight"), device)?;
            let attention_wv = ct.tensor(reader, &format!("{prefix}.attn_v.weight"), device)?;
            let attention_wo =
                ct.tensor(reader, &format!("{prefix}.attn_output.weight"), device)?;
            // Plain MLP when there is at most one expert, MoE otherwise.
            let mlp_or_moe = if n_expert <= 1 {
                let feed_forward_w1 =
                    ct.tensor(reader, &format!("{prefix}.ffn_gate.weight"), device)?;
                let feed_forward_w2 =
                    ct.tensor(reader, &format!("{prefix}.ffn_down.weight"), device)?;
                let feed_forward_w3 =
                    ct.tensor(reader, &format!("{prefix}.ffn_up.weight"), device)?;
                MlpOrMoe::Mlp(Mlp {
                    feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?,
                    feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?,
                    feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?,
                })
            } else {
                // Router weights plus one MLP per expert.
                let feed_forward_gate_inp =
                    ct.tensor(reader, &format!("{prefix}.ffn_gate_inp.weight"), device)?;
                let mut experts = Vec::with_capacity(n_expert);
                for i in 0..n_expert {
                    let feed_forward_w1 =
                        ct.tensor(reader, &format!("{prefix}.ffn_gate.{i}.weight"), device)?;
                    let feed_forward_w2 =
                        ct.tensor(reader, &format!("{prefix}.ffn_down.{i}.weight"), device)?;
                    let feed_forward_w3 =
                        ct.tensor(reader, &format!("{prefix}.ffn_up.{i}.weight"), device)?;
                    experts.push(Mlp {
                        feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?,
                        feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?,
                        feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?,
                    })
                }
                MlpOrMoe::MoE {
                    n_expert_used,
                    feed_forward_gate_inp: QMatMul::from_qtensor(feed_forward_gate_inp)?,
                    experts,
                }
            };
            let attention_norm =
                ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?;
            let ffn_norm = ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?;
            let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
            let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
            let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp");
            layers.push(LayerWeights {
                attention_wq: QMatMul::from_qtensor(attention_wq)?,
                attention_wk: QMatMul::from_qtensor(attention_wk)?,
                attention_wv: QMatMul::from_qtensor(attention_wv)?,
                attention_wo: QMatMul::from_qtensor(attention_wo)?,
                attention_norm: RmsNorm::from_qtensor(attention_norm, rms_norm_eps)?,
                mlp_or_moe,
                ffn_norm: RmsNorm::from_qtensor(ffn_norm, rms_norm_eps)?,
                n_head: head_count,
                n_kv_head: head_count_kv,
                head_dim: embedding_length / head_count,
                cos: cos.clone(),
                sin: sin.clone(),
                neg_inf: neg_inf.clone(),
                kv_cache: None,
                span_attn,
                span_rot,
                span_mlp,
            })
        }
        let span = tracing::span!(tracing::Level::TRACE, "model");
        let span_output = tracing::span!(tracing::Level::TRACE, "output");
        Ok(Self {
            tok_embeddings: Embedding::new(tok_embeddings, embedding_length),
            layers,
            norm,
            output: QMatMul::from_qtensor(output)?,
            masks: HashMap::new(),
            span,
            span_output,
        })
    }
    /// Lazily build (and cache) the causal mask for sequence length `t`:
    /// a (t, t) u8 tensor with 1 strictly above the diagonal.
    fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> {
        if let Some(mask) = self.masks.get(&t) {
            Ok(mask.clone())
        } else {
            let mask: Vec<_> = (0..t)
                .flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
                .collect();
            let mask = Tensor::from_slice(&mask, (t, t), device)?;
            self.masks.insert(t, mask.clone());
            Ok(mask)
        }
    }
    /// Run the model on token ids `x` of shape (batch, seq_len), starting at
    /// position `index_pos` in the kv-cache. Returns the logits for the last
    /// position only.
    pub fn forward(&mut self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
        let (_b_sz, seq_len) = x.dims2()?;
        // Single-token decoding needs no causal mask.
        let mask = if seq_len == 1 {
            None
        } else {
            Some(self.mask(seq_len, x.device())?)
        };
        let _enter = self.span.enter();
        let mut layer_in = self.tok_embeddings.forward(x)?;
        for layer in self.layers.iter_mut() {
            let x = layer_in;
            let residual = &x;
            let x = layer.attention_norm.forward(&x)?;
            let attn = layer.forward_attn(&x, mask.as_ref(), index_pos)?;
            let x = (attn + residual)?;
            // MLP
            let _enter = layer.span_mlp.enter();
            let residual = &x;
            let x = layer.ffn_norm.forward(&x)?;
            let x = layer.mlp_or_moe.forward(&x)?;
            let x = (x + residual)?;
            layer_in = x
        }
        let x = self.norm.forward(&layer_in)?;
        // Only project the last position through the LM head.
        let x = x.i((.., seq_len - 1, ..))?;
        let _enter = self.span_output.enter();
        self.output.forward(&x)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/snac.rs | candle-transformers/src/models/snac.rs | #![allow(unused)]
//! Implementation of the Multi-Scale Neural Audio Codec (SNAC)
//!
//! See: [SNAC](https://github.com/hubertsiuzdak/snac)
//!
/// Multi-Scale Neural Audio Codec (SNAC) compresses audio into discrete codes at a low bitrate.
/// For more information, read the paper: https://arxiv.org/abs/2410.14411
///
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{
linear_b, Conv1d, Conv1dConfig, ConvTranspose1d, ConvTranspose1dConfig, LayerNorm, Linear,
VarBuilder,
};
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
    // Audio sampling rate in Hz.
    pub sampling_rate: usize,
    // Base channel width of the encoder's first convolution.
    pub encoder_dim: usize,
    // Down-sampling factor of each encoder block (channels double per block).
    pub encoder_rates: Vec<usize>,
    // Base channel width of the decoder.
    pub decoder_dim: usize,
    // Up-sampling factor of each decoder block (channels halve per block).
    pub decoder_rates: Vec<usize>,
    // Window size for the optional local attention blocks; None disables them.
    pub attn_window_size: Option<usize>,
    // Number of entries in each VQ codebook.
    pub codebook_size: usize,
    // Dimension of each codebook vector.
    pub codebook_dim: usize,
    // Temporal stride of each residual vector quantizer.
    pub vq_strides: Vec<usize>,
    // Whether decoder blocks inject a learned noise term.
    pub noise: bool,
    // Whether to use depthwise (grouped) convolutions.
    pub depthwise: bool,
}
// Equivalent to torch.repeat_interleave
/// Repeat every element of `img` `repeats` times along `dim`, e.g. `[a, b]`
/// with `repeats = 2` becomes `[a, a, b, b]`.
pub fn repeat_interleave<D: candle::shape::Dim>(
    img: &Tensor,
    repeats: usize,
    dim: D,
) -> Result<Tensor> {
    if repeats == 1 {
        return Ok(img.clone());
    }
    // Label the op correctly in dimension-resolution error messages (the
    // previous "chunk" label was a copy-paste leftover).
    let dim = dim.to_index(img.shape(), "repeat-interleave")?;
    // Insert a new axis of size `repeats` right after `dim`, broadcast into
    // it, then fold it back into `dim`.
    let img = img.unsqueeze(dim + 1)?;
    let mut dims = img.dims().to_vec();
    dims[dim + 1] = repeats;
    img.broadcast_as(dims)?.flatten(dim, dim + 1)
}
/// Build a `Conv1d` whose weight is stored in PyTorch weight-norm form:
/// `w = g * v / ||v||`, with `g` under `parametrizations.weight.original0`
/// and `v` under `parametrizations.weight.original1`.
pub fn conv1d_weight_norm(
    in_c: usize,
    out_c: usize,
    kernel_size: usize,
    config: candle_nn::Conv1dConfig,
    vb: VarBuilder,
) -> Result<Conv1d> {
    let g = vb.get((out_c, 1, 1), "parametrizations.weight.original0")?;
    // Depthwise checkpoints store `v` with a single input channel.
    let v = {
        let name = "parametrizations.weight.original1";
        match vb.get((out_c, in_c, kernel_size), name) {
            Ok(v) => v,
            Err(_) => vb.get((out_c, 1, kernel_size), name)?,
        }
    };
    // Per-output-channel L2 norm of `v` over the (in, kernel) axes.
    let v_norm = v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
    let weight = v.broadcast_mul(&g)?.broadcast_div(&v_norm)?;
    let bias = vb.get(out_c, "bias")?;
    Ok(Conv1d::new(weight, Some(bias), config))
}
/// Same as [`conv1d_weight_norm`] but for convolutions stored without a bias
/// term: reconstruct `w = g * v / ||v||` and build a bias-free `Conv1d`.
pub fn conv1d_weight_norm_no_bias(
    in_c: usize,
    out_c: usize,
    kernel_size: usize,
    config: candle_nn::Conv1dConfig,
    vb: VarBuilder,
) -> Result<Conv1d> {
    let g = vb.get((out_c, 1, 1), "parametrizations.weight.original0")?;
    // Depthwise checkpoints store `v` with a single input channel.
    let v = {
        let name = "parametrizations.weight.original1";
        match vb.get((out_c, in_c, kernel_size), name) {
            Ok(v) => v,
            Err(_) => vb.get((out_c, 1, kernel_size), name)?,
        }
    };
    let v_norm = v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
    let weight = v.broadcast_mul(&g)?.broadcast_div(&v_norm)?;
    Ok(Conv1d::new(weight, None, config))
}
/// Weight-norm loader for transposed 1d convolutions. The transposed layout
/// stores the weight as (in_c, out_c, kernel) and norms `g` per input channel.
pub fn conv_transpose1d_weight_norm(
    in_c: usize,
    out_c: usize,
    kernel_size: usize,
    bias: bool,
    config: candle_nn::ConvTranspose1dConfig,
    vb: VarBuilder,
) -> Result<ConvTranspose1d> {
    let g = vb.get((in_c, 1, 1), "parametrizations.weight.original0")?;
    let v = vb.get(
        (in_c, out_c, kernel_size),
        "parametrizations.weight.original1",
    )?;
    let v_norm = v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
    let weight = v.broadcast_mul(&g)?.broadcast_div(&v_norm)?;
    // Only load the bias tensor when the layer actually has one.
    let bias = bias.then(|| vb.get(out_c, "bias")).transpose()?;
    Ok(ConvTranspose1d::new(weight, bias, config))
}
// https://github.com/hubertsiuzdak/snac/blob/main/snac/attention.py
#[allow(unused)]
#[derive(Debug, Clone)]
struct SinusoidalEmbeddings {
    // Inverse frequencies, one per pair of head channels.
    inv_freq: Tensor,
    // Per-channel xpos scaling factors.
    scale: Tensor,
    scale_base: f32,
    use_xpos: bool,
}
impl SinusoidalEmbeddings {
    /// Precompute inverse frequencies and xpos scaling factors for a head of
    /// dimension `dim` (one entry per pair of channels).
    fn new(dim: usize, scale_base: f32, use_xpos: bool, dev: &Device) -> Result<Self> {
        // Even channel indices, as f32, drive both tables.
        let even: Vec<f32> = (0..dim).step_by(2).map(|i| i as f32).collect();
        let n = even.len();
        let inv_freq: Vec<f32> = even
            .iter()
            .map(|&i| 10_000f32.powf(i / dim as f32).recip())
            .collect();
        let scale: Vec<f32> = even
            .iter()
            .map(|&i| (i + 0.4 * dim as f32) / (1.4 * dim as f32))
            .collect();
        let inv_freq = Tensor::from_vec(inv_freq, n, dev)?.to_dtype(DType::F32)?;
        let scale = Tensor::from_vec(scale, n, dev)?.to_dtype(DType::F32)?;
        Ok(Self {
            inv_freq,
            scale,
            scale_base,
            use_xpos,
        })
    }
}
#[allow(unused)]
#[derive(Debug, Clone)]
struct LocalMHA {
    // Pre-attention layer norm over channels.
    norm: LayerNorm,
    // Fused q/k/v projection (width = 3 * dim).
    to_qkv: Linear,
    // Output projection back to `dim` channels.
    to_out: Linear,
    num_heads: usize,
    head_dim: usize,
    // Rotary/xpos embeddings; None when rotary embeddings are disabled.
    rel_pos: Option<SinusoidalEmbeddings>,
}
impl LocalMHA {
    /// Build a multi-head attention block over `dim` channels with `dim_head`
    /// channels per head; `window_size` only parameterizes the (optional)
    /// rotary embedding scale base.
    fn new(
        dim: usize,
        window_size: usize,
        dim_head: usize,
        use_rotary_pos_emb: bool,
        vb: VarBuilder,
    ) -> Result<Self> {
        let norm = candle_nn::layer_norm(dim, 1e-5, vb.pp("norm"))?;
        // q, k and v are produced by a single fused projection.
        let to_qkv = linear_b(dim, dim * 3, false, vb.pp("to_qkv"))?;
        let to_out = linear_b(dim, dim, false, vb.pp("to_out"))?;
        let rel_pos = use_rotary_pos_emb
            .then(|| {
                SinusoidalEmbeddings::new(dim_head, window_size as f32 / 2.0, false, vb.device())
            })
            .transpose()?;
        Ok(Self {
            norm,
            to_qkv,
            to_out,
            rel_pos,
            num_heads: dim / dim_head,
            head_dim: dim_head,
        })
    }
}
impl Module for LocalMHA {
    // Multi-head self-attention over the time axis with a residual
    // connection. Input/output layout is (batch, channels, time).
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, c, t) = xs.dims3()?;
        let residual = xs.clone();
        // LayerNorm acts on the channel dim, hence the transpose to (B, T, C).
        let xs = xs.transpose(1, 2)?.apply(&self.norm)?;
        let qkv = xs.apply(&self.to_qkv)?;
        // Split the fused projection into q, k, v, each of width c.
        let q = qkv.narrow(D::Minus1, 0, c)?;
        let k = qkv.narrow(D::Minus1, c, c)?;
        let v = qkv.narrow(D::Minus1, 2 * c, c)?;
        // (B, T, C) -> (B, heads, T, head_dim)
        let q = q
            .reshape((b, t, self.num_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let k = k
            .reshape((b, t, self.num_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let v = v
            .reshape((b, t, self.num_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        // NOTE(review): the rotary-embedding path is not implemented and will
        // panic if `rel_pos` is set (i.e. the block was built with
        // `use_rotary_pos_emb = true`).
        let (q, k) = match self.rel_pos {
            Some(_) => todo!(),
            None => (q, k),
        };
        let out = {
            // Scaled dot-product attention.
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
            // Non-causal attention
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&v)?
        };
        // Merge heads, project, and restore the (B, C, T) layout.
        let out = out
            .transpose(1, 2)?
            .reshape((b, t, self.num_heads * self.head_dim))?
            .apply(&self.to_out)?;
        out.transpose(1, 2)? + residual
    }
}
#[derive(Debug, Clone)]
// Snake activation with a learned per-channel frequency `alpha`.
struct Snake1d {
    // Shape (1, channels, 1) so it broadcasts over batch and time.
    alpha: Tensor,
}
impl Snake1d {
    /// Load the per-channel `alpha` parameter (shape `(1, channels, 1)`).
    pub fn new(channels: usize, vb: VarBuilder) -> Result<Self> {
        let alpha = vb.get((1, channels, 1), "alpha")?;
        Ok(Self { alpha })
    }
}
impl Module for Snake1d {
    // Snake activation: x + (1/alpha) * sin(alpha * x)^2, element-wise with a
    // per-channel alpha; 1e-9 keeps the reciprocal finite when alpha == 0.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs_shape = xs.shape();
        // Collapse any trailing dims past (B, C, ...) so alpha's (1, C, 1)
        // shape broadcasts; the original shape is restored at the end.
        let xs = xs.flatten_from(2)?;
        let sin = self.alpha.broadcast_mul(&xs)?.sin()?;
        let sin = (&sin * &sin)?;
        (xs + (&self.alpha + 1e-9)?.recip()?.broadcast_mul(&sin)?)?.reshape(xs_shape)
    }
}
#[derive(Debug, Clone)]
// Residual block: snake -> dilated conv -> snake -> 1x1 conv, plus skip.
struct ResidualUnit {
    snake1: Snake1d,
    // Dilated convolution (kernel 7 at current call sites).
    conv1: Conv1d,
    snake2: Snake1d,
    // 1x1 pointwise convolution.
    conv2: Conv1d,
}
impl ResidualUnit {
    /// Build a dilated-conv residual block:
    /// snake -> `kernel`x1 dilated conv -> snake -> 1x1 conv; `forward` adds
    /// the (possibly center-cropped) skip connection.
    fn new(
        dim: usize,
        dilation: usize,
        kernel: usize,
        groups: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        // "Same" padding for the dilated convolution (exact for odd kernels).
        let pad = ((kernel - 1) * dilation) / 2;
        let vb = vb.pp("block");
        let snake1 = Snake1d::new(dim, vb.pp(0))?;
        let cfg1 = Conv1dConfig {
            dilation,
            padding: pad,
            groups,
            ..Default::default()
        };
        // Use the `kernel` argument rather than a hard-coded 7 so that the
        // padding computed above always matches the actual kernel size. All
        // current call sites pass 7, so behavior is unchanged for them.
        let conv1 = conv1d_weight_norm(dim, dim, kernel, cfg1, vb.pp(1))?;
        let snake2 = Snake1d::new(dim, vb.pp(2))?;
        let conv2 = conv1d_weight_norm(dim, dim, 1, Default::default(), vb.pp(3))?;
        Ok(Self {
            snake1,
            conv1,
            snake2,
            conv2,
        })
    }
}
impl Module for ResidualUnit {
    // Applies the block and adds the skip connection. If the convolutions
    // shortened the sequence, the input is center-cropped by `pad` samples on
    // each side so the lengths match before the add.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let ys = xs
            .apply(&self.snake1)?
            .apply(&self.conv1)?
            .apply(&self.snake2)?
            .apply(&self.conv2)?;
        let pad = (xs.dim(D::Minus1)? - ys.dim(D::Minus1)?) / 2;
        if pad > 0 {
            &ys + xs.narrow(D::Minus1, pad, ys.dim(D::Minus1)?)
        } else {
            ys + xs
        }
    }
}
#[derive(Debug, Clone)]
// Injects input-dependent Gaussian noise (used by decoder blocks when the
// `noise` config flag is set).
struct NoiseBlock {
    // 1x1 conv producing the per-position noise scale.
    linear: Conv1d,
}
impl NoiseBlock {
    /// Load the bias-free 1x1 conv that scales the noise.
    fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
        let linear = conv1d_weight_norm_no_bias(dim, dim, 1, Default::default(), vb.pp("linear"))?;
        Ok(Self { linear })
    }
}
impl Module for NoiseBlock {
    // x + N(0, 1) * linear(x): the single-channel noise is broadcast over
    // channels. NOTE(review): fresh Gaussian samples are drawn on every call,
    // so decoding is non-deterministic when noise blocks are enabled.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, _c, t) = xs.dims3()?;
        let noise = Tensor::randn(0f32, 1f32, (b, 1, t), xs.device())?;
        let h = xs.apply(&self.linear)?;
        let n = noise.broadcast_mul(&h)?;
        let xs = (xs + n)?;
        Ok(xs)
    }
}
#[derive(Debug, Clone)]
// One decoder stage: snake -> transposed conv (upsample) -> optional noise ->
// three residual units with increasing dilation.
struct DecoderBlock {
    snake1: Snake1d,
    conv_tr1: ConvTranspose1d,
    // Present only when the model was built with `noise = true`.
    noise: Option<NoiseBlock>,
    res1: ResidualUnit,
    res2: ResidualUnit,
    res3: ResidualUnit,
}
impl DecoderBlock {
    /// Build a decoder stage that upsamples time by `stride` while mapping
    /// `in_dim` channels down to `out_dim`.
    fn new(
        in_dim: usize,
        out_dim: usize,
        stride: usize,
        noise: bool,
        groups: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        let vb = vb.pp("block");
        let snake1 = Snake1d::new(in_dim, vb.pp(0))?;
        let cfg = ConvTranspose1dConfig {
            stride,
            padding: stride.div_ceil(2),
            output_padding: stride % 2,
            ..Default::default()
        };
        let conv_tr1 =
            conv_transpose1d_weight_norm(in_dim, out_dim, 2 * stride, true, cfg, vb.pp(1))?;
        // The checkpoint stores children sequentially, so the residual units'
        // indices shift by one when a noise block is present.
        let (n, noise) = if noise {
            let noise = NoiseBlock::new(out_dim, vb.pp(2))?;
            (1, Some(noise))
        } else {
            (0, None)
        };
        let res1 = ResidualUnit::new(out_dim, 1, 7, groups, vb.pp(2 + n))?;
        let res2 = ResidualUnit::new(out_dim, 3, 7, groups, vb.pp(3 + n))?;
        let res3 = ResidualUnit::new(out_dim, 9, 7, groups, vb.pp(4 + n))?;
        Ok(Self {
            snake1,
            conv_tr1,
            noise,
            res1,
            res2,
            res3,
        })
    }
}
impl Module for DecoderBlock {
    // snake -> upsample -> optional noise -> three residual units. Applying
    // `&Option<NoiseBlock>` goes through candle's Module impl for Option,
    // which presumably acts as the identity when None — confirm in candle_nn.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.snake1)?
            .apply(&self.conv_tr1)?
            .apply(&self.noise.as_ref())?
            .apply(&self.res1)?
            .apply(&self.res2)?
            .apply(&self.res3)
    }
}
#[derive(Debug, Clone)]
// One encoder stage: three residual units, snake activation, then a strided
// down-sampling convolution.
struct EncoderBlock {
    res1: ResidualUnit,
    res2: ResidualUnit,
    res3: ResidualUnit,
    snake1: Snake1d,
    // Strided conv halving the time axis by `stride`.
    conv1: Conv1d,
}
impl EncoderBlock {
    /// Build an encoder stage mapping `in_dim` (defaults to `out_dim / 2`)
    /// channels up to `out_dim` while down-sampling time by `stride`.
    fn new(
        out_dim: usize,
        in_dim: Option<usize>,
        stride: usize,
        groups: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        let vb = vb.pp("block");
        let dim = in_dim.unwrap_or(out_dim / 2);
        // Residual units with growing dilation (1, 3, 9) on the input width.
        let res1 = ResidualUnit::new(dim, 1, 7, groups, vb.pp(0))?;
        let res2 = ResidualUnit::new(dim, 3, 7, groups, vb.pp(1))?;
        let res3 = ResidualUnit::new(dim, 9, 7, groups, vb.pp(2))?;
        let snake1 = Snake1d::new(dim, vb.pp(3))?;
        let down_cfg = Conv1dConfig {
            stride,
            padding: stride.div_ceil(2),
            ..Default::default()
        };
        let conv1 = conv1d_weight_norm(dim, out_dim, 2 * stride, down_cfg, vb.pp(4))?;
        Ok(Self {
            res1,
            res2,
            res3,
            snake1,
            conv1,
        })
    }
}
impl candle::Module for EncoderBlock {
    /// Residual units, snake activation, then the strided down-sampling conv.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.res1.forward(xs)?;
        let xs = self.res2.forward(&xs)?;
        let xs = self.res3.forward(&xs)?;
        let xs = self.snake1.forward(&xs)?;
        self.conv1.forward(&xs)
    }
}
#[derive(Debug, Clone)]
pub struct Encoder {
    // Input conv lifting 1 audio channel to `encoder_dim`.
    conv1: Conv1d,
    // One down-sampling block per encoder rate; channels double per block.
    blocks: Vec<EncoderBlock>,
    // Optional local attention — loaded but currently unused by `forward`.
    local_mha: Option<LocalMHA>,
    // Final (optionally depthwise) convolution.
    conv2: Conv1d,
}
impl candle::Module for Encoder {
    // Input conv -> down-sampling blocks -> final conv.
    // NOTE(review): `self.local_mha` is loaded but never applied here —
    // possibly deliberate since LocalMHA's rotary path is `todo!()` and would
    // panic. Confirm against upstream snac before enabling attention models.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = xs.apply(&self.conv1)?;
        for block in self.blocks.iter() {
            xs = xs.apply(block)?
        }
        xs.apply(&self.conv2)
    }
}
impl Encoder {
    /// Build the encoder: an input conv, one down-sampling block per stride
    /// (channels double at each block), an optional local attention block and
    /// a final, optionally depthwise, convolution.
    ///
    /// `idx` mirrors the child index inside the checkpoint's sequential
    /// container, so it must advance even across optional modules.
    fn new(
        mut d_model: usize,
        strides: &[usize],
        depthwise: bool,
        attn_window_size: Option<usize>,
        vb: VarBuilder,
    ) -> Result<Self> {
        let vb = vb.pp("block");
        let mut idx = 0;
        let cfg1 = Conv1dConfig {
            padding: 3,
            ..Default::default()
        };
        let conv1 = conv1d_weight_norm(1, d_model, 7, cfg1, vb.pp(idx))?;
        idx += 1;
        let mut blocks = Vec::with_capacity(strides.len());
        for &stride in strides.iter() {
            // Each stage doubles the channel width.
            d_model *= 2;
            let groups = if depthwise { d_model / 2 } else { 1 };
            let block = EncoderBlock::new(d_model, None, stride, groups, vb.pp(idx))?;
            idx += 1;
            blocks.push(block)
        }
        let local_mha = match attn_window_size {
            Some(w) => {
                let mha = LocalMHA::new(d_model, w, 64, true, vb.pp(idx))?;
                idx += 1;
                Some(mha)
            }
            None => None,
        };
        let groups = if depthwise { d_model } else { 1 };
        let cfg2 = Conv1dConfig {
            padding: 3,
            groups,
            ..Default::default()
        };
        let conv2 = conv1d_weight_norm(d_model, d_model, 7, cfg2, vb.pp(idx))?;
        // Kept for symmetry with the other increments; idx is unused afterwards.
        idx += 1;
        Ok(Self {
            conv1,
            blocks,
            local_mha,
            conv2,
        })
    }
}
#[derive(Debug, Clone)]
// Input stage of the decoder: either a depthwise 7x1 conv followed by a 1x1
// pointwise conv, or a single standard 7x1 conv.
enum ConvInit {
    Depthwise(Conv1d, Conv1d),
    Standard(Conv1d),
}
#[derive(Debug, Clone)]
pub struct Decoder {
    // Input stage mapping the latent width to `decoder_dim` channels.
    conv1: ConvInit,
    // Optional local attention — loaded but currently unused by `forward`.
    local_mha: Option<LocalMHA>,
    // One up-sampling block per decoder rate; channels halve per block.
    blocks: Vec<DecoderBlock>,
    snake1: Snake1d,
    // Final conv producing `d_out` output channels (1 for mono audio).
    conv2: Conv1d,
}
impl Decoder {
    #[allow(clippy::too_many_arguments)]
    /// Build the decoder: input stage, optional local attention, one
    /// up-sampling block per rate (channels halve per block), then a snake
    /// activation and the output convolution.
    ///
    /// `idx` mirrors the child index in the checkpoint's sequential
    /// container, so it must advance even across optional modules.
    fn new(
        in_c: usize,
        mut channels: usize,
        rates: &[usize],
        noise: bool,
        depthwise: bool,
        attn_window_size: Option<usize>,
        d_out: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        let vb = vb.pp("model");
        let mut idx = 0;
        let pad3 = Conv1dConfig {
            padding: 3,
            ..Default::default()
        };
        // Depthwise mode splits the input stage into a grouped 7x1 conv plus
        // a 1x1 pointwise conv; otherwise a single 7x1 conv is used.
        let conv1 = if depthwise {
            let cfg1 = Conv1dConfig {
                padding: 3,
                groups: in_c,
                ..Default::default()
            };
            let conv1 = conv1d_weight_norm(in_c, in_c, 7, cfg1, vb.pp(idx))?;
            idx += 1;
            let conv2 = conv1d_weight_norm(in_c, channels, 1, Default::default(), vb.pp(idx))?;
            idx += 1;
            ConvInit::Depthwise(conv1, conv2)
        } else {
            let conv1 = conv1d_weight_norm(in_c, channels, 7, pad3, vb.pp(idx))?;
            idx += 1;
            ConvInit::Standard(conv1)
        };
        let mut blocks = Vec::with_capacity(rates.len());
        let local_mha = match attn_window_size {
            Some(w) => {
                let mha = LocalMHA::new(channels, w, 64, true, vb.pp(idx))?;
                idx += 1;
                Some(mha)
            }
            None => None,
        };
        for stride in rates.iter() {
            let groups = if depthwise { channels / 2 } else { 1 };
            let block =
                DecoderBlock::new(channels, channels / 2, *stride, noise, groups, vb.pp(idx))?;
            idx += 1;
            // Each stage halves the channel width.
            channels /= 2;
            blocks.push(block)
        }
        let snake1 = Snake1d::new(channels, vb.pp(idx))?;
        idx += 1;
        let conv2 = conv1d_weight_norm(channels, d_out, 7, pad3, vb.pp(idx))?;
        // Kept for symmetry with the other increments; idx is unused afterwards.
        idx += 1;
        Ok(Self {
            conv1,
            local_mha,
            blocks,
            snake1,
            conv2,
        })
    }
}
impl candle::Module for Decoder {
    // Input stage -> up-sampling blocks -> snake -> output conv.
    // NOTE(review): `self.local_mha` is loaded but never applied here, same
    // as in the Encoder — confirm against upstream snac.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = match &self.conv1 {
            ConvInit::Standard(c) => xs.apply(c)?,
            ConvInit::Depthwise(c1, c2) => xs.apply(c1)?.apply(c2)?,
        };
        for block in self.blocks.iter() {
            xs = xs.apply(block)?
        }
        xs.apply(&self.snake1)?.apply(&self.conv2)
    }
}
/// L2-normalize `v` along dim 1, so each row has unit Euclidean norm.
fn normalize(v: &Tensor) -> Result<Tensor> {
    let norm = v.sqr()?.sum_keepdim(1)?.sqrt()?;
    v.broadcast_div(&norm)
}
// https://github.com/hubertsiuzdak/snac/blob/main/snac/vq.py
#[allow(unused)]
#[derive(Clone, Debug)]
// A single vector quantizer: projects latents to the codebook dimension,
// snaps them to the nearest codebook entry, and projects back.
struct VectorQuantizer {
    // 1x1 conv: latent dim -> codebook dim.
    in_proj: Conv1d,
    // 1x1 conv: codebook dim -> latent dim.
    out_proj: Conv1d,
    codebook: candle_nn::Embedding,
    // Temporal down-sampling factor applied before quantization.
    stride: usize,
}
impl VectorQuantizer {
    /// Load the projections and codebook; `stride > 1` means this quantizer
    /// operates on a temporally average-pooled version of the latent.
    fn new(
        in_dim: usize,
        cb_size: usize,
        cb_dim: usize,
        stride: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        let in_proj = conv1d_weight_norm(in_dim, cb_dim, 1, Default::default(), vb.pp("in_proj"))?;
        let out_proj =
            conv1d_weight_norm(cb_dim, in_dim, 1, Default::default(), vb.pp("out_proj"))?;
        let codebook = candle_nn::embedding(cb_size, cb_dim, vb.pp("codebook"))?;
        Ok(Self {
            in_proj,
            out_proj,
            codebook,
            stride,
        })
    }
    /// Snap each latent vector to its nearest codebook entry.
    ///
    /// Both latents and codebook rows are L2-normalized first. The argmin is
    /// taken over `2(||e||^2 - e.c) + ||c||^2`; since `||e||` is constant (1)
    /// per row after normalization, this picks the same entry as the squared
    /// Euclidean distance. Returns (quantized latents, code indices).
    fn decode_latents(&self, latents: &Tensor) -> Result<(Tensor, Tensor)> {
        let (b, d, t) = latents.dims3()?;
        // (B, D, T) -> (B*T, D): one row per latent vector.
        let encodings = latents.transpose(1, 2)?.reshape((b * t, d))?;
        let encodings = normalize(&encodings)?;
        let codebook = normalize(self.codebook.embeddings())?;
        let dist = (encodings
            .sqr()?
            .sum_keepdim(1)?
            .broadcast_sub(&encodings.matmul(&codebook.t()?)?)?
            * 2.0)?
            .broadcast_add(&codebook.sqr()?.sum_keepdim(1)?.t()?)?;
        let indices = dist.argmin(1)?.reshape((b, ()))?;
        let z_q = self.decode_code(&indices)?;
        Ok((z_q, indices))
    }
    /// Quantize latents `z`: optional average-pool by `stride`, project into
    /// codebook space, snap to codes, project back, then repeat-interleave to
    /// restore the original temporal resolution.
    fn encode(&self, z: &Tensor) -> Result<(Tensor, Tensor)> {
        let z = if self.stride > 1 {
            // avg_pool2d needs 4 dims; pool only over the time axis.
            let (b, c, t) = z.dims3()?;
            z.reshape((b, c, 1, t))?
                .avg_pool2d((1, self.stride))?
                .squeeze(2)?
        } else {
            z.clone()
        };
        let z_e = z.apply(&self.in_proj)?;
        let (z_q, indices) = self.decode_latents(&z_e)?;
        let z_q = z_q.apply(&self.out_proj)?;
        let z_q = if self.stride > 1 {
            repeat_interleave(&z_q, self.stride, D::Minus1)?
        } else {
            z_q
        };
        Ok((z_q, indices))
    }
    /// Look up codebook vectors for the given indices: (B, T) -> (B, T, D).
    fn embed_code(&self, embed_id: &Tensor) -> Result<Tensor> {
        embed_id.apply(&self.codebook)
    }
    /// Same as `embed_code` but in (B, D, T) channel-first layout.
    fn decode_code(&self, embed_id: &Tensor) -> Result<Tensor> {
        self.embed_code(embed_id)?.transpose(1, 2)
    }
}
#[derive(Clone, Debug)]
// Residual VQ: each quantizer encodes the residual left by the previous ones.
pub struct ResidualVectorQuantizer {
    quantizers: Vec<VectorQuantizer>,
}
impl ResidualVectorQuantizer {
    /// Build one `VectorQuantizer` per configured temporal stride.
    fn new(
        input_dim: usize,
        cb_size: usize,
        cb_dim: usize,
        vq_strides: &[usize],
        vb: VarBuilder,
    ) -> Result<Self> {
        let vb = &vb.pp("quantizers");
        let quantizers = vq_strides
            .iter()
            .enumerate()
            .map(|(i, stride)| VectorQuantizer::new(input_dim, cb_size, cb_dim, *stride, vb.pp(i)))
            .collect::<Result<Vec<_>>>()?;
        Ok(Self { quantizers })
    }
    /// Quantize `z` stage by stage: each quantizer encodes the residual left
    /// over by the previous stages. Returns the summed quantized latent and
    /// one index tensor per quantizer.
    fn encode(&self, z: &Tensor) -> Result<(Tensor, Vec<Tensor>)> {
        let mut residual = z.clone();
        let mut z_q = z.zeros_like()?;
        let mut codes = Vec::with_capacity(self.quantizers.len());
        for quantizer in self.quantizers.iter() {
            let (z_q_i, indices_i) = quantizer.encode(&residual)?;
            z_q = (z_q + &z_q_i)?;
            residual = (residual - &z_q_i)?;
            codes.push(indices_i)
        }
        Ok((z_q, codes))
    }
    #[allow(clippy::wrong_self_convention)]
    /// Reconstruct the quantized latent from stored code indices: each
    /// quantizer's codes are decoded, projected back to the latent dim,
    /// up-sampled by its stride, and summed across quantizers.
    fn from_codes(&self, codes: &[&Tensor]) -> Result<Tensor> {
        let mut sum = None;
        for (quantizer, codes) in self.quantizers.iter().zip(codes.iter()) {
            let z_p_i = quantizer.decode_code(codes)?;
            let z_q_i = z_p_i.apply(&quantizer.out_proj)?;
            let z_q_i = repeat_interleave(&z_q_i, quantizer.stride, D::Minus1)?;
            let s = match sum {
                None => z_q_i,
                Some(s) => (s + z_q_i)?,
            };
            sum = Some(s)
        }
        match sum {
            Some(s) => Ok(s),
            None => candle::bail!("empty codebooks"),
        }
    }
}
/// Greatest common divisor via the iterative Euclidean algorithm.
fn gcd(mut a: usize, mut b: usize) -> usize {
    while b != 0 {
        (a, b) = (b, a % b);
    }
    a
}
/// Least common multiple; dividing before multiplying avoids overflow.
fn lcm(a: usize, b: usize) -> usize {
    a / gcd(a, b) * b
}
// https://github.com/hubertsiuzdak/snac/blob/main/snac/snac.py
#[derive(Debug, Clone)]
pub struct Model {
    pub encoder: Encoder,
    pub quantizer: ResidualVectorQuantizer,
    pub decoder: Decoder,
    // Total encoder down-sampling (product of `encoder_rates`): audio samples
    // per latent time step.
    pub hop_length: usize,
    pub config: Config,
}
impl Model {
    /// Instantiate the full codec (encoder, residual VQ, decoder) from a
    /// config and checkpoint.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let encoder = Encoder::new(
            cfg.encoder_dim,
            &cfg.encoder_rates,
            cfg.depthwise,
            cfg.attn_window_size,
            vb.pp("encoder"),
        )?;
        // Each encoder block doubles the channel width, so the latent width
        // is encoder_dim * 2^(number of blocks).
        let latent_dim = cfg.encoder_dim * 2usize.pow(cfg.encoder_rates.len() as u32);
        let quantizer = ResidualVectorQuantizer::new(
            latent_dim,
            cfg.codebook_size,
            cfg.codebook_dim,
            &cfg.vq_strides,
            vb.pp("quantizer"),
        )?;
        let decoder = Decoder::new(
            latent_dim,
            cfg.decoder_dim,
            &cfg.decoder_rates,
            cfg.noise,
            cfg.depthwise,
            cfg.attn_window_size,
            /* d_out */ 1,
            vb.pp("decoder"),
        )?;
        // Audio samples consumed per latent step.
        let hop_length = cfg.encoder_rates.iter().product::<usize>();
        Ok(Self {
            encoder,
            decoder,
            quantizer,
            config: cfg.clone(),
            hop_length,
        })
    }
    /// Right-pad the audio with zeros so its length is a multiple of
    /// `hop_length * lcm(vq_strides[0], attn_window_size)`, which keeps the
    /// latent length divisible by every temporal stride downstream.
    fn preprocess(&self, audio_data: &Tensor) -> Result<Tensor> {
        let len = audio_data.dim(D::Minus1)?;
        let lcm = lcm(
            self.config.vq_strides[0],
            self.config.attn_window_size.unwrap_or(1),
        );
        let pad_to = self.hop_length * lcm;
        let right_pad = len.div_ceil(pad_to) * pad_to - len;
        let audio_data = audio_data.pad_with_zeros(D::Minus1, 0, right_pad)?;
        Ok(audio_data)
    }
    /// Compress audio into one code tensor per quantizer.
    pub fn encode(&self, audio_data: &Tensor) -> Result<Vec<Tensor>> {
        let audio_data = self.preprocess(audio_data)?;
        let z = self.encoder.forward(&audio_data)?;
        let (_, codes) = self.quantizer.encode(&z)?;
        Ok(codes)
    }
    /// Reconstruct audio from stored codes (one tensor per quantizer).
    pub fn decode(&self, audio_codes: &[&Tensor]) -> Result<Tensor> {
        let audio_values = self.quantizer.from_codes(audio_codes)?;
        audio_values.apply(&self.decoder)
    }
    pub fn config(&self) -> &Config {
        &self.config
    }
    /// Number of residual quantizer stages (i.e. codebooks per time step).
    pub fn num_codebooks(&self) -> usize {
        self.quantizer.quantizers.len()
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/rwkv_v5.rs | candle-transformers/src/models/rwkv_v5.rs | //! RWKV v5 model implementation.
//!
//! The [RWKV model](https://wiki.rwkv.com/) is a recurrent neural network model
//! with performance on par with transformer architectures. Several variants are
//! available, candle implements the v5 and v6 versions and can be used with
//! Eagle 7B([blog post](https://blog.rwkv.com/p/eagle-7b-soaring-past-transformers)).
//!
//! Key characteristics:
//! - Time-mix attention mechanism
//! - Channel-mix feed-forward network
//! - Linear attention
//! - Group normalization
//! - Token shift mechanism
//!
//! References:
//! - [RWKV Language Model](https://github.com/BlinkDL/RWKV-LM)
//! - [RWKV v5 Release](https://github.com/BlinkDL/ChatRWKV/tree/main)
//!
//! # Example
//!
//! ```bash
//! cargo run --example rwkv --release -- \
//! --prompt "The smallest prime is "
//!
//! > avx: true, neon: false, simd128: false, f16c: true
//! > temp: 0.00 repeat-penalty: 1.10 repeat-last-n: 64
//! > The smallest prime is ϕ(2) = 2.
//! > The smallest composite is ϕ(3) = 3.
//! > The smallest perfect number is ϕ(5) = 5.
//! > The smallest perfect square is ϕ(4) = 4.
//! > The smallest perfect cube is ϕ(6) = 6.
//! ```
use super::with_tracing::{layer_norm, linear_no_bias as linear, LayerNorm, Linear};
use candle::{DType, Device, IndexOp, Result, Tensor};
use candle_nn::{embedding, Embedding, Module, VarBuilder};
use std::collections::{HashMap, HashSet};
/// Serde default for `Config::num_attention_heads` when the field is absent
/// from the checkpoint's config file.
fn default_num_attention_heads() -> usize {
    64
}
// https://huggingface.co/RWKV/HF_v5-Eagle-7B/blob/main/configuration_rwkv5.py
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub num_hidden_layers: usize,
    // Width of the attention projections (key/value/receptance/gate).
    pub attention_hidden_size: usize,
    #[serde(default = "default_num_attention_heads")]
    pub num_attention_heads: usize,
    // Channels per attention head.
    pub head_size: usize,
    // FFN width; when None it is derived as (hidden_size * 3.5) rounded down
    // to a multiple of 32 (see FeedForward::new).
    pub intermediate_size: Option<usize>,
    pub layer_norm_epsilon: f64,
    // Rescaling period for hidden states — presumably applied every
    // `rescale_every` layers by code outside this chunk; confirm there.
    pub rescale_every: usize,
}
pub struct StatePerLayer {
    // Last input seen by the attention token-shift; (batch, hidden) as
    // allocated by State::new.
    pub extract_key_value: Tensor,
    // Linear-attention recurrent state; (batch, heads, head_size, head_size).
    pub linear_attention: Tensor,
    // Last input seen by the feed-forward token-shift; (batch, hidden).
    pub feed_forward: Tensor,
}
pub struct State {
    // One recurrent state per transformer layer.
    pub per_layer: Vec<StatePerLayer>,
    // Current sequence position; maintained by the caller/model code.
    pub pos: usize,
}
impl State {
    /// Allocate a fresh all-zeros recurrent state for a batch of `batch_size`.
    pub fn new(batch_size: usize, cfg: &Config, dev: &Device) -> Result<Self> {
        // Certainly a weird convention but taken from modeling_rwkv5.py:
        // this quantity plays the role of the head count below.
        let heads = cfg.hidden_size / cfg.num_attention_heads;
        let head_size = cfg.hidden_size / heads;
        let per_layer = (0..cfg.num_hidden_layers)
            .map(|_| {
                let extract_key_value =
                    Tensor::zeros((batch_size, cfg.hidden_size), DType::F32, dev)?;
                let linear_attention = Tensor::zeros(
                    (batch_size, heads, head_size, head_size),
                    DType::F32,
                    dev,
                )?;
                let feed_forward =
                    Tensor::zeros((batch_size, cfg.hidden_size), DType::F32, dev)?;
                Ok(StatePerLayer {
                    extract_key_value,
                    linear_attention,
                    feed_forward,
                })
            })
            .collect::<Result<Vec<_>>>()?;
        Ok(Self { per_layer, pos: 0 })
    }
}
#[derive(Debug, Clone)]
struct SelfAttention {
    key: Linear,
    receptance: Linear,
    value: Linear,
    gate: Linear,
    output: Linear,
    // Group norm over the concatenated head outputs.
    ln_x: candle_nn::GroupNorm,
    // Token-shift mixing coefficients, each of shape (1, 1, hidden_size).
    time_mix_key: Tensor,
    time_mix_value: Tensor,
    time_mix_receptance: Tensor,
    // Per-head decay of the linear-attention state; (heads, head_size).
    time_decay: Tensor,
    // Per-head bonus applied to the current timestep; (heads, head_size).
    time_faaaa: Tensor,
    time_mix_gate: Tensor,
    layer_id: usize,
    n_attn_heads: usize,
}
impl SelfAttention {
    /// Load the attention projections and the per-head time-mix/decay
    /// parameters for layer `layer_id`.
    pub fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_size = cfg.hidden_size;
        let attn_hidden_size = cfg.attention_hidden_size;
        let key = linear(hidden_size, attn_hidden_size, vb.pp("key"))?;
        let receptance = linear(hidden_size, attn_hidden_size, vb.pp("receptance"))?;
        let value = linear(hidden_size, attn_hidden_size, vb.pp("value"))?;
        let gate = linear(hidden_size, attn_hidden_size, vb.pp("gate"))?;
        let output = linear(attn_hidden_size, hidden_size, vb.pp("output"))?;
        // One group per attention head.
        let ln_x = candle_nn::group_norm(
            hidden_size / cfg.head_size,
            hidden_size,
            1e-5,
            vb.pp("ln_x"),
        )?;
        let time_mix_key = vb.get((1, 1, cfg.hidden_size), "time_mix_key")?;
        let time_mix_value = vb.get((1, 1, cfg.hidden_size), "time_mix_value")?;
        let time_mix_receptance = vb.get((1, 1, cfg.hidden_size), "time_mix_receptance")?;
        let n_attn_heads = cfg.hidden_size / cfg.head_size;
        let time_decay = vb.get((n_attn_heads, cfg.head_size), "time_decay")?;
        let time_faaaa = vb.get((n_attn_heads, cfg.head_size), "time_faaaa")?;
        let time_mix_gate = vb.get((1, 1, cfg.hidden_size), "time_mix_gate")?;
        Ok(Self {
            key,
            value,
            receptance,
            gate,
            output,
            ln_x,
            time_mix_key,
            time_mix_value,
            time_mix_receptance,
            time_decay,
            time_faaaa,
            time_mix_gate,
            layer_id,
            n_attn_heads,
        })
    }
    /// RWKV v5 time-mix: token-shift mixing of the input with the previous
    /// token, followed by the linear-attention recurrence. Updates this
    /// layer's entries in `state` in place.
    pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        let h = self.time_decay.dim(0)?;
        let (b, t, s) = xs.dims3()?;
        let s = s / h;
        let (receptance, key, value, gate) = {
            // extract key-value
            // Token shift: mix each position with the previous token (the
            // stored state stands in for the token before this chunk).
            let shifted = state.per_layer[self.layer_id].extract_key_value.clone();
            let shifted = if shifted.rank() == 2 {
                shifted.unsqueeze(1)?
            } else {
                shifted
            };
            let key = ((xs * &self.time_mix_key)? + &shifted * (1.0 - &self.time_mix_key)?)?;
            let value = ((xs * &self.time_mix_value)? + &shifted * (1.0 - &self.time_mix_value)?)?;
            let receptance = ((xs * &self.time_mix_receptance)?
                + &shifted * (1.0 - &self.time_mix_receptance)?)?;
            let gate = ((xs * &self.time_mix_gate)? + &shifted * (1.0 - &self.time_mix_gate)?)?;
            let key = self.key.forward(&key)?;
            let value = self.value.forward(&value)?;
            let receptance = self.receptance.forward(&receptance)?;
            let gate = candle_nn::ops::silu(&self.gate.forward(&gate)?)?;
            // Remember the last token for the next chunk's token shift.
            state.per_layer[self.layer_id].extract_key_value = xs.i((.., t - 1))?;
            (receptance, key, value, gate)
        };
        // linear attention
        let mut state_ = state.per_layer[self.layer_id].linear_attention.clone();
        let key = key.reshape((b, t, h, s))?.permute((0, 2, 3, 1))?;
        let value = value.reshape((b, t, h, s))?.transpose(1, 2)?;
        let receptance = receptance.reshape((b, t, h, s))?.transpose(1, 2)?;
        // Decay factor exp(-exp(w)) per head channel, broadcast as a column.
        let time_decay = self
            .time_decay
            .exp()?
            .neg()?
            .exp()?
            .reshape(((), 1, 1))?
            .reshape((self.n_attn_heads, (), 1))?;
        let time_faaaa =
            self.time_faaaa
                .reshape(((), 1, 1))?
                .reshape((self.n_attn_heads, (), 1))?;
        let mut out: Vec<Tensor> = Vec::with_capacity(t);
        // Sequential recurrence over time:
        //   out_t   = r_t (u * k_t v_t^T + S)
        //   S      <- k_t v_t^T + w * S
        for t_ in 0..t {
            let rt = receptance.i((.., .., t_..t_ + 1))?.contiguous()?;
            let kt = key.i((.., .., .., t_..t_ + 1))?.contiguous()?;
            let vt = value.i((.., .., t_..t_ + 1))?.contiguous()?;
            let at = kt.matmul(&vt)?;
            let rhs = (time_faaaa.broadcast_mul(&at)? + &state_)?;
            let out_ = rt.matmul(&rhs)?.squeeze(2)?;
            state_ = (&at + time_decay.broadcast_mul(&state_))?;
            out.push(out_)
        }
        // Group-norm per head, gate, and project back to the hidden size.
        let out = Tensor::cat(&out, 1)?.reshape((b * t, h * s, 1))?;
        let out = out.apply(&self.ln_x)?.reshape((b, t, h * s))?;
        let out = (out * gate)?.apply(&self.output)?;
        state.per_layer[self.layer_id].linear_attention = state_;
        Ok(out)
    }
}
/// RWKV channel-mixing ("feed-forward") block with token-shift interpolation.
#[derive(Debug, Clone)]
struct FeedForward {
    // (1, 1, hidden) interpolation coefficients for the token shift.
    time_mix_key: Tensor,
    time_mix_receptance: Tensor,
    key: Linear,
    receptance: Linear,
    value: Linear,
    // Index into the per-layer recurrent state.
    layer_id: usize,
}
impl FeedForward {
    /// Load the channel-mixing weights for layer `layer_id` from `vb`.
    pub fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        // Default intermediate size: 3.5 * hidden, rounded down to a multiple of 32.
        let int_size = cfg
            .intermediate_size
            .unwrap_or(((cfg.hidden_size as f64 * 3.5) as usize) / 32 * 32);
        let key = linear(cfg.hidden_size, int_size, vb.pp("key"))?;
        let receptance = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("receptance"))?;
        let value = linear(int_size, cfg.hidden_size, vb.pp("value"))?;
        let time_mix_key = vb.get((1, 1, cfg.hidden_size), "time_mix_key")?;
        let time_mix_receptance = vb.get((1, 1, cfg.hidden_size), "time_mix_receptance")?;
        Ok(Self {
            key,
            receptance,
            value,
            time_mix_key,
            time_mix_receptance,
            layer_id,
        })
    }
    /// Channel mixing: sigmoid(receptance(x)) gates value(relu(key(x))^2), where
    /// the inputs are first interpolated with the previous token via `state`.
    pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        let shifted = &state.per_layer[self.layer_id].feed_forward;
        let key = (xs.broadcast_mul(&self.time_mix_key)?
            + shifted.broadcast_mul(&(1.0 - &self.time_mix_key)?)?)?;
        let receptance = (xs.broadcast_mul(&self.time_mix_receptance)?
            + shifted.broadcast_mul(&(1.0 - &self.time_mix_receptance)?)?)?;
        let key = key.apply(&self.key)?.relu()?.sqr()?;
        let value = key.apply(&self.value)?;
        let receptance = candle_nn::ops::sigmoid(&receptance.apply(&self.receptance)?)?;
        // Keep the last position of this chunk for the next call's token shift.
        state.per_layer[self.layer_id].feed_forward = xs.i((.., xs.dim(1)? - 1))?;
        let xs = (receptance * value)?;
        Ok(xs)
    }
}
/// A single RWKV block: pre-norm time mixing (attention) followed by pre-norm
/// channel mixing (feed-forward), both wrapped in residual connections.
/// The very first block additionally normalizes the raw embeddings (`pre_ln`).
#[derive(Debug, Clone)]
struct Block {
    pre_ln: Option<LayerNorm>,
    ln1: LayerNorm,
    ln2: LayerNorm,
    attention: SelfAttention,
    feed_forward: FeedForward,
}
impl Block {
    /// Load all sub-modules of block `layer_id` from `vb`.
    pub fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        // Only the first layer carries the extra embedding normalization.
        let pre_ln = if layer_id == 0 {
            Some(layer_norm(
                cfg.hidden_size,
                cfg.layer_norm_epsilon,
                vb.pp("pre_ln"),
            )?)
        } else {
            None
        };
        Ok(Self {
            pre_ln,
            ln1: layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln1"))?,
            ln2: layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln2"))?,
            attention: SelfAttention::new(layer_id, cfg, vb.pp("attention"))?,
            feed_forward: FeedForward::new(layer_id, cfg, vb.pp("feed_forward"))?,
        })
    }
    /// Run one block over `xs`, updating the recurrent `state` in place.
    pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        let xs = match &self.pre_ln {
            Some(pre_ln) => pre_ln.forward(xs)?,
            None => xs.clone(),
        };
        let attn_out = self.attention.forward(&self.ln1.forward(&xs)?, state)?;
        let xs = (attn_out + xs)?;
        let ffn_out = self.feed_forward.forward(&self.ln2.forward(&xs)?, state)?;
        ffn_out + xs
    }
}
/// Full RWKV-v5 model: embeddings, a stack of blocks, a final layer norm and
/// the language-model head.
#[derive(Debug, Clone)]
pub struct Model {
    embeddings: Embedding,
    blocks: Vec<Block>,
    ln_out: LayerNorm,
    head: Linear,
    // When rescaled, activations are halved every `rescale_every` blocks.
    rescale_every: usize,
    layers_are_rescaled: bool,
}
impl Model {
    /// Load the model from `vb` (expects the `rwkv.*` / `head.*` weight layout).
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = vb.pp("rwkv");
        let embeddings = embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embeddings"))?;
        let mut blocks = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_b = vb_m.pp("blocks");
        for block_index in 0..cfg.num_hidden_layers {
            let block = Block::new(block_index, cfg, vb_b.pp(block_index))?;
            blocks.push(block)
        }
        let ln_out = layer_norm(cfg.hidden_size, 1e-5, vb_m.pp("ln_out"))?;
        let head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("head"))?;
        Ok(Self {
            embeddings,
            blocks,
            ln_out,
            head,
            rescale_every: cfg.rescale_every,
            // NOTE(review): never set to true anywhere in this chunk, so the
            // rescaling branch in `forward` is currently inactive — confirm.
            layers_are_rescaled: false, // This seem to only happen for the f16/bf16 dtypes.
        })
    }
    /// Compute logits for the token ids `xs` of shape (batch, seq), advancing
    /// the recurrent `state` position counter.
    pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        let (_b_size, _seq_len) = xs.dims2()?;
        let mut xs = xs.apply(&self.embeddings)?;
        for (block_idx, block) in self.blocks.iter().enumerate() {
            xs = block.forward(&xs, state)?;
            // Periodic halving keeps activations in range for rescaled weights.
            if self.layers_are_rescaled && (block_idx + 1) % self.rescale_every == 0 {
                xs = (xs / 2.)?
            }
        }
        let xs = xs.apply(&self.ln_out)?.apply(&self.head)?;
        state.pos += 1;
        Ok(xs)
    }
}
// A token is stored as its raw byte string.
type Bytes = Vec<u8>;
// Byte-level greedy tokenizer matching the RWKV reference implementation:
// https://github.com/BlinkDL/ChatRWKV/blob/095e812aef15a1f74107f6c39d13578a2412dc46/RWKV_v5_demo.py#L14
pub struct Tokenizer {
    // table[b0][b1]: candidate multi-byte tokens starting with bytes (b0, b1).
    table: Vec<Vec<Vec<Bytes>>>,
    // good[b0]: set of second bytes for which a multi-byte token exists.
    good: Vec<HashSet<u8>>,
    idx2token: HashMap<u32, Vec<u8>>,
    token2idx: HashMap<Vec<u8>, u32>,
}
impl Tokenizer {
    /// Load the vocabulary from a JSON file mapping token strings to ids and
    /// build the lookup tables used by greedy encoding.
    pub fn new<P: AsRef<std::path::Path>>(p: P) -> Result<Self> {
        let file = std::fs::File::open(p)?;
        let token2idx: HashMap<String, u32> =
            serde_json::from_reader(file).map_err(candle::Error::wrap)?;
        let token2idx = token2idx
            .into_iter()
            .map(|(key, value)| (key.into_bytes(), value))
            .collect::<HashMap<_, _>>();
        let idx2token = token2idx
            .iter()
            .map(|(key, value)| (*value, key.to_vec()))
            .collect::<HashMap<_, _>>();
        let max_idx = token2idx.values().copied().max().unwrap_or(0);
        // Ids are visited in reverse so that the per-(b0, b1) candidate lists
        // end up in the same match priority as the reference implementation.
        let mut table = vec![vec![vec![]; 256]; 256];
        let mut good = vec![HashSet::new(); 256];
        for idx in (0..(1 + max_idx)).rev() {
            let s = match idx2token.get(&idx) {
                None => continue,
                Some(s) => s,
            };
            if s.len() >= 2 {
                let (s0, s1) = (s[0], s[1]);
                table[s0 as usize][s1 as usize].push(s.to_vec());
                good[s0 as usize].insert(s1);
            }
        }
        Ok(Self {
            table,
            good,
            idx2token,
            token2idx,
        })
    }
    /// Concatenate the byte strings of `tokens`, silently skipping unknown ids.
    pub fn decode_bytes(&self, tokens: &[u32]) -> Vec<u8> {
        let mut v = Vec::new();
        for token_id in tokens.iter() {
            if let Some(token) = self.idx2token.get(token_id) {
                v.extend_from_slice(token.as_slice())
            }
        }
        v
    }
    /// Decode `tokens` into a string; fails if the bytes are not valid UTF-8.
    pub fn decode(&self, tokens: &[u32]) -> Result<String> {
        let bytes = self.decode_bytes(tokens);
        String::from_utf8(bytes).map_err(candle::Error::wrap)
    }
    /// Greedily tokenize `bytes`: at each position, take the first matching
    /// multi-byte candidate from the table, falling back to the single byte.
    pub fn encode_bytes(&self, bytes: &[u8]) -> Result<Vec<u32>> {
        let mut tokens = Vec::new();
        let mut i = 0;
        while i < bytes.len() {
            // Default candidate: the single byte at position i.
            let mut s = vec![bytes[i]];
            if i + 1 < bytes.len() && self.good[bytes[i] as usize].contains(&bytes[i + 1]) {
                let table = &self.table[bytes[i] as usize][bytes[i + 1] as usize];
                for table_elem in table.iter() {
                    if bytes[i..].starts_with(table_elem) {
                        s = table_elem.to_vec();
                        break;
                    }
                }
            }
            i += s.len();
            let token = match self.token2idx.get(&s) {
                None => candle::bail!("unexpected token '{}' {s:?}", String::from_utf8_lossy(&s)),
                Some(token) => *token,
            };
            tokens.push(token)
        }
        Ok(tokens)
    }
    /// Encode a UTF-8 string into token ids.
    pub fn encode(&self, str: &str) -> Result<Vec<u32>> {
        self.encode_bytes(str.as_bytes())
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_stable_lm.rs | candle-transformers/src/models/quantized_stable_lm.rs | //! Module for quantized StableLM implementation.
//!
//! StableLM is a series of open-source large language models
//! optimized for performance and stability. This implementation
//! provides quantization support for efficient model deployment.
//!
//! Key characteristics:
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for 8-bit quantization
//!
//! References:
//! - [StableLM](https://github.com/Stability-AI/StableLM)
//!
use crate::quantized_nn::{layer_norm, linear, linear_no_bias, Embedding, Linear};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, LayerNorm};
use std::sync::Arc;
pub use crate::models::stable_lm::Config;
use crate::models::stable_lm::RotaryEmbedding;
/// Gated feed-forward network: down_proj(act(gate_proj(x)) * up_proj(x)).
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
    gate_proj: Linear,
    up_proj: Linear,
    down_proj: Linear,
    act_fn: Activation,
    span: tracing::Span,
}
impl MLP {
    /// Load the three (bias-free) projections from `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let (h, i) = (cfg.hidden_size, cfg.intermediate_size);
        Ok(Self {
            gate_proj: linear_no_bias(h, i, vb.pp("gate_proj"))?,
            up_proj: linear_no_bias(h, i, vb.pp("up_proj"))?,
            down_proj: linear_no_bias(i, h, vb.pp("down_proj"))?,
            act_fn: cfg.hidden_act,
            span: tracing::span!(tracing::Level::TRACE, "mlp"),
        })
    }
}
impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let gated = self.act_fn.forward(&self.gate_proj.forward(xs)?)?;
        let up = self.up_proj.forward(xs)?;
        self.down_proj.forward(&(gated * up)?)
    }
}
/// Multi-head self-attention with partial rotary embeddings, grouped-query
/// KV heads and an optional incremental KV cache.
#[derive(Debug, Clone)]
struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_heads: usize,
    num_kv_heads: usize,
    // num_heads / num_kv_heads: replication factor for grouped-query attention.
    num_kv_groups: usize,
    head_dim: usize,
    hidden_size: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    // Cached (keys, values) from previous forward calls, if caching is enabled.
    kv_cache: Option<(Tensor, Tensor)>,
    use_cache: bool,
    // Leading slice of head_dim that receives the rotary rotation.
    rotary_ndims: usize,
    span: tracing::Span,
}
impl Attention {
    /// Load the q/k/v/o projections; q/k/v carry biases iff `cfg.use_qkv_bias`.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let head_dim = cfg.head_dim();
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        let linear_layer = if cfg.use_qkv_bias {
            linear
        } else {
            linear_no_bias
        };
        let q_proj = linear_layer(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
        let k_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
        let v_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
        // The output projection never has a bias.
        let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_heads,
            num_kv_heads,
            num_kv_groups: cfg.num_kv_groups(),
            head_dim,
            hidden_size: hidden_sz,
            rotary_emb,
            kv_cache: None,
            use_cache: cfg.use_cache,
            rotary_ndims: cfg.rotary_ndims(),
            span: tracing::span!(tracing::Level::TRACE, "attn"),
        })
    }
    /// Scaled dot-product attention over `xs` of shape (batch, seq, hidden).
    /// `seqlen_offset` is the number of cached positions preceding this chunk.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b_sz, q_len, _) = xs.dims3()?;
        let query_states = self.q_proj.forward(xs)?;
        let key_states = self.k_proj.forward(xs)?;
        let value_states = self.v_proj.forward(xs)?;
        // (b, seq, heads * head_dim) -> (b, heads, seq, head_dim)
        let query_states = query_states
            .reshape((b_sz, q_len, self.num_heads, self.head_dim))?
            .transpose(1, 2)?;
        let key_states = key_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let value_states = value_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        // Partial rotary: only the first `rot_ndims` channels get rotated, the
        // remaining `pass_ndims` channels pass through unchanged.
        let (rot_ndims, pass_ndims) = (self.rotary_ndims, self.head_dim - self.rotary_ndims);
        let query_rot = query_states.narrow(D::Minus1, 0, rot_ndims)?;
        let query_pass = query_states.narrow(D::Minus1, rot_ndims, pass_ndims)?;
        let key_rot = key_states.narrow(D::Minus1, 0, rot_ndims)?;
        let key_pass = key_states.narrow(D::Minus1, rot_ndims, pass_ndims)?;
        let (query_rot, key_rot) =
            self.rotary_emb
                .apply_rotary_emb_qkv(&query_rot, &key_rot, seqlen_offset)?;
        let query_states = Tensor::cat(&[query_rot, query_pass], D::Minus1)?.contiguous()?;
        let key_states = Tensor::cat(&[key_rot, key_pass], D::Minus1)?.contiguous()?;
        // Prepend previously cached keys/values along the sequence dimension.
        let (key_states, value_states) = match &self.kv_cache {
            None => (key_states, value_states),
            Some((prev_k, prev_v)) => {
                let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
                let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
                (key_states, value_states)
            }
        };
        if self.use_cache {
            self.kv_cache = Some((key_states.clone(), value_states.clone()));
        }
        // Replicate the KV heads to match the number of query heads (GQA).
        let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
        let value_states =
            crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
        let attn_output = {
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
            let attn_weights = match attention_mask {
                None => attn_weights,
                Some(mask) => attn_weights.broadcast_add(mask)?,
            };
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&value_states)?
        };
        // (b, heads, seq, head_dim) -> (b, seq, hidden), then output projection.
        attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.hidden_size))?
            .apply(&self.o_proj)
    }
}
/// One decoder layer: pre-norm attention and pre-norm MLP, each wrapped in a
/// residual connection.
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Attention,
    mlp: MLP,
    input_layernorm: LayerNorm,
    post_attention_layernorm: LayerNorm,
    span: tracing::Span,
}
impl DecoderLayer {
    /// Load the attention, MLP and both layer norms from `vb`.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            self_attn: Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?,
            mlp: MLP::new(cfg, vb.pp("mlp"))?,
            input_layernorm: layer_norm(
                cfg.hidden_size,
                cfg.layer_norm_eps,
                vb.pp("input_layernorm"),
            )?,
            post_attention_layernorm: layer_norm(
                cfg.hidden_size,
                cfg.layer_norm_eps,
                vb.pp("post_attention_layernorm"),
            )?,
            span: tracing::span!(tracing::Level::TRACE, "layer"),
        })
    }
    /// Pre-norm residual forward pass.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let normed = self.input_layernorm.forward(xs)?;
        let attn = self.self_attn.forward(&normed, attention_mask, seqlen_offset)?;
        let hidden = (attn + xs)?;
        let ffn = self.mlp.forward(&self.post_attention_layernorm.forward(&hidden)?)?;
        hidden + ffn
    }
}
/// Quantized StableLM: embeddings, decoder stack, final layer norm and LM head.
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: Embedding,
    layers: Vec<DecoderLayer>,
    norm: LayerNorm,
    lm_head: Linear,
    device: Device,
    span: tracing::Span,
}
impl Model {
    /// Load all weights from `vb` (expects the `model.*` / `lm_head.*` layout).
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = vb.pp("model");
        let embed_tokens =
            Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
        // The rotary tables are computed once and shared by all layers.
        let rotary_emb = Arc::new(RotaryEmbedding::new(DType::F32, cfg, vb_m.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb_m.pp("layers");
        for layer_idx in 0..cfg.num_hidden_layers {
            let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
            layers.push(layer)
        }
        let norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_m.pp("norm"))?;
        let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
        Ok(Self {
            embed_tokens,
            layers,
            norm,
            lm_head,
            device: vb.device().clone(),
            span: tracing::span!(tracing::Level::TRACE, "model"),
        })
    }
    /// Build a causal mask of shape (b, 1, tgt_len, tgt_len + seqlen_offset):
    /// 0 where attending is allowed, -inf above the diagonal; the cached prefix
    /// of `seqlen_offset` positions is always visible.
    fn prepare_decoder_attention_mask(
        &self,
        b_size: usize,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        // Sliding window mask?
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        // The mask is already f32, so this to_dtype is a no-op here.
        mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(DType::F32)
    }
    /// Compute logits for the last position of `input_ids` (batch, seq).
    pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b_size, seq_len) = input_ids.dims2()?;
        // Single-token decoding needs no mask: the query attends to everything.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
            Some(mask)
        };
        let mut xs = self.embed_tokens.forward(input_ids)?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
        }
        // Only the final position feeds the LM head.
        xs.narrow(1, seq_len - 1, 1)?
            .apply(&self.norm)?
            .apply(&self.lm_head)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/yi.rs | candle-transformers/src/models/yi.rs | //! Yi model implementation.
//!
//! This candle implementation uses a pre-trained Yi decoder-only large language model for inference.
//! The model was trained by 01.AI and follows a standard transformer architecture similar to LLaMA.
//!
//! Original code:
//! - 💻 [Yi Model](https://huggingface.co/01-ai/Yi-6B)
//! - 💻 [Yi Modeling Code](https://huggingface.co/01-ai/Yi-6B/blob/main/modeling_yi.py)
//! - 📝 [Technical Report](https://arxiv.org/abs/2403.04652) Yi: Open Foundation Models by 01.AI
//!
//! Key characteristics:
//! - Multi-head attention with rotary positional embeddings
//! - RMS normalization
//! - SwiGLU activation in feed-forward layers
//! - Grouped-query attention for efficient inference
//!
use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
/// Yi model hyper-parameters.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
    pub(crate) vocab_size: usize,
    pub(crate) hidden_size: usize,
    pub(crate) intermediate_size: usize,
    pub(crate) num_hidden_layers: usize,
    pub(crate) num_attention_heads: usize,
    // Fewer KV heads than attention heads => grouped-query attention.
    pub(crate) num_key_value_heads: usize,
    pub(crate) hidden_act: Activation,
    pub(crate) max_position_embeddings: usize,
    pub(crate) rms_norm_eps: f64,
    // Rotary-embedding base frequency.
    pub(crate) rope_theta: f64,
}
impl Config {
    /// Hyper-parameters of the Yi-6B checkpoint.
    pub fn config_6b() -> Self {
        Self {
            vocab_size: 64000,
            hidden_size: 4096,
            intermediate_size: 11008,
            num_hidden_layers: 32,
            num_attention_heads: 32,
            num_key_value_heads: 4,
            hidden_act: Activation::Silu,
            max_position_embeddings: 4096,
            rms_norm_eps: 1e-5,
            rope_theta: 5_000_000.,
        }
    }
    /// Hyper-parameters of the Yi-34B checkpoint.
    pub fn config_34b() -> Self {
        Self {
            vocab_size: 64000,
            hidden_size: 7168,
            intermediate_size: 20480,
            num_hidden_layers: 60,
            num_attention_heads: 56,
            num_key_value_heads: 8,
            hidden_act: Activation::Silu,
            max_position_embeddings: 4096,
            rms_norm_eps: 1e-5,
            rope_theta: 5_000_000.,
        }
    }
}
/// Precomputed rotary-embedding tables of shape (max_seq_len, head_dim);
/// the half-frequencies are duplicated along the last dimension in `new`.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
/// Split the last dimension into two halves and map (x1, x2) -> (-x2, x1),
/// the rotation used by non-interleaved rotary embeddings.
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
    let dim = xs.dim(D::Minus1)?;
    let half = dim / 2;
    let first = xs.narrow(D::Minus1, 0, half)?;
    let second = xs.narrow(D::Minus1, half, dim - half)?;
    Tensor::cat(&[&second.neg()?, &first], D::Minus1)
}
impl RotaryEmbedding {
    /// Precompute the sin/cos rotary tables for up to `max_position_embeddings`
    /// positions, using the configured base frequency.
    fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let dim = cfg.hidden_size / cfg.num_attention_heads;
        let max_seq_len = cfg.max_position_embeddings;
        // Use the configured base instead of a hard-coded 10000: the Yi
        // checkpoints are trained with rope_theta = 5_000_000 (see
        // `Config::config_6b` / `Config::config_34b`), so ignoring
        // `cfg.rope_theta` would produce wrong positional rotations.
        let rope_theta = cfg.rope_theta as f32;
        // inv_freq[k] = theta^(-2k / dim), one entry per channel pair.
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32))
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        // Duplicate the half-frequencies so the tables cover the full head_dim,
        // matching the `rotate_half` layout.
        let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }
    /// Rotate `q` and `k` of shape (b, heads, seq, head_dim), starting at
    /// absolute position `seqlen_offset`.
    fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        seqlen_offset: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
        let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
        let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
        let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
        Ok((q_embed, k_embed))
    }
}
/// SwiGLU-style gated feed-forward: down_proj(act(gate_proj(x)) * up_proj(x)).
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
    gate_proj: Linear,
    up_proj: Linear,
    down_proj: Linear,
    act_fn: Activation,
}
impl MLP {
    /// Load the three (bias-free) projections from `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let (h_sz, i_sz) = (cfg.hidden_size, cfg.intermediate_size);
        Ok(Self {
            gate_proj: linear_no_bias(h_sz, i_sz, vb.pp("gate_proj"))?,
            up_proj: linear_no_bias(h_sz, i_sz, vb.pp("up_proj"))?,
            down_proj: linear_no_bias(i_sz, h_sz, vb.pp("down_proj"))?,
            act_fn: cfg.hidden_act,
        })
    }
}
impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let gated = self.act_fn.forward(&self.gate_proj.forward(xs)?)?;
        let up = self.up_proj.forward(xs)?;
        self.down_proj.forward(&(gated * up)?)
    }
}
/// Multi-head self-attention with rotary embeddings, grouped-query KV heads
/// and an incremental KV cache.
#[derive(Debug, Clone)]
struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_heads: usize,
    num_kv_heads: usize,
    // num_heads / num_kv_heads: replication factor for grouped-query attention.
    num_kv_groups: usize,
    head_dim: usize,
    hidden_size: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    // Cached (keys, values) from previous forward calls.
    kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
    /// Load the bias-free q/k/v/o projections from `vb`.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        let num_kv_groups = num_heads / num_kv_heads;
        let head_dim = hidden_sz / num_heads;
        let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
        let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
        let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
        let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_heads,
            num_kv_heads,
            num_kv_groups,
            head_dim,
            hidden_size: hidden_sz,
            rotary_emb,
            kv_cache: None,
        })
    }
    /// Scaled dot-product attention over `xs` of shape (batch, seq, hidden);
    /// `seqlen_offset` is the number of already-cached positions.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let (b_sz, q_len, _) = xs.dims3()?;
        let query_states = self.q_proj.forward(xs)?;
        let key_states = self.k_proj.forward(xs)?;
        let value_states = self.v_proj.forward(xs)?;
        // (b, seq, heads * head_dim) -> (b, heads, seq, head_dim)
        let query_states = query_states
            .reshape((b_sz, q_len, self.num_heads, self.head_dim))?
            .transpose(1, 2)?;
        let key_states = key_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let value_states = value_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let (query_states, key_states) =
            self.rotary_emb
                .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
        // Append the new keys/values to the cache along the sequence dimension.
        let (key_states, value_states) = match &self.kv_cache {
            None => (key_states, value_states),
            Some((prev_k, prev_v)) => {
                let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
                let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
                (key_states, value_states)
            }
        };
        self.kv_cache = Some((key_states.clone(), value_states.clone()));
        // Replicate the KV heads to match the number of query heads (GQA).
        let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?;
        let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?;
        let attn_output = {
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
            let attn_weights = match attention_mask {
                None => attn_weights,
                Some(mask) => attn_weights.broadcast_add(mask)?,
            };
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&value_states)?
        };
        // (b, heads, seq, head_dim) -> (b, seq, hidden), then output projection.
        attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.hidden_size))?
            .apply(&self.o_proj)
    }
}
/// One Yi decoder layer: RMS-norm -> attention -> residual, then
/// RMS-norm -> MLP -> residual.
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Attention,
    mlp: MLP,
    ln1: RmsNorm,
    ln2: RmsNorm,
}
impl DecoderLayer {
    /// Load the attention, MLP and both RMS norms from `vb`.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            self_attn: Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?,
            mlp: MLP::new(cfg, vb.pp("mlp"))?,
            ln1: RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?,
            ln2: RmsNorm::new(
                cfg.hidden_size,
                cfg.rms_norm_eps,
                vb.pp("post_attention_layernorm"),
            )?,
        })
    }
    /// Pre-norm residual forward pass.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let normed = self.ln1.forward(xs)?;
        let attn = self.self_attn.forward(&normed, attention_mask, seqlen_offset)?;
        let hidden = (attn + xs)?;
        let ffn = self.mlp.forward(&self.ln2.forward(&hidden)?)?;
        hidden + ffn
    }
}
/// Yi model: embeddings, decoder stack, final RMS norm and LM head.
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: candle_nn::Embedding,
    layers: Vec<DecoderLayer>,
    norm: RmsNorm,
    lm_head: Linear,
    device: Device,
    // The dtype of the loaded weights; the attention mask is cast to it.
    dtype: DType,
}
impl Model {
    /// Load all weights from `vb` (expects the `model.*` / `lm_head.*` layout).
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = vb.pp("model");
        let embed_tokens =
            candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
        // The rotary tables are computed once and shared by all layers.
        let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb_m.pp("layers");
        for layer_idx in 0..cfg.num_hidden_layers {
            let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
            layers.push(layer)
        }
        let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
        let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
        Ok(Self {
            embed_tokens,
            layers,
            norm,
            lm_head,
            device: vb.device().clone(),
            dtype: vb.dtype(),
        })
    }
    /// Build a causal mask of shape (b, 1, tgt_len, tgt_len + seqlen_offset):
    /// 0 where attending is allowed, -inf above the diagonal; the cached prefix
    /// of `seqlen_offset` positions is always visible.
    fn prepare_decoder_attention_mask(
        &self,
        b_size: usize,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        // Sliding window mask?
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        // Cast to the model dtype so it can be added to the attention scores.
        mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(self.dtype)
    }
    /// Compute logits for the last position of `input_ids` (batch, seq).
    pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (b_size, seq_len) = input_ids.dims2()?;
        // Single-token decoding needs no mask: the query attends to everything.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
            Some(mask)
        };
        let mut xs = self.embed_tokens.forward(input_ids)?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
        }
        // Only the final position feeds the LM head.
        xs.narrow(1, seq_len - 1, 1)?
            .apply(&self.norm)?
            .apply(&self.lm_head)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_mistral.rs | candle-transformers/src/models/quantized_mistral.rs | //! Mistral model implementation with quantization support.
//!
//! Mistral is a large language model optimized for efficiency.
//! This implementation provides quantization for reduced memory and compute.
//!
//! Key characteristics:
//! - Sliding window attention mechanism
//! - Grouped query attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for 8-bit quantization
//!
//! References:
//! - [Mistral Paper](https://arxiv.org/abs/2310.06825)
//! - [Model Card](https://huggingface.co/mistralai/Mistral-7B-v0.1)
//!
use crate::quantized_nn::{linear_no_bias, Embedding, Linear, RmsNorm};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::Activation;
use std::sync::Arc;
pub use crate::models::mistral::Config;
/// Precomputed rotary-embedding tables in the half-dimension layout used by
/// `candle_nn::rotary_emb::rope`: `sin`/`cos` have shape
/// (max_seq_len, head_dim / 2), not duplicated to the full head_dim.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
impl RotaryEmbedding {
    /// Build the sin/cos tables from the configured `rope_theta` base.
    fn new(cfg: &Config, dev: &Device) -> Result<Self> {
        let rope_theta = cfg.rope_theta as f32;
        let dim = cfg.hidden_size / cfg.num_attention_heads;
        let max_seq_len = cfg.max_position_embeddings;
        // inv_freq[k] = theta^(-2k / dim), one entry per channel pair.
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32))
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(DType::F32)?
            .reshape((max_seq_len, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }
    /// Rotate `q` and `k` of shape (b, heads, seq, head_dim), starting at
    /// absolute position `seqlen_offset`.
    fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        seqlen_offset: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
        let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
        let q_embed = candle_nn::rotary_emb::rope(q, &cos, &sin)?;
        let k_embed = candle_nn::rotary_emb::rope(k, &cos, &sin)?;
        Ok((q_embed, k_embed))
    }
}
/// Gated feed-forward network: down_proj(act(gate_proj(x)) * up_proj(x)).
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
    gate_proj: Linear,
    up_proj: Linear,
    down_proj: Linear,
    act_fn: Activation,
}
impl MLP {
    /// Load the three (bias-free) quantized projections from `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let (hidden, inter) = (cfg.hidden_size, cfg.intermediate_size);
        Ok(Self {
            gate_proj: linear_no_bias(hidden, inter, vb.pp("gate_proj"))?,
            up_proj: linear_no_bias(hidden, inter, vb.pp("up_proj"))?,
            down_proj: linear_no_bias(inter, hidden, vb.pp("down_proj"))?,
            act_fn: cfg.hidden_act,
        })
    }
}
impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let gate_out = self.act_fn.forward(&self.gate_proj.forward(xs)?)?;
        let up_out = self.up_proj.forward(xs)?;
        self.down_proj.forward(&(gate_out * up_out)?)
    }
}
/// Multi-head self-attention with rotary embeddings, grouped-query KV heads
/// and a clearable incremental KV cache.
#[derive(Debug, Clone)]
struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_heads: usize,
    num_kv_heads: usize,
    // num_heads / num_kv_heads: replication factor for grouped-query attention.
    num_kv_groups: usize,
    head_dim: usize,
    hidden_size: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    // Cached (keys, values) from previous forward calls.
    kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
    /// Load the bias-free q/k/v/o projections from `vb`.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        let num_kv_groups = num_heads / num_kv_heads;
        let head_dim = hidden_sz / num_heads;
        let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
        let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
        let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
        let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_heads,
            num_kv_heads,
            num_kv_groups,
            head_dim,
            hidden_size: hidden_sz,
            rotary_emb,
            kv_cache: None,
        })
    }
    /// Scaled dot-product attention over `xs` of shape (batch, seq, hidden);
    /// `seqlen_offset` is the number of already-cached positions.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let (b_sz, q_len, _) = xs.dims3()?;
        let query_states = self.q_proj.forward(xs)?;
        let key_states = self.k_proj.forward(xs)?;
        let value_states = self.v_proj.forward(xs)?;
        // (b, seq, heads * head_dim) -> (b, heads, seq, head_dim); q/k are made
        // contiguous for the rotary kernel below.
        let query_states = query_states
            .reshape((b_sz, q_len, self.num_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let key_states = key_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let value_states = value_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let (query_states, key_states) =
            self.rotary_emb
                .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
        // Append the new keys/values to the cache along the sequence dimension.
        let (key_states, value_states) = match &self.kv_cache {
            None => (key_states, value_states),
            Some((prev_k, prev_v)) => {
                let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
                let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
                (key_states, value_states)
            }
        };
        self.kv_cache = Some((key_states.clone(), value_states.clone()));
        // Replicate the KV heads to match the number of query heads (GQA).
        let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?;
        let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?;
        let attn_output = {
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
            let attn_weights = match attention_mask {
                None => attn_weights,
                Some(mask) => attn_weights.broadcast_add(mask)?,
            };
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&value_states)?
        };
        // (b, heads, seq, head_dim) -> (b, seq, hidden), then output projection.
        attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.hidden_size))?
            .apply(&self.o_proj)
    }
    /// Drop the KV cache so the next forward starts a fresh sequence.
    fn clear_kv_cache(&mut self) {
        self.kv_cache = None
    }
}
/// One transformer decoder block: pre-norm self-attention followed by a
/// pre-norm MLP, each wrapped in a residual connection.
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Attention,
    mlp: MLP,
    input_layernorm: RmsNorm,
    post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
    /// Loads the block's submodules from `vb` under "self_attn", "mlp",
    /// "input_layernorm" and "post_attention_layernorm".
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
        let mlp = MLP::new(cfg, vb.pp("mlp"))?;
        let input_layernorm =
            RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
        let post_attention_layernorm = RmsNorm::new(
            cfg.hidden_size,
            cfg.rms_norm_eps,
            vb.pp("post_attention_layernorm"),
        )?;
        Ok(Self {
            self_attn,
            mlp,
            input_layernorm,
            post_attention_layernorm,
        })
    }
    /// Applies the block to `xs`; the mask and cache offset are forwarded to
    /// the attention layer unchanged.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        // x + Attn(Norm(x))
        let residual = xs;
        let xs = self.input_layernorm.forward(xs)?;
        let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
        let xs = (xs + residual)?;
        // x + MLP(Norm(x))
        let residual = &xs;
        let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
        residual + xs
    }
    /// Clears the attention kv cache.
    fn clear_kv_cache(&mut self) {
        self.self_attn.clear_kv_cache()
    }
}
/// Decoder-only language model: token embedding, a stack of decoder layers,
/// a final RMSNorm and an lm head producing vocabulary logits.
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: Embedding,
    layers: Vec<DecoderLayer>,
    norm: RmsNorm,
    lm_head: Linear,
    // When set, attention is restricted to this many past positions.
    sliding_window: Option<usize>,
    device: Device,
}
impl Model {
    /// Loads the full model from `vb`. Transformer weights live under the
    /// "model" prefix; "lm_head" sits at the root. The rotary embedding is
    /// built once and shared by all layers.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = vb.pp("model");
        let embed_tokens =
            Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
        let rotary_emb = Arc::new(RotaryEmbedding::new(cfg, vb_m.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb_m.pp("layers");
        for layer_idx in 0..cfg.num_hidden_layers {
            let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
            layers.push(layer)
        }
        let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
        let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
        Ok(Self {
            embed_tokens,
            layers,
            norm,
            lm_head,
            sliding_window: cfg.sliding_window,
            device: vb.device().clone(),
        })
    }
    /// Builds a `(1, 1, tgt_len, tgt_len + seqlen_offset)` additive f32 mask:
    /// causal, and further limited to `sliding_window` past positions when
    /// configured. The `seqlen_offset` cached-prefix columns are left at zero
    /// (always visible).
    fn prepare_decoder_attention_mask(
        &self,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        // Without a sliding window, pick a window wider than tgt_len so only
        // the causal constraint applies.
        let sliding_window = self.sliding_window.unwrap_or(tgt_len + 1);
        // i is the query row, j the key column: block the future (i < j) and
        // keys more than `sliding_window` steps in the past.
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| {
                (0..tgt_len).map(move |j| {
                    if i < j || j + sliding_window < i {
                        f32::NEG_INFINITY
                    } else {
                        0.
                    }
                })
            })
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        mask.expand((1, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(DType::F32)
    }
    /// Computes logits for the last position only, returning `(b, 1, vocab_size)`.
    /// `seqlen_offset` must equal the number of tokens already in the kv cache.
    pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (_b_size, seq_len) = input_ids.dims2()?;
        // A single query token may attend to the whole cache, so no mask is needed.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(seq_len, seqlen_offset)?;
            Some(mask)
        };
        let mut xs = self.embed_tokens.forward(input_ids)?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
        }
        // Keep only the final position before the (potentially large) lm head.
        xs.narrow(1, seq_len - 1, 1)?
            .contiguous()?
            .apply(&self.norm)?
            .apply(&self.lm_head)
    }
    /// Resets the kv cache of every layer.
    pub fn clear_kv_cache(&mut self) {
        for layer in self.layers.iter_mut() {
            layer.clear_kv_cache()
        }
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/helium.rs | candle-transformers/src/models/helium.rs | //! Helium inference implementation.
//!
//! See the model card on Hugging Face's [hub](https://huggingface.co/kmhf/helium-2b).
use super::with_tracing::{linear_b as linear, Linear, RmsNorm};
use candle::{DType, Device, Result, Tensor, D};
use candle_nn::{Module, VarBuilder};
use std::sync::Arc;
/// Serde default for `Config::use_flash_attn`: flash attention is opt-in.
fn default_use_flash_attn() -> bool {
    bool::default()
}
/// Helium model hyper-parameters, deserializable from a HF-style `config.json`.
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    // Whether the attention projections carry bias terms.
    pub attention_bias: bool,
    pub bos_token_id: u32,
    pub eos_token_id: u32,
    // Per-head dimension; stored explicitly rather than derived from
    // hidden_size / num_attention_heads.
    pub head_dim: usize,
    pub hidden_act: candle_nn::Activation,
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub max_position_embeddings: usize,
    // Whether the MLP projections carry bias terms.
    pub mlp_bias: bool,
    pub num_attention_heads: usize,
    pub num_hidden_layers: usize,
    pub num_key_value_heads: usize,
    pub rms_norm_eps: f64,
    pub rope_theta: f64,
    // When true the lm head reuses the token-embedding weights.
    pub tie_word_embeddings: bool,
    pub vocab_size: usize,
    // Absent from upstream config files, hence the serde default (false).
    #[serde(default = "default_use_flash_attn")]
    pub use_flash_attn: bool,
}
impl Config {
    /// Hard-coded configuration for the 2B-parameter Helium checkpoint.
    pub fn config_2b(use_flash_attn: bool) -> Self {
        Self {
            attention_bias: false,
            bos_token_id: 1,
            eos_token_id: 2,
            head_dim: 128,
            hidden_act: candle_nn::Activation::Silu,
            hidden_size: 2560,
            intermediate_size: 7040,
            max_position_embeddings: 4096,
            mlp_bias: false,
            num_attention_heads: 20,
            num_hidden_layers: 24,
            num_key_value_heads: 20,
            rms_norm_eps: 1e-08,
            rope_theta: 100000.0,
            tie_word_embeddings: false,
            vocab_size: 48000,
            use_flash_attn,
        }
    }
}
/// Precomputed rotary position-embedding tables, shared across layers.
/// `sin`/`cos` have shape `(max_position_embeddings, head_dim / 2)` and are
/// consumed by the interleaved `rope_i` kernel.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
impl RotaryEmbedding {
    /// Precomputes the sin/cos tables in f32 for numerical accuracy, then casts
    /// the results to `dtype`.
    fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let rope_theta = cfg.rope_theta as f32;
        let dim = cfg.head_dim;
        let max_seq_len = cfg.max_position_embeddings;
        // inv_freq[i] = theta^(-2i/dim) for i in 0..dim/2.
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32))
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(DType::F32)?;
        // Outer product position x inv_freq -> (max_seq_len, dim / 2).
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(DType::F32)?
            .reshape((max_seq_len, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        Ok(Self {
            sin: freqs.sin()?.to_dtype(dtype)?,
            cos: freqs.cos()?.to_dtype(dtype)?,
        })
    }
    /// Applies interleaved rotary embeddings to `q` and `k`, both of shape
    /// `(b, heads, seq_len, head_dim)`, using the table rows starting at
    /// `seqlen_offset`.
    fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        seqlen_offset: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
        let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
        let q_embed = candle_nn::rotary_emb::rope_i(q, &cos, &sin)?;
        let k_embed = candle_nn::rotary_emb::rope_i(k, &cos, &sin)?;
        Ok((q_embed, k_embed))
    }
}
/// Gated feed-forward block: `down(act(gate(x)) * up(x))`.
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
    gate_proj: Linear,
    up_proj: Linear,
    down_proj: Linear,
    act_fn: candle_nn::Activation,
}
impl MLP {
    /// Loads the three projections from `vb`; bias usage follows `cfg.mlp_bias`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let intermediate_sz = cfg.intermediate_size;
        let bias = cfg.mlp_bias;
        let gate_proj = linear(hidden_sz, intermediate_sz, bias, vb.pp("gate_proj"))?;
        let up_proj = linear(hidden_sz, intermediate_sz, bias, vb.pp("up_proj"))?;
        let down_proj = linear(intermediate_sz, hidden_sz, bias, vb.pp("down_proj"))?;
        Ok(Self {
            gate_proj,
            up_proj,
            down_proj,
            act_fn: cfg.hidden_act,
        })
    }
}
impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // Gated activation (e.g. SwiGLU when act_fn is Silu).
        let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
        let rhs = xs.apply(&self.up_proj)?;
        (lhs * rhs)?.apply(&self.down_proj)
    }
}
/// Thin wrapper around the CUDA flash-attention kernel; inputs are expected in
/// `(b, seq_len, heads, head_dim)` layout.
#[cfg(feature = "flash-attn")]
fn flash_attn(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    softmax_scale: f32,
    causal: bool,
) -> Result<Tensor> {
    candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
/// Stub used when the crate is built without the "flash-attn" feature; calling
/// it is a hard error so misconfiguration is caught immediately.
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
    unimplemented!("compile with '--features flash-attn'")
}
/// Multi-head self-attention with rotary embeddings, grouped-query kv heads,
/// an incremental kv cache and an optional flash-attention fast path.
#[derive(Debug, Clone)]
struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_heads: usize,
    num_kv_heads: usize,
    num_kv_groups: usize,
    head_dim: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    kv_cache: Option<(Tensor, Tensor)>,
    use_flash_attn: bool,
}
impl Attention {
    /// Loads the q/k/v/o projections from `vb`; bias usage follows
    /// `cfg.attention_bias` and `head_dim` comes straight from the config.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        let num_kv_groups = num_heads / num_kv_heads;
        let head_dim = cfg.head_dim;
        let bias = cfg.attention_bias;
        let q_proj = linear(hidden_sz, num_heads * head_dim, bias, vb.pp("q_proj"))?;
        let k_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("k_proj"))?;
        let v_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("v_proj"))?;
        let o_proj = linear(num_heads * head_dim, hidden_sz, bias, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_heads,
            num_kv_heads,
            num_kv_groups,
            head_dim,
            rotary_emb,
            kv_cache: None,
            use_flash_attn: cfg.use_flash_attn,
        })
    }
    /// Self-attention over `xs` of shape `(b_sz, q_len, hidden_size)`.
    /// `attention_mask` (additive, `-inf` at blocked positions) is only used on
    /// the non-flash path; flash-attn handles causality via its `causal` flag.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let (b_sz, q_len, _) = xs.dims3()?;
        let query_states = self.q_proj.forward(xs)?;
        let key_states = self.k_proj.forward(xs)?;
        let value_states = self.v_proj.forward(xs)?;
        // (b, q_len, heads * head_dim) -> (b, heads, q_len, head_dim).
        let query_states = query_states
            .reshape((b_sz, q_len, self.num_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let key_states = key_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let value_states = value_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let (query_states, key_states) =
            self.rotary_emb
                .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
        // Append new k/v states to the cache along the sequence dimension (2).
        let (key_states, value_states) = match &self.kv_cache {
            None => (key_states, value_states),
            Some((prev_k, prev_v)) => {
                let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
                let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
                (key_states, value_states)
            }
        };
        self.kv_cache = Some((key_states.clone(), value_states.clone()));
        // Expand kv heads to match the query heads (GQA).
        let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?;
        let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?;
        let attn_output = if self.use_flash_attn {
            // flash-attn expects (b_sz, seq_len, nheads, head_dim)
            let q = query_states.transpose(1, 2)?;
            let k = key_states.transpose(1, 2)?;
            let v = value_states.transpose(1, 2)?;
            let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
            flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)?
        } else {
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
            let attn_weights = match attention_mask {
                None => attn_weights,
                Some(mask) => attn_weights.broadcast_add(mask)?,
            };
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&value_states)?
        };
        // (b, heads, q_len, head_dim) -> (b, q_len, heads * head_dim) -> o_proj.
        attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.num_heads * self.head_dim))?
            .apply(&self.o_proj)
    }
    /// Drops the cached k/v states; call between unrelated generations.
    fn clear_kv_cache(&mut self) {
        self.kv_cache = None
    }
}
/// One Helium decoder block: pre-norm self-attention then a pre-norm MLP,
/// each wrapped in a residual connection.
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Attention,
    mlp: MLP,
    input_layernorm: RmsNorm,
    post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
    /// Loads the block's submodules from `vb` under "self_attn", "mlp",
    /// "input_layernorm" and "post_attention_layernorm".
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
        let mlp = MLP::new(cfg, vb.pp("mlp"))?;
        let input_layernorm =
            RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
        let post_attention_layernorm = RmsNorm::new(
            cfg.hidden_size,
            cfg.rms_norm_eps,
            vb.pp("post_attention_layernorm"),
        )?;
        Ok(Self {
            self_attn,
            mlp,
            input_layernorm,
            post_attention_layernorm,
        })
    }
    /// Applies the block to `xs`; the mask and cache offset are forwarded to
    /// the attention layer unchanged.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        // x + Attn(Norm(x))
        let residual = xs;
        let xs = self.input_layernorm.forward(xs)?;
        let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
        let xs = (xs + residual)?;
        // x + MLP(Norm(x))
        let residual = &xs;
        let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
        residual + xs
    }
    /// Clears the attention kv cache.
    fn clear_kv_cache(&mut self) {
        self.self_attn.clear_kv_cache()
    }
}
/// Helium decoder-only language model: token embedding, decoder stack, final
/// RMSNorm and an lm head (optionally weight-tied to the embedding).
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: candle_nn::Embedding,
    layers: Vec<DecoderLayer>,
    norm: RmsNorm,
    lm_head: Linear,
    device: Device,
    dtype: DType,
}
impl Model {
    /// Loads the full model from `vb`. Transformer weights live under the
    /// "model" prefix; "lm_head" sits at the root unless embeddings are tied.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = vb.pp("model");
        let embed_tokens =
            candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
        // One rotary-embedding table shared by every layer.
        let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb_m.pp("layers");
        for layer_idx in 0..cfg.num_hidden_layers {
            let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
            layers.push(layer)
        }
        let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
        // Weight tying: reuse the embedding matrix as the output projection.
        let lm_head = if cfg.tie_word_embeddings {
            Linear::from_weights(embed_tokens.embeddings().clone(), None)
        } else {
            linear(cfg.hidden_size, cfg.vocab_size, false, vb.pp("lm_head"))?
        };
        Ok(Self {
            embed_tokens,
            layers,
            norm,
            lm_head,
            device: vb.device().clone(),
            dtype: vb.dtype(),
        })
    }
    /// Builds a `(1, 1, tgt_len, tgt_len + seqlen_offset)` additive causal mask
    /// in the model dtype; the `seqlen_offset` cached-prefix columns are zero
    /// (always visible).
    fn prepare_decoder_attention_mask(
        &self,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        mask.expand((1, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(self.dtype)
    }
    /// Read-only access to the token embedding table.
    pub fn embed_tokens(&self) -> &candle_nn::Embedding {
        &self.embed_tokens
    }
    /// Computes logits for the last position only, returning `(b, 1, vocab_size)`.
    /// `seqlen_offset` must equal the number of tokens already in the kv cache.
    pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (_b_size, seq_len) = input_ids.dims2()?;
        // A single query token may attend to the whole cache, so no mask is needed.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(seq_len, seqlen_offset)?;
            Some(mask)
        };
        let mut xs = self.embed_tokens.forward(input_ids)?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
        }
        // Keep only the final position before the (potentially large) lm head.
        xs.narrow(1, seq_len - 1, 1)?
            .apply(&self.norm)?
            .apply(&self.lm_head)
    }
    /// Resets the kv cache of every layer.
    pub fn clear_kv_cache(&mut self) {
        for layer in self.layers.iter_mut() {
            layer.clear_kv_cache()
        }
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/blip.rs | candle-transformers/src/models/blip.rs | //! Based on the BLIP paper from Salesforce Research.
//!
//! The blip-image-captioning model can generate captions for an input image.
//!
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-BLIP-Image-Captioning)
//! - 💻 [GH Link](https://github.com/salesforce/BLIP)
//! - 🤗 [HF Link](https://huggingface.co/Salesforce/blip-image-captioning-base)
//! - 📝 [Paper](https://arxiv.org/abs/2201.12086)
//!
use super::blip_text;
use super::with_tracing::{conv2d, linear, Conv2d, Linear};
use candle::{Module, Result, Tensor, D};
use candle_nn::{layer_norm, Conv2dConfig, LayerNorm, VarBuilder};
use serde::Deserialize;
/// Hyper-parameters of the BLIP vision transformer (ViT) encoder.
#[derive(Debug, Clone, Deserialize)]
pub struct VisionConfig {
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub projection_dim: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    // Input image side length in pixels; images are square.
    pub image_size: usize,
    // Side length of each square patch; image_size must be divisible by it.
    pub patch_size: usize,
    pub hidden_act: candle_nn::Activation,
    pub layer_norm_eps: f64,
}
/// Combined BLIP configuration: vision encoder plus text decoder.
#[derive(Debug, Clone, Deserialize)]
pub struct Config {
    pub text_config: blip_text::Config,
    pub vision_config: VisionConfig,
    pub projection_dim: usize,
    pub image_text_hidden_size: usize,
}
impl Config {
    /// Hard-coded configuration matching the "blip-image-captioning-large"
    /// checkpoint: a 24-layer ViT-large vision tower and a 12-layer text decoder.
    pub fn image_captioning_large() -> Self {
        let text_config = blip_text::Config {
            vocab_size: 30524,
            hidden_size: 768,
            // Cross-attention reads the 1024-dim vision hidden states.
            encoder_hidden_size: 1024,
            intermediate_size: 3072,
            projection_dim: 768,
            num_hidden_layers: 12,
            num_attention_heads: 12,
            max_position_embeddings: 512,
            hidden_act: candle_nn::Activation::Gelu,
            layer_norm_eps: 1e-12,
            is_decoder: true,
        };
        let vision_config = VisionConfig {
            hidden_size: 1024,
            intermediate_size: 4096,
            projection_dim: 512,
            num_hidden_layers: 24,
            num_attention_heads: 16,
            image_size: 384,
            patch_size: 16,
            hidden_act: candle_nn::Activation::Gelu,
            layer_norm_eps: 1e-5,
        };
        Self {
            text_config,
            vision_config,
            projection_dim: 512,
            image_text_hidden_size: 256,
        }
    }
}
/// ViT input embedding: conv patch projection, a learned [CLS] token and
/// learned absolute position embeddings.
#[derive(Debug, Clone)]
struct VisionEmbeddings {
    class_embedding: Tensor,
    patch_embedding: Conv2d,
    position_embedding: Tensor,
}
impl VisionEmbeddings {
    /// Loads the embedding weights from `vb`. The patch embedding is a conv
    /// with kernel = stride = patch_size, i.e. a non-overlapping patch split.
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let class_embedding = vb.get((1, 1, cfg.hidden_size), "class_embedding")?;
        let conv_cfg = Conv2dConfig {
            stride: cfg.patch_size,
            ..Default::default()
        };
        let patch_embedding = conv2d(
            3,
            cfg.hidden_size,
            cfg.patch_size,
            conv_cfg,
            vb.pp("patch_embedding"),
        )?;
        // One position per patch plus one for the [CLS] token.
        let num_patches1 = cfg.image_size / cfg.patch_size;
        let num_patches = num_patches1 * num_patches1;
        let num_positions = num_patches + 1;
        let position_embedding =
            vb.get((1, num_positions, cfg.hidden_size), "position_embedding")?;
        Ok(Self {
            class_embedding,
            patch_embedding,
            position_embedding,
        })
    }
}
impl Module for VisionEmbeddings {
    /// Maps pixel values `(b, 3, h, w)` to patch embeddings with a prepended
    /// [CLS] token and added position embeddings.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let target_dtype = xs.dtype();
        let b_size = xs.dim(0)?;
        // (b, hidden, h', w') -> (b, hidden, h'*w') -> (b, num_patches, hidden).
        let patch_embeds = xs.apply(&self.patch_embedding)?.flatten_from(2)?.t()?;
        let d = self.class_embedding.dim(D::Minus1)?;
        let class_embeds = self
            .class_embedding
            .broadcast_as((b_size, 1, d))?
            .to_dtype(target_dtype)?;
        let embeddings = Tensor::cat(&[&class_embeds, &patch_embeds], 1)?;
        // Trim the position table in case fewer positions are used.
        let position_embedding = self.position_embedding.narrow(1, 0, embeddings.dim(1)?)?;
        embeddings.broadcast_add(&position_embedding)
    }
}
/// ViT self-attention with a single fused qkv projection.
#[derive(Debug, Clone)]
struct Attention {
    qkv: Linear,
    projection: Linear,
    scale: f64,
    num_heads: usize,
}
impl Attention {
    /// Loads the fused qkv and output projections from `vb`; `scale` is the
    /// usual 1/sqrt(head_dim) factor.
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let embed_dim = cfg.hidden_size;
        let num_heads = cfg.num_attention_heads;
        let head_dim = embed_dim / num_heads;
        let scale = 1f64 / (head_dim as f64).sqrt();
        let qkv = linear(embed_dim, 3 * embed_dim, vb.pp("qkv"))?;
        let projection = linear(embed_dim, embed_dim, vb.pp("projection"))?;
        Ok(Self {
            qkv,
            projection,
            scale,
            num_heads,
        })
    }
    /// Attention over `xs` of shape `(b, seq, embed_dim)`. Note that the mask,
    /// when present, is applied *multiplicatively after* the softmax, mirroring
    /// the upstream BLIP implementation (not an additive pre-softmax mask).
    fn forward(&self, xs: &Tensor, attn_mask: Option<&Tensor>) -> Result<Tensor> {
        let (b_sz, tgt_len, embed_dim) = xs.dims3()?;
        // (b, seq, 3*embed) -> (3, b, heads, seq, head_dim).
        let mixed_qkv = xs
            .apply(&self.qkv)?
            .reshape((b_sz, tgt_len, 3, self.num_heads, embed_dim / self.num_heads))?
            .permute((2, 0, 3, 1, 4))?;
        let query = mixed_qkv.get(0)?;
        let key = mixed_qkv.get(1)?;
        let value = mixed_qkv.get(2)?;
        let attention_scores = query.matmul(&key.t()?)?;
        let attention_scores = (attention_scores * self.scale)?;
        let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;
        let attention_probs = match attn_mask {
            None => attention_probs,
            Some(attn_mask) => (attention_probs * attn_mask)?,
        };
        // (b, heads, seq, head_dim) -> (b, seq, embed_dim) -> output projection.
        attention_probs
            .matmul(&value)?
            .permute((0, 2, 1, 3))?
            .flatten_from(D::Minus2)?
            .apply(&self.projection)
    }
}
/// Standard two-layer transformer feed-forward block: fc1 -> activation -> fc2.
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
    activation_fn: candle_nn::Activation,
    fc1: Linear,
    fc2: Linear,
}
impl MLP {
    /// Loads the two linear layers from `vb` ("fc1", "fc2").
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?;
        let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?;
        Ok(Self {
            activation_fn: cfg.hidden_act,
            fc1,
            fc2,
        })
    }
}
impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.fc1)?
            .apply(&self.activation_fn)?
            .apply(&self.fc2)
    }
}
/// One ViT encoder block: pre-norm self-attention then a pre-norm MLP, each
/// wrapped in a residual connection.
#[derive(Debug, Clone)]
struct EncoderLayer {
    self_attn: Attention,
    layer_norm1: LayerNorm,
    mlp: MLP,
    layer_norm2: LayerNorm,
}
impl EncoderLayer {
    /// Loads the block's submodules from `vb`.
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let embed_dim = cfg.hidden_size;
        let self_attn = Attention::new(cfg, vb.pp("self_attn"))?;
        let layer_norm1 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm1"))?;
        let layer_norm2 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm2"))?;
        let mlp = MLP::new(cfg, vb.pp("mlp"))?;
        Ok(Self {
            self_attn,
            layer_norm1,
            mlp,
            layer_norm2,
        })
    }
    /// Applies the block; `attention_mask` is forwarded to self-attention.
    fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
        // x + Attn(Norm(x))
        let residual = xs;
        let xs = xs.apply(&self.layer_norm1)?;
        let xs = self.self_attn.forward(&xs, attention_mask)?;
        let xs = (xs + residual)?;
        // x + MLP(Norm(x))
        let residual = &xs;
        let xs = xs.apply(&self.layer_norm2)?.apply(&self.mlp)?;
        xs + residual
    }
}
/// Stack of ViT encoder layers applied in sequence.
#[derive(Debug, Clone)]
struct Encoder {
    layers: Vec<EncoderLayer>,
}
impl Encoder {
    /// Loads `cfg.num_hidden_layers` encoder layers from `vb.pp("layers")`.
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb = vb.pp("layers");
        for i in 0..cfg.num_hidden_layers {
            let layer = EncoderLayer::new(cfg, vb.pp(i))?;
            layers.push(layer)
        }
        Ok(Self { layers })
    }
    /// Runs every layer over `xs`, threading the optional mask through.
    fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
        let mut xs = xs.clone();
        for layer in self.layers.iter() {
            xs = layer.forward(&xs, attention_mask)?
        }
        Ok(xs)
    }
}
/// BLIP vision tower: patch embeddings, ViT encoder, and a final layer norm.
#[derive(Debug, Clone)]
pub struct VisionModel {
    embeddings: VisionEmbeddings,
    encoder: Encoder,
    post_layernorm: LayerNorm,
}
impl VisionModel {
    /// Loads the vision tower from `vb` ("embeddings", "encoder",
    /// "post_layernorm").
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let embeddings = VisionEmbeddings::new(cfg, vb.pp("embeddings"))?;
        let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
        let post_layernorm =
            layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_layernorm"))?;
        Ok(Self {
            embeddings,
            encoder,
            post_layernorm,
        })
    }
}
impl Module for VisionModel {
    /// Maps pixel values to per-token hidden states (no pooling).
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = xs.apply(&self.embeddings)?;
        let encoder_outputs = self.encoder.forward(&xs, None)?;
        // Return the last hidden state rather than pooled outputs.
        encoder_outputs.apply(&self.post_layernorm)
    }
}
/// Full BLIP captioning model: a frozen-architecture vision tower whose hidden
/// states condition an autoregressive text decoder via cross-attention.
#[derive(Debug, Clone)]
pub struct BlipForConditionalGeneration {
    vision_model: VisionModel,
    text_decoder: blip_text::TextLMHeadModel,
}
impl BlipForConditionalGeneration {
    /// Loads both towers from `vb` ("vision_model", "text_decoder").
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vision_model = VisionModel::new(&cfg.vision_config, vb.pp("vision_model"))?;
        let text_decoder =
            blip_text::TextLMHeadModel::new(&cfg.text_config, vb.pp("text_decoder"))?;
        Ok(Self {
            vision_model,
            text_decoder,
        })
    }
    /// Read-only access to the vision tower (run it once per image).
    pub fn vision_model(&self) -> &VisionModel {
        &self.vision_model
    }
    /// Mutable access to the text decoder, used for token-by-token generation.
    pub fn text_decoder(&mut self) -> &mut blip_text::TextLMHeadModel {
        &mut self.text_decoder
    }
    /// Resets the decoder kv cache; call before captioning a new image.
    pub fn reset_kv_cache(&mut self) {
        self.text_decoder.reset_kv_cache();
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mixtral.rs | candle-transformers/src/models/mixtral.rs | //! Mixtral Model, a sparse mixture of expert model based on the Mistral architecture
//!
//! See Mixtral model details at:
//! - [Hugging Face](https://huggingface.co/docs/transformers/model_doc/mixtral)
//! - [Mixtral-8x7B Blog Post](https://mistral.ai/news/mixtral-of-experts/)
//!
//! The model uses a mixture of experts architecture with:
//! - 8 experts per layer
//! - Top 2 expert routing
//! - Sliding window attention
//! - RoPE embeddings
//!
//! References:
//! - [Hugging Face Implementation](https://github.com/huggingface/transformers/blob/main/src/transformers/models/mixtral/modeling_mixtral.py)
//! - [Mixtral Blog Post](https://mistral.ai/news/mixtral-of-experts/)
//!
use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm};
/// Mixtral Model
/// https://github.com/huggingface/transformers/blob/main/src/transformers/models/mixtral/modeling_mixtral.py
/// https://mistral.ai/news/mixtral-of-experts/
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use serde::Deserialize;
use std::sync::Arc;
/// https://github.com/huggingface/transformers/blob/1a585c1222a56bcaecc070966d558d4a9d862e83/src/transformers/models/mixtral/configuration_mixtral.py#L113
/// Mixtral hyper-parameters, mirroring the upstream HF configuration.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    pub(crate) vocab_size: usize,
    pub(crate) hidden_size: usize,
    // Per-expert MLP intermediate size.
    pub(crate) intermediate_size: usize,
    pub(crate) num_hidden_layers: usize,
    pub(crate) num_attention_heads: usize,
    pub(crate) num_key_value_heads: usize,
    pub(crate) hidden_act: Activation,
    pub(crate) max_position_embeddings: usize,
    pub(crate) rms_norm_eps: f64,
    pub(crate) rope_theta: f64,
    pub(crate) sliding_window: usize,
    // Number of experts activated per token (the "top-k" of the router).
    pub(crate) num_experts_per_tok: usize,
    // Total number of experts per MoE layer.
    pub(crate) num_local_experts: usize,
    pub(crate) use_flash_attn: bool,
}
impl Config {
    /// Hard-coded configuration matching the Mixtral-8x7B-v0.1 checkpoint:
    /// 32 layers, 8 experts with top-2 routing, GQA with 8 kv heads.
    /// https://huggingface.co/mistralai/Mixtral-8x7B-v0.1/blob/main/config.json
    pub fn v0_1_8x7b(use_flash_attn: bool) -> Self {
        Self {
            vocab_size: 32000,
            hidden_size: 4096,
            intermediate_size: 14336,
            num_hidden_layers: 32,
            num_attention_heads: 32,
            num_key_value_heads: 8,
            hidden_act: Activation::Silu,
            max_position_embeddings: 32768,
            rms_norm_eps: 1e-5,
            rope_theta: 1e6,
            sliding_window: 4096,
            num_experts_per_tok: 2,
            num_local_experts: 8,
            use_flash_attn,
        }
    }
}
/// Precomputed rotary-embedding tables of shape
/// `(max_position_embeddings, head_dim)` (the half-table duplicated along the
/// last dim), stored in the model dtype.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
/// Rotates the last dimension halves of `xs`: `[a, b] -> [-b, a]`, the helper
/// used by the non-interleaved rotary-embedding formulation.
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
    let dim = xs.dim(D::Minus1)?;
    let lo = dim / 2;
    let first_half = xs.narrow(D::Minus1, 0, lo)?;
    let second_half = xs.narrow(D::Minus1, lo, dim - lo)?;
    Tensor::cat(&[&second_half.neg()?, &first_half], D::Minus1)
}
impl RotaryEmbedding {
    /// Precomputes sin/cos tables directly in `dtype`.
    /// NOTE(review): computing the frequencies in a reduced precision dtype
    /// (f16/bf16) loses accuracy at long positions; this mirrors the original
    /// implementation rather than computing in f32 first.
    fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let dim = cfg.hidden_size / cfg.num_attention_heads;
        let max_seq_len = cfg.max_position_embeddings;
        // inv_freq[i] = theta^(-2i/dim) for i in 0..dim/2.
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / (cfg.rope_theta as f32).powf(i as f32 / dim as f32))
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        // Outer product position x inv_freq -> (max_seq_len, dim / 2).
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        // Duplicate so the table covers the full head_dim, matching rotate_half.
        let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }
    /// Applies non-interleaved rotary embeddings to `q` and `k`, both of shape
    /// `(b, heads, seq_len, head_dim)`, starting at position `seqlen_offset`.
    fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        seqlen_offset: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
        let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
        let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        // x * cos + rotate_half(x) * sin — the standard RoPE rotation.
        let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
        let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
        Ok((q_embed, k_embed))
    }
}
/// Thin wrapper around the CUDA flash-attention kernel; inputs are expected in
/// `(b, seq_len, heads, head_dim)` layout.
#[cfg(feature = "flash-attn")]
fn flash_attn(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    softmax_scale: f32,
    causal: bool,
) -> Result<Tensor> {
    candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
/// Stub used when the crate is built without the "flash-attn" feature; calling
/// it is a hard error so misconfiguration is caught immediately.
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
    unimplemented!("compile with '--features flash-attn'")
}
/// Mixtral multi-head self-attention: rotary embeddings, grouped-query kv
/// heads, an incremental kv cache and an optional flash-attention fast path.
#[derive(Debug, Clone)]
struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_heads: usize,
    num_kv_heads: usize,
    num_kv_groups: usize,
    head_dim: usize,
    hidden_size: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    kv_cache: Option<(Tensor, Tensor)>,
    use_flash_attn: bool,
}
impl Attention {
    /// Builds the q/k/v/o projections (all bias-free) from `vb`; `head_dim` is
    /// derived as `hidden_size / num_attention_heads`.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        let num_kv_groups = num_heads / num_kv_heads;
        let head_dim = hidden_sz / num_heads;
        let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
        let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
        let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
        let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_heads,
            num_kv_heads,
            num_kv_groups,
            head_dim,
            hidden_size: hidden_sz,
            rotary_emb,
            kv_cache: None,
            use_flash_attn: cfg.use_flash_attn,
        })
    }
    /// Self-attention over `xs` of shape `(b_sz, q_len, hidden_size)`.
    /// `attention_mask` (additive, `-inf` at blocked positions) is only used on
    /// the non-flash path; flash-attn handles causality via its `causal` flag.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let (b_sz, q_len, _) = xs.dims3()?;
        let query_states = self.q_proj.forward(xs)?;
        let key_states = self.k_proj.forward(xs)?;
        let value_states = self.v_proj.forward(xs)?;
        // (b, q_len, heads * head_dim) -> (b, heads, q_len, head_dim).
        let query_states = query_states
            .reshape((b_sz, q_len, self.num_heads, self.head_dim))?
            .transpose(1, 2)?;
        let key_states = key_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let value_states = value_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let (query_states, key_states) =
            self.rotary_emb
                .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
        // Append the new k/v states to the cache along the sequence dimension (2).
        let (key_states, value_states) = match &self.kv_cache {
            None => (key_states, value_states),
            Some((prev_k, prev_v)) => {
                let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
                let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
                (key_states, value_states)
            }
        };
        self.kv_cache = Some((key_states.clone(), value_states.clone()));
        // Expand kv heads to match the query heads (GQA).
        let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?;
        let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?;
        let attn_output = if self.use_flash_attn {
            // flash-attn expects (b_sz, seq_len, nheads, head_dim)
            let q = query_states.transpose(1, 2)?;
            let k = key_states.transpose(1, 2)?;
            let v = value_states.transpose(1, 2)?;
            let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
            flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)?
        } else {
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
            let attn_weights = match attention_mask {
                None => attn_weights,
                Some(mask) => attn_weights.broadcast_add(mask)?,
            };
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&value_states)?
        };
        // (b, heads, q_len, head_dim) -> (b, q_len, hidden_size) -> o_proj.
        attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.hidden_size))?
            .apply(&self.o_proj)
    }
}
/// A single Mixtral expert: a gated MLP computing `w2(act(w1(x)) * w3(x))`
/// (w1 = gate, w3 = up, w2 = down projection).
#[derive(Debug, Clone)]
struct BlockSparseTop2MLP {
    w1: Linear,
    w2: Linear,
    w3: Linear,
    act_fn: Activation,
}
impl BlockSparseTop2MLP {
    /// Loads the expert's three bias-free projections from `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let intermediate_sz = cfg.intermediate_size;
        let w1 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w1"))?;
        let w2 = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("w2"))?;
        let w3 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w3"))?;
        Ok(Self {
            w1,
            w2,
            w3,
            act_fn: cfg.hidden_act,
        })
    }
}
impl Module for BlockSparseTop2MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // Gated activation (SwiGLU when act_fn is Silu).
        let lhs = xs.apply(&self.w1)?.apply(&self.act_fn)?;
        let rhs = xs.apply(&self.w3)?;
        (lhs * rhs)?.apply(&self.w2)
    }
}
/// Mixture-of-experts layer: a linear router ("gate") scores all experts per
/// token and the top `num_experts_per_tok` experts are evaluated and combined.
#[derive(Debug, Clone)]
struct SparseMoeBlock {
    gate: Linear,
    experts: Vec<BlockSparseTop2MLP>,
    num_experts_per_tok: usize,
}
impl SparseMoeBlock {
    /// Loads the router and all `num_local_experts` experts from `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let gate = linear_no_bias(cfg.hidden_size, cfg.num_local_experts, vb.pp("gate"))?;
        let mut experts = Vec::with_capacity(cfg.num_local_experts);
        let vb = vb.pp("experts");
        for idx in 0..cfg.num_local_experts {
            let expert = BlockSparseTop2MLP::new(cfg, vb.pp(idx))?;
            experts.push(expert)
        }
        Ok(SparseMoeBlock {
            gate,
            experts,
            num_experts_per_tok: cfg.num_experts_per_tok,
        })
    }
}
impl Module for SparseMoeBlock {
    /// Mixture-of-experts forward pass.
    ///
    /// Routes every token (row) to its `num_experts_per_tok` highest-scoring
    /// experts, runs each expert only on the rows assigned to it, and sums the
    /// expert outputs weighted by the re-normalized routing probabilities.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b_size, seq_len, hidden_dim) = xs.dims3()?;
        // Flatten batch and sequence so routing works on (tokens, hidden).
        let xs = xs.reshape(((), hidden_dim))?;
        let router_logits = xs.apply(&self.gate)?;
        let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?;
        // In order to extract topk, we extract the data from the tensor and manipulate it
        // directly. Maybe we will want to use some custom ops instead at some point.
        let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?;
        // top_x[e] holds the row indices routed to expert e; selected_rws[e] the
        // matching normalized routing weights, in the same order.
        let mut top_x = vec![vec![]; self.experts.len()];
        let mut selected_rws = vec![vec![]; self.experts.len()];
        for (row_idx, rw) in routing_weights.iter().enumerate() {
            // Sort expert indices by decreasing routing weight, keep the top-k.
            let mut dst = (0..rw.len() as u32).collect::<Vec<u32>>();
            dst.sort_by(|&i, &j| rw[j as usize].total_cmp(&rw[i as usize]));
            let topk: Vec<u32> = dst.into_iter().take(self.num_experts_per_tok).collect();
            // Re-normalize the selected weights so they sum to 1 for this token.
            let sum_routing_weights: f32 = topk.iter().map(|&e| rw[e as usize]).sum();
            // Single pass over the selected experts (the previous code walked
            // the same top-k list twice: once to sum, once to normalize).
            for &expert_idx in topk.iter() {
                let expert_idx = expert_idx as usize;
                top_x[expert_idx].push(row_idx as u32);
                selected_rws[expert_idx].push(rw[expert_idx] / sum_routing_weights)
            }
        }
        // Accumulate each expert's weighted contribution into ys via index_add.
        let mut ys = xs.zeros_like()?;
        for (expert_idx, expert_layer) in self.experts.iter().enumerate() {
            let top_x = &top_x[expert_idx];
            if top_x.is_empty() {
                continue;
            }
            let top_x = Tensor::new(top_x.as_slice(), xs.device())?;
            let selected_rws =
                Tensor::new(selected_rws[expert_idx].as_slice(), xs.device())?.reshape(((), 1))?;
            // Index the correct hidden states and compute the expert hidden state for
            // the current expert. We need to make sure to multiply the output hidden
            // states by `routing_weights` on the corresponding tokens.
            let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?;
            let current_hidden_states = expert_layer.forward(&current_state)?;
            let current_hidden_states = current_hidden_states.broadcast_mul(&selected_rws)?;
            ys = ys.index_add(&top_x, &current_hidden_states, 0)?;
        }
        let ys = ys.reshape((b_size, seq_len, hidden_dim))?;
        Ok(ys)
    }
}
#[derive(Debug, Clone)]
struct DecoderLayer {
    self_attn: Attention,
    block_sparse_moe: SparseMoeBlock,
    // Applied before self-attention (pre-norm architecture).
    input_layernorm: RmsNorm,
    // Applied before the MoE feed-forward.
    post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
    /// Loads one decoder layer; the rotary embedding tables are shared across layers.
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
        let block_sparse_moe = SparseMoeBlock::new(cfg, vb.pp("block_sparse_moe"))?;
        let input_layernorm =
            RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
        let post_attention_layernorm = RmsNorm::new(
            cfg.hidden_size,
            cfg.rms_norm_eps,
            vb.pp("post_attention_layernorm"),
        )?;
        Ok(Self {
            self_attn,
            block_sparse_moe,
            input_layernorm,
            post_attention_layernorm,
        })
    }
    /// Pre-norm residual block: x + attn(norm(x)), then + moe(norm(.)).
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let residual = xs;
        let xs = self.input_layernorm.forward(xs)?;
        let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = xs
            .apply(&self.post_attention_layernorm)?
            .apply(&self.block_sparse_moe)?;
        residual + xs
    }
}
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: candle_nn::Embedding,
    layers: Vec<DecoderLayer>,
    norm: RmsNorm,
    lm_head: Linear,
    // Attention window size used when building the causal mask.
    sliding_window: usize,
    device: Device,
    dtype: DType,
}
impl Model {
    /// Loads the full model (embeddings, decoder stack, final norm, LM head)
    /// from `vb` using the `model.*` / `lm_head.*` weight layout.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = vb.pp("model");
        let embed_tokens =
            candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
        // The rotary tables are computed once and shared by every layer.
        let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb_m.pp("layers");
        for layer_idx in 0..cfg.num_hidden_layers {
            let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
            layers.push(layer)
        }
        let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
        let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
        Ok(Self {
            embed_tokens,
            layers,
            norm,
            lm_head,
            sliding_window: cfg.sliding_window,
            device: vb.device().clone(),
            dtype: vb.dtype(),
        })
    }
    /// Builds the (b, 1, tgt, tgt + offset) additive attention mask:
    /// 0 where attention is allowed, -inf where it is masked.
    fn prepare_decoder_attention_mask(
        &self,
        b_size: usize,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        // Sliding window mask?
        // Within the current chunk, query i may attend to key j when j <= i
        // and i - j <= sliding_window.
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| {
                (0..tgt_len).map(move |j| {
                    if i < j || j + self.sliding_window < i {
                        f32::NEG_INFINITY
                    } else {
                        0.
                    }
                })
            })
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            // NOTE(review): cached positions are always fully visible (zeros),
            // i.e. the sliding window is not applied to the cached prefix —
            // confirm this matches the reference implementation.
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(self.dtype)
    }
    /// Runs the decoder on `input_ids` (b, seq) and returns the logits for the
    /// last position only.
    pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (b_size, seq_len) = input_ids.dims2()?;
        // A single query token attends to the whole cache: no mask needed.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
            Some(mask)
        };
        let mut xs = self.embed_tokens.forward(input_ids)?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
        }
        xs.narrow(1, seq_len - 1, 1)?
            .apply(&self.norm)?
            .apply(&self.lm_head)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_qwen3.rs | candle-transformers/src/models/quantized_qwen3.rs | //! Qwen3 implementation with quantization support.
//!
//! Based on the Qwen3 architecture and implemented with quantized weights
//! for reduced memory usage and faster inference on compatible hardware.
//!
//! References:
//! - [Qwen3 Models](https://huggingface.co/Qwen/Qwen3-0.6B) (architecture based on official implementations)
//!
use super::with_tracing::QMatMul;
use crate::{quantized_nn::RmsNorm, utils::repeat_kv};
use candle::quantized::{gguf_file, QTensor};
use candle::{DType, Device, Result, Tensor};
use candle_nn::{kv_cache::ConcatKvCache, Activation, Embedding, Module};
use std::io::{Read, Seek};
use std::sync::Arc;
/// Thin wrapper around a parsed GGUF file that reads tensors on demand.
pub struct Gguf<R: Read + Seek> {
    // Parsed GGUF metadata and tensor directory.
    ct: gguf_file::Content,
    // Underlying reader, seeked on every tensor load.
    reader: R,
    // Device onto which loaded tensors are placed.
    device: Device,
}
impl<R: Read + Seek> Gguf<R> {
    pub fn new(ct: gguf_file::Content, reader: R, device: Device) -> Self {
        Self { ct, reader, device }
    }
    /// Loads the named quantized tensor and wraps it as a matmul operator.
    pub fn qmatmul(&mut self, name: &str) -> Result<QMatMul> {
        let ws = self.ct.tensor(&mut self.reader, name, &self.device)?;
        QMatMul::from_weights(ws.into())
    }
    /// Loads the named tensor as RMSNorm weights with the given epsilon.
    pub fn rms_norm(&mut self, name: &str, eps: f64) -> Result<RmsNorm> {
        let ws = self.ct.tensor(&mut self.reader, name, &self.device)?;
        RmsNorm::from_qtensor(ws, eps)
    }
    /// Read-only access to the GGUF key/value metadata.
    pub fn metadata(&self) -> &std::collections::HashMap<String, gguf_file::Value> {
        &self.ct.metadata
    }
    /// Loads the named tensor in its quantized form.
    pub fn tensor(&mut self, name: &str) -> Result<QTensor> {
        self.ct.tensor(&mut self.reader, name, &self.device)
    }
}
#[derive(Debug, Clone)]
struct MlpWeights {
    gate_proj: QMatMul,
    up_proj: QMatMul,
    down_proj: QMatMul,
    act_fn: Activation,
    span: tracing::Span,
}
impl MlpWeights {
    /// Loads the gate/up/down projections for one feed-forward block.
    fn new<R: Read + Seek>(gg: &mut Gguf<R>, prefix: &str) -> Result<Self> {
        Ok(Self {
            gate_proj: gg.qmatmul(&format!("{prefix}.ffn_gate.weight"))?,
            up_proj: gg.qmatmul(&format!("{prefix}.ffn_up.weight"))?,
            down_proj: gg.qmatmul(&format!("{prefix}.ffn_down.weight"))?,
            act_fn: Activation::Silu,
            span: tracing::span!(tracing::Level::TRACE, "mlp"),
        })
    }
}
impl Module for MlpWeights {
    /// SwiGLU feed-forward: down(silu(gate(x)) * up(x)).
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let activated = self.act_fn.forward(&self.gate_proj.forward(x)?)?;
        let expanded = self.up_proj.forward(x)?;
        self.down_proj.forward(&(activated * expanded)?)
    }
}
#[derive(Debug, Clone)]
pub struct RotaryEmbedding {
    // Precomputed tables of shape (max_position_embeddings, head_dim / 2).
    sin: Tensor,
    cos: Tensor,
}
impl RotaryEmbedding {
    /// Precomputes the RoPE sin/cos tables for all positions up to
    /// `max_position_embeddings`.
    pub fn new(
        dtype: DType,
        head_dim: usize,
        max_position_embeddings: usize,
        rope_theta: f64,
        dev: &Device,
    ) -> Result<Self> {
        let dim = head_dim;
        let max_seq_len = max_position_embeddings;
        // inv_freq[k] = 1 / theta^(2k / dim): one frequency per channel pair.
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / rope_theta.powf(i as f64 / dim as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        // Outer product: freqs[p, k] = position p * inv_freq[k].
        let freqs = t.matmul(&inv_freq)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }
    /// Apply RoPE (q, k shape: B x H x L x D)
    pub fn apply(&self, q: &Tensor, k: &Tensor, offset: usize) -> Result<(Tensor, Tensor)> {
        let (_, _, seq_len, _) = q.dims4()?;
        // Slice the tables at `offset` so positions line up with the KV cache.
        let cos = self.cos.narrow(0, offset, seq_len)?.to_dtype(q.dtype())?;
        let sin = self.sin.narrow(0, offset, seq_len)?.to_dtype(q.dtype())?;
        let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
        let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
        Ok((q_embed, k_embed))
    }
}
#[derive(Debug, Clone)]
struct AttentionWeights {
    q_proj: QMatMul,
    k_proj: QMatMul,
    v_proj: QMatMul,
    o_proj: QMatMul,
    // Qwen3 applies RMSNorm to the per-head query/key vectors before RoPE.
    q_norm: RmsNorm,
    k_norm: RmsNorm,
    num_heads: usize,
    num_kv_heads: usize,
    // num_heads / num_kv_heads; how often K/V are repeated for grouped-query attention.
    num_kv_groups: usize,
    head_dim: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    kv_cache: ConcatKvCache,
    span_attn: tracing::Span,
}
impl AttentionWeights {
    /// Loads the attention projections and per-head norms for one block.
    fn new<R: Read + Seek>(
        gg: &mut Gguf<R>,
        num_heads: usize,
        num_kv_heads: usize,
        head_dim: usize,
        rms_norm_eps: f64,
        rotary_emb: Arc<RotaryEmbedding>,
        prefix: &str,
    ) -> Result<Self> {
        let num_kv_groups = num_heads / num_kv_heads;
        let q_proj = gg.qmatmul(&format!("{prefix}.attn_q.weight"))?;
        let k_proj = gg.qmatmul(&format!("{prefix}.attn_k.weight"))?;
        let v_proj = gg.qmatmul(&format!("{prefix}.attn_v.weight"))?;
        let o_proj = gg.qmatmul(&format!("{prefix}.attn_output.weight"))?;
        let q_norm = gg.rms_norm(&format!("{prefix}.attn_q_norm.weight"), rms_norm_eps)?;
        let k_norm = gg.rms_norm(&format!("{prefix}.attn_k_norm.weight"), rms_norm_eps)?;
        // The cache concatenates along dim 2, the sequence dimension of (B, H, L, D).
        let kv_cache = ConcatKvCache::new(2);
        let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            q_norm,
            k_norm,
            num_heads,
            num_kv_heads,
            num_kv_groups,
            head_dim,
            rotary_emb,
            kv_cache,
            span_attn,
        })
    }
    /// Grouped-query attention with RoPE and a growing KV cache.
    ///
    /// `x` is (B, L, hidden); `offset` is the number of tokens already cached
    /// and indexes the rotary tables.
    fn forward(&mut self, x: &Tensor, attn_mask: Option<&Tensor>, offset: usize) -> Result<Tensor> {
        let _enter = self.span_attn.enter();
        let (b, l, _) = x.dims3()?;
        let q = self.q_proj.forward(x)?;
        let k = self.k_proj.forward(x)?;
        let v = self.v_proj.forward(x)?;
        // (B, L, H*D) -> (B, H, L, D)
        let q = q
            .reshape((b, l, self.num_heads, self.head_dim))?
            .transpose(1, 2)?;
        let k = k
            .reshape((b, l, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let v = v
            .reshape((b, l, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        // Per-head RMSNorm is applied on flattened (B*H*L, D) rows.
        let q_flat = q.flatten(0, 2)?;
        let k_flat = k.flatten(0, 2)?;
        let q_flat = self.q_norm.forward(&q_flat)?;
        let k_flat = self.k_norm.forward(&k_flat)?;
        let q = q_flat.reshape((b, self.num_heads, l, self.head_dim))?;
        let k = k_flat.reshape((b, self.num_kv_heads, l, self.head_dim))?;
        let (q, k) = self.rotary_emb.apply(&q, &k, offset)?;
        let (k, v) = self.kv_cache.append(&k, &v)?;
        // Repeat K/V across groups so every query head has a matching K/V head.
        let k = repeat_kv(k, self.num_kv_groups)?.contiguous()?;
        let v = repeat_kv(v, self.num_kv_groups)?.contiguous()?;
        let scale = 1.0 / (self.head_dim as f64).sqrt();
        let mut scores = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
        if let Some(m) = attn_mask {
            // `to_dtype` is a cheap clone when the dtypes already match, so the
            // previous explicit equal-dtype branch was unnecessary.
            scores = scores.broadcast_add(&m.to_dtype(scores.dtype())?)?;
        }
        let probs = candle_nn::ops::softmax_last_dim(&scores)?;
        let ctx = probs.matmul(&v)?; // (B, H, L, D)
        let reshaped_ctx = ctx
            .transpose(1, 2)?
            .reshape((b, l, self.num_heads * self.head_dim))?;
        self.o_proj.forward(&reshaped_ctx)
    }
    fn clear_kv_cache(&mut self) {
        self.kv_cache.reset();
    }
}
#[derive(Debug, Clone)]
struct LayerWeights {
    self_attn: AttentionWeights,
    mlp: MlpWeights,
    ln1: RmsNorm,
    ln2: RmsNorm,
}
impl LayerWeights {
    /// Loads one transformer block (`blk.{layer_idx}`) from the GGUF file.
    fn new<R: Read + Seek>(
        gg: &mut Gguf<R>,
        num_attention_heads: usize,
        num_key_value_heads: usize,
        head_dim: usize,
        rms_norm_eps: f64,
        rotary: Arc<RotaryEmbedding>,
        layer_idx: usize,
    ) -> Result<Self> {
        let prefix = format!("blk.{layer_idx}");
        Ok(Self {
            ln1: gg.rms_norm(&format!("{prefix}.attn_norm.weight"), rms_norm_eps)?,
            ln2: gg.rms_norm(&format!("{prefix}.ffn_norm.weight"), rms_norm_eps)?,
            self_attn: AttentionWeights::new(
                gg,
                num_attention_heads,
                num_key_value_heads,
                head_dim,
                rms_norm_eps,
                rotary,
                &prefix,
            )?,
            mlp: MlpWeights::new(gg, &prefix)?,
        })
    }
    /// Pre-norm residual block: x + attn(ln1(x)), then + mlp(ln2(.)).
    fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, offset: usize) -> Result<Tensor> {
        let attn_out = self.self_attn.forward(&self.ln1.forward(x)?, mask, offset)?;
        let x = (x + attn_out)?;
        let mlp_out = self.ln2.forward(&x)?.apply(&self.mlp)?;
        x + mlp_out
    }
    fn clear_kv_cache(&mut self) {
        self.self_attn.clear_kv_cache();
    }
}
#[derive(Debug, Clone)]
pub struct ModelWeights {
    embed_tokens: Embedding,
    layers: Vec<LayerWeights>,
    norm: RmsNorm,
    // Output projection; tied to the token embeddings when absent from the file.
    lm_head: QMatMul,
    device: Device,
    // Activation dtype used for masks (and rotary tables at load time).
    dtype: DType,
    span: tracing::Span,
    span_output: tracing::Span,
}
impl ModelWeights {
    /// Loads a quantized Qwen3 model from a GGUF file, reading the architecture
    /// hyper-parameters from the `qwen3.*` metadata keys.
    pub fn from_gguf<R: Read + Seek>(
        ct: gguf_file::Content,
        reader: &mut R,
        device: &Device,
    ) -> Result<Self> {
        let mut gg = Gguf::new(ct, reader, device.clone());
        let md_get = |s: &str| match gg.metadata().get(s) {
            None => candle::bail!("cannot find {s} in metadata"),
            Some(v) => Ok(v),
        };
        let num_attention_heads = md_get("qwen3.attention.head_count")?.to_u32()? as usize;
        let num_kv_heads = md_get("qwen3.attention.head_count_kv")?.to_u32()? as usize;
        let head_dim = md_get("qwen3.attention.key_length")?.to_u32()? as usize;
        let num_layers = md_get("qwen3.block_count")?.to_u32()? as usize;
        let hidden_size = md_get("qwen3.embedding_length")?.to_u32()? as usize;
        let max_position_embeddings = md_get("qwen3.context_length")?.to_u32()? as usize;
        let rms_norm_eps = md_get("qwen3.attention.layer_norm_rms_epsilon")?.to_f32()? as f64;
        let rope_freq_base = md_get("qwen3.rope.freq_base")?.to_f32()? as f64;
        // `general.dtype` is optional; default to F16 for unknown/missing values.
        let dtype = match gg.metadata().get("general.dtype") {
            Some(v) => match v.to_u32() {
                Ok(0) => DType::F32,
                Ok(1) => DType::F16,
                _ => DType::F16,
            },
            None => DType::F16,
        };
        // Embeddings are dequantized once up front.
        let embed_tensor = gg.tensor("token_embd.weight")?;
        let embed_tokens = Embedding::new(embed_tensor.dequantize(device)?, hidden_size);
        let rotary = Arc::new(RotaryEmbedding::new(
            dtype,
            head_dim,
            max_position_embeddings,
            rope_freq_base,
            device,
        )?);
        let mut layers = Vec::with_capacity(num_layers);
        for i in 0..num_layers {
            layers.push(LayerWeights::new(
                &mut gg,
                num_attention_heads,
                num_kv_heads,
                head_dim,
                rms_norm_eps,
                rotary.clone(),
                i,
            )?);
        }
        let norm = gg.rms_norm("output_norm.weight", rms_norm_eps)?;
        // Load output projection tensor, falling back to tied embeddings like gemma3
        let lm_head_tensor = match gg.tensor("output.weight") {
            Ok(tensor) => tensor,
            Err(_) => gg.tensor("token_embd.weight")?,
        };
        let lm_head = QMatMul::from_weights(lm_head_tensor.into())?;
        let span = tracing::span!(tracing::Level::TRACE, "model");
        let span_output = tracing::span!(tracing::Level::TRACE, "output");
        Ok(Self {
            embed_tokens,
            layers,
            norm,
            lm_head,
            device: device.clone(),
            dtype,
            span,
            span_output,
        })
    }
    /// Builds an additive causal mask of shape (b, 1, tgt, tgt + offset):
    /// 0 where attention is allowed, -inf elsewhere. `sw` optionally restricts
    /// attention to a sliding window of that many positions.
    fn causal_mask(
        &self,
        b: usize,
        tgt: usize,
        offset: usize,
        sw: Option<usize>,
    ) -> Result<Tensor> {
        let minf = f32::NEG_INFINITY;
        let mask: Vec<_> = (0..tgt)
            .flat_map(|i| {
                (0..(tgt + offset)).map(move |j| {
                    // Query i sits at absolute position i + offset and may only
                    // attend to keys at positions j <= i + offset.
                    let past_ok = j <= i + offset;
                    let sw_ok = match sw {
                        Some(w) => (i + offset) as i64 - j as i64 <= w as i64,
                        None => true,
                    };
                    if past_ok && sw_ok {
                        0.
                    } else {
                        minf
                    }
                })
            })
            .collect();
        // `mask` only holds one (tgt, tgt + offset) grid, so build the tensor
        // at that shape and broadcast over the batch. The previous code passed
        // the batched 4-D shape straight to `from_slice`, which fails for
        // b > 1 because the element counts no longer match.
        Tensor::from_slice(&mask, (tgt, tgt + offset), &self.device)?
            .reshape((1, 1, tgt, tgt + offset))?
            .expand((b, 1, tgt, tgt + offset))?
            .to_dtype(self.dtype)
    }
    /// Decoder forward; returns the logits for the last position, shape (b, vocab).
    pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b, l) = input.dims2()?;
        let mut h = self.embed_tokens.forward(input)?;
        // A single query token attends to the whole cache: no mask needed.
        let causal_mask = if l == 1 {
            None
        } else {
            Some(self.causal_mask(b, l, offset, None)?)
        };
        for layer in &mut self.layers {
            h = layer.forward(&h, causal_mask.as_ref(), offset)?;
        }
        let h = self.norm.forward(&h)?;
        let _enter = self.span_output.enter();
        let last_hidden = h.narrow(1, l - 1, 1)?;
        self.lm_head.forward(&last_hidden)?.squeeze(1)
    }
    /// Clears the per-layer KV caches (call between unrelated generations).
    pub fn clear_kv_cache(&mut self) {
        for layer in &mut self.layers {
            layer.clear_kv_cache();
        }
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/dinov2.rs | candle-transformers/src/models/dinov2.rs | //! Implementation of the DINOv2 models from Meta Research.
//!
//! This module implements the DINOv2 vision transformer model from Meta AI Research.
//! DINOv2 is a self-supervised learning model that can learn visual features
//! without using any labeled data. See: ["DINOv2: Learning Robust Visual Features without Supervision"](https://github.com/facebookresearch/dinov2)
//!
//! ## Running an example with color map and CUDA
//!
//! ```bash
//! cargo run \
//! --features cuda,depth_anything_v2 \
//! --package candle-examples \
//! --example depth_anything_v2 \
//! -- --color-map \
//! --image candle-examples/examples/yolo-v8/assets/bike.jpg
//! ```
//!
//! ## Running as an ImageNet classifier
//!
//! The model returns the probability for the image to belong to each of the 1000 ImageNet categories.
//!
//! <div align=center>
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width=640>
//! </div>
//!
//! ```bash
//! cargo run \
//! --example dinov2 \
//! --release \
//! -- --image candle-examples/examples/yolo-v8/assets/bike.jpg
//!
//! > mountain bike, all-terrain bike, off-roader: 43.67%
//! > bicycle-built-for-two, tandem bicycle, tandem: 33.20%
//! > crash helmet : 13.23%
//! > unicycle, monocycle : 2.44%
//! > maillot : 2.42%
//! ```
//!
use candle::{IndexOp, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder};
// Input image resolution the DINOv2 checkpoints were trained with.
const IMG_SIZE: usize = 518;
// Side length of one (square) patch token.
const PATCH_SIZE: usize = 14;
// Output size of the ImageNet classification head.
const NUM_CLASSES: usize = 1000;
/// Creates a linear layer, with or without a bias term.
fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> {
    match bias {
        true => candle_nn::linear(in_dim, out_dim, vb),
        false => candle_nn::linear_no_bias(in_dim, out_dim, vb),
    }
}
#[derive(Debug)]
struct Attention {
    // Fused query/key/value projection (dim -> 3 * dim).
    qkv: Linear,
    // Output projection after head merge.
    proj: Linear,
    num_heads: usize,
    // 1 / sqrt(head_dim) attention scaling.
    scale: f64,
}
impl Attention {
    /// Builds multi-head self-attention with a fused QKV projection.
    fn new(
        vb: VarBuilder,
        dim: usize,
        num_heads: usize,
        qkv_bias: bool,
        proj_bias: bool,
    ) -> Result<Self> {
        let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?;
        let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?;
        let scale = 1. / ((dim / num_heads) as f64).sqrt();
        Ok(Self {
            qkv,
            proj,
            num_heads,
            scale,
        })
    }
}
impl Module for Attention {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, n, c) = xs.dims3()?;
        // (b, n, 3c) -> (b, n, 3, heads, c/heads) -> (3, b, heads, n, c/heads).
        let qkv = self
            .qkv
            .forward(xs)?
            .reshape((b, n, 3, self.num_heads, c / self.num_heads))?
            .transpose(1, 2)? // 02134
            .transpose(0, 1)? // 20134
            .transpose(2, 3)?; // 20314
        // Fold the scale into q so attn = softmax((q / sqrt(d)) k^T).
        let q = (qkv.i(0)? * self.scale)?;
        let k = qkv.i(1)?.contiguous()?;
        let v = qkv.i(2)?.contiguous()?;
        let attn = candle_nn::ops::softmax(&q.matmul(&k.t()?)?, D::Minus1)?;
        // Merge heads back: (b, heads, n, d) -> (b, n, c).
        let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?;
        self.proj.forward(&attn)
    }
}
#[derive(Debug)]
struct LayerScale {
    gamma: Tensor,
}
impl LayerScale {
    /// Loads the learned per-channel `gamma` scaling vector of size `dim`.
    fn new(vb: VarBuilder, dim: usize) -> Result<Self> {
        Ok(Self {
            gamma: vb.get(dim, "gamma")?,
        })
    }
}
impl Module for LayerScale {
    /// Scales the input channel-wise by `gamma`.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.broadcast_mul(&self.gamma)
    }
}
#[derive(Debug)]
struct Mlp {
    fc1: Linear,
    fc2: Linear,
}
impl Mlp {
    /// Two-layer MLP whose output width equals its input width.
    fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> {
        Ok(Self {
            fc1: linear(vb.pp("fc1"), in_features, hidden_features, bias)?,
            // Output features == input features in this architecture.
            fc2: linear(vb.pp("fc2"), hidden_features, in_features, bias)?,
        })
    }
}
impl Module for Mlp {
    /// fc2(gelu(fc1(x)))
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        self.fc2.forward(&self.fc1.forward(xs)?.gelu()?)
    }
}
#[derive(Debug)]
struct Block {
    norm1: LayerNorm,
    attn: Attention,
    // Learned residual-branch scaling after attention.
    ls1: LayerScale,
    norm2: LayerNorm,
    mlp: Mlp,
    // Learned residual-branch scaling after the MLP.
    ls2: LayerScale,
}
impl Block {
    /// Builds one pre-norm transformer block with LayerScale on both branches.
    fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
        let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?;
        let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true)?;
        let ls1 = LayerScale::new(vb.pp("ls1"), dim)?;
        let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?;
        // MLP hidden width is 4x the embedding dim.
        let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?;
        let ls2 = LayerScale::new(vb.pp("ls2"), dim)?;
        Ok(Self {
            norm1,
            attn,
            ls1,
            norm2,
            mlp,
            ls2,
        })
    }
}
impl Module for Block {
    /// x + ls1(attn(norm1(x))), then + ls2(mlp(norm2(.))).
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let residual = xs;
        let xs = self
            .ls1
            .forward(&self.attn.forward(&self.norm1.forward(xs)?)?)?;
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = self
            .ls2
            .forward(&self.mlp.forward(&self.norm2.forward(&xs)?)?)?;
        xs + residual
    }
}
#[derive(Debug)]
struct PatchEmbed {
    // Strided convolution that maps each patch to an embedding vector.
    proj: candle_nn::Conv2d,
    patch_size: (usize, usize),
    // Number of patch tokens at the nominal `img_size`.
    num_patches: usize,
}
impl PatchEmbed {
    /// Patch embedding: a conv with kernel == stride == patch_size.
    fn new(
        vb: VarBuilder,
        img_size: usize,
        patch_size: usize,
        in_chans: usize,
        embed_dim: usize,
    ) -> Result<Self> {
        let config = candle_nn::Conv2dConfig {
            stride: patch_size,
            ..Default::default()
        };
        let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?;
        let num_patches = (img_size / patch_size) * (img_size / patch_size);
        Ok(Self {
            proj,
            patch_size: (patch_size, patch_size),
            num_patches,
        })
    }
}
impl Module for PatchEmbed {
    /// (b, c, h, w) image -> (b, num_patches, embed_dim) token sequence.
    /// Errors if h or w is not a multiple of the patch size.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (_b, _c, h, w) = xs.dims4()?;
        let (patch_h, patch_w) = self.patch_size;
        if (h % patch_h) != 0 {
            candle::bail!("image height {h} is not a multiple of patch height {patch_h}")
        }
        if (w % patch_w) != 0 {
            candle::bail!("image width {w} is not a multiple of patch width {patch_w}")
        }
        let xs = self.proj.forward(xs)?;
        let (b, c, h, w) = xs.dims4()?;
        // flatten embeddings.
        xs.reshape((b, c, h * w))?.transpose(1, 2)
    }
}
#[derive(Debug)]
pub struct DinoVisionTransformer {
    patch_embed: PatchEmbed,
    // Learned class token, shape (1, 1, embed_dim).
    cls_token: Tensor,
    // Learned position embeddings for [CLS] + all patches.
    pos_embed: Tensor,
    blocks: Vec<Block>,
    norm: LayerNorm,
    // Linear classifier over [CLS ‖ mean(patch tokens)], hence 2 * embed_dim in.
    head: Linear,
}
impl DinoVisionTransformer {
    /// Builds a DINOv2 ViT with `depth` blocks, `embed_dim` channels and
    /// `num_heads` attention heads.
    pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> {
        let patch_embed =
            PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?;
        let cls_token = vb.get((1, 1, embed_dim), "cls_token")?;
        // One extra position for the class token.
        let num_tokens = 1;
        let pos_embed = vb.get(
            (1, patch_embed.num_patches + num_tokens, embed_dim),
            "pos_embed",
        )?;
        let head = linear(vb.pp("head"), 2 * embed_dim, NUM_CLASSES, true)?;
        let norm = layer_norm(embed_dim, 1e-5, vb.pp("norm"))?;
        let vb_b = vb.pp("blocks");
        let blocks = (0..depth)
            .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads))
            .collect::<Result<Vec<_>>>()?;
        Ok(Self {
            patch_embed,
            cls_token,
            pos_embed,
            blocks,
            norm,
            head,
        })
    }
    /// Resizes the patch position embeddings to the input's patch grid when it
    /// differs from the nominal resolution (the class-token embedding is kept as-is).
    fn interpolate_pos_encoding(&self, xs: &Tensor, w: usize, h: usize) -> Result<Tensor> {
        let npatch = xs.dim(1)? - 1;
        let n = self.pos_embed.dim(1)? - 1;
        // The stored embeddings are assumed to form a square grid of side sqrt(n).
        let sqrt_n = (n as f64).sqrt();
        if npatch == n && w == h {
            return Ok(self.pos_embed.clone());
        }
        let class_pos_embed = self.pos_embed.i((.., ..1))?;
        let patch_pos_embed = self.pos_embed.i((.., 1..))?;
        let dim = xs.dim(D::Minus1)?;
        // +0.1 guards against floating-point truncation when casting to usize.
        let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1);
        let patch_pos_embed = patch_pos_embed
            .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))?
            .transpose(2, 3)?
            .transpose(1, 2)?;
        // This uses bicubic interpolation in the original implementation.
        let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?;
        let el_count = patch_pos_embed.shape().elem_count();
        let patch_pos_embed =
            patch_pos_embed
                .transpose(1, 2)?
                .transpose(2, 3)?
                .reshape((1, el_count / dim, dim))?;
        Tensor::cat(&[&class_pos_embed, &patch_pos_embed], 1)
    }
    /// Patch-embeds the image, prepends the class token and adds position embeddings.
    fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> {
        let (_b, _nc, w, h) = xs.dims4()?;
        let xs = self.patch_embed.forward(xs)?;
        let xs = Tensor::cat(&[&self.cls_token, &xs], 1)?;
        &xs + &self.interpolate_pos_encoding(&xs, w, h)?
    }
    /// Runs all blocks and collects the hidden states after each block index
    /// listed in `blocks_to_take`.
    fn get_intermediate_layers_not_chunked(
        &self,
        xs: &Tensor,
        blocks_to_take: &[usize],
    ) -> Result<Vec<Tensor>> {
        let mut xs = self.prepare_tokens_with_mask(xs)?;
        let mut output = Vec::new();
        for (i, blk) in self.blocks.iter().enumerate() {
            xs = blk.forward(&xs)?;
            if blocks_to_take.contains(&i) {
                output.push(xs.clone());
            }
        }
        if output.len() != blocks_to_take.len() {
            candle::bail!(
                "only {} / {} blocks found",
                output.len(),
                blocks_to_take.len()
            );
        }
        Ok(output)
    }
    /// Returns the selected intermediate features, optionally layer-normed,
    /// reshaped to (b, c, h', w') feature maps, and/or with the class token
    /// concatenated; the per-layer results are stacked along dim 0.
    pub fn get_intermediate_layers(
        &self,
        xs: &Tensor,
        blocks_to_take: &[usize],
        reshape: bool,
        return_class_token: bool,
        norm: bool,
    ) -> Result<Tensor> {
        let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?;
        let outputs = if norm {
            outputs
                .iter()
                .map(|out| self.norm.forward(out))
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        // Token 0 is the class token; the rest are patch tokens.
        let class_tokens = outputs
            .iter()
            .map(|out| out.i((.., 0)))
            .collect::<Result<Vec<_>>>()?;
        let outputs = outputs
            .iter()
            .map(|out| out.i((.., 1..)))
            .collect::<Result<Vec<_>>>()?;
        let outputs = if reshape {
            let (b, _c, w, h) = xs.dims4()?;
            let patch_size = self.patch_embed.patch_size.0;
            // Infer the channel count from the element count of one output.
            let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size));
            outputs
                .iter()
                .map(|out| {
                    out.reshape((b, w / patch_size, h / patch_size, num_channels))?
                        .transpose(2, 3)?
                        .transpose(1, 2)
                })
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        let outputs = if return_class_token {
            outputs
                .iter()
                .zip(class_tokens.iter())
                .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1))
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        Tensor::stack(&outputs[..], 0)
    }
}
impl Module for DinoVisionTransformer {
    /// Classification forward: runs all blocks, then feeds the concatenation of
    /// the class token and the mean of the patch tokens to the linear head.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = self.prepare_tokens_with_mask(xs)?;
        for blk in self.blocks.iter() {
            xs = blk.forward(&xs)?
        }
        let xs = self.norm.forward(&xs)?;
        // Token 0 is the class token; the remaining tokens are patch tokens.
        let xs_norm_clstoken = xs.i((.., 0))?;
        let xs_norm_patchtokens = xs.i((.., 1..))?.mean(1)?;
        let xs = Tensor::cat(&[xs_norm_clstoken, xs_norm_patchtokens], D::Minus1)?;
        self.head.forward(&xs)
    }
}
/// DINOv2 ViT-S/14: 12 blocks, embedding dim 384, 6 attention heads.
pub fn vit_small(vb: VarBuilder) -> Result<DinoVisionTransformer> {
    DinoVisionTransformer::new(vb, 12, 384, 6)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/fastvit.rs | candle-transformers/src/models/fastvit.rs | //! # FastViT inference implementation based on timm
//!
//! ## Description
//! See ["FastViT: A Fast Hybrid Vision Transformer using Structural Reparameterization"](https://arxiv.org/pdf/2303.14189)
//!
//! Implementation based on [timm model](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/fastvit.py)
use candle::{Context, DType, Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d, conv2d_no_bias, linear, linear_no_bias, ops::sigmoid, ops::softmax,
BatchNorm, Conv2d, Conv2dConfig, Func, VarBuilder,
};
#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)]
pub struct Config {
    // MLP expansion ratio inside each block.
    pub exp_ratio: usize,
    // Channel count of the first stage (doubles at each later stage).
    pub in_channels: usize,
    // Number of blocks in each of the four stages.
    pub blocks: [usize; 4],
    // Whether the final stage uses attention token mixing.
    pub attn: bool,
    // Whether the large-kernel conv in the patch-embed path uses an activation.
    pub lkc_use_act: bool,
}
impl Config {
    // FastViT "T" variants (smaller expansion ratio).
    pub fn t8() -> Self {
        Self {
            exp_ratio: 3,
            in_channels: 48,
            blocks: [2, 2, 4, 2],
            attn: false,
            lkc_use_act: false,
        }
    }
    pub fn t12() -> Self {
        Self {
            exp_ratio: 3,
            in_channels: 64,
            blocks: [2, 2, 6, 2],
            attn: false,
            lkc_use_act: false,
        }
    }
    // FastViT "S" variants (expansion ratio 4); "SA"/"MA" add attention.
    pub fn s12() -> Self {
        Self {
            exp_ratio: 4,
            in_channels: 64,
            blocks: [2, 2, 6, 2],
            attn: false,
            lkc_use_act: false,
        }
    }
    pub fn sa12() -> Self {
        Self {
            exp_ratio: 4,
            in_channels: 64,
            blocks: [2, 2, 6, 2],
            attn: true,
            lkc_use_act: false,
        }
    }
    pub fn sa24() -> Self {
        Self {
            exp_ratio: 4,
            in_channels: 64,
            blocks: [4, 4, 12, 4],
            attn: true,
            lkc_use_act: false,
        }
    }
    pub fn sa36() -> Self {
        Self {
            exp_ratio: 4,
            in_channels: 64,
            blocks: [6, 6, 18, 6],
            attn: true,
            lkc_use_act: false,
        }
    }
    pub fn ma36() -> Self {
        Self {
            exp_ratio: 4,
            in_channels: 76,
            blocks: [6, 6, 18, 6],
            attn: true,
            lkc_use_act: false,
        }
    }
    // configs used by MobileCLIP's image encoder
    pub fn mci0() -> Self {
        Self {
            exp_ratio: 3,
            in_channels: 64,
            blocks: [2, 6, 10, 2],
            attn: true,
            lkc_use_act: true,
        }
    }
    pub fn mci1() -> Self {
        Self {
            exp_ratio: 3,
            in_channels: 64,
            blocks: [4, 12, 20, 4],
            attn: true,
            lkc_use_act: true,
        }
    }
    pub fn mci2() -> Self {
        Self {
            exp_ratio: 3,
            in_channels: 80,
            blocks: [4, 12, 24, 4],
            attn: true,
            lkc_use_act: true,
        }
    }
}
/// Depthwise convolution (groups = in_channels) whose batchnorm is folded into
/// the convolution weights at load time.
fn conv_norm(
    in_channels: usize,
    out_channels: usize,
    kernel: usize,
    stride: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let cfg = Conv2dConfig {
        stride,
        padding: kernel / 2,
        groups: in_channels,
        ..Default::default()
    };
    let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?;
    let conv =
        conv2d_no_bias(in_channels, out_channels, kernel, cfg, vb.pp("conv"))?.absorb_bn(&bn)?;
    Ok(Func::new(move |xs| xs.apply(&conv)))
}
fn conv_mlp(dim: usize, exp_ratio: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let conv = conv_norm(dim, dim, 7, 1, vb.pp("conv"))?;
let fc1 = conv2d(dim, dim * exp_ratio, 1, conv2d_cfg, vb.pp("fc1"))?;
let fc2 = conv2d(dim * exp_ratio, dim, 1, conv2d_cfg, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&conv)?.apply(&fc1)?.gelu_erf()?.apply(&fc2)?;
Ok(xs)
}))
}
fn squeeze_and_excitation(
in_channels: usize,
squeeze_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let fc1 = conv2d(in_channels, squeeze_channels, 1, conv2d_cfg, vb.pp("fc1"))?;
let fc2 = conv2d(squeeze_channels, in_channels, 1, conv2d_cfg, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let residual = xs;
let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?;
let xs = sigmoid(&xs.apply(&fc1)?.relu()?.apply(&fc2)?)?;
residual.broadcast_mul(&xs)
}))
}
// fuses a convolutional kernel and a batchnorm layer into a convolutional layer
// based on the _fuse_bn_tensor method in timm
// see https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L602
fn fuse_conv_bn(weights: &Tensor, bn: BatchNorm) -> Result<(Tensor, Tensor)> {
    let (gamma, beta) = bn.weight_and_bias().context("no weight-bias")?;
    let mu = bn.running_mean();
    let sigma = (bn.running_var() + bn.eps())?.sqrt();
    // BN(conv(x)) == conv'(x) + b' with conv' = W * gamma/sigma and
    // b' = beta - mu * gamma/sigma.
    let gps = (gamma / sigma)?;
    let bias = (beta - mu * &gps)?;
    // Scale each output-channel slice of the kernel by gamma/sigma.
    let weights = weights.broadcast_mul(&gps.reshape(((), 1, 1, 1))?)?;
    Ok((weights, bias))
}
/// MobileOne block with all training-time branches (kxk conv, 1x1 "scale" conv,
/// identity batchnorm) reparameterized at load time into one fused convolution,
/// optionally followed by squeeze-and-excitation and a GELU activation.
fn mobileone_block(
    in_channels: usize,
    out_channels: usize,
    kernel: usize,
    stride: usize,
    group_size: usize,
    use_act: bool,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    // group_size == 0 means a dense conv; otherwise grouped by channel count.
    let groups = if group_size == 0 {
        1
    } else {
        in_channels / group_size
    };
    let padding = kernel / 2;
    let conv2d_cfg = Conv2dConfig {
        stride,
        groups,
        padding,
        ..Default::default()
    };
    // Accumulators for the fused weight and bias; each present branch adds in.
    let mut w = Tensor::zeros(
        (out_channels, in_channels / groups, kernel, kernel),
        DType::F32,
        vb.device(),
    )?;
    let dim = out_channels;
    let mut b = Tensor::zeros(dim, DType::F32, vb.device())?;
    // Branch 1: the kxk conv + bn (skipped silently if absent from the weights).
    let conv_kxk_bn = batch_norm(dim, 1e-5, vb.pp("conv_kxk.0.bn"));
    let conv_kxk = conv2d_no_bias(
        in_channels,
        out_channels,
        kernel,
        conv2d_cfg,
        vb.pp("conv_kxk.0.conv"),
    );
    if let (Ok(conv), Ok(bn)) = (conv_kxk, conv_kxk_bn) {
        let (wk, bk) = fuse_conv_bn(conv.weight(), bn)?;
        w = (w + wk)?;
        b = (b + bk)?;
    };
    // Branch 2: the 1x1 "scale" conv + bn, zero-padded up to the kxk kernel.
    let conv_scale_bn = batch_norm(dim, 1e-5, vb.pp("conv_scale.bn"));
    let conv_scale = conv2d_no_bias(
        in_channels,
        out_channels,
        1,
        conv2d_cfg,
        vb.pp("conv_scale.conv"),
    );
    if let (Ok(conv), Ok(bn)) = (conv_scale, conv_scale_bn) {
        let (ws, bs) = fuse_conv_bn(conv.weight(), bn)?;
        // pad to 3x3
        let ws = ws
            .pad_with_zeros(D::Minus1, 1, 1)?
            .pad_with_zeros(D::Minus2, 1, 1)?;
        w = (w + ws)?;
        b = (b + bs)?;
    };
    let se = squeeze_and_excitation(out_channels, out_channels / 16, vb.pp("se"));
    // read and reparameterize the identity bn into wi and bi
    let identity_bn = batch_norm(dim, 1e-5, vb.pp("identity"));
    if let Ok(id_bn) = identity_bn {
        // Build an identity kernel, then fuse the bn statistics into it.
        let mut weights: Vec<f32> = vec![0.0; w.elem_count()];
        let id = in_channels / groups;
        // See https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L809
        for i in 0..in_channels {
            if kernel > 1 {
                // NOTE(review): offset 4 is the center of a 3x3 kernel; this
                // assumes kernel == 3 for all kernel > 1 cases — confirm no
                // other kernel size reaches this branch.
                weights[i * kernel * kernel + 4] = 1.0;
            } else {
                weights[i * (id + 1)] = 1.0;
            }
        }
        let weights = &Tensor::from_vec(weights, w.shape(), w.device())?;
        let (wi, bi) = fuse_conv_bn(weights, id_bn)?;
        w = (w + wi)?;
        b = (b + bi)?;
    };
    // Single conv equivalent to the sum of all branches.
    let reparam_conv = Conv2d::new(w, Some(b), conv2d_cfg);
    Ok(Func::new(move |xs| {
        let mut xs = xs.apply(&reparam_conv)?;
        if let Ok(f) = &se {
            xs = xs.apply(f)?;
        }
        if use_act {
            xs = xs.gelu_erf()?;
        };
        Ok(xs)
    }))
}
/// RepMixer token mixer: residual + layer-scaled difference between a "mixer"
/// and a "norm" MobileOne block (both reparameterized, no activation).
fn repmixer(dim: usize, kernel: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let scale = vb.get((dim, 1, 1), "layer_scale.gamma")?;
    let norm = mobileone_block(dim, dim, kernel, 1, 1, false, vb.pp("norm"))?;
    let mixer = mobileone_block(dim, dim, kernel, 1, 1, false, vb.pp("mixer"))?;
    Ok(Func::new(move |xs| {
        // mixer(x) - norm(x), scaled per channel, added back to the input.
        let delta = (xs.apply(&mixer)? - xs.apply(&norm)?)?;
        let scaled = delta.broadcast_mul(&scale.reshape((1, (), 1, 1))?)?;
        scaled + xs
    }))
}
/// RepMixer block: token mixing followed by a layer-scaled convolutional MLP
/// with a residual connection around the MLP.
fn repmixer_block(dim: usize, exp_ratio: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let scale = vb.get((dim, 1, 1), "layer_scale.gamma")?;
    let token_mixer = repmixer(dim, 3, vb.pp("token_mixer"))?;
    let mlp = conv_mlp(dim, exp_ratio, vb.pp("mlp"))?;
    Ok(Func::new(move |xs| {
        let mixed = xs.apply(&token_mixer)?;
        let ffn = mixed
            .apply(&mlp)?
            .broadcast_mul(&scale.reshape((1, (), 1, 1))?)?;
        ffn + mixed
    }))
}
/// Conditional positional encoding: a depthwise 7x7 convolution whose output
/// is added back to the input feature map.
fn positional_encoding(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let conv2d_cfg = Conv2dConfig {
        stride: 1,
        padding: 3,
        groups: dim,
        ..Default::default()
    };
    let conv = conv2d(dim, dim, 7, conv2d_cfg, vb.pp("pos_enc"))?;
    Ok(Func::new(move |xs| xs + xs.apply(&conv)?))
}
/// Multi-head self-attention over the spatial positions of a (b, c, h, w)
/// feature map, with a fixed head dimension of 32.
fn attention(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let qkv = linear_no_bias(dim, dim * 3, vb.pp("qkv"))?;
    let proj = linear(dim, dim, vb.pp("proj"))?;
    let head_dim = 32;
    let num_heads = dim / head_dim;
    let scale = (head_dim as f64).powf(-0.5);
    Ok(Func::new(move |xs| {
        let xs = xs.clone();
        let (b, c, h, w) = xs.dims4()?;
        // Treat each spatial position as a token: (b, c, h, w) -> (b, h*w, c).
        let n = h * w;
        let xs = xs.flatten_from(2)?.transpose(D::Minus1, D::Minus2)?;
        // Fused qkv projection split into heads: (3, b, num_heads, n, head_dim).
        let qkv = xs
            .apply(&qkv)?
            .reshape((b, n, 3, num_heads, head_dim))?
            .permute((2, 0, 3, 1, 4))?;
        let q = qkv.get(0)?;
        let k = qkv.get(1)?;
        let v = qkv.get(2)?;
        let q = (q * scale)?;
        let att = q.matmul(&k.transpose(D::Minus2, D::Minus1)?)?;
        let att = softmax(&att, D::Minus1)?;
        let xs = att.matmul(&v)?;
        // Merge heads, project, and restore the (b, c, h, w) layout.
        let xs = xs.transpose(1, 2)?.reshape((b, n, c))?;
        let xs = xs.apply(&proj)?;
        let xs = xs.transpose(D::Minus1, D::Minus2)?.reshape((b, c, h, w))?;
        Ok(xs)
    }))
}
/// Attention-based metaformer block: batchnorm then attention with a learned
/// per-channel scale (`gamma1`) and residual, followed by a convolutional MLP
/// with its own scale (`gamma2`) and residual.
fn attention_block(dim: usize, exp_ratio: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let gamma1 = vb.get((dim, 1, 1), "layer_scale_1.gamma")?;
    let gamma2 = vb.get((dim, 1, 1), "layer_scale_2.gamma")?;
    let norm = batch_norm(dim, 1e-5, vb.pp("norm"))?;
    let token_mixer = attention(dim, vb.pp("token_mixer"))?;
    let mlp = conv_mlp(dim, exp_ratio, vb.pp("mlp"))?;
    Ok(Func::new(move |xs| {
        let xs = xs.clone();
        // x + gamma1 * attn(bn(x)); apply_t(false) runs the bn in eval mode.
        let xs = (&xs
            + &xs
                .apply_t(&norm, false)?
                .apply(&token_mixer)?
                .broadcast_mul(&gamma1.reshape((1, (), 1, 1))?)?)?;
        // x + gamma2 * mlp(x)
        let xs = (&xs
            + &xs
                .apply(&mlp)?
                .broadcast_mul(&gamma2.reshape((1, (), 1, 1))?)?)?;
        Ok(xs)
    }))
}
/// Builds stage `idx` of the network: an optional downsampling patch embed,
/// an optional positional encoding, and a sequence of mixer blocks.
///
/// Channel width doubles per stage (`in_channels << idx`). The last stage
/// (idx 3) uses attention blocks when `cfg.attn` is set; all other stages use
/// repmixer blocks. Downsample / pos_emb weights may be missing from the
/// checkpoint, in which case those steps are skipped at runtime.
fn fastvit_stage(cfg: &Config, idx: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let nblocks = cfg.blocks[idx];
    let mut blocks = Vec::with_capacity(nblocks);
    let dim = cfg.in_channels << idx;
    let downsample = fastvit_patch_embed(dim / 2, dim, cfg.lkc_use_act, vb.pp("downsample"));
    for block_idx in 0..nblocks {
        let block = if cfg.attn && idx == 3 {
            attention_block(dim, cfg.exp_ratio, vb.pp(format!("blocks.{block_idx}")))?
        } else {
            repmixer_block(dim, cfg.exp_ratio, vb.pp(format!("blocks.{block_idx}")))?
        };
        blocks.push(block);
    }
    let pos_emb = positional_encoding(dim, vb.pp("pos_emb"));
    Ok(Func::new(move |xs| {
        let mut xs = xs.clone();
        if let Ok(ds) = &downsample {
            xs = xs.apply(ds)?;
        }
        if let Ok(pos) = &pos_emb {
            xs = xs.apply(pos)?;
        }
        for block in blocks.iter() {
            xs = xs.apply(block)?;
        }
        Ok(xs)
    }))
}
/// Downsampling patch embed: the sum of a large (7x7) and a small (3x3)
/// stride-2 conv-norm pair, an optional squeeze-and-excitation stage,
/// optional GELU, then a pointwise MobileOne block.
fn fastvit_patch_embed(
    in_channels: usize,
    out_channels: usize,
    use_act: bool,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let lk = conv_norm(in_channels, out_channels, 7, 2, vb.pp("proj.0.large_conv"))?;
    let sk = conv_norm(in_channels, out_channels, 3, 2, vb.pp("proj.0.small_conv"))?;
    let se = squeeze_and_excitation(out_channels, out_channels / 4, vb.pp("proj.0.se"));
    let mb = mobileone_block(out_channels, out_channels, 1, 1, 0, true, vb.pp("proj.1"))?;
    Ok(Func::new(move |xs| {
        let mut xs = (xs.apply(&lk)? + xs.apply(&sk)?)?;
        // SE weights may be absent; skip the stage in that case.
        if let Ok(f) = &se {
            xs = xs.apply(f)?;
        }
        if use_act {
            xs = xs.gelu_erf()?;
        };
        let xs = xs.apply(&mb)?;
        Ok(xs)
    }))
}
/// Convolutional stem: two stride-2 3x3 MobileOne blocks (the second grouped
/// with group_size 1) followed by a pointwise 1x1 block.
fn fastvit_stem(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let stage0 = mobileone_block(in_channels, out_channels, 3, 2, 0, true, vb.pp(0))?;
    let stage1 = mobileone_block(out_channels, out_channels, 3, 2, 1, true, vb.pp(1))?;
    let stage2 = mobileone_block(out_channels, out_channels, 1, 1, 0, true, vb.pp(2))?;
    Ok(Func::new(move |xs| {
        xs.apply(&stage0)?.apply(&stage1)?.apply(&stage2)
    }))
}
// Build a fastvit model for a given configuration.
/// Stem -> 4 stages -> final MobileOne conv. When `nclasses` is `Some`, a
/// global average pool over the spatial dims followed by a linear head maps
/// the `in_channels * 16`-wide features to class logits; otherwise the raw
/// feature map is returned.
fn fastvit_model(cfg: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> {
    let cls = match nclasses {
        None => None,
        Some(nclasses) => {
            let linear = linear(cfg.in_channels * 16, nclasses, vb.pp("head.fc"))?;
            Some(linear)
        }
    };
    let stem = fastvit_stem(3, cfg.in_channels, vb.pp("stem"))?;
    let final_conv = mobileone_block(
        cfg.in_channels * 8,
        cfg.in_channels * 16,
        3,
        1,
        1,
        true,
        vb.pp("final_conv"),
    )?;
    let vb = vb.pp("stages");
    let stage1 = fastvit_stage(cfg, 0, vb.pp(0))?;
    let stage2 = fastvit_stage(cfg, 1, vb.pp(1))?;
    let stage3 = fastvit_stage(cfg, 2, vb.pp(2))?;
    let stage4 = fastvit_stage(cfg, 3, vb.pp(3))?;
    Ok(Func::new(move |xs| {
        let xs = xs
            .apply(&stem)?
            .apply(&stage1)?
            .apply(&stage2)?
            .apply(&stage3)?
            .apply(&stage4)?
            .apply(&final_conv)?;
        match &cls {
            None => Ok(xs),
            // Mean over h and w, then the classification head.
            Some(cls) => xs.mean(D::Minus2)?.mean(D::Minus1)?.apply(cls),
        }
    }))
}
/// FastViT image classifier returning `nclasses` logits.
pub fn fastvit(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
    fastvit_model(cfg, Some(nclasses), vb)
}
/// FastViT backbone without the classification head; returns the final
/// feature map instead of logits.
pub fn fastvit_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
    fastvit_model(cfg, None, vb)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/codegeex4_9b.rs | candle-transformers/src/models/codegeex4_9b.rs | //! CodeGeeX4 - A multi-language code generation model
//!
//! "CodeGeeX: A Pre-Trained Model for Code Generation with Multilingual Benchmarking on HumanEval-X"
//!
//! - 📝 [Arxiv](https://arxiv.org/abs/2303.17568)
//! - 💻 [GitHub](https://github.com/THUDM/CodeGeeX)
//!
use crate::models::with_tracing::{linear_b as linear, Linear};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
/// Serde default helper: `rope_ratio` falls back to 1 when absent from the
/// config JSON.
fn default_one() -> usize {
    1
}
/// Model configuration, deserialized from the checkpoint's `config.json`
/// (GLM-style field names).
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub struct Config {
    pub num_layers: usize,
    pub padded_vocab_size: usize,
    pub hidden_size: usize,
    pub ffn_hidden_size: usize,
    /// Per-head dimension; also the rotary dimension.
    pub kv_channels: usize,
    pub num_attention_heads: usize,
    /// Maximum sequence length the rotary cache is precomputed for.
    pub seq_length: usize,
    pub layernorm_epsilon: f64,
    /// When true, RMSNorm is used in place of LayerNorm.
    pub rmsnorm: bool,
    pub apply_residual_connection_post_layernorm: bool,
    pub post_layer_norm: bool,
    pub add_bias_linear: bool,
    pub add_qkv_bias: bool,
    pub bias_dropout_fusion: bool,
    /// Only `true` is supported by this implementation.
    pub multi_query_attention: bool,
    /// Number of shared key/value head groups for multi-query attention.
    pub multi_query_group_num: usize,
    pub apply_query_key_layer_scaling: bool,
    pub attention_softmax_in_fp32: bool,
    pub fp32_residual_connection: bool,
    /// Multiplier on the 10_000 rope base frequency; 1 when missing.
    #[serde(default = "default_one")]
    pub rope_ratio: usize,
}
impl Config {
    /// Configuration matching the codegeex4-all-9b checkpoint.
    pub fn codegeex4() -> Self {
        Self {
            num_layers: 40,
            padded_vocab_size: 151552,
            hidden_size: 4096,
            ffn_hidden_size: 13696,
            kv_channels: 128,
            num_attention_heads: 32,
            seq_length: 131072,
            layernorm_epsilon: 1e-5,
            rmsnorm: true,
            apply_residual_connection_post_layernorm: false,
            post_layer_norm: true,
            add_bias_linear: false,
            add_qkv_bias: true,
            bias_dropout_fusion: true,
            multi_query_attention: true,
            multi_query_group_num: 2,
            apply_query_key_layer_scaling: true,
            attention_softmax_in_fp32: true,
            fp32_residual_connection: false,
            rope_ratio: 500,
        }
    }
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    /// Precomputed (seq_length, kv_channels / 4, 2) table holding the
    /// (cos, sin) values for every position.
    cache: Tensor,
}
impl RotaryEmbedding {
    /// Precomputes the rotary cos/sin cache for all `cfg.seq_length`
    /// positions; the 10_000 base frequency is scaled by `cfg.rope_ratio`.
    fn new(cfg: &Config, dtype: DType, dev: &Device) -> Result<Self> {
        let rotary_dim = cfg.kv_channels;
        let n_elem = rotary_dim / 2;
        let base = 10_000f64 * cfg.rope_ratio as f64;
        let inv_freq: Vec<_> = (0..n_elem)
            .step_by(2)
            .map(|i| 1f32 / base.powf(i as f64 / n_elem as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        // Propagate dtype-conversion failures with `?` instead of panicking
        // (the previous code called `expect` even though this fn returns a
        // `Result`).
        let t = Tensor::arange(0u32, cfg.seq_length as u32, dev)?
            .to_dtype(dtype)?
            .reshape((cfg.seq_length, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        let cache = Tensor::stack(&[&freqs.cos()?, &freqs.sin()?], D::Minus1)?;
        Ok(Self { cache })
    }
    /// Applies rotary embeddings to `xs` of shape (seq, b, heads, head_dim),
    /// rotating only the first `rot_dim` features and passing the rest
    /// through unchanged.
    fn apply(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (seqlen, _b, np, _hn) = xs.dims4()?;
        let cache = self.cache.narrow(0, seqlen_offset, seqlen)?;
        let rot_dim = cache.dim(D::Minus2)? * 2;
        // Split into the rotated half and the pass-through half.
        let (xs, xs_pass) = (
            xs.narrow(D::Minus1, 0, rot_dim)?,
            xs.narrow(D::Minus1, rot_dim, rot_dim)?,
        );
        let xshaped = xs.reshape((seqlen, (), np, rot_dim / 2, 2))?;
        let cache = cache.reshape((seqlen, (), 1, rot_dim / 2, 2))?;
        let (xshaped0, xshaped1) = (
            xshaped.i((.., .., .., .., 0))?,
            xshaped.i((.., .., .., .., 1))?,
        );
        let (cache0, cache1) = (cache.i((.., .., .., .., 0))?, cache.i((.., .., .., .., 1))?);
        // Complex rotation: (x0 + i*x1) * (cos + i*sin).
        let xs_out = Tensor::stack(
            &[
                (xshaped0.broadcast_mul(&cache0)? - xshaped1.broadcast_mul(&cache1)?)?,
                (xshaped1.broadcast_mul(&cache0)? + xshaped0.broadcast_mul(&cache1)?)?,
            ],
            D::Minus1,
        )?;
        let xs_out = xs_out.flatten_from(3)?;
        Tensor::cat(&[xs_out, xs_pass], D::Minus1)
    }
}
#[derive(Debug, Clone)]
struct CoreAttention {
    /// Set when query-key layer scaling is enabled; scores are multiplied
    /// back by this coefficient after `norm_factor` divides it out.
    coeff: Option<f64>,
    /// Divisor applied to the raw q.k^T attention scores.
    norm_factor: f64,
    dtype: DType,
}
/// Replaces the entries of `on_false` where `mask` is non-zero with the
/// scalar `on_true` (converted to `dtype`).
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32, dtype: DType) -> Result<Tensor> {
    let fill = Tensor::new(on_true, on_false.device())?
        .to_dtype(dtype)?
        .broadcast_as(mask.shape().dims())?;
    mask.where_cond(&fill, on_false)
}
impl CoreAttention {
    fn new(layer_number: usize, cfg: &Config, dtype: DType) -> Result<Self> {
        let norm_factor = (cfg.kv_channels as f64).sqrt();
        // With query-key layer scaling, an extra per-layer coefficient is
        // folded into the divisor here and multiplied back after the matmul.
        let (norm_factor, coeff) = if cfg.apply_query_key_layer_scaling {
            let coeff = f64::max(1.0, layer_number as f64);
            (norm_factor * coeff, Some(coeff))
        } else {
            (norm_factor, None)
        };
        Ok(Self {
            coeff,
            norm_factor,
            dtype,
        })
    }
    /// Scaled dot-product attention over seq-major inputs laid out as
    /// (seq, b, heads, head_dim); returns (seq, b, heads * head_dim).
    fn forward(
        &self,
        query_layer: &Tensor,
        key_layer: &Tensor,
        value_layer: &Tensor,
        attention_mask: &Option<Tensor>,
    ) -> Result<Tensor> {
        let output_size = (
            query_layer.dim(1)?, // b
            query_layer.dim(2)?, // np
            query_layer.dim(0)?, // sq
            key_layer.dim(0)?,   // sk
        );
        // Collapse (b, np) into one batch dim for the score matmul.
        let query_layer =
            query_layer.reshape((output_size.2, output_size.0 * output_size.1, ()))?;
        let key_layer = key_layer.reshape((output_size.3, output_size.0 * output_size.1, ()))?;
        let matmul_result = Tensor::matmul(
            &query_layer.transpose(0, 1)?.contiguous()?,
            &key_layer.transpose(0, 1)?.transpose(1, 2)?.contiguous()?,
        )?;
        let matmul_result = (matmul_result / self.norm_factor)?.reshape(output_size)?;
        let matmul_result = match self.coeff {
            None => matmul_result,
            Some(coeff) => (matmul_result * coeff)?,
        };
        // Positions where the mask is set are filled with -inf before softmax.
        let attention_scores = match attention_mask {
            Some(mask) => masked_fill(
                &matmul_result,
                &mask.broadcast_left((matmul_result.dim(0)?, matmul_result.dim(1)?))?,
                f32::NEG_INFINITY,
                self.dtype,
            )?,
            None => matmul_result,
        };
        let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;
        let output_size = (
            value_layer.dim(1)?,
            value_layer.dim(2)?,
            query_layer.dim(0)?,
            value_layer.dim(3)?,
        );
        let value_layer =
            value_layer.reshape((value_layer.dim(0)?, output_size.0 * output_size.1, ()))?;
        let attention_probs =
            attention_probs.reshape((output_size.0 * output_size.1, output_size.2, ()))?;
        let context_layer = Tensor::matmul(
            &attention_probs.contiguous()?,
            &value_layer.transpose(0, 1)?.contiguous()?,
        )?;
        let context_layer = context_layer.reshape(output_size)?;
        // (b, np, sq, hn) -> (sq, b, np, hn), then merge the head dims.
        let context_layer = context_layer.permute((2, 0, 1, 3))?.contiguous()?;
        context_layer.flatten_from(D::Minus2)
    }
}
#[derive(Debug, Clone)]
struct SelfAttention {
    /// Fused query/key/value projection.
    query_key_value: Linear,
    core_attention: CoreAttention,
    /// Output projection back to hidden_size.
    dense: Linear,
    multi_query_attention: bool,
    num_attention_heads_per_partition: usize,
    num_multi_query_groups_per_partition: usize,
    hidden_size_per_attention_head: usize,
    /// Cached (key, value) tensors from previous decode steps, concatenated
    /// along dim 0 (the sequence dim).
    kv_cache: Option<(Tensor, Tensor)>,
}
impl SelfAttention {
    fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let projection_size = cfg.kv_channels * cfg.num_attention_heads;
        let hidden_size_per_attention_head = projection_size / cfg.num_attention_heads;
        // With multi-query attention, K and V only have
        // `multi_query_group_num` heads, so the fused projection is narrower.
        let qkv_hidden_size = if cfg.multi_query_attention {
            projection_size + 2 * hidden_size_per_attention_head * cfg.multi_query_group_num
        } else {
            3 * projection_size
        };
        let query_key_value = linear(
            cfg.hidden_size,
            qkv_hidden_size,
            cfg.add_bias_linear || cfg.add_qkv_bias,
            vb.pp("query_key_value"),
        )?;
        let core_attention = CoreAttention::new(layer_number, cfg, vb.dtype())?;
        let dense = linear(
            cfg.hidden_size,
            cfg.hidden_size,
            cfg.add_bias_linear,
            vb.pp("dense"),
        )?;
        Ok(Self {
            query_key_value,
            core_attention,
            dense,
            multi_query_attention: cfg.multi_query_attention,
            num_attention_heads_per_partition: cfg.num_attention_heads,
            num_multi_query_groups_per_partition: cfg.multi_query_group_num,
            hidden_size_per_attention_head: cfg.kv_channels,
            kv_cache: None,
        })
    }
    fn reset_kv_cache(&mut self) {
        self.kv_cache = None
    }
    /// Multi-query self-attention over seq-major `xs` (seq, b, hidden),
    /// updating the kv cache as a side effect.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: &Option<Tensor>,
        rotary_emb: &RotaryEmbedding,
    ) -> Result<Tensor> {
        let mixed_x_layer = xs.apply(&self.query_key_value)?;
        if !self.multi_query_attention {
            candle::bail!("only multi_query_attention=true is supported")
        }
        let hpa = self.hidden_size_per_attention_head;
        // Slice the fused projection into its q / k / v parts.
        let query_layer =
            mixed_x_layer.narrow(D::Minus1, 0, self.num_attention_heads_per_partition * hpa)?;
        let key_layer = mixed_x_layer.narrow(
            D::Minus1,
            self.num_attention_heads_per_partition * hpa,
            self.num_multi_query_groups_per_partition * hpa,
        )?;
        let value_layer = mixed_x_layer.narrow(
            D::Minus1,
            self.num_attention_heads_per_partition * hpa
                + self.num_multi_query_groups_per_partition * hpa,
            self.num_multi_query_groups_per_partition * hpa,
        )?;
        let query_layer = query_layer.reshape((
            query_layer.dim(0)?,
            query_layer.dim(1)?,
            self.num_attention_heads_per_partition,
            hpa,
        ))?;
        let key_layer = key_layer.reshape((
            key_layer.dim(0)?,
            key_layer.dim(1)?,
            self.num_multi_query_groups_per_partition,
            hpa,
        ))?;
        let value_layer = value_layer.reshape((
            value_layer.dim(0)?,
            value_layer.dim(1)?,
            self.num_multi_query_groups_per_partition,
            hpa,
        ))?;
        // Rotary embeddings, offset by the number of cached positions.
        let seqlen_offset = match &self.kv_cache {
            None => 0,
            Some((prev_k, _)) => prev_k.dim(0)?,
        };
        let query_layer = rotary_emb.apply(&query_layer, seqlen_offset)?;
        let key_layer = rotary_emb.apply(&key_layer, seqlen_offset)?;
        // KV cache: prepend the cached keys/values along the sequence dim.
        let (key_layer, value_layer) = match &self.kv_cache {
            None => (key_layer, value_layer),
            Some((prev_k, prev_v)) => {
                let k = Tensor::cat(&[prev_k, &key_layer], 0)?;
                let v = Tensor::cat(&[prev_v, &value_layer], 0)?;
                (k, v)
            }
        };
        self.kv_cache = Some((key_layer.clone(), value_layer.clone()));
        // Repeat KV: expand each of the shared kv groups to cover
        // `ratio` query heads.
        let ratio =
            self.num_attention_heads_per_partition / self.num_multi_query_groups_per_partition;
        let key_layer = {
            let (d0, d1, d2, d3) = key_layer.dims4()?;
            key_layer
                .unsqueeze(D::Minus2)?
                .expand((d0, d1, d2, ratio, d3))?
                .reshape((
                    d0,
                    d1,
                    self.num_attention_heads_per_partition,
                    self.hidden_size_per_attention_head,
                ))?
        };
        let value_layer = {
            let (d0, d1, d2, d3) = value_layer.dims4()?;
            value_layer
                .unsqueeze(D::Minus2)?
                .expand((d0, d1, d2, ratio, d3))?
                .reshape((
                    d0,
                    d1,
                    self.num_attention_heads_per_partition,
                    self.hidden_size_per_attention_head,
                ))?
        };
        let context_layer =
            self.core_attention
                .forward(&query_layer, &key_layer, &value_layer, attention_mask)?;
        let output = context_layer.apply(&self.dense)?;
        Ok(output)
    }
}
#[allow(clippy::upper_case_acronyms)]
/// SwiGLU feed-forward block: one fused up-projection (double width, split by
/// the Swiglu activation) followed by a down-projection.
#[derive(Debug, Clone)]
struct MLP {
    dense_h_to_4h: Linear,
    dense_4h_to_h: Linear,
}
impl MLP {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        // The up-projection is twice ffn_hidden_size wide: Swiglu splits it
        // into a gate half and a value half.
        let dense_h_to_4h = linear(
            cfg.hidden_size,
            cfg.ffn_hidden_size * 2,
            cfg.add_bias_linear,
            vb.pp("dense_h_to_4h"),
        )?;
        let dense_4h_to_h = linear(
            cfg.ffn_hidden_size,
            cfg.hidden_size,
            cfg.add_bias_linear,
            vb.pp("dense_4h_to_h"),
        )?;
        Ok(Self {
            dense_h_to_4h,
            dense_4h_to_h,
        })
    }
}
impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // up-project -> Swiglu gate -> down-project
        let gated = xs.apply(&self.dense_h_to_4h)?;
        let activated = candle_nn::Activation::Swiglu.forward(&gated)?;
        activated.apply(&self.dense_4h_to_h)
    }
}
/// One transformer layer: pre-norm self-attention followed by a pre-norm MLP,
/// each with a residual connection.
#[derive(Debug, Clone)]
struct Block {
    input_layernorm: candle_nn::LayerNorm,
    self_attention: SelfAttention,
    post_attention_layernorm: candle_nn::LayerNorm,
    mlp: MLP,
    /// When true, the residual branches start from the normalized tensor
    /// instead of the layer input.
    apply_residual_connection_post_layernorm: bool,
}
impl Block {
    fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        // RMSNorm is unwrapped via into_inner() so both norm flavours are
        // stored in the same LayerNorm field.
        let input_layernorm = if cfg.rmsnorm {
            candle_nn::rms_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("input_layernorm"),
            )?
            .into_inner()
        } else {
            candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("input_layernorm"),
            )?
        };
        let post_attention_layernorm = if cfg.rmsnorm {
            candle_nn::rms_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("post_attention_layernorm"),
            )?
            .into_inner()
        } else {
            candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("post_attention_layernorm"),
            )?
        };
        let self_attention = SelfAttention::new(layer_number, cfg, vb.pp("self_attention"))?;
        let mlp = MLP::new(cfg, vb.pp("mlp"))?;
        Ok(Self {
            input_layernorm,
            self_attention,
            post_attention_layernorm,
            mlp,
            apply_residual_connection_post_layernorm: cfg.apply_residual_connection_post_layernorm,
        })
    }
    fn reset_kv_cache(&mut self) {
        self.self_attention.reset_kv_cache()
    }
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: &Option<Tensor>,
        rotary_emb: &RotaryEmbedding,
    ) -> Result<Tensor> {
        // Attention sub-layer with residual.
        let layernorm_output = xs.apply(&self.input_layernorm)?;
        let attention_output =
            self.self_attention
                .forward(&layernorm_output, attention_mask, rotary_emb)?;
        let residual = if self.apply_residual_connection_post_layernorm {
            &layernorm_output
        } else {
            xs
        };
        let layernorm_input = (residual + attention_output)?;
        // MLP sub-layer with residual.
        let layernorm_output = layernorm_input.apply(&self.post_attention_layernorm)?;
        let mlp_output = layernorm_output.apply(&self.mlp)?;
        let residual = if self.apply_residual_connection_post_layernorm {
            &layernorm_output
        } else {
            &layernorm_input
        };
        mlp_output + residual
    }
}
/// The transformer stack: `num_layers` blocks plus an optional final norm and
/// the shared rotary embedding cache.
#[derive(Debug, Clone)]
struct Transformer {
    layers: Vec<Block>,
    final_layernorm: Option<candle_nn::LayerNorm>,
    rotary_emb: RotaryEmbedding,
}
impl Transformer {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_l = vb.pp("layers");
        let mut layers = Vec::with_capacity(cfg.num_layers);
        for layer_index in 0..cfg.num_layers {
            // Layer numbers are 1-based (used by the query-key layer scaling
            // coefficient in CoreAttention).
            let block = Block::new(layer_index + 1, cfg, vb_l.pp(layer_index))?;
            layers.push(block)
        }
        let final_layernorm = if cfg.post_layer_norm {
            let ln = if cfg.rmsnorm {
                candle_nn::rms_norm(
                    cfg.hidden_size,
                    cfg.layernorm_epsilon,
                    vb.pp("final_layernorm"),
                )?
                .into_inner()
            } else {
                candle_nn::layer_norm(
                    cfg.hidden_size,
                    cfg.layernorm_epsilon,
                    vb.pp("final_layernorm"),
                )?
            };
            Some(ln)
        } else {
            None
        };
        let rotary_emb = RotaryEmbedding::new(cfg, vb.dtype(), vb.device())?;
        Ok(Self {
            layers,
            final_layernorm,
            rotary_emb,
        })
    }
    fn reset_kv_cache(&mut self) {
        for block in self.layers.iter_mut() {
            block.reset_kv_cache()
        }
    }
    fn forward(&mut self, xs: &Tensor, attention_mask: &Option<Tensor>) -> Result<Tensor> {
        let mut xs = xs.clone();
        for block in self.layers.iter_mut() {
            xs = block.forward(&xs, attention_mask, &self.rotary_emb)?
        }
        match self.final_layernorm.as_ref() {
            None => Ok(xs),
            Some(ln) => xs.apply(ln),
        }
    }
}
/// Token embedding layer that also transposes to the seq-major layout used
/// by the rest of the model.
#[derive(Debug, Clone)]
struct Embedding {
    word_embeddings: candle_nn::Embedding,
    /// When set, the embedding output is upcast to f32.
    fp32_residual_connection: bool,
}
impl Embedding {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let word_embeddings = candle_nn::embedding(
            cfg.padded_vocab_size,
            cfg.hidden_size,
            vb.pp("word_embeddings"),
        )?;
        Ok(Self {
            word_embeddings,
            fp32_residual_connection: cfg.fp32_residual_connection,
        })
    }
}
impl Module for Embedding {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // (b, s) -> (b, s, h), then to seq-major (s, b, h).
        let embeds = self.word_embeddings.forward(xs)?.transpose(0, 1)?;
        if self.fp32_residual_connection {
            embeds.to_dtype(candle::DType::F32)
        } else {
            // The transpose yields a non-contiguous view; materialize it.
            embeds.contiguous()
        }
    }
}
/// CodeGeeX4 causal language model: token embedding, transformer stack and a
/// linear projection to the (padded) vocabulary.
#[derive(Debug, Clone)]
pub struct Model {
    embedding: Embedding,
    encoder: Transformer,
    output_layer: Linear,
}
/// Builds a (size, size) u8 causal mask: 1 above the diagonal (future
/// positions, to be masked), 0 elsewhere.
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
    let mut mask = Vec::with_capacity(size * size);
    for i in 0..size {
        for j in 0..size {
            mask.push(u8::from(j > i));
        }
    }
    Tensor::from_slice(&mask, (size, size), device)
}
impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb = vb.pp("transformer");
        let embedding = Embedding::new(cfg, vb.pp("embedding"))?;
        let encoder = Transformer::new(cfg, vb.pp("encoder"))?;
        let output_layer = linear(
            cfg.hidden_size,
            cfg.padded_vocab_size,
            false,
            vb.pp("output_layer"),
        )?;
        Ok(Self {
            embedding,
            encoder,
            output_layer,
        })
    }
    /// Clears all per-layer kv caches; call between independent generations.
    pub fn reset_kv_cache(&mut self) {
        self.encoder.reset_kv_cache()
    }
    /// Runs one forward step on token ids of shape (b, seq) and returns the
    /// logits for the last position only.
    pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
        let (_b_size, seq_len) = xs.dims2()?;
        let input_embeds = xs.apply(&self.embedding)?;
        // A causal mask is only needed when processing more than one token
        // (single-token decode steps attend to the whole cache).
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            Some(get_mask(seq_len, xs.device())?)
        };
        let xs = self.encoder.forward(&input_embeds, &attention_mask)?;
        // The encoder output is seq-major, so index the last position on dim 0.
        let lm_logits = xs.i(seq_len - 1)?.apply(&self.output_layer)?;
        Ok(lm_logits)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/trocr.rs | candle-transformers/src/models/trocr.rs | //! TrOCR model implementation.
//!
//! TrOCR is a Transformer-based OCR model that uses a Vision Transformer encoder
//! and a BART-like decoder for optical character recognition.
//!
//! Key characteristics:
//! - Vision Transformer encoder for image processing
//! - BART-style decoder for text generation
//! - Learned positional embeddings
//! - Layer normalization and self-attention
//!
//! References:
//! - [Paper](https://arxiv.org/abs/2109.10282)
//! - [Model Card](https://huggingface.co/microsoft/trocr-base-handwritten)
//!
use crate::models::vit::{Config, Embeddings, Encoder};
use candle::{DType, Result, Tensor};
use candle_nn::{
embedding, layer_norm, linear_no_bias, Embedding, LayerNorm, Linear, Module, VarBuilder,
};
/// Serde default: `tie_word_embeddings` is true when absent from the config.
fn default_tie_word_embeddings() -> bool {
    true
}
/// Serde default: `use_learned_position_embeddings` is true when absent.
fn default_use_learned_position_embeddings() -> bool {
    true
}
/// Decoder configuration, deserialized from the checkpoint's `config.json`.
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct TrOCRConfig {
    pub vocab_size: usize,
    pub d_model: usize,
    /// Width of the encoder hidden states used by cross-attention.
    pub cross_attention_hidden_size: usize,
    pub decoder_layers: usize,
    pub decoder_attention_heads: usize,
    pub decoder_ffn_dim: usize,
    pub activation_function: candle_nn::Activation,
    pub max_position_embeddings: usize,
    pub dropout: f64,
    pub attention_dropout: f64,
    pub activation_dropout: f64,
    pub decoder_start_token_id: u32,
    pub init_std: f64,
    pub decoder_layerdrop: f64,
    pub use_cache: bool,
    /// When true, token embeddings are multiplied by sqrt(d_model).
    pub scale_embedding: bool,
    pub pad_token_id: usize,
    pub bos_token_id: usize,
    pub eos_token_id: u32,
    pub decoder_vocab_size: Option<usize>,
    #[serde(default = "default_use_learned_position_embeddings")]
    pub use_learned_position_embeddings: bool,
    #[serde(default = "default_tie_word_embeddings")]
    pub tie_word_embeddings: bool,
}
impl Default for TrOCRConfig {
    /// Defaults matching the trocr-base checkpoints.
    fn default() -> Self {
        Self {
            vocab_size: 50265,
            d_model: 1024,
            cross_attention_hidden_size: 768,
            decoder_layers: 12,
            decoder_attention_heads: 16,
            decoder_ffn_dim: 4096,
            activation_function: candle_nn::Activation::Gelu,
            max_position_embeddings: 512,
            dropout: 0.1,
            attention_dropout: 0.0,
            activation_dropout: 0.0,
            decoder_start_token_id: 2,
            init_std: 0.02,
            decoder_layerdrop: 0.0,
            use_cache: true,
            scale_embedding: false,
            pad_token_id: 1,
            bos_token_id: 0,
            eos_token_id: 2,
            decoder_vocab_size: Some(50265),
            use_learned_position_embeddings: true,
            tie_word_embeddings: true,
        }
    }
}
/// Positional embedding table; either learned (BART-style, with an offset of
/// 2) or a fixed sinusoidal table built by `new_sinusoidal`.
#[derive(Debug, Clone)]
struct TrOCRLearnedPositionalEmbedding {
    /// Added to every position index before the table lookup.
    offset: usize,
    weights: Embedding,
}
impl TrOCRLearnedPositionalEmbedding {
    /// Loads the learned table from the checkpoint; the table is two entries
    /// longer than `max_position_embeddings` because of the BART offset.
    fn load(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> {
        let offset: usize = 2;
        let num_embeddings = cfg.max_position_embeddings;
        let embedding_dim = cfg.d_model;
        let weights = embedding(num_embeddings + offset, embedding_dim, vb)?;
        Ok(Self { offset, weights })
    }
    /// Builds a fixed sinusoidal table (no weights read from `vb` other than
    /// the device), with a zero row at the pad token position.
    fn new_sinusoidal(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> {
        // https://github.com/huggingface/transformers/blob/58e3d23e97078f361a533b9ec4a6a2de674ea52a/src/transformers/models/trocr/modeling_trocr.py#L81
        let embedding_dim = cfg.d_model;
        let half_dim = embedding_dim / 2;
        let num_positions = cfg.max_position_embeddings + cfg.pad_token_id + 1;
        let dev = vb.device();
        let inv_freq: Vec<_> = (0..half_dim)
            .map(|i| 1f32 / 10000f32.powf(i as f32 / (half_dim - 1) as f32))
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
        let t = Tensor::arange(0u32, num_positions as u32, dev)?
            .to_dtype(DType::F32)?
            .reshape((num_positions, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        // First half sin, second half cos along the embedding dim.
        let emb = Tensor::cat(&[freqs.sin()?, freqs.cos()?], 1)?;
        // Zero out the row at pad_token_id.
        let emb = Tensor::cat(
            &[
                emb.narrow(0, 0, cfg.pad_token_id)?,
                Tensor::zeros((1, embedding_dim), DType::F32, dev)?,
                emb.narrow(0, cfg.pad_token_id + 1, cfg.max_position_embeddings)?,
            ],
            0,
        )?
        .contiguous()?;
        let emb = Embedding::new(emb, embedding_dim);
        Ok(Self {
            offset: cfg.pad_token_id + 1,
            weights: emb,
        })
    }
    /// Looks up positions `past_key_values_length .. past + seq_len` (plus
    /// `offset`) for a (b, seq) batch of token ids.
    fn forward(&mut self, input_ids: &Tensor, past_key_values_length: u32) -> Result<Tensor> {
        let (b_sz, seq_len) = input_ids.dims2()?;
        let positions = Tensor::arange(
            past_key_values_length,
            seq_len as u32 + past_key_values_length,
            input_ids.device(),
        )?
        .expand((b_sz, seq_len))?;
        let positions =
            positions.broadcast_add(&Tensor::new(self.offset as u32, input_ids.device())?)?;
        self.weights.forward(&positions)
    }
}
/// Multi-head attention used for both decoder self-attention and
/// encoder-decoder cross-attention (the latter passes `kv_states`).
#[derive(Debug, Clone)]
struct TrOCRAttention {
    head_dim: usize,
    num_heads: usize,
    is_decoder: bool,
    scaling: f64,
    k_proj: Linear,
    v_proj: Linear,
    q_proj: Linear,
    out_proj: Linear,
    /// Cached (key, value) of shape (b, heads, seq, head_dim) for
    /// incremental decoding; only used on the self-attention path.
    kv_cache: Option<(Tensor, Tensor)>,
}
impl TrOCRAttention {
    /// `kdim`/`vdim` override the key/value input width for cross-attention
    /// (where they come from the encoder); `None` uses `d_model`.
    fn load(
        vb: VarBuilder,
        cfg: &TrOCRConfig,
        kdim: Option<usize>,
        vdim: Option<usize>,
    ) -> Result<Self> {
        let embed_dim = cfg.d_model;
        let num_heads = cfg.decoder_attention_heads;
        let head_dim = embed_dim / num_heads;
        let kdim = kdim.unwrap_or(embed_dim);
        let vdim = vdim.unwrap_or(embed_dim);
        let k_proj = linear_no_bias(kdim, embed_dim, vb.pp("k_proj"))?;
        let v_proj = linear_no_bias(vdim, embed_dim, vb.pp("v_proj"))?;
        let q_proj = linear_no_bias(embed_dim, embed_dim, vb.pp("q_proj"))?;
        let out_proj = linear_no_bias(embed_dim, embed_dim, vb.pp("out_proj"))?;
        Ok(Self {
            head_dim,
            num_heads,
            is_decoder: true,
            scaling: 1. / (head_dim as f64).sqrt(),
            k_proj,
            v_proj,
            q_proj,
            out_proj,
            kv_cache: None,
        })
    }
    fn reset_kv_cache(&mut self) {
        self.kv_cache = None
    }
    // (b, seq, embed) -> (b, heads, seq, head_dim)
    fn _shape(&self, tensor: &Tensor, bsz: usize) -> Result<Tensor> {
        tensor
            .reshape((bsz, (), self.num_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()
    }
    /// Self-attention when `kv_states` is `None` (with kv caching);
    /// cross-attention against `kv_states` otherwise (no caching).
    fn forward(
        &mut self,
        xs: &Tensor,
        kv_states: Option<&Tensor>,
        attn_mask: Option<&Tensor>,
    ) -> Result<Tensor> {
        let (b_sz, tgt_len, _) = xs.dims3()?;
        let query_states = (xs.apply(&self.q_proj)? * self.scaling)?;
        let (key_states, value_states) = match kv_states {
            None => {
                let key_states = self._shape(&xs.apply(&self.k_proj)?, b_sz)?;
                let value_states = self._shape(&xs.apply(&self.v_proj)?, b_sz)?;
                if self.is_decoder {
                    // Append the new keys/values to the cache along the seq dim.
                    let kv_states = match &self.kv_cache {
                        None => (key_states, value_states),
                        Some((p_key_states, p_value_states)) => {
                            let key_states = Tensor::cat(&[p_key_states, &key_states], 2)?;
                            let value_states = Tensor::cat(&[p_value_states, &value_states], 2)?;
                            (key_states, value_states)
                        }
                    };
                    self.kv_cache = Some(kv_states.clone());
                    kv_states
                } else {
                    (key_states, value_states)
                }
            }
            Some(kv_states) => {
                let key_states = self._shape(&kv_states.apply(&self.k_proj)?, b_sz)?;
                let value_states = self._shape(&kv_states.apply(&self.v_proj)?, b_sz)?;
                (key_states, value_states)
            }
        };
        // Fold the head dim into the batch dim for the matmuls.
        let proj_shape = (b_sz * self.num_heads, (), self.head_dim);
        let query_states = self._shape(&query_states, b_sz)?.reshape(proj_shape)?;
        let key_states = key_states.reshape(proj_shape)?;
        let value_states = value_states.reshape(proj_shape)?;
        let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?;
        // The mask is additive (e.g. -inf on disallowed positions).
        let attn_weights = match attn_mask {
            None => attn_weights,
            Some(attn_mask) => attn_weights.broadcast_add(attn_mask)?,
        };
        let attn_probs = candle_nn::ops::softmax_last_dim(&attn_weights)?;
        let attn_output = attn_probs.matmul(&value_states)?;
        attn_output
            .reshape((b_sz, self.num_heads, tgt_len, self.head_dim))?
            .transpose(1, 2)?
            .reshape((b_sz, tgt_len, self.head_dim * self.num_heads))?
            .apply(&self.out_proj)
    }
}
/// One decoder layer: post-norm self-attention, post-norm cross-attention
/// against the encoder states, and a post-norm feed-forward block.
#[derive(Debug, Clone)]
struct TrOCRDecoderLayer {
    self_attn: TrOCRAttention,
    activation_fn: candle_nn::Activation,
    self_attn_layer_norm: LayerNorm,
    encoder_attn: TrOCRAttention,
    encoder_attn_layer_norm: LayerNorm,
    fc1: Linear,
    fc2: Linear,
    final_layer_norm: LayerNorm,
}
impl TrOCRDecoderLayer {
    fn load(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> {
        let embed_dim = cfg.d_model;
        let self_attn = TrOCRAttention::load(vb.pp("self_attn"), cfg, None, None)?;
        let self_attn_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("self_attn_layer_norm"))?;
        // Cross-attention keys/values come from the encoder hidden states.
        let encoder_attn = TrOCRAttention::load(
            vb.pp("encoder_attn"),
            cfg,
            Some(cfg.cross_attention_hidden_size),
            Some(cfg.cross_attention_hidden_size),
        )?;
        let encoder_attn_layer_norm =
            layer_norm(embed_dim, 1e-5, vb.pp("encoder_attn_layer_norm"))?;
        let fc1 = linear_no_bias(embed_dim, cfg.decoder_ffn_dim, vb.pp("fc1"))?;
        let fc2 = linear_no_bias(cfg.decoder_ffn_dim, embed_dim, vb.pp("fc2"))?;
        let final_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("final_layer_norm"))?;
        Ok(Self {
            self_attn,
            activation_fn: cfg.activation_function,
            self_attn_layer_norm,
            encoder_attn,
            encoder_attn_layer_norm,
            fc1,
            fc2,
            final_layer_norm,
        })
    }
    fn reset_kv_cache(&mut self) {
        self.self_attn.reset_kv_cache();
    }
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: &Tensor,
        encoder_hidden_states: Option<&Tensor>,
    ) -> Result<Tensor> {
        // Self-attention with residual, then norm.
        let residual = xs.clone();
        let xs = self.self_attn.forward(xs, None, Some(attention_mask))?;
        let xs = (xs + residual)?;
        let mut xs = self.self_attn_layer_norm.forward(&xs)?;
        // Cross-attention against the encoder output, when provided.
        if let Some(encoder_hidden_states) = &encoder_hidden_states {
            let residual = xs.clone();
            let encoder_attention_mask = attention_mask.clone(); // TODO
            xs = self.encoder_attn.forward(
                &xs,
                Some(encoder_hidden_states),
                Some(&encoder_attention_mask),
            )?;
            xs = (xs + residual)?;
            xs = self.encoder_attn_layer_norm.forward(&xs)?
        }
        // Feed-forward with residual, then norm.
        let residual = xs.clone();
        let xs = self.fc1.forward(&xs)?;
        let xs = self.activation_fn.forward(&xs)?;
        let xs = self.fc2.forward(&xs)?;
        let xs = (xs + residual)?;
        let xs = self.final_layer_norm.forward(&xs)?;
        Ok(xs)
    }
}
/// Stack of TrOCR decoder layers with token and positional embeddings.
#[derive(Debug, Clone)]
pub struct TrOCRDecoder {
    layers: Vec<TrOCRDecoderLayer>,
    // Optional sqrt(d_model) scaling applied to the token embeddings.
    embed_scale: Option<f64>,
    embed_tokens: Embedding,
    embed_positions: TrOCRLearnedPositionalEmbedding,
}
impl TrOCRDecoder {
    /// Builds the decoder from weights rooted at `decoder.model.decoder`.
    fn new(cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> {
        let vb = vb.pp("decoder.model.decoder");
        let embed_tokens = embedding(cfg.vocab_size, cfg.d_model, vb.pp("embed_tokens"))?;
        // Learned vs. sinusoidal positional embeddings, selected by config.
        let embed_positions = if cfg.use_learned_position_embeddings {
            TrOCRLearnedPositionalEmbedding::load(vb.pp("embed_positions"), cfg)?
        } else {
            TrOCRLearnedPositionalEmbedding::new_sinusoidal(vb.pp("embed_positions"), cfg)?
        };
        let mut layers = Vec::with_capacity(cfg.decoder_layers);
        let vb_l = vb.pp("layers");
        for idx in 0..cfg.decoder_layers {
            let layer = TrOCRDecoderLayer::load(vb_l.pp(idx), cfg)?;
            layers.push(layer)
        }
        let embed_scale = if cfg.scale_embedding {
            Some((cfg.d_model as f64).sqrt())
        } else {
            None
        };
        Ok(Self {
            layers,
            embed_scale,
            embed_tokens,
            embed_positions,
        })
    }
    /// Clears the KV caches of every layer.
    fn reset_kv_cache(&mut self) {
        self.layers.iter_mut().for_each(|l| l.reset_kv_cache())
    }
    /// Embeds `xs`, adds positions offset by `past_kv_len` (for incremental
    /// decoding), then runs all layers with the given attention mask.
    pub fn forward(
        &mut self,
        xs: &Tensor,
        encoder_xs: Option<&Tensor>,
        past_kv_len: usize,
        attn_mask: &Tensor,
    ) -> Result<Tensor> {
        let embed_pos = self.embed_positions.forward(xs, past_kv_len as u32)?;
        let xs = xs.apply(&self.embed_tokens)?;
        let xs = match self.embed_scale {
            None => xs,
            Some(scale) => (xs * scale)?,
        };
        let mut xs = xs.broadcast_add(&embed_pos)?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attn_mask, encoder_xs)?;
        }
        Ok(xs)
    }
}
/// Vision encoder for TrOCR: patch embeddings, a transformer encoder, and a
/// final LayerNorm.
#[derive(Debug, Clone)]
pub struct TrOCREncoder {
    embeddings: Embeddings,
    encoder: Encoder,
    layernorm: LayerNorm,
}
impl TrOCREncoder {
    /// Builds the encoder from weights rooted at `encoder`.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_v = vb.pp("encoder");
        let embeddings = Embeddings::new(cfg, false, vb_v.pp("embeddings"))?;
        let encoder = Encoder::new(cfg, vb_v.pp("encoder"))?;
        let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_v.pp("layernorm"))?;
        Ok(Self {
            embeddings,
            encoder,
            layernorm,
        })
    }
    /// Encodes image input `xs` into hidden states for cross-attention.
    pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let embedding_output = self.embeddings.forward(xs, None, false)?;
        let encoder_outputs = self.encoder.forward(&embedding_output)?;
        self.layernorm.forward(&encoder_outputs)
    }
}
/// Decoder plus output projection producing vocabulary logits.
#[derive(Debug, Clone)]
pub struct TrOCRForCausalLM {
    decoder: TrOCRDecoder,
    output_projection: Linear,
}
impl TrOCRForCausalLM {
    /// Builds the causal LM head. When `tie_word_embeddings` is set, the
    /// output projection shares the token-embedding matrix instead of loading
    /// a separate weight.
    pub fn new(decoder_cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> {
        let decoder = TrOCRDecoder::new(decoder_cfg, vb.clone())?;
        let output_projection = if decoder_cfg.tie_word_embeddings {
            candle_nn::Linear::new(decoder.embed_tokens.embeddings().clone(), None)
        } else {
            candle_nn::linear_no_bias(
                decoder_cfg.d_model,
                decoder_cfg.vocab_size,
                vb.pp("decoder.output_projection"),
            )?
        };
        Ok(Self {
            decoder,
            output_projection,
        })
    }
    /// Runs the decoder and projects the hidden states to logits.
    pub fn forward(
        &mut self,
        xs: &Tensor,
        encoder_xs: Option<&Tensor>,
        past_kv_len: usize,
        attn_mask: &Tensor,
    ) -> Result<Tensor> {
        let xs = self
            .decoder
            .forward(xs, encoder_xs, past_kv_len, attn_mask)?;
        let xs = xs.apply(&self.output_projection)?;
        Ok(xs)
    }
    /// Clears the decoder KV caches.
    fn reset_kv_cache(&mut self) {
        self.decoder.reset_kv_cache();
    }
}
/// Complete TrOCR model: a vision encoder paired with a causal text decoder.
#[derive(Debug, Clone)]
pub struct TrOCRModel {
    encoder: TrOCREncoder,
    decoder: TrOCRForCausalLM,
}
impl TrOCRModel {
    /// Builds both halves of the model from the same variable store.
    pub fn new(encoder_cfg: &Config, decoder_cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            encoder: TrOCREncoder::new(encoder_cfg, vb.clone())?,
            decoder: TrOCRForCausalLM::new(decoder_cfg, vb)?,
        })
    }
    /// Mutable access to the vision encoder.
    pub fn encoder(&mut self) -> &mut TrOCREncoder {
        &mut self.encoder
    }
    /// Mutable access to the causal decoder.
    pub fn decoder(&mut self) -> &mut TrOCRForCausalLM {
        &mut self.decoder
    }
    /// Decodes tokens `xs` against the precomputed encoder output, using a
    /// causal mask (-inf above the diagonal, 0 elsewhere).
    pub fn decode(
        &mut self,
        xs: &Tensor,
        encoder_xs: &Tensor,
        past_kv_len: usize,
    ) -> Result<Tensor> {
        let seq_len = xs.dim(1)?;
        let mut mask = Vec::with_capacity(seq_len * seq_len);
        for row in 0..seq_len {
            for col in 0..seq_len {
                let v = if col > row { f32::NEG_INFINITY } else { 0f32 };
                mask.push(v);
            }
        }
        let mask = Tensor::from_vec(mask, (seq_len, seq_len), xs.device())?;
        self.decoder
            .forward(xs, Some(encoder_xs), past_kv_len, &mask)
    }
    /// Clears the decoder KV caches.
    pub fn reset_kv_cache(&mut self) {
        self.decoder.reset_kv_cache();
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/dac.rs | candle-transformers/src/models/dac.rs | //! Implementation of the Descript Audio Codec (DAC) model
//!
//! See: [Descript Audio Codec](https://github.com/descriptinc/descript-audio-codec)
//!
/// An efficient neural codec for compressing/decompressing audio
///
use crate::models::encodec;
use candle::{IndexOp, Result, Tensor, D};
use candle_nn::{Conv1d, Conv1dConfig, ConvTranspose1d, ConvTranspose1dConfig, VarBuilder};
/// DAC model hyper-parameters, deserialized from the model's config file.
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
    // Number of residual codebooks used by the quantizer.
    pub num_codebooks: usize,
    pub model_bitrate: u32,
    // Number of entries per codebook.
    pub codebook_size: usize,
    // Width of the latent representation between encoder and decoder.
    pub latent_dim: usize,
    pub frame_rate: u32,
    pub sampling_rate: u32,
}
/// Snake activation with a learned per-channel frequency `alpha`:
/// `x + (1 / alpha) * sin^2(alpha * x)` (1e-9 added to alpha for stability).
#[derive(Debug, Clone)]
pub struct Snake1d {
    // Shape (1, channels, 1) so it broadcasts over batch and time.
    alpha: Tensor,
}
impl Snake1d {
    /// Loads the `alpha` parameter for `channels` channels.
    pub fn new(channels: usize, vb: VarBuilder) -> Result<Self> {
        let alpha = vb.get((1, channels, 1), "alpha")?;
        Ok(Self { alpha })
    }
}
impl candle::Module for Snake1d {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // Flatten trailing dims so alpha broadcasts along the channel axis,
        // then restore the original shape.
        let xs_shape = xs.shape();
        let xs = xs.flatten_from(2)?;
        let sin = self.alpha.broadcast_mul(&xs)?.sin()?;
        let sin = (&sin * &sin)?;
        (xs + (&self.alpha + 1e-9)?.recip()?.broadcast_mul(&sin)?)?.reshape(xs_shape)
    }
}
/// Residual block used by the DAC encoder/decoder: snake -> dilated conv ->
/// snake -> 1x1 conv, added back to the (possibly trimmed) input.
#[derive(Debug, Clone)]
pub struct ResidualUnit {
    snake1: Snake1d,
    conv1: Conv1d,
    snake2: Snake1d,
    conv2: Conv1d,
}
impl ResidualUnit {
    /// Builds the unit with a kernel-7 dilated conv; padding keeps the output
    /// length close to the input length for the given dilation.
    pub fn new(dim: usize, dilation: usize, vb: VarBuilder) -> Result<Self> {
        let pad = ((7 - 1) * dilation) / 2;
        let vb = vb.pp("block");
        let snake1 = Snake1d::new(dim, vb.pp(0))?;
        let cfg1 = Conv1dConfig {
            dilation,
            padding: pad,
            ..Default::default()
        };
        let conv1 = encodec::conv1d_weight_norm(dim, dim, 7, cfg1, vb.pp(1))?;
        let snake2 = Snake1d::new(dim, vb.pp(2))?;
        let conv2 = encodec::conv1d_weight_norm(dim, dim, 1, Default::default(), vb.pp(3))?;
        Ok(Self {
            snake1,
            conv1,
            snake2,
            conv2,
        })
    }
}
impl candle::Module for ResidualUnit {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let ys = xs
            .apply(&self.snake1)?
            .apply(&self.conv1)?
            .apply(&self.snake2)?
            .apply(&self.conv2)?;
        // If the conv stack shortened the sequence, center-crop the input so
        // the residual addition lines up.
        let pad = (xs.dim(D::Minus1)? - ys.dim(D::Minus1)?) / 2;
        if pad > 0 {
            &ys + xs.narrow(D::Minus1, pad, ys.dim(D::Minus1)?)
        } else {
            ys + xs
        }
    }
}
/// Encoder stage: three residual units at increasing dilation (1, 3, 9),
/// then a snake activation and a strided conv that doubles the channel count
/// while downsampling time by `stride`.
#[derive(Debug, Clone)]
pub struct EncoderBlock {
    res1: ResidualUnit,
    res2: ResidualUnit,
    res3: ResidualUnit,
    snake1: Snake1d,
    conv1: Conv1d,
}
impl EncoderBlock {
    /// `dim` is the OUTPUT width; the residual units run at `dim / 2`.
    pub fn new(dim: usize, stride: usize, vb: VarBuilder) -> Result<Self> {
        let vb = vb.pp("block");
        let res1 = ResidualUnit::new(dim / 2, 1, vb.pp(0))?;
        let res2 = ResidualUnit::new(dim / 2, 3, vb.pp(1))?;
        let res3 = ResidualUnit::new(dim / 2, 9, vb.pp(2))?;
        let snake1 = Snake1d::new(dim / 2, vb.pp(3))?;
        let cfg1 = Conv1dConfig {
            stride,
            padding: stride.div_ceil(2),
            ..Default::default()
        };
        let conv1 = encodec::conv1d_weight_norm(dim / 2, dim, 2 * stride, cfg1, vb.pp(4))?;
        Ok(Self {
            res1,
            res2,
            res3,
            snake1,
            conv1,
        })
    }
}
impl candle::Module for EncoderBlock {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.res1)?
            .apply(&self.res2)?
            .apply(&self.res3)?
            .apply(&self.snake1)?
            .apply(&self.conv1)
    }
}
/// DAC encoder: an input conv, a chain of downsampling blocks, and a final
/// snake + conv mapping into the latent dimension.
#[derive(Debug, Clone)]
pub struct Encoder {
    conv1: Conv1d,
    blocks: Vec<EncoderBlock>,
    snake1: Snake1d,
    conv2: Conv1d,
}
impl candle::Module for Encoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = xs.apply(&self.conv1)?;
        for block in self.blocks.iter() {
            xs = xs.apply(block)?
        }
        xs.apply(&self.snake1)?.apply(&self.conv2)
    }
}
impl Encoder {
    /// Builds the encoder. `d_model` is the initial width and doubles at each
    /// stride; `strides` sets the per-block time downsampling factors and
    /// `d_latent` the output latent width.
    pub fn new(
        mut d_model: usize,
        strides: &[usize],
        d_latent: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        let vb = vb.pp("block");
        let cfg1 = Conv1dConfig {
            padding: 3,
            ..Default::default()
        };
        // Mono audio input: a single channel in.
        let conv1 = encodec::conv1d_weight_norm(1, d_model, 7, cfg1, vb.pp(0))?;
        let mut blocks = Vec::with_capacity(strides.len());
        for (block_idx, stride) in strides.iter().enumerate() {
            d_model *= 2;
            let block = EncoderBlock::new(d_model, *stride, vb.pp(block_idx + 1))?;
            blocks.push(block)
        }
        let snake1 = Snake1d::new(d_model, vb.pp(strides.len() + 1))?;
        let cfg2 = Conv1dConfig {
            padding: 1,
            ..Default::default()
        };
        let conv2 =
            encodec::conv1d_weight_norm(d_model, d_latent, 3, cfg2, vb.pp(strides.len() + 2))?;
        Ok(Self {
            conv1,
            blocks,
            snake1,
            conv2,
        })
    }
}
/// Decoder stage: snake activation, a transposed conv that upsamples time by
/// `stride` while halving channels, followed by three residual units.
#[derive(Debug, Clone)]
pub struct DecoderBlock {
    snake1: Snake1d,
    conv_tr1: ConvTranspose1d,
    res1: ResidualUnit,
    res2: ResidualUnit,
    res3: ResidualUnit,
}
impl DecoderBlock {
    /// Builds the block mapping `in_dim` channels to `out_dim`.
    pub fn new(in_dim: usize, out_dim: usize, stride: usize, vb: VarBuilder) -> Result<Self> {
        let vb = vb.pp("block");
        let snake1 = Snake1d::new(in_dim, vb.pp(0))?;
        let cfg = ConvTranspose1dConfig {
            stride,
            padding: stride.div_ceil(2),
            ..Default::default()
        };
        let conv_tr1 = encodec::conv_transpose1d_weight_norm(
            in_dim,
            out_dim,
            2 * stride,
            true,
            cfg,
            vb.pp(1),
        )?;
        // Residual units at dilations 1, 3, 9, mirroring the encoder blocks.
        let res1 = ResidualUnit::new(out_dim, 1, vb.pp(2))?;
        let res2 = ResidualUnit::new(out_dim, 3, vb.pp(3))?;
        let res3 = ResidualUnit::new(out_dim, 9, vb.pp(4))?;
        Ok(Self {
            snake1,
            conv_tr1,
            res1,
            res2,
            res3,
        })
    }
}
impl candle_nn::Module for DecoderBlock {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.snake1)?
            .apply(&self.conv_tr1)?
            .apply(&self.res1)?
            .apply(&self.res2)?
            .apply(&self.res3)
    }
}
/// DAC decoder: input conv, a chain of upsampling blocks that halve the
/// channel count each stage, then a final snake + conv to `d_out` channels.
#[derive(Debug, Clone)]
pub struct Decoder {
    conv1: Conv1d,
    blocks: Vec<DecoderBlock>,
    snake1: Snake1d,
    conv2: Conv1d,
}
impl Decoder {
    /// Builds the decoder. `in_c` is the latent width, `channels` the initial
    /// decoder width, `rates` the per-block upsampling factors, and `d_out`
    /// the number of output audio channels.
    pub fn new(
        in_c: usize,
        mut channels: usize,
        rates: &[usize],
        d_out: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        let vb = vb.pp("model");
        let cfg1 = Conv1dConfig {
            padding: 3,
            ..Default::default()
        };
        let conv1 = encodec::conv1d_weight_norm(in_c, channels, 7, cfg1, vb.pp(0))?;
        let mut blocks = Vec::with_capacity(rates.len());
        for (idx, stride) in rates.iter().enumerate() {
            let block = DecoderBlock::new(channels, channels / 2, *stride, vb.pp(idx + 1))?;
            channels /= 2;
            blocks.push(block)
        }
        let snake1 = Snake1d::new(channels, vb.pp(rates.len() + 1))?;
        // Re-uses cfg1 (padding 3) for the final kernel-7 conv.
        let conv2 = encodec::conv1d_weight_norm(channels, d_out, 7, cfg1, vb.pp(rates.len() + 2))?;
        Ok(Self {
            conv1,
            blocks,
            snake1,
            conv2,
        })
    }
}
impl candle::Module for Decoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = xs.apply(&self.conv1)?;
        for block in self.blocks.iter() {
            xs = xs.apply(block)?
        }
        xs.apply(&self.snake1)?.apply(&self.conv2)
    }
}
/// Single vector quantizer: projects latents into the codebook space and
/// back; decoding looks up codebook entries by index.
#[allow(unused)]
#[derive(Clone, Debug)]
pub struct VectorQuantizer {
    in_proj: Conv1d,
    out_proj: Conv1d,
    codebook: candle_nn::Embedding,
}
impl VectorQuantizer {
    /// Builds the quantizer with `cb_size` codebook entries of width `cb_dim`.
    pub fn new(in_dim: usize, cb_size: usize, cb_dim: usize, vb: VarBuilder) -> Result<Self> {
        let in_proj =
            encodec::conv1d_weight_norm(in_dim, cb_dim, 1, Default::default(), vb.pp("in_proj"))?;
        let out_proj =
            encodec::conv1d_weight_norm(cb_dim, in_dim, 1, Default::default(), vb.pp("out_proj"))?;
        let codebook = candle_nn::embedding(cb_size, cb_dim, vb.pp("codebook"))?;
        Ok(Self {
            in_proj,
            out_proj,
            codebook,
        })
    }
    /// Looks up codebook entries for the given code indices.
    pub fn embed_code(&self, embed_id: &Tensor) -> Result<Tensor> {
        embed_id.apply(&self.codebook)
    }
    /// Embeds code indices and swaps the last two dims so channels come
    /// before time, as expected by the 1D convs downstream.
    pub fn decode_code(&self, embed_id: &Tensor) -> Result<Tensor> {
        self.embed_code(embed_id)?.transpose(1, 2)
    }
}
/// Residual vector quantizer: a stack of codebooks whose decoded outputs are
/// summed to reconstruct the latent representation.
#[derive(Clone, Debug)]
pub struct ResidualVectorQuantizer {
    quantizers: Vec<VectorQuantizer>,
}
impl ResidualVectorQuantizer {
    /// Builds `n_codebooks` quantizers under the `quantizers` prefix.
    pub fn new(
        input_dim: usize,
        n_codebooks: usize,
        cb_size: usize,
        cb_dim: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        let vb = &vb.pp("quantizers");
        let quantizers = (0..n_codebooks)
            .map(|i| VectorQuantizer::new(input_dim, cb_size, cb_dim, vb.pp(i)))
            .collect::<Result<Vec<_>>>()?;
        Ok(Self { quantizers })
    }
    /// Decodes codes of shape (batch, n_codebooks, time): each codebook's
    /// contribution is decoded, projected back, and accumulated into a sum.
    #[allow(clippy::wrong_self_convention)]
    pub fn from_codes(&self, codes: &Tensor) -> Result<Tensor> {
        let mut acc: Option<Tensor> = None;
        for (cb_idx, quantizer) in self.quantizers.iter().enumerate() {
            let decoded = quantizer
                .decode_code(&codes.i((.., cb_idx))?)?
                .apply(&quantizer.out_proj)?;
            acc = Some(match acc {
                None => decoded,
                Some(prev) => (prev + decoded)?,
            });
        }
        match acc {
            Some(total) => Ok(total),
            None => candle::bail!("empty codebooks"),
        }
    }
}
/// Full DAC codec: encoder, residual quantizer, and decoder.
#[derive(Debug, Clone)]
pub struct Model {
    pub encoder: Encoder,
    pub quantizer: ResidualVectorQuantizer,
    pub decoder: Decoder,
}
impl Model {
    /// Builds the codec. The encoder strides (2, 4, 8, 8) and decoder rates
    /// (8, 8, 4, 2) are the fixed 44.1kHz DAC architecture; the codebook
    /// width is hard-coded to 8.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let encoder = Encoder::new(64, &[2, 4, 8, 8], cfg.latent_dim, vb.pp("encoder"))?;
        let quantizer = ResidualVectorQuantizer::new(
            cfg.latent_dim,
            cfg.num_codebooks,
            cfg.codebook_size,
            8,
            vb.pp("quantizer"),
        )?;
        let decoder = Decoder::new(cfg.latent_dim, 1536, &[8, 8, 4, 2], 1, vb.pp("decoder"))?;
        Ok(Self {
            encoder,
            decoder,
            quantizer,
        })
    }
    /// Reconstructs audio from quantized codes.
    pub fn decode_codes(&self, audio_codes: &Tensor) -> Result<Tensor> {
        let audio_values = self.quantizer.from_codes(audio_codes)?;
        audio_values.apply(&self.decoder)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/colpali.rs | candle-transformers/src/models/colpali.rs | //! Colpali Model for text/image similarity scoring.
//!
//! Colpali combines a vision encoder with an efficient LM for retrieving content.
//!
use candle::{Module, Result, Tensor};
use candle_nn::VarBuilder;
use super::paligemma;
use candle_nn::{linear, Linear};
/// ColPali: a PaliGemma backbone with a small projection head producing
/// 128-dimensional multi-vector embeddings for late-interaction retrieval.
pub struct Model {
    pub model: paligemma::Model,
    pub custom_text_projection: Linear,
}
impl Model {
    /// Builds the PaliGemma backbone plus the 128-dim projection head.
    pub fn new(config: &paligemma::Config, vb: VarBuilder) -> Result<Self> {
        let model = paligemma::Model::new(config, vb.pp("model"))?;
        let custom_text_projection = linear(
            config.text_config.hidden_size,
            128,
            vb.pp("custom_text_proj"),
        )?;
        Ok(Self {
            model,
            custom_text_projection,
        })
    }
    /// Embeds an image (with its prompt tokens), projects to 128 dims, and
    /// L2-normalizes each token embedding along dim 2.
    pub fn forward_images(&mut self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<Tensor> {
        let outputs = self
            .model
            .setup_without_projection(pixel_values, input_ids)?;
        let outputs = self.custom_text_projection.forward(&outputs)?;
        // L2 normalization: divide by the per-token vector norm.
        let outputs = outputs.broadcast_div(&outputs.sqr()?.sum_keepdim(2)?.sqrt()?)?;
        Ok(outputs)
    }
    /// Embeds a text query the same way (project then L2-normalize).
    pub fn forward_text(&mut self, input_ids: &Tensor) -> Result<Tensor> {
        let outputs = self.model.forward_without_projection(input_ids)?;
        let outputs = self.custom_text_projection.forward(&outputs)?;
        let outputs = outputs.broadcast_div(&outputs.sqr()?.sum_keepdim(2)?.sqrt()?)?;
        Ok(outputs)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_mpt.rs | candle-transformers/src/models/quantized_mpt.rs | //! Quantized MPT model implementation.
//!
//! MPT (MPT-7B) is a causal transformer model series optimized for code generation.
//! This implementation provides quantization for reduced memory and compute.
//!
//! Key characteristics:
//! - Multi-Query Grouped Attention (MQA)
//! - Support for KV-caching
//! - Pre-computed ALiBi attention biases
//! - Support for 8-bit quantization
//!
//! References:
//! - [Replit Code Models](https://huggingface.co/replit/replit-code-v1_5-3b)
//! - [MPT-7B Implementation](https://github.com/mosaicml/llm-foundry)
//!
/// MPT model used by replit-code-v1_5-3b
/// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py
///
use crate::quantized_nn::{layer_norm_no_bias, linear_no_bias, Embedding, Linear};
pub use crate::quantized_var_builder::VarBuilder;
/// MPT model used by replit-code-v1_5-3b
/// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py
use candle::{IndexOp, Module, Result, Tensor, D};
use candle_nn::LayerNorm;
pub use super::mpt::Config;
/// Grouped-query attention with a fused QKV projection, a KV cache for
/// incremental decoding, and a precomputed ALiBi bias (quantized weights).
#[derive(Debug, Clone)]
struct GroupedQueryAttention {
    wqkv: Linear,
    out_proj: Linear,
    // Cached (key, value); key is stored transposed as (b, h, d, s).
    kv_cache: Option<(Tensor, Tensor)>,
    softmax_scale: f64,
    head_dim: usize,
    d_model: usize,
    n_heads: usize,
    kv_n_heads: usize,
    // Full (1, n_heads, max_seq, max_seq) ALiBi bias, sliced at runtime.
    attn_bias: Tensor,
    span: tracing::Span,
}
impl GroupedQueryAttention {
    /// Builds the attention module; the fused Wqkv packs the query plus
    /// `kv_n_heads` key/value heads into one projection.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let head_dim = cfg.d_model / cfg.n_heads;
        let wqkv_size = cfg.d_model + 2 * cfg.kv_n_heads * head_dim;
        let wqkv = linear_no_bias(cfg.d_model, wqkv_size, vb.pp("Wqkv"))?;
        // Standard 1/sqrt(head_dim) attention scaling.
        let softmax_scale = 1f64 / (head_dim as f64).sqrt();
        let out_proj = linear_no_bias(cfg.d_model, cfg.d_model, vb.pp("out_proj"))?;
        let attn_bias = super::mpt::build_alibi_bias(cfg)?.to_device(vb.device())?;
        Ok(Self {
            wqkv,
            out_proj,
            kv_cache: None,
            softmax_scale,
            head_dim,
            d_model: cfg.d_model,
            n_heads: cfg.n_heads,
            kv_n_heads: cfg.kv_n_heads,
            attn_bias,
            span: tracing::span!(tracing::Level::TRACE, "gqa"),
        })
    }
    /// Attention forward pass; appends to the KV cache on every call, so a
    /// fresh generation requires resetting the cache externally.
    fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b_size, seq_len, _n_embd) = xs.dims3()?;
        let qkv = self.wqkv.forward(xs)?;
        // Split the fused projection into query / key / value slices.
        let query = qkv.narrow(2, 0, self.d_model)?;
        let kv_size = self.kv_n_heads * self.head_dim;
        let key = qkv.narrow(2, self.d_model, kv_size)?;
        let value = qkv.narrow(2, self.d_model + kv_size, kv_size)?;
        // scaled_multihead_dot_product_attention
        let query = query
            .reshape((b_size, seq_len, self.n_heads, ()))?
            .transpose(1, 2)?; // b,h,s,d
        let key = key
            .reshape((b_size, seq_len, self.kv_n_heads, ()))?
            .permute((0, 2, 3, 1))?; // b,h,d,s
        let value = value
            .reshape((b_size, seq_len, self.kv_n_heads, ()))?
            .transpose(1, 2)?; // b,h,s,d
        // Concatenate along the sequence axis: dim 3 for the transposed key,
        // dim 2 for the value.
        let (key, value) = match &self.kv_cache {
            None => (key, value),
            Some((prev_k, prev_v)) => {
                let k = Tensor::cat(&[prev_k, &key], 3)?;
                let v = Tensor::cat(&[prev_v, &value], 2)?;
                (k, v)
            }
        };
        self.kv_cache = Some((key.clone(), value.clone()));
        let query = query.contiguous()?;
        // Expand the kv heads to match the query heads (grouped-query attn).
        let key = crate::utils::repeat_kv(key, self.n_heads / self.kv_n_heads)?.contiguous()?;
        let value = crate::utils::repeat_kv(value, self.n_heads / self.kv_n_heads)?.contiguous()?;
        let attn_weights = (query.matmul(&key)? * self.softmax_scale)?;
        // Take the bottom-right corner of the precomputed ALiBi bias so it
        // lines up with the current query/key lengths during decoding.
        let attn_bias = {
            let s_q = query.dim(D::Minus2)?;
            let s_k = key.dim(D::Minus1)?;
            let (_, _, a_q, a_k) = self.attn_bias.dims4()?;
            let start_q = a_q.saturating_sub(s_q);
            let start_k = a_k.saturating_sub(s_k);
            self.attn_bias.i((.., .., start_q.., start_k..))?
        };
        let attn_weights = attn_weights.broadcast_add(&attn_bias)?;
        let attn_weights = match mask {
            None => attn_weights,
            Some(mask) => super::mpt::masked_fill(
                &attn_weights,
                &mask.broadcast_as(attn_weights.shape())?,
                f32::NEG_INFINITY,
            )?,
        };
        let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
        let attn_output = attn_weights
            .matmul(&value)?
            .transpose(1, 2)?
            .flatten_from(D::Minus2)?;
        let out = attn_output.apply(&self.out_proj)?;
        Ok(out)
    }
}
/// Feed-forward block: up-projection, GELU (erf form), down-projection.
#[derive(Debug, Clone)]
struct Ffn {
    up_proj: Linear,
    down_proj: Linear,
}
impl Ffn {
    /// Builds the two projections; the inner width is
    /// `d_model * expansion_ratio`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let inner_dim = cfg.d_model * cfg.expansion_ratio;
        Ok(Self {
            up_proj: linear_no_bias(cfg.d_model, inner_dim, vb.pp("up_proj"))?,
            down_proj: linear_no_bias(inner_dim, cfg.d_model, vb.pp("down_proj"))?,
        })
    }
}
impl Module for Ffn {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let expanded = self.up_proj.forward(xs)?;
        let activated = expanded.gelu_erf()?;
        self.down_proj.forward(&activated)
    }
}
/// One MPT transformer block: pre-norm attention then pre-norm FFN, each
/// with a residual connection.
#[derive(Debug, Clone)]
struct MPTBlock {
    norm1: LayerNorm, // Do we need the low-precision variant?
    attn: GroupedQueryAttention,
    norm2: LayerNorm,
    ffn: Ffn,
}
impl MPTBlock {
    /// Builds the block; MPT layer norms carry no bias.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let norm1 = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_1"))?;
        let norm2 = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_2"))?;
        let attn = GroupedQueryAttention::new(cfg, vb.pp("attn"))?;
        let ffn = Ffn::new(cfg, vb.pp("ffn"))?;
        Ok(Self {
            norm1,
            attn,
            norm2,
            ffn,
        })
    }
    /// Pre-norm residual forward: x + attn(norm1(x)), then x + ffn(norm2(x)).
    fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
        let residual = xs;
        let xs = xs.apply(&self.norm1)?;
        let xs = self.attn.forward(&xs, mask)?;
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = xs.apply(&self.norm2)?.apply(&self.ffn)?;
        xs + residual
    }
}
/// Quantized MPT model: embedding, transformer blocks, final norm, with the
/// output head tied to the input embedding matrix.
#[derive(Debug, Clone)]
pub struct Model {
    wte: Embedding,
    blocks: Vec<MPTBlock>,
    norm_f: LayerNorm,
}
impl Model {
    /// Builds the model from a quantized `VarBuilder`.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let wte = Embedding::new(cfg.vocab_size, cfg.d_model, vb.pp("wte"))?;
        let vb_b = vb.pp("blocks");
        let mut blocks = Vec::with_capacity(cfg.n_layers);
        for i in 0..cfg.n_layers {
            let block = MPTBlock::new(cfg, vb_b.pp(i))?;
            blocks.push(block)
        }
        let norm_f = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_f"))?;
        Ok(Self {
            wte,
            blocks,
            norm_f,
        })
    }
    /// Returns the logits for the LAST position only, shape (batch, vocab).
    pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
        let (_b_size, seq_len) = xs.dims2()?;
        let mut xs = xs.apply(&self.wte)?;
        // A causal mask is only needed when processing more than one token;
        // single-token decode steps rely on the KV cache.
        let mask = if seq_len <= 1 {
            None
        } else {
            Some(super::mpt::get_mask(seq_len, xs.device())?)
        };
        for block in self.blocks.iter_mut() {
            xs = block.forward(&xs, mask.as_ref())?;
        }
        let xs = xs.apply(&self.norm_f)?;
        // Tied output head: project against the transposed embedding matrix.
        // NOTE(review): the trailing squeeze(1) acts on the vocab dim and
        // looks like a no-op for vocab > 1 — confirm against candle's squeeze
        // semantics.
        let logits = xs
            .narrow(1, seq_len - 1, 1)?
            .squeeze(1)?
            .matmul(&self.wte.embeddings().t()?)?
            .squeeze(1)?;
        Ok(logits)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mpt.rs | candle-transformers/src/models/mpt.rs | //! Module implementing the MPT (Multi-Purpose Transformer) model
//!
//! References:
//! - [MPT Model used by replit-code-v1_5-3b](https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py)
//! - [Configuration](https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/configuration_mpt.py)
//!
//! The model uses grouped query attention and alibi positional embeddings.
use crate::models::with_tracing::{linear_no_bias, Embedding, Linear};
/// MPT model used by replit-code-v1_5-3b
/// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, VarBuilder};
// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/configuration_mpt.py
/// MPT hyper-parameters, mirroring configuration_mpt.py.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
    pub(crate) d_model: usize,
    pub(crate) n_heads: usize,
    pub(crate) n_layers: usize,
    // FFN inner width = d_model * expansion_ratio.
    pub(crate) expansion_ratio: usize,
    pub(crate) max_seq_len: usize,
    pub(crate) vocab_size: usize,
    // Number of key/value heads (grouped-query attention).
    pub(crate) kv_n_heads: usize,
    // Prefix-LM attention (bidirectional over the prefix) vs. causal.
    pub(crate) attn_prefix_lm: bool,
    pub(crate) attn_alibi: bool,
    // Maximum exponent used when computing the ALiBi slopes.
    pub(crate) attn_alibi_bias_max: usize,
}
impl Config {
    /// Configuration matching replit-code-v1_5-3b.
    pub fn replit_code_v1_5_3b() -> Self {
        Self {
            d_model: 3072,
            n_heads: 24,
            n_layers: 32,
            expansion_ratio: 4,
            max_seq_len: 4096,
            vocab_size: 32768,
            kv_n_heads: 8,
            attn_prefix_lm: false,
            attn_alibi: true,
            attn_alibi_bias_max: 8,
        }
    }
    /// The model is causal unless it runs as a prefix LM.
    pub fn is_causal(&self) -> bool {
        !self.attn_prefix_lm
    }
}
/// Grouped-query attention with a fused QKV projection, a KV cache for
/// incremental decoding, and a precomputed ALiBi bias.
#[derive(Debug, Clone)]
struct GroupedQueryAttention {
    wqkv: Linear,
    out_proj: Linear,
    // Cached (key, value); key is stored transposed as (b, h, d, s).
    kv_cache: Option<(Tensor, Tensor)>,
    softmax_scale: f64,
    head_dim: usize,
    d_model: usize,
    n_heads: usize,
    kv_n_heads: usize,
    // Full (1, n_heads, max_seq, max_seq) ALiBi bias, sliced at runtime.
    attn_bias: Tensor,
    span: tracing::Span,
}
impl GroupedQueryAttention {
    /// Builds the attention module; the fused Wqkv packs the query plus
    /// `kv_n_heads` key/value heads into one projection.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let head_dim = cfg.d_model / cfg.n_heads;
        let wqkv_size = cfg.d_model + 2 * cfg.kv_n_heads * head_dim;
        let wqkv = linear_no_bias(cfg.d_model, wqkv_size, vb.pp("Wqkv"))?;
        // Standard 1/sqrt(head_dim) attention scaling.
        let softmax_scale = 1f64 / (head_dim as f64).sqrt();
        let out_proj = linear_no_bias(cfg.d_model, cfg.d_model, vb.pp("out_proj"))?;
        let attn_bias = build_alibi_bias(cfg)?.to_device(vb.device())?;
        Ok(Self {
            wqkv,
            out_proj,
            kv_cache: None,
            softmax_scale,
            head_dim,
            d_model: cfg.d_model,
            n_heads: cfg.n_heads,
            kv_n_heads: cfg.kv_n_heads,
            attn_bias,
            span: tracing::span!(tracing::Level::TRACE, "gqa"),
        })
    }
    /// Attention forward pass; appends to the KV cache on every call, so a
    /// fresh generation requires resetting the cache externally.
    fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b_size, seq_len, _n_embd) = xs.dims3()?;
        let qkv = self.wqkv.forward(xs)?;
        // Split the fused projection into query / key / value slices.
        let query = qkv.narrow(2, 0, self.d_model)?;
        let kv_size = self.kv_n_heads * self.head_dim;
        let key = qkv.narrow(2, self.d_model, kv_size)?;
        let value = qkv.narrow(2, self.d_model + kv_size, kv_size)?;
        // scaled_multihead_dot_product_attention
        let query = query
            .reshape((b_size, seq_len, self.n_heads, ()))?
            .transpose(1, 2)?; // b,h,s,d
        let key = key
            .reshape((b_size, seq_len, self.kv_n_heads, ()))?
            .permute((0, 2, 3, 1))?; // b,h,d,s
        let value = value
            .reshape((b_size, seq_len, self.kv_n_heads, ()))?
            .transpose(1, 2)?; // b,h,s,d
        // Concatenate along the sequence axis: dim 3 for the transposed key,
        // dim 2 for the value.
        let (key, value) = match &self.kv_cache {
            None => (key, value),
            Some((prev_k, prev_v)) => {
                let k = Tensor::cat(&[prev_k, &key], 3)?;
                let v = Tensor::cat(&[prev_v, &value], 2)?;
                (k, v)
            }
        };
        self.kv_cache = Some((key.clone(), value.clone()));
        let query = query.contiguous()?;
        // Expand the kv heads to match the query heads (grouped-query attn).
        let key = crate::utils::repeat_kv(key, self.n_heads / self.kv_n_heads)?.contiguous()?;
        let value = crate::utils::repeat_kv(value, self.n_heads / self.kv_n_heads)?.contiguous()?;
        let attn_weights = (query.matmul(&key)? * self.softmax_scale)?;
        // Take the bottom-right corner of the precomputed ALiBi bias so it
        // lines up with the current query/key lengths during decoding.
        let attn_bias = {
            let s_q = query.dim(D::Minus2)?;
            let s_k = key.dim(D::Minus1)?;
            let (_, _, a_q, a_k) = self.attn_bias.dims4()?;
            let start_q = a_q.saturating_sub(s_q);
            let start_k = a_k.saturating_sub(s_k);
            self.attn_bias.i((.., .., start_q.., start_k..))?
        };
        let attn_weights = attn_weights.broadcast_add(&attn_bias)?;
        let attn_weights = match mask {
            None => attn_weights,
            Some(mask) => masked_fill(
                &attn_weights,
                &mask.broadcast_as(attn_weights.shape())?,
                f32::NEG_INFINITY,
            )?,
        };
        let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
        let attn_output = attn_weights
            .matmul(&value)?
            .transpose(1, 2)?
            .flatten_from(D::Minus2)?;
        let out = attn_output.apply(&self.out_proj)?;
        Ok(out)
    }
}
/// Feed-forward block: up-projection, GELU (erf form), down-projection.
#[derive(Debug, Clone)]
struct Ffn {
    up_proj: Linear,
    down_proj: Linear,
}
impl Ffn {
    /// Builds the two projections with an inner width of
    /// `d_model * expansion_ratio`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let expanded = cfg.d_model * cfg.expansion_ratio;
        let up_proj = linear_no_bias(cfg.d_model, expanded, vb.pp("up_proj"))?;
        let down_proj = linear_no_bias(expanded, cfg.d_model, vb.pp("down_proj"))?;
        Ok(Self { up_proj, down_proj })
    }
}
impl Module for Ffn {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let expanded = xs.apply(&self.up_proj)?.gelu_erf()?;
        expanded.apply(&self.down_proj)
    }
}
/// One MPT transformer block: pre-norm attention then pre-norm FFN, each
/// with a residual connection.
#[derive(Debug, Clone)]
struct MPTBlock {
    norm1: LayerNorm, // Do we need the low-precision variant?
    attn: GroupedQueryAttention,
    norm2: LayerNorm,
    ffn: Ffn,
}
impl MPTBlock {
    /// Builds the block; MPT layer norms have no affine parameters here.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let ln_cfg = candle_nn::LayerNormConfig {
            affine: false,
            ..Default::default()
        };
        let norm1 = layer_norm(cfg.d_model, ln_cfg, vb.pp("norm_1"))?;
        let norm2 = layer_norm(cfg.d_model, ln_cfg, vb.pp("norm_2"))?;
        let attn = GroupedQueryAttention::new(cfg, vb.pp("attn"))?;
        let ffn = Ffn::new(cfg, vb.pp("ffn"))?;
        Ok(Self {
            norm1,
            attn,
            norm2,
            ffn,
        })
    }
    /// Pre-norm residual forward: x + attn(norm1(x)), then x + ffn(norm2(x)).
    fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
        let residual = xs;
        let xs = xs.apply(&self.norm1)?;
        let xs = self.attn.forward(&xs, mask)?;
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = xs.apply(&self.norm2)?.apply(&self.ffn)?;
        xs + residual
    }
}
/// Builds the (1, n_heads, [1|max_seq], max_seq) ALiBi attention bias:
/// per-head linear distance penalties, following the MPT reference
/// implementation.
pub(crate) fn build_alibi_bias(cfg: &Config) -> Result<Tensor> {
    // Prefix-LM uses the full symmetric |i-j| bias; causal models only need
    // the last row (distance to each earlier position).
    let full = !cfg.is_causal();
    let seq_len = cfg.max_seq_len;
    let alibi_bias = Tensor::arange(1 - seq_len as i64, 1, &Device::Cpu)?;
    let alibi_bias = if full {
        let a1 = alibi_bias.reshape((1, 1, 1, seq_len))?;
        let a2 = alibi_bias.reshape((1, 1, seq_len, 1))?;
        a1.broadcast_sub(&a2)?.abs()?.neg()?
    } else {
        alibi_bias.reshape((1, 1, 1, seq_len))?
    };
    // Round the head count up to the next power of two: slopes are defined
    // for 2^k heads and then interleaved when n_heads is not a power of two.
    let mut n_heads2 = 1;
    while n_heads2 < cfg.n_heads {
        n_heads2 *= 2
    }
    let slopes = (1..=n_heads2)
        .map(|v| 1f32 / 2f32.powf((v * cfg.attn_alibi_bias_max) as f32 / n_heads2 as f32))
        .collect::<Vec<_>>();
    let slopes = if n_heads2 == cfg.n_heads {
        slopes
    } else {
        // Interleave the odd- and even-indexed slopes and truncate, matching
        // the reference "gen_slopes" behavior for non-power-of-two heads.
        slopes
            .iter()
            .skip(1)
            .step_by(2)
            .chain(slopes.iter().step_by(2))
            .take(cfg.n_heads)
            .cloned()
            .collect::<Vec<f32>>()
    };
    let slopes = Tensor::new(slopes, &Device::Cpu)?.reshape((1, (), 1, 1))?;
    alibi_bias.to_dtype(DType::F32)?.broadcast_mul(&slopes)
}
/// MPT model: embedding, transformer blocks, final norm, with the output
/// head tied to the input embedding matrix.
#[derive(Debug, Clone)]
pub struct Model {
    wte: Embedding,
    blocks: Vec<MPTBlock>,
    norm_f: LayerNorm,
}
impl Model {
    /// Builds the model from `vb`.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let wte = Embedding::new(cfg.vocab_size, cfg.d_model, vb.pp("wte"))?;
        let vb_b = vb.pp("blocks");
        let mut blocks = Vec::with_capacity(cfg.n_layers);
        for i in 0..cfg.n_layers {
            let block = MPTBlock::new(cfg, vb_b.pp(i))?;
            blocks.push(block)
        }
        let ln_cfg = candle_nn::LayerNormConfig {
            affine: false,
            ..Default::default()
        };
        let norm_f = candle_nn::layer_norm(cfg.d_model, ln_cfg, vb.pp("norm_f"))?;
        Ok(Self {
            wte,
            blocks,
            norm_f,
        })
    }
    /// Returns the logits for the LAST position only, shape (batch, vocab).
    pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
        let (_b_size, seq_len) = xs.dims2()?;
        let mut xs = xs.apply(&self.wte)?;
        // A causal mask is only needed when processing more than one token;
        // single-token decode steps rely on the KV cache.
        let mask = if seq_len <= 1 {
            None
        } else {
            Some(get_mask(seq_len, xs.device())?)
        };
        for block in self.blocks.iter_mut() {
            xs = block.forward(&xs, mask.as_ref())?;
        }
        let xs = xs.apply(&self.norm_f)?;
        // Tied output head: project against the transposed embedding matrix.
        // NOTE(review): the trailing squeeze(1) acts on the vocab dim and
        // looks like a no-op for vocab > 1 — confirm against candle's squeeze
        // semantics.
        let logits = xs
            .narrow(1, seq_len - 1, 1)?
            .squeeze(1)?
            .matmul(&self.wte.embeddings().t()?)?
            .squeeze(1)?;
        Ok(logits)
    }
}
/// Builds a (size, size) causal mask of u8 values: 1 above the diagonal
/// (masked), 0 on and below it.
pub(crate) fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
    let mut mask = Vec::with_capacity(size * size);
    for row in 0..size {
        for col in 0..size {
            mask.push(u8::from(col > row));
        }
    }
    Tensor::from_slice(&mask, (size, size), device)
}
/// Returns `on_false` with positions where `mask` is non-zero replaced by the
/// scalar `on_true`.
pub(crate) fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let dims = mask.shape().dims();
    let fill = Tensor::new(on_true, on_false.device())?.broadcast_as(dims)?;
    mask.where_cond(&fill, on_false)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/modernbert.rs | candle-transformers/src/models/modernbert.rs | //! ModernBERT
//!
//! ModernBERT is a modernized bidirectional encoder-only Transformer model.
//! - [Arxiv](https://arxiv.org/abs/2412.13663) "Smarter, Better, Faster, Longer: A Modern Bidirectional Encoder for Fast, Memory Efficient, and Long Context Finetuning and Inference"
//! - Upstream [GitHub repo](https://github.com/AnswerDotAI/ModernBERT).
//! - See modernbert in [candle-examples](https://github.com/huggingface/candle/tree/main/candle-examples/) for runnable code
//!
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{
embedding, layer_norm_no_bias, linear, linear_no_bias, ops::softmax, Embedding, LayerNorm,
Linear, Module, VarBuilder,
};
use serde::Deserialize;
use core::f32;
use std::collections::HashMap;
use std::sync::Arc;
/// ModernBERT hyper-parameters, deserialized from the HF config file.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub intermediate_size: usize,
    pub max_position_embeddings: usize,
    pub layer_norm_eps: f64,
    pub pad_token_id: u32,
    // Every n-th layer uses global attention; the rest are local.
    pub global_attn_every_n_layers: usize,
    // RoPE base frequencies for global vs. local attention layers.
    pub global_rope_theta: f64,
    pub local_attention: usize,
    pub local_rope_theta: f64,
    // Classification-head fields, flattened from the same JSON object; absent
    // for the base (non-classifier) checkpoints.
    #[serde(default)]
    #[serde(flatten)]
    pub classifier_config: Option<ClassifierConfig>,
}
/// How token embeddings are pooled for classification: the [CLS] token
/// (default) or the mean over tokens.
#[derive(Debug, Clone, Deserialize, PartialEq, Copy, Default)]
#[serde(rename_all = "lowercase")]
pub enum ClassifierPooling {
    #[default]
    CLS,
    MEAN,
}
/// Label mappings and pooling strategy for the classification head.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct ClassifierConfig {
    pub id2label: HashMap<String, String>,
    pub label2id: HashMap<String, String>,
    pub classifier_pooling: ClassifierPooling,
}
/// Precomputed rotary (RoPE) sin/cos tables for one attention flavor
/// (global or local, distinguished by `rope_theta`).
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
impl RotaryEmbedding {
    /// Precomputes sin/cos for all positions up to
    /// `max_position_embeddings`, using inverse frequencies
    /// theta^(-i/dim) over even indices of the head dimension.
    fn new(dtype: DType, config: &Config, rope_theta: f64, dev: &Device) -> Result<Self> {
        let dim = config.hidden_size / config.num_attention_heads;
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / rope_theta.powf(i as f64 / dim as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let max_seq_len = config.max_position_embeddings;
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        // Outer product position x inv_freq -> per-position angles.
        let freqs = t.matmul(&inv_freq)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }
    /// Applies the rotary embedding to query and key tensors.
    fn apply_rotary_emb_qkv(&self, q: &Tensor, k: &Tensor) -> Result<(Tensor, Tensor)> {
        let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &self.cos, &self.sin)?;
        let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &self.cos, &self.sin)?;
        Ok((q_embed, k_embed))
    }
}
/// Multi-head self-attention with a fused QKV projection and rotary
/// position embeddings.
#[derive(Clone)]
struct ModernBertAttention {
    qkv: Linear,
    proj: Linear,
    num_attention_heads: usize,
    attention_head_size: usize,
    rotary_emb: Arc<RotaryEmbedding>,
}
impl ModernBertAttention {
    /// Loads the fused QKV projection (`Wqkv`, hence the 3x output width)
    /// and the output projection (`Wo`) for one layer.
    fn load(vb: VarBuilder, config: &Config, rotary_emb: Arc<RotaryEmbedding>) -> Result<Self> {
        let num_attention_heads = config.num_attention_heads;
        let attention_head_size = config.hidden_size / config.num_attention_heads;
        let qkv = linear_no_bias(config.hidden_size, config.hidden_size * 3, vb.pp("Wqkv"))?;
        let proj = linear_no_bias(config.hidden_size, config.hidden_size, vb.pp("Wo"))?;
        Ok(Self {
            qkv,
            proj,
            num_attention_heads,
            attention_head_size,
            rotary_emb,
        })
    }
    /// Attends over `hidden_states` (b, seq, d) with the additive
    /// `attention_mask` (broadcast onto the (b, heads, seq, seq) scores) and
    /// returns a tensor of the same shape as the input.
    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let (b, seq_len, d) = hidden_states.dims3()?;
        // (b, seq, 3*d) -> (3, b, heads, seq, head_size) so Q/K/V can be
        // peeled off the leading dimension.
        let qkv = hidden_states
            .apply(&self.qkv)?
            .reshape((
                b,
                seq_len,
                3,
                self.num_attention_heads,
                self.attention_head_size,
            ))?
            .permute((2, 0, 3, 1, 4))?;
        let q = qkv.get(0)?;
        let k = qkv.get(1)?;
        let v = qkv.get(2)?;
        let (q, k) = self.rotary_emb.apply_rotary_emb_qkv(&q, &k)?;
        // Scale queries by 1/sqrt(head_size) before the dot product.
        let scale = (self.attention_head_size as f64).powf(-0.5);
        let q = (q * scale)?;
        let att = q.matmul(&k.transpose(D::Minus2, D::Minus1)?)?;
        let att = att.broadcast_add(attention_mask)?;
        let att = softmax(&att, D::Minus1)?;
        let xs = att.matmul(&v)?;
        // (b, heads, seq, head_size) -> (b, seq, d); the projection keeps
        // that shape, so no further reshape is needed.
        let xs = xs.transpose(1, 2)?.reshape((b, seq_len, d))?;
        let xs = xs.apply(&self.proj)?;
        Ok(xs)
    }
}
/// GeGLU feed-forward block: `Wi` projects to twice the intermediate size so
/// the result can be split into a value half and a gate half.
#[derive(Clone)]
pub struct ModernBertMLP {
    wi: Linear,
    wo: Linear,
}
impl ModernBertMLP {
    /// Loads the up (`Wi`) and down (`Wo`) projections.
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let wi = linear_no_bias(
            config.hidden_size,
            config.intermediate_size * 2,
            vb.pp("Wi"),
        )?;
        let wo = linear_no_bias(config.intermediate_size, config.hidden_size, vb.pp("Wo"))?;
        Ok(Self { wi, wo })
    }
}
impl Module for ModernBertMLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // GeGLU: gelu(value half) * gate half, then project back down.
        let projected = self.wi.forward(xs)?;
        let halves = projected.chunk(2, D::Minus1)?;
        let gated = (halves[0].gelu_erf()? * &halves[1])?;
        self.wo.forward(&gated)
    }
}
/// A single ModernBERT transformer layer: pre-norm attention followed by a
/// pre-norm GeGLU MLP, each wrapped in a residual connection.
#[derive(Clone)]
pub struct ModernBertLayer {
    attn: ModernBertAttention,
    mlp: ModernBertMLP,
    // `None` when the checkpoint has no `attn_norm` weights for this layer;
    // presumably the first layer, which would rely on the embedding norm —
    // TODO confirm against the checkpoint layout.
    attn_norm: Option<LayerNorm>,
    mlp_norm: LayerNorm,
    // Whether this layer uses sliding-window (local) attention.
    uses_local_attention: bool,
}
impl ModernBertLayer {
    fn load(
        vb: VarBuilder,
        config: &Config,
        rotary_emb: Arc<RotaryEmbedding>,
        uses_local_attention: bool,
    ) -> Result<Self> {
        let attn = ModernBertAttention::load(vb.pp("attn"), config, rotary_emb)?;
        let mlp = ModernBertMLP::load(vb.pp("mlp"), config)?;
        // `.ok()` turns a missing-weight error into `None`, so layers
        // without an attention norm still load successfully.
        let attn_norm = layer_norm_no_bias(
            config.hidden_size,
            config.layer_norm_eps,
            vb.pp("attn_norm"),
        )
        .ok();
        let mlp_norm =
            layer_norm_no_bias(config.hidden_size, config.layer_norm_eps, vb.pp("mlp_norm"))?;
        Ok(Self {
            attn,
            mlp,
            attn_norm,
            mlp_norm,
            uses_local_attention,
        })
    }
    /// Runs the layer. `global_attention_mask` is the additive padding mask;
    /// for local layers the sliding-window mask is added on top of it.
    fn forward(
        &self,
        xs: &Tensor,
        global_attention_mask: &Tensor,
        local_attention_mask: &Tensor,
    ) -> Result<Tensor> {
        let residual = xs.clone();
        let mut xs = xs.clone();
        if let Some(norm) = &self.attn_norm {
            xs = xs.apply(norm)?;
        }
        let attention_mask = if self.uses_local_attention {
            &global_attention_mask.broadcast_add(local_attention_mask)?
        } else {
            global_attention_mask
        };
        let xs = self.attn.forward(&xs, attention_mask)?;
        let xs = (xs + residual)?;
        // Second residual branch: pre-norm + GeGLU MLP.
        let mlp_out = xs.apply(&self.mlp_norm)?.apply(&self.mlp)?;
        let xs = (xs + mlp_out)?;
        Ok(xs)
    }
}
/// Shared dense -> GELU -> norm head applied before both the masked-LM
/// decoder and the classifier.
#[derive(Clone)]
pub struct ModernBertHead {
    dense: Linear,
    norm: LayerNorm,
}
impl ModernBertHead {
    /// Loads the head's dense projection and layer norm.
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let dense = linear_no_bias(config.hidden_size, config.hidden_size, vb.pp("dense"))?;
        let norm = layer_norm_no_bias(config.hidden_size, config.layer_norm_eps, vb.pp("norm"))?;
        Ok(Self { dense, norm })
    }
}
impl Module for ModernBertHead {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let dense_out = self.dense.forward(xs)?;
        let activated = dense_out.gelu_erf()?;
        self.norm.forward(&activated)
    }
}
/// Output projection to vocabulary logits for the masked-LM task.
#[derive(Clone)]
pub struct ModernBertDecoder {
    decoder: Linear,
}
impl ModernBertDecoder {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        // The decoder weights are tied with the embeddings layer weights,
        // so they are read from the embedding tensor path; only the bias is
        // a separate `decoder` parameter.
        let decoder_weights = vb.get(
            (config.vocab_size, config.hidden_size),
            "model.embeddings.tok_embeddings.weight",
        )?;
        let decoder_bias = vb.get(config.vocab_size, "decoder.bias")?;
        let decoder = Linear::new(decoder_weights, Some(decoder_bias));
        Ok(Self { decoder })
    }
}
impl Module for ModernBertDecoder {
    // Projects hidden states (.., hidden) to vocab logits (.., vocab).
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = xs.apply(&self.decoder)?;
        Ok(xs)
    }
}
// Global attention mask calculated from padded token inputs.
/// Expands a (batch, seq) 0/1 padding mask into a 4d additive attention
/// bias of shape (batch, 1, tgt_len, src_len): kept positions map to 0 and
/// padded positions to a very large negative value.
fn prepare_4d_attention_mask(
    mask: &Tensor,
    dtype: DType,
    tgt_len: Option<usize>,
) -> Result<Tensor> {
    let bsz = mask.dim(0)?;
    let src_len = mask.dim(1)?;
    // Square mask when no explicit target length is given.
    let tgt_len = tgt_len.unwrap_or(src_len);
    let expanded_mask = mask
        .unsqueeze(1)?
        .unsqueeze(2)?
        .expand((bsz, 1, tgt_len, src_len))?
        .to_dtype(dtype)?;
    // 1 -> keep, 0 -> mask; invert so masked entries become f32::MIN after
    // scaling. The tensor is already in `dtype` and scalar multiplication
    // preserves it, so no further dtype conversion is needed.
    let inverted_mask = (1.0 - expanded_mask)?;
    inverted_mask * f32::MIN as f64
}
// Additive mask implementing sliding-window (local) attention: positions
// further than `max_distance` apart get -inf, everything else 0.
fn get_local_attention_mask(
    seq_len: usize,
    max_distance: usize,
    device: &Device,
) -> Result<Tensor> {
    let mut mask = Vec::with_capacity(seq_len * seq_len);
    for i in 0..seq_len {
        for j in 0..seq_len {
            let distance = (j as i32 - i as i32).abs();
            mask.push(if distance > max_distance as i32 {
                f32::NEG_INFINITY
            } else {
                0f32
            });
        }
    }
    Tensor::from_slice(&mask, (seq_len, seq_len), device)
}
// ModernBERT backbone
/// Encoder backbone: token embeddings, alternating local/global transformer
/// layers and a final norm.
#[derive(Clone)]
pub struct ModernBert {
    word_embeddings: Embedding,
    norm: LayerNorm,
    layers: Vec<ModernBertLayer>,
    final_norm: LayerNorm,
    // Total sliding-window width; halved per side in `forward`.
    local_attention_size: usize,
}
impl ModernBert {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let word_embeddings = embedding(
            config.vocab_size,
            config.hidden_size,
            vb.pp("model.embeddings.tok_embeddings"),
        )?;
        let norm = layer_norm_no_bias(
            config.hidden_size,
            config.layer_norm_eps,
            vb.pp("model.embeddings.norm"),
        )?;
        // Global and local layers use different RoPE base frequencies, so
        // two tables are precomputed and shared via `Arc`.
        let global_rotary_emb = Arc::new(RotaryEmbedding::new(
            vb.dtype(),
            config,
            config.global_rope_theta,
            vb.device(),
        )?);
        let local_rotary_emb = Arc::new(RotaryEmbedding::new(
            vb.dtype(),
            config,
            config.local_rope_theta,
            vb.device(),
        )?);
        let mut layers = Vec::with_capacity(config.num_hidden_layers);
        for layer_id in 0..config.num_hidden_layers {
            // Every `global_attn_every_n_layers`-th layer (starting at 0) is
            // global; all others use sliding-window attention.
            let layer_uses_local_attention = layer_id % config.global_attn_every_n_layers != 0;
            layers.push(ModernBertLayer::load(
                vb.pp(format!("model.layers.{layer_id}")),
                config,
                if layer_uses_local_attention {
                    local_rotary_emb.clone()
                } else {
                    global_rotary_emb.clone()
                },
                layer_uses_local_attention,
            )?);
        }
        let final_norm = layer_norm_no_bias(
            config.hidden_size,
            config.layer_norm_eps,
            vb.pp("model.final_norm"),
        )?;
        Ok(Self {
            word_embeddings,
            norm,
            layers,
            final_norm,
            local_attention_size: config.local_attention,
        })
    }
    /// Encodes token ids `xs` (batch, seq) with padding mask `mask`
    /// (batch, seq; 1 = keep, 0 = pad) into hidden states of shape
    /// (batch, seq, hidden_size).
    pub fn forward(&self, xs: &Tensor, mask: &Tensor) -> Result<Tensor> {
        let seq_len = xs.shape().dims()[1];
        let global_attention_mask =
            prepare_4d_attention_mask(mask, DType::F32, None)?.to_device(xs.device())?;
        let local_attention_mask =
            get_local_attention_mask(seq_len, self.local_attention_size / 2, xs.device())?;
        let mut xs = xs.apply(&self.word_embeddings)?.apply(&self.norm)?;
        for layer in self.layers.iter() {
            xs = layer.forward(&xs, &global_attention_mask, &local_attention_mask)?;
        }
        let xs = xs.apply(&self.final_norm)?;
        Ok(xs)
    }
}
// ModernBERT for the fill-mask task
/// Backbone plus the shared head and the tied-weight vocabulary decoder.
#[derive(Clone)]
pub struct ModernBertForMaskedLM {
    model: ModernBert,
    decoder: ModernBertDecoder,
    head: ModernBertHead,
}
impl ModernBertForMaskedLM {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let model = ModernBert::load(vb.clone(), config)?;
        let decoder = ModernBertDecoder::load(vb.clone(), config)?;
        let head = ModernBertHead::load(vb.pp("head"), config)?;
        Ok(Self {
            model,
            decoder,
            head,
        })
    }
    /// Returns per-token vocabulary logits of shape (batch, seq, vocab).
    pub fn forward(&self, xs: &Tensor, mask: &Tensor) -> Result<Tensor> {
        let hidden = self.model.forward(xs, mask)?;
        let hidden = self.head.forward(&hidden)?;
        self.decoder.forward(&hidden)
    }
}
/// Linear classification layer producing per-class probabilities.
#[derive(Clone)]
pub struct ModernBertClassifier {
    classifier: Linear,
}
impl ModernBertClassifier {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        // Output width is the number of labels from the classifier config.
        // Falls back to 0 when no classifier config is present, which would
        // make this head unusable — presumably classification checkpoints
        // always carry an `id2label` map; TODO confirm.
        let classifier = linear(
            config.hidden_size,
            config
                .classifier_config
                .as_ref()
                .map(|cc| cc.id2label.len())
                .unwrap_or_default(),
            vb.pp("classifier"),
        )?;
        Ok(Self { classifier })
    }
}
impl Module for ModernBertClassifier {
    // Note: applies softmax, so the output is probabilities, not logits.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = xs.apply(&self.classifier)?;
        softmax(&xs, D::Minus1)
    }
}
/// ModernBERT with pooling, the shared head and a softmax classifier.
#[derive(Clone)]
pub struct ModernBertForSequenceClassification {
    model: ModernBert,
    head: ModernBertHead,
    classifier: ModernBertClassifier,
    classifier_pooling: ClassifierPooling,
}
impl ModernBertForSequenceClassification {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let model = ModernBert::load(vb.clone(), config)?;
        let classifier = ModernBertClassifier::load(vb.clone(), config)?;
        let head = ModernBertHead::load(vb.pp("head"), config)?;
        Ok(Self {
            model,
            head,
            classifier,
            // Defaults to CLS pooling when the config has no classifier
            // section.
            classifier_pooling: config
                .classifier_config
                .as_ref()
                .map(|cc| cc.classifier_pooling)
                .unwrap_or_default(),
        })
    }
    /// Returns per-class probabilities (softmax is applied by the
    /// classifier) of shape (batch, num_labels).
    pub fn forward(&self, xs: &Tensor, mask: &Tensor) -> Result<Tensor> {
        let output = self.model.forward(xs, mask)?;
        let last_hidden_state = match self.classifier_pooling {
            // First-token hidden state.
            ClassifierPooling::CLS => output.i((.., 0, ..))?.contiguous()?,
            ClassifierPooling::MEAN => {
                // Mask-weighted mean over the sequence dimension, normalized
                // by the number of non-padding tokens.
                let unsqueezed_mask = &mask.unsqueeze(D::Minus1)?.to_dtype(DType::F32)?;
                let sum_output = output.broadcast_mul(unsqueezed_mask)?.sum(1)?;
                sum_output.broadcast_div(&mask.sum_keepdim(1)?.to_dtype(DType::F32)?)?
            }
        };
        let xs = self
            .head
            .forward(&last_hidden_state)?
            .apply(&self.classifier)?;
        Ok(xs)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/recurrent_gemma.rs | candle-transformers/src/models/recurrent_gemma.rs | //! Recurrent Gemma model implementation
//!
//! Recurrent Gemma is a version of the Gemma language model that incorporates recurrent memory.
//! This allows the model to maintain state between predictions and have longer-range memory.
//!
//! Key characteristics:
//! - Real-gated linear recurrent units (RGLRU)
//! - 1D convolution for local context
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Grouped query attention
//!
//! References:
//! - [Gemma: Open Models Based on Gemini Technology](https://blog.google/technology/developers/gemma-open-models/)
//! - [Recurrent Memory model architecture](https://arxiv.org/abs/2402.00441)
//!
//! This implementation is based on the python version from huggingface/transformers.
//! https://github.com/huggingface/transformers/blob/b109257f4fb8b1166e7c53cc5418632014ed53a5/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py#L2
//!
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{linear_b as linear, Linear, VarBuilder};
use std::sync::Arc;
/// Kind of temporal mixing used by a decoder layer; the per-layer pattern
/// comes from `Config::block_types` and is cycled over the layers.
#[derive(serde::Deserialize, Debug, Clone, Copy)]
#[serde(rename_all = "snake_case")]
pub enum TemporalBlockType {
    Attention,
    Recurrent,
}
/// RecurrentGemma model configuration, deserialized from `config.json`.
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
    pub num_hidden_layers: usize,
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub num_attention_heads: usize,
    pub num_key_value_heads: usize,
    pub head_dim: usize,
    /// Width of the recurrent (RG-LRU) state; defaults to `hidden_size`.
    pub lru_width: Option<usize>,
    pub attention_window_size: usize,
    /// Kernel size of the temporal 1d convolution in recurrent blocks.
    pub conv1d_width: usize,
    /// Final logits are soft-capped via `cap * tanh(logits / cap)`.
    pub logits_soft_cap: f64,
    pub hidden_activation: candle_nn::Activation,
    /// Fraction of each head dim that receives RoPE; only 0.5 is supported.
    pub partial_rotary_factor: f64,
    pub rms_norm_eps: f64,
    pub rope_theta: f64,
    /// Per-layer pattern of temporal blocks, cycled over the layers.
    #[serde(alias = "_block_types")]
    pub block_types: Vec<TemporalBlockType>,
    pub attention_bias: bool,
    #[serde(default = "default_max_seq_len")]
    pub max_seq_len: usize,
}
// Serde default for `Config::max_seq_len`.
fn default_max_seq_len() -> usize {
    8192
}
/// RMS normalization with the Gemma convention of storing the scale as an
/// offset from one (the forward pass multiplies by `weight + 1`).
#[derive(Debug, Clone)]
pub(crate) struct RmsNorm {
    weight: Tensor,
    eps: f64,
}
impl RmsNorm {
    pub(crate) fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
        let weight = vb.get(dim, "weight")?;
        Ok(Self { weight, eps })
    }
    /// Wraps an existing weight tensor without going through a `VarBuilder`.
    pub(crate) fn from_weight(weight: Tensor, eps: f64) -> Self {
        Self { weight, eps }
    }
}
impl Module for RmsNorm {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let x_dtype = x.dtype();
        // Accumulate in f32 for half-precision inputs for numerical accuracy.
        let internal_dtype = match x_dtype {
            DType::F16 | DType::BF16 => DType::F32,
            d => d,
        };
        let hidden_size = x.dim(D::Minus1)?;
        let x = x.to_dtype(internal_dtype)?;
        // Mean of squares over the last dimension.
        let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
        let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
        x_normed
            .to_dtype(x_dtype)?
            .broadcast_mul(&(&self.weight + 1.0)?)
    }
}
/// Precomputed RoPE sin/cos tables covering `max_seq_len` positions; only
/// half of each head dim is rotated (`partial_rotary_factor == 0.5`).
#[derive(Debug, Clone)]
pub(crate) struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}
/// Rotates the last dimension by splitting it in half and mapping
/// (x1, x2) -> (-x2, x1).
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
    let last_dim = xs.dim(D::Minus1)?;
    let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
    let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
    Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}
impl RotaryEmbedding {
    pub(crate) fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        // Only the 0.5 partial factor is implemented; reject anything else.
        if cfg.partial_rotary_factor != 0.5 {
            candle::bail!("partial-rotary-factor {} <> 0.5", cfg.partial_rotary_factor)
        }
        let dim = cfg.head_dim / 2;
        let max_seq_len = cfg.max_seq_len;
        // Inverse frequencies theta^(-i/dim) for every other dimension.
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        // Duplicate the angles so both halves produced by `rotate_half` use
        // the same frequencies.
        let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }
    /// Applies RoPE at positions `seqlen_offset..seqlen_offset + seq_len`
    /// to the (rotary halves of the) query and key tensors.
    pub(crate) fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        seqlen_offset: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
        let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
        let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
        let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
        Ok((q_embed, k_embed))
    }
}
/// Gated feed-forward block: activation on the gate branch, elementwise
/// product with the up branch, then the down projection.
#[derive(Debug, Clone)]
struct Mlp {
    gate_proj: Linear,
    up_proj: Linear,
    down_proj: Linear,
    act_fn: candle_nn::Activation,
}
impl Mlp {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let h = cfg.hidden_size;
        // The configured intermediate size covers both branches, so each
        // projection uses half of it.
        let inner = cfg.intermediate_size / 2;
        let gate_proj = linear(h, inner, true, vb.pp("gate_proj"))?;
        let up_proj = linear(h, inner, true, vb.pp("up_proj"))?;
        let down_proj = linear(inner, h, true, vb.pp("down_proj"))?;
        Ok(Self {
            gate_proj,
            up_proj,
            down_proj,
            act_fn: cfg.hidden_activation,
        })
    }
}
impl Module for Mlp {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let gate = self.act_fn.forward(&self.gate_proj.forward(xs)?)?;
        let up = self.up_proj.forward(xs)?;
        self.down_proj.forward(&(gate * up)?)
    }
}
// Real-Gated Linear Recurrent Unit
/// RG-LRU recurrence with per-head input and recurrence gates.
#[derive(Debug, Clone)]
pub(crate) struct Rglru {
    /// Raw recurrence parameter; the decay is derived from its softplus.
    pub(crate) recurrent_param: Tensor,
    /// (n_heads, block_width, block_width) gate projection weights.
    pub(crate) input_gate_weight: Tensor,
    pub(crate) input_gate_bias: Tensor,
    pub(crate) recurrent_gate_weight: Tensor,
    pub(crate) recurrent_gate_bias: Tensor,
    /// lru_width / n_heads; each gate acts on one such block.
    pub(crate) block_width: usize,
    pub(crate) n_heads: usize,
    /// Hidden state carried across calls; reset wherever the absolute
    /// position is 0.
    pub(crate) recurrent_states: Option<Tensor>,
}
/// Batched `a + b @ c`, broadcasting `a` over the matmul result.
fn baddbmm(a: &Tensor, b: &Tensor, c: &Tensor) -> Result<Tensor> {
    a.broadcast_add(&b.matmul(c)?)
}
/// Numerically stable softplus: log(1 + exp(x)) computed as
/// relu(x) + log(1 + exp(-|x|)) so that large positive inputs do not
/// overflow `exp`.
fn softplus(xs: &Tensor) -> Result<Tensor> {
    let log_term = (xs.abs()?.neg()?.exp()? + 1.0)?.log()?;
    xs.relu()? + log_term
}
impl Rglru {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let h = cfg.hidden_size;
        let lru_width = cfg.lru_width.unwrap_or(h);
        let n_heads = cfg.num_attention_heads;
        // The recurrent width is partitioned into `n_heads` blocks; the
        // gate projections act per block.
        let block_width = lru_width / n_heads;
        let recurrent_param = vb.get((lru_width,), "recurrent_param")?;
        let input_gate_weight = vb.get((n_heads, block_width, block_width), "input_gate_weight")?;
        let input_gate_bias = vb.get((n_heads, block_width), "input_gate_bias")?;
        let recurrent_gate_weight =
            vb.get((n_heads, block_width, block_width), "recurrent_gate_weight")?;
        let recurrent_gate_bias = vb.get((n_heads, block_width), "recurrent_gate_bias")?;
        Ok(Self {
            recurrent_param,
            input_gate_bias,
            input_gate_weight,
            recurrent_gate_bias,
            recurrent_gate_weight,
            block_width,
            n_heads,
            recurrent_states: None,
        })
    }
    // https://github.com/huggingface/transformers/blob/0bd58f1ce0573c0e3269de4215a17d318add49b9/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py#L303
    /// Runs the RG-LRU over `seq_len` tokens starting at absolute position
    /// `pos`; updates `self.recurrent_states` as a side effect.
    pub(crate) fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
        let (b_sz, seq_len, lru_width) = xs.dims3()?;
        // Absolute positions of this call's tokens; the recurrence resets
        // wherever the position is 0 (sequence start).
        let pos = Tensor::arange(pos as u32, (pos + seq_len) as u32, xs.device())?;
        let reset = pos.eq(0u32)?.unsqueeze(1)?.unsqueeze(0)?;
        // (b*s, heads, block) -> (heads, b*s, block) for the per-head
        // batched gate matmuls.
        let reshape_act = xs
            .reshape((b_sz * seq_len, self.n_heads, self.block_width))?
            .permute((1, 0, 2))?
            .contiguous()?;
        let res = baddbmm(
            &self.input_gate_bias.unsqueeze(1)?,
            &reshape_act,
            &self.input_gate_weight,
        )?;
        let input_gate = res.transpose(0, 1)?.reshape((b_sz, seq_len, lru_width))?;
        let input_gate = candle_nn::ops::sigmoid(&input_gate)?;
        let res = baddbmm(
            &self.recurrent_gate_bias.unsqueeze(1)?,
            &reshape_act,
            &self.recurrent_gate_weight,
        )?;
        let recurrent_gate = res.transpose(0, 1)?.reshape((b_sz, seq_len, lru_width))?;
        let recurrent_gate = candle_nn::ops::sigmoid(&recurrent_gate)?;
        // log(a) = -8 * gate * softplus(recurrent_param); -8 matches the
        // constant used by the upstream implementation.
        let log_recurrent_gate =
            (recurrent_gate * (-8.0))?.broadcast_mul(&softplus(&self.recurrent_param)?)?;
        let recurrent_gate = log_recurrent_gate.exp()?;
        let a_square = (log_recurrent_gate * 2.)?.exp()?;
        // Gate the input.
        let gated_inputs = (xs * input_gate)?;
        // Scale inputs by sqrt(1 - a^2) everywhere except reset positions,
        // which pass through unscaled.
        let reset = reset.to_dtype(a_square.dtype())?;
        let multiplier =
            reset.broadcast_add(&((1.0 - &reset)?.broadcast_mul(&(1.0 - a_square)?.sqrt()?))?)?;
        let normalized_x = (gated_inputs * multiplier.to_dtype(xs.dtype()))?;
        let (hidden_states, recurrent_states) = rnn_scan(
            &normalized_x,
            &recurrent_gate,
            &reset,
            self.recurrent_states.as_ref(),
        )?;
        self.recurrent_states = Some(recurrent_states);
        Ok(hidden_states)
    }
}
/// Runs the linear recurrence h_t = a_t * h_{t-1} + x_t over the time
/// dimension (dim 1), returning all per-step outputs plus the final state.
/// The state is accumulated in f32 regardless of the input dtype.
fn rnn_scan(
    hidden_states: &Tensor,
    recurrent_gate: &Tensor,
    reset: &Tensor,
    recurrent_states: Option<&Tensor>,
) -> Result<(Tensor, Tensor)> {
    let acc_dtype = DType::F32;
    let dev = hidden_states.device();
    let in_dtype = hidden_states.dtype();
    // Zero the gate at reset positions so no previous state leaks through.
    let inv_reset = (1.0 - reset)?.to_dtype(recurrent_gate.dtype())?;
    let recurrent_gate = recurrent_gate.broadcast_mul(&inv_reset)?;
    // Fast path for single-token decoding: one fused update, no time loop.
    let (c, r) = if hidden_states.dim(1)? == 1 {
        match recurrent_states {
            None => {
                // No previous state: output is the input, state is that step.
                let next_state = hidden_states.i((.., 0))?.to_dtype(acc_dtype)?;
                (hidden_states.clone(), next_state)
            }
            Some(recurrent_states) => {
                let contextualized_states =
                    recurrent_gate.to_dtype(acc_dtype)? * recurrent_states.unsqueeze(1)?;
                let contextualized_states =
                    (contextualized_states + hidden_states.to_dtype(acc_dtype)?)?;
                let c = contextualized_states.to_dtype(in_dtype)?;
                let l = contextualized_states.dim(1)?;
                let r = contextualized_states.i((.., l - 1))?;
                (c, r)
            }
        }
    } else {
        // Prefill: sequential scan over the time steps.
        let mut recurrent_states = match recurrent_states {
            None => Tensor::zeros(hidden_states.i((.., 0))?.shape(), acc_dtype, dev)?,
            Some(r) => r.clone(),
        };
        let mut contextualized_states = vec![];
        for t in 0..hidden_states.dim(1)? {
            recurrent_states =
                (recurrent_gate.i((.., t))?.to_dtype(acc_dtype)? * recurrent_states)?;
            recurrent_states =
                (recurrent_states + hidden_states.i((.., t))?.to_dtype(acc_dtype)?)?;
            contextualized_states.push(recurrent_states.to_dtype(in_dtype)?)
        }
        let contextualized_states = Tensor::stack(&contextualized_states, 1)?;
        (contextualized_states, recurrent_states)
    };
    Ok((c, r))
}
/// Recurrent temporal block: a gated branch (`linear_y` + activation) is
/// multiplied with a conv + RG-LRU branch (`linear_x`), then projected back
/// by `linear_out`.
#[derive(Debug, Clone)]
struct RecurrentBlock {
    linear_y: Linear,
    linear_x: Linear,
    linear_out: Linear,
    conv_1d: candle_nn::Conv1d,
    // Cached input window for incremental decoding: the last
    // `conv1d_width - 1` input columns.
    conv1d_state: Option<Tensor>,
    conv1d_width: usize,
    rg_lru: Rglru,
    act_fn: candle_nn::Activation,
}
impl RecurrentBlock {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let h = cfg.hidden_size;
        let lru_width = cfg.lru_width.unwrap_or(h);
        let linear_y = linear(h, lru_width, true, vb.pp("linear_y"))?;
        let linear_x = linear(h, lru_width, true, vb.pp("linear_x"))?;
        let linear_out = linear(lru_width, h, true, vb.pp("linear_out"))?;
        // Depthwise (groups == channels) conv; left-padding of
        // `conv1d_width - 1` makes it causal.
        let conv_1d = candle_nn::conv1d(
            lru_width,
            lru_width,
            cfg.conv1d_width,
            candle_nn::Conv1dConfig {
                groups: lru_width,
                padding: cfg.conv1d_width - 1,
                ..Default::default()
            },
            vb.pp("conv_1d"),
        )?;
        let rg_lru = Rglru::new(cfg, vb.pp("rg_lru"))?;
        Ok(Self {
            linear_y,
            linear_x,
            linear_out,
            conv_1d,
            conv1d_state: None,
            conv1d_width: cfg.conv1d_width,
            rg_lru,
            act_fn: cfg.hidden_activation,
        })
    }
    /// Runs the block on `xs` (b, seq, h) at absolute position `pos`;
    /// mutates the conv cache and the RG-LRU state.
    pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
        let (_b_sz, seq_len, _) = xs.dims3()?;
        let y_branch = xs.apply(&self.linear_y)?.apply(&self.act_fn)?;
        let x_branch = xs.apply(&self.linear_x)?.transpose(1, 2)?;
        let x_branch = if pos == 0 {
            // Prefill: run the real convolution, and cache the last
            // `conv1d_width - 1` input columns (left-padded or trimmed as
            // needed) for the incremental decode path.
            let x_len = x_branch.dim(D::Minus1)?;
            let pad = self.conv1d_width as i64 - x_len as i64 - 1;
            let padded = match pad.cmp(&0) {
                std::cmp::Ordering::Equal => x_branch.clone(),
                std::cmp::Ordering::Less => {
                    // Input longer than the window: keep the most recent
                    // columns only.
                    let rev_pad = (-pad) as usize;
                    x_branch.narrow(D::Minus1, rev_pad, x_len - rev_pad)?
                }
                std::cmp::Ordering::Greater => {
                    x_branch.pad_with_zeros(D::Minus1, pad as usize, 0)?
                }
            };
            self.conv1d_state = Some(padded);
            // The causal padding makes the conv output longer than the
            // input; keep only the first `seq_len` steps.
            x_branch
                .apply(&self.conv_1d)?
                .narrow(D::Minus1, 0, seq_len)?
        } else {
            // Decode: single-step depthwise conv computed manually against
            // the cached window (elementwise multiply + sum over the
            // kernel axis), then slide the cache by one column.
            let conv_state = match self.conv1d_state.as_ref() {
                None => candle::bail!("empty cache despite pos > 0"),
                Some(s) => Tensor::cat(&[s, &x_branch], D::Minus1)?,
            };
            let w = self.conv_1d.weight().i((.., 0, ..))?;
            let x_branch = conv_state.broadcast_mul(&w)?.sum(D::Minus1)?;
            let x_branch = match self.conv_1d.bias() {
                None => x_branch,
                Some(b) => x_branch.broadcast_add(b)?,
            };
            let x_branch = x_branch.unsqueeze(D::Minus1)?;
            self.conv1d_state = Some(conv_state.i((.., .., 1..))?);
            x_branch
        };
        let x_branch = x_branch.transpose(1, 2)?;
        let x_branch = self.rg_lru.forward(&x_branch, pos)?;
        // Gate the recurrent branch with the activated y branch.
        (x_branch * y_branch)?.apply(&self.linear_out)
    }
}
/// Grouped-query attention with partial RoPE and a per-block KV cache.
#[derive(Debug, Clone)]
struct SdpaAttention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    n_heads: usize,
    n_kv_heads: usize,
    head_dim: usize,
    hidden_size: usize,
    // (keys, values), accumulated along the sequence dimension (dim 2).
    kv_cache: Option<(Tensor, Tensor)>,
    rotary_emb: Arc<RotaryEmbedding>,
}
impl SdpaAttention {
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let h = cfg.hidden_size;
        let n_heads = cfg.num_attention_heads;
        let n_kv_heads = cfg.num_key_value_heads;
        let hd = cfg.head_dim;
        // q/k/v biases follow `attention_bias`; the output projection
        // always has a bias.
        let q_proj = linear(h, n_heads * hd, cfg.attention_bias, vb.pp("q_proj"))?;
        let k_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("k_proj"))?;
        let v_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("v_proj"))?;
        let o_proj = linear(n_heads * hd, h, true, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            n_heads,
            n_kv_heads,
            head_dim: hd,
            hidden_size: h,
            kv_cache: None,
            rotary_emb,
        })
    }
    // Repeats k/v heads so grouped-query attention matches the query head
    // count.
    fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
        let n_rep = self.n_heads / self.n_kv_heads;
        crate::utils::repeat_kv(x, n_rep)
    }
    /// Attention step at absolute position `pos`; appends to the KV cache.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        pos: usize,
    ) -> Result<Tensor> {
        let (bsz, q_len, _) = xs.dims3()?;
        let query_states = xs.apply(&self.q_proj)?;
        let key_states = xs.apply(&self.k_proj)?;
        let value_states = xs.apply(&self.v_proj)?;
        let query_states = query_states
            .reshape((bsz, q_len, self.n_heads, self.head_dim))?
            .transpose(1, 2)?;
        let key_states = key_states
            .reshape((bsz, q_len, self.n_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let value_states = value_states
            .reshape((bsz, q_len, self.n_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        // Partial RoPE: split each head dim in half and rotate only the
        // first half; the second half passes through unchanged.
        let query_states = query_states.chunk(2, D::Minus1)?;
        let key_states = key_states.chunk(2, D::Minus1)?;
        let (query_rot, key_rot) =
            self.rotary_emb
                .apply_rotary_emb_qkv(&query_states[0], &key_states[0], pos)?;
        let query_states = Tensor::cat(&[&query_rot, &query_states[1]], D::Minus1)?.contiguous()?;
        let key_states = Tensor::cat(&[&key_rot, &key_states[1]], D::Minus1)?.contiguous()?;
        // Prepend the cached keys/values along the sequence dimension.
        let (key_states, value_states) = match &self.kv_cache {
            None => (key_states, value_states),
            Some((prev_k, prev_v)) => {
                let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
                let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
                (key_states, value_states)
            }
        };
        self.kv_cache = Some((key_states.clone(), value_states.clone()));
        let key_states = self.repeat_kv(key_states)?;
        let value_states = self.repeat_kv(value_states)?;
        let xs = {
            let att = (query_states.matmul(&key_states.t()?)? / (self.head_dim as f64).sqrt())?;
            // A single query token can see every cached position, so the
            // causal mask is skipped for q_len == 1.
            let att = if q_len == 1 {
                att
            } else {
                match attention_mask {
                    None => att,
                    Some(mask) => att.broadcast_add(mask)?,
                }
            };
            let att = candle_nn::ops::softmax_last_dim(&att)?;
            att.matmul(&value_states.contiguous()?)?
        };
        let xs = xs
            .transpose(1, 2)?
            .reshape((bsz, q_len, self.hidden_size))?;
        self.o_proj.forward(&xs)
    }
}
/// Temporal mixing layer: either an RG-LRU recurrent block or grouped-query
/// attention, per the config's block-type pattern.
#[derive(Debug, Clone)]
enum TemporalBlock {
    Recurrent(RecurrentBlock),
    Attention(SdpaAttention),
}
impl TemporalBlock {
    /// Dispatches to the wrapped block; the attention mask is only consumed
    /// by the attention variant.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        pos: usize,
    ) -> Result<Tensor> {
        match self {
            Self::Attention(attn) => attn.forward(xs, attention_mask, pos),
            Self::Recurrent(rec) => rec.forward(xs, pos),
        }
    }
}
/// One RecurrentGemma decoder layer: pre-norm temporal mixing (attention or
/// recurrence) followed by a pre-norm gated MLP, each with a residual.
#[derive(Debug, Clone)]
struct DecoderLayer {
    temporal_pre_norm: RmsNorm,
    channel_pre_norm: RmsNorm,
    temporal_block: TemporalBlock,
    mlp_block: Mlp,
}
impl DecoderLayer {
    fn new(
        block_idx: usize,
        rotary_emb: Arc<RotaryEmbedding>,
        cfg: &Config,
        vb: VarBuilder,
    ) -> Result<Self> {
        let h = cfg.hidden_size;
        let temporal_pre_norm = RmsNorm::new(h, cfg.rms_norm_eps, vb.pp("temporal_pre_norm"))?;
        let channel_pre_norm = RmsNorm::new(h, cfg.rms_norm_eps, vb.pp("channel_pre_norm"))?;
        // The block-type pattern from the config cycles over the layers.
        let temporal_block = match cfg.block_types[block_idx % cfg.block_types.len()] {
            TemporalBlockType::Recurrent => {
                let block = RecurrentBlock::new(cfg, vb.pp("temporal_block"))?;
                TemporalBlock::Recurrent(block)
            }
            TemporalBlockType::Attention => {
                let block = SdpaAttention::new(rotary_emb, cfg, vb.pp("temporal_block"))?;
                TemporalBlock::Attention(block)
            }
        };
        let mlp_block = Mlp::new(cfg, vb.pp("mlp_block"))?;
        Ok(Self {
            temporal_pre_norm,
            channel_pre_norm,
            temporal_block,
            mlp_block,
        })
    }
    /// Runs the layer at absolute position `pos`; state lives inside the
    /// temporal block.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        pos: usize,
    ) -> Result<Tensor> {
        let residual = xs;
        let xs = xs.apply(&self.temporal_pre_norm)?;
        let xs = self.temporal_block.forward(&xs, attention_mask, pos)?;
        let xs = (xs + residual)?;
        // Second residual branch: pre-norm + gated MLP.
        let residual = &xs;
        let xs = xs.apply(&self.channel_pre_norm)?.apply(&self.mlp_block)?;
        xs + residual
    }
}
/// RecurrentGemma causal language model: embeddings, decoder layers, final
/// norm and a weight-tied LM head.
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: candle_nn::Embedding,
    layers: Vec<DecoderLayer>,
    final_norm: RmsNorm,
    // Shares its weight with `embed_tokens` (no bias).
    lm_head: Linear,
    hidden_size: usize,
    logits_soft_cap: f64,
    dtype: DType,
    device: Device,
}
impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let embed_tokens =
            candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?;
        let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
        let vb_b = vb.pp("layers");
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        for idx in 0..cfg.num_hidden_layers {
            let layer = DecoderLayer::new(idx, rotary_emb.clone(), cfg, vb_b.pp(idx))?;
            layers.push(layer)
        }
        let final_norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("final_norm"))?;
        // The LM head ties its weight to the token embeddings.
        let lm_head = Linear::new(embed_tokens.embeddings().clone(), None);
        Ok(Self {
            embed_tokens,
            layers,
            final_norm,
            lm_head,
            hidden_size: cfg.hidden_size,
            logits_soft_cap: cfg.logits_soft_cap,
            dtype: vb.dtype(),
            device: vb.device().clone(),
        })
    }
    /// Builds the additive causal mask for `tgt_len` query tokens; the
    /// `seqlen_offset` cached positions get zero columns (all visible).
    fn prepare_decoder_attention_mask(
        &self,
        b_size: usize,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(self.dtype)
    }
    /// Runs the model on token ids `xs` (batch, seq) starting at absolute
    /// position `pos`, returning soft-capped logits for the last position
    /// only, shape (batch, 1, vocab).
    pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
        let (b_size, seq_len) = xs.dims2()?;
        // Single-token decode steps need no causal mask.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(b_size, seq_len, pos)?;
            Some(mask)
        };
        let xs = xs.apply(&self.embed_tokens)?;
        // Gemma scales embeddings by sqrt(hidden_size).
        let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), pos)?;
        }
        let logits = xs
            .narrow(1, seq_len - 1, 1)?
            .apply(&self.final_norm)?
            .apply(&self.lm_head)?;
        // Soft cap: cap * tanh(logits / cap) keeps logits within (-cap, cap).
        let logits = ((logits / self.logits_soft_cap)?.tanh()? * self.logits_soft_cap)?;
        Ok(logits)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/xlm_roberta.rs | candle-transformers/src/models/xlm_roberta.rs | use crate::models::with_tracing::{linear, Linear};
use candle::{DType, Module, Result, Tensor};
use candle_nn::{
embedding, layer_norm, ops::softmax_last_dim, Activation, Embedding, LayerNorm, VarBuilder,
};
/// XLM-RoBERTa configuration, deserialized from `config.json`.
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    pub hidden_size: usize,
    pub layer_norm_eps: f64,
    // Dropout probabilities carried over from the training config.
    pub attention_probs_dropout_prob: f32,
    pub hidden_dropout_prob: f32,
    pub num_attention_heads: usize,
    pub position_embedding_type: String,
    pub intermediate_size: usize,
    pub hidden_act: Activation,
    pub num_hidden_layers: usize,
    pub vocab_size: usize,
    pub max_position_embeddings: usize,
    pub type_vocab_size: usize,
    /// Padding token id; also used as the base offset for position ids.
    pub pad_token_id: u32,
}
/// Sum of word, position and token-type embeddings followed by LayerNorm.
struct XLMRobertaEmbeddings {
    word_embeddings: Embedding,
    // NOTE(review): `load` always constructs `Some`; the `Option` leaves
    // room for a position-embedding-free variant.
    position_embeddings: Option<Embedding>,
    token_type_embeddings: Embedding,
    layer_norm: LayerNorm,
    // Token id used for padding; positions are offset by this value.
    padding_idx: u32,
    span: tracing::Span,
}
impl XLMRobertaEmbeddings {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let word_embeddings = embedding(
            config.vocab_size,
            config.hidden_size,
            vb.pp("word_embeddings"),
        )?;
        let position_embeddings = embedding(
            config.max_position_embeddings,
            config.hidden_size,
            vb.pp("position_embeddings"),
        )?;
        let token_type_embeddings = embedding(
            config.type_vocab_size,
            config.hidden_size,
            vb.pp("token_type_embeddings"),
        )?;
        let layer_norm = layer_norm(
            config.hidden_size,
            config.layer_norm_eps,
            vb.pp("LayerNorm"),
        )?;
        Ok(Self {
            word_embeddings,
            position_embeddings: Some(position_embeddings),
            token_type_embeddings,
            layer_norm,
            padding_idx: config.pad_token_id,
            span: tracing::span!(tracing::Level::TRACE, "embeddings"),
        })
    }
    /// Embeds `input_ids` and `token_type_ids` (both (batch, seq)) into
    /// hidden states (batch, seq, hidden_size).
    fn forward(&self, input_ids: &Tensor, token_type_ids: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (_bsize, _) = input_ids.dims2()?;
        let input_embeddings = self.word_embeddings.forward(input_ids)?;
        let token_type_embeddings = self.token_type_embeddings.forward(token_type_ids)?;
        let mut embeddings = (&input_embeddings + token_type_embeddings)?;
        if let Some(position_embeddings) = &self.position_embeddings {
            // XLM-R position ids: cumulative count of non-padding tokens,
            // offset by `padding_idx`, so real tokens start at
            // `padding_idx + 1` and padding positions stay at `padding_idx`.
            let mask = input_ids
                .ne(self.padding_idx)?
                .to_dtype(input_embeddings.dtype())?;
            let cumsum = mask.cumsum(1)?;
            let position_ids = (cumsum * mask)?
                .broadcast_add(
                    &Tensor::try_from(self.padding_idx)?
                        .to_dtype(input_embeddings.dtype())?
                        .to_device(input_embeddings.device())?,
                )?
                .to_dtype(candle::DType::U32)?;
            embeddings = embeddings.broadcast_add(&position_embeddings.forward(&position_ids)?)?;
        }
        let embeddings = self.layer_norm.forward(&embeddings)?;
        Ok(embeddings)
    }
}
/// Multi-head attention supporting self-attention, cross-attention and
/// key/value caching.
struct XLMRobertaSelfAttention {
    num_attention_heads: usize,
    attention_head_size: usize,
    all_head_size: usize,
    query: Linear,
    key: Linear,
    value: Linear,
}
impl XLMRobertaSelfAttention {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let attention_head_size = cfg.hidden_size / cfg.num_attention_heads;
        let all_head_size = cfg.num_attention_heads * attention_head_size;
        Ok(Self {
            num_attention_heads: cfg.num_attention_heads,
            attention_head_size,
            all_head_size,
            query: linear(cfg.hidden_size, all_head_size, vb.pp("query"))?,
            key: linear(cfg.hidden_size, all_head_size, vb.pp("key"))?,
            value: linear(cfg.hidden_size, all_head_size, vb.pp("value"))?,
        })
    }
    /// Reshapes `(b, seq, all_head_size)` into `(b, heads, seq, head_size)`.
    fn transpose_for_scores(&self, x: &Tensor) -> Result<Tensor> {
        let mut new_x_shape = x.dims().to_vec();
        new_x_shape[2] = self.num_attention_heads;
        new_x_shape.push(self.attention_head_size);
        let x = x.reshape(new_x_shape)?;
        x.permute((0, 2, 1, 3))?.contiguous()
    }
    /// Scaled dot-product attention.
    ///
    /// When `encoder_hidden_states` is `Some`, keys/values come from the
    /// encoder (cross-attention) and `encoder_attention_mask` is used; a
    /// provided `past_key_value` is then reused verbatim. For self-attention,
    /// cached keys/values are concatenated in front of the freshly computed
    /// ones along the sequence axis.
    fn forward(
        &self,
        hidden_states: &Tensor,
        encoder_hidden_states: Option<&Tensor>,
        attention_mask: &Tensor,
        past_key_value: Option<(&Tensor, &Tensor)>,
        encoder_attention_mask: Option<&Tensor>,
    ) -> Result<Tensor> {
        let mixed_query_layer = self.query.forward(hidden_states)?;
        let is_cross_attention = encoder_hidden_states.is_some();
        let (key_layer, value_layer, attention_mask) = if is_cross_attention {
            if let Some((past_key, past_value)) = past_key_value {
                // Cross-attention keys/values never change across steps, so a
                // cache hit is reused as-is.
                let key_layer = past_key.clone();
                let value_layer = past_value.clone();
                let attention_mask = encoder_attention_mask.unwrap().clone();
                (key_layer, value_layer, Some(attention_mask))
            } else {
                let key_layer =
                    self.transpose_for_scores(&self.key.forward(encoder_hidden_states.unwrap())?)?;
                let value_layer = self
                    .transpose_for_scores(&self.value.forward(encoder_hidden_states.unwrap())?)?;
                let attention_mask = encoder_attention_mask.unwrap();
                (key_layer, value_layer, Some(attention_mask.clone()))
            }
        } else if let Some((past_key, past_value)) = past_key_value {
            let mut key_layer = self.transpose_for_scores(&self.key.forward(hidden_states)?)?;
            let mut value_layer = self.transpose_for_scores(&self.value.forward(hidden_states)?)?;
            // Prepend the cached keys/values along the sequence axis (dim 2).
            key_layer = Tensor::cat(&[past_key.clone(), key_layer], 2)?;
            value_layer = Tensor::cat(&[past_value.clone(), value_layer], 2)?;
            (key_layer, value_layer, Some(attention_mask.clone()))
        } else {
            let key_layer = self.transpose_for_scores(&self.key.forward(hidden_states)?)?;
            let value_layer = self.transpose_for_scores(&self.value.forward(hidden_states)?)?;
            (key_layer, value_layer, Some(attention_mask.clone()))
        };
        let query_layer = self.transpose_for_scores(&mixed_query_layer)?;
        let mut attention_scores = query_layer.matmul(&key_layer.transpose(2, 3)?)?;
        let scale = 1f64 / f64::sqrt(self.attention_head_size as f64);
        attention_scores = (attention_scores * scale)?;
        // The mask is additive: 0 for visible positions, a large negative
        // value for masked ones (built by `prepare_4d_attention_mask`).
        attention_scores = match attention_mask {
            None => attention_scores,
            Some(mask) => {
                attention_scores.broadcast_add(&mask.to_dtype(attention_scores.dtype())?)?
            }
        };
        let attention_probs = softmax_last_dim(&attention_scores)?;
        let context_layer = attention_probs
            .matmul(&value_layer)?
            .permute((0, 2, 1, 3))?
            .contiguous()?;
        // Merge the head dimensions back: (b, seq, heads, head_size) ->
        // (b, seq, all_head_size).
        let mut new_context_layer_shape =
            context_layer.dims()[..context_layer.dims().len() - 2].to_vec();
        new_context_layer_shape.push(self.all_head_size);
        let context_layer = context_layer.reshape(new_context_layer_shape)?;
        Ok(context_layer)
    }
}
/// Post-attention projection followed by a residual add + LayerNorm.
struct XLMRobertaSelfOutput {
    dense: Linear,
    layernorm: LayerNorm,
}
impl XLMRobertaSelfOutput {
    /// Builds the projection and LayerNorm from `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            dense: linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?,
            layernorm: candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layer_norm_eps,
                vb.pp("LayerNorm"),
            )?,
        })
    }
    /// Projects `hidden_states`, adds the residual `input_tensor`, normalizes.
    fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
        let projected = self.dense.forward(hidden_states)?;
        let residual_sum = (projected + input_tensor)?;
        self.layernorm.forward(&residual_sum)
    }
}
/// Self-attention plus its output projection, mirroring the HF module layout
/// (`self` and `output` sub-modules).
struct XLMRobertaAttention {
    output: XLMRobertaSelfOutput,
    self_attention: XLMRobertaSelfAttention,
}
impl XLMRobertaAttention {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            output: XLMRobertaSelfOutput::new(cfg, vb.pp("output"))?,
            self_attention: XLMRobertaSelfAttention::new(cfg, vb.pp("self"))?,
        })
    }
    /// Returns `(projected_output, raw_self_attention_output)`.
    fn forward(
        &self,
        hidden_states: &Tensor,
        attention_mask: &Tensor,
        encoder_hidden_states: Option<&Tensor>,
        encoder_attention_mask: Option<&Tensor>,
        past_key_value: Option<(&Tensor, &Tensor)>,
    ) -> Result<(Tensor, Tensor)> {
        let raw = self.self_attention.forward(
            hidden_states,
            encoder_hidden_states,
            attention_mask,
            past_key_value,
            encoder_attention_mask,
        )?;
        let projected = self.output.forward(&raw, hidden_states)?;
        Ok((projected, raw))
    }
}
/// Feed-forward output: projects back from `intermediate_size` to
/// `hidden_size`, then residual add + LayerNorm.
struct XLMRobertaOutput {
    dense: Linear,
    layernorm: LayerNorm,
}
impl XLMRobertaOutput {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            dense: linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("dense"))?,
            layernorm: candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layer_norm_eps,
                vb.pp("LayerNorm"),
            )?,
        })
    }
    /// Projects `hidden_states`, adds the residual `input_tensor`, normalizes.
    fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
        let projected = self.dense.forward(hidden_states)?;
        let residual_sum = (projected + input_tensor)?;
        self.layernorm.forward(&residual_sum)
    }
}
/// Feed-forward expansion: `hidden_size -> intermediate_size` followed by the
/// configured activation.
struct XLMRobertaIntermediate {
    dense: Linear,
    intermediate_act_fn: Activation,
}
impl XLMRobertaIntermediate {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            dense: linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("dense"))?,
            intermediate_act_fn: cfg.hidden_act,
        })
    }
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let expanded = self.dense.forward(hidden_states)?;
        self.intermediate_act_fn.forward(&expanded)
    }
}
/// A single transformer encoder layer: attention followed by the feed-forward
/// block, each with its own residual + LayerNorm.
struct XLMRobertaLayer {
    attention: XLMRobertaAttention,
    intermediate: XLMRobertaIntermediate,
    output: XLMRobertaOutput,
}
impl XLMRobertaLayer {
    /// Loads the attention, intermediate and output sub-modules from `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            attention: XLMRobertaAttention::new(cfg, vb.pp("attention"))?,
            intermediate: XLMRobertaIntermediate::new(cfg, vb.pp("intermediate"))?,
            output: XLMRobertaOutput::new(cfg, vb.pp("output"))?,
        })
    }
    /// Runs the layer; returns `(layer_output, raw_attention_output)`.
    fn forward(
        &self,
        hidden_states: &Tensor,
        attention_mask: &Tensor,
        encoder_hidden_states: Option<&Tensor>,
        encoder_attention_mask: Option<&Tensor>,
        past_key_value: Option<(&Tensor, &Tensor)>,
    ) -> Result<(Tensor, Tensor)> {
        let (attention_output, raw_attention) = self.attention.forward(
            hidden_states,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
        )?;
        let intermediate_output = self.intermediate.forward(&attention_output)?;
        let layer_output = self
            .output
            .forward(&intermediate_output, &attention_output)?;
        Ok((layer_output, raw_attention))
    }
}
/// Stack of `num_hidden_layers` transformer layers.
struct XLMRobertaEncoder {
    layers: Vec<XLMRobertaLayer>,
}
impl XLMRobertaEncoder {
    /// Loads every layer from the `layer.{i}` sub-prefixes of `vb`.
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        for i in 0..cfg.num_hidden_layers {
            layers.push(XLMRobertaLayer::new(cfg, vb.pp(format!("layer.{i}")))?);
        }
        Ok(Self { layers })
    }
    /// Threads the hidden states through each layer in order, discarding the
    /// per-layer raw attention outputs.
    fn forward(
        &self,
        hidden_states: &Tensor,
        attention_mask: &Tensor,
        encoder_hidden_states: Option<&Tensor>,
        encoder_attention_mask: Option<&Tensor>,
        past_key_value: Option<(&Tensor, &Tensor)>,
    ) -> Result<Tensor> {
        self.layers
            .iter()
            .try_fold(hidden_states.clone(), |xs, layer| {
                let (out, _attn) = layer.forward(
                    &xs,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                )?;
                Ok(out)
            })
    }
}
/// XLM-RoBERTa encoder model: embeddings followed by the transformer stack.
pub struct XLMRobertaModel {
    encoder: XLMRobertaEncoder,
    embeddings: XLMRobertaEmbeddings,
}
impl XLMRobertaModel {
    /// Loads the model from the `encoder` and `embeddings` prefixes of `vb`.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let encoder = XLMRobertaEncoder::new(cfg, vb.pp("encoder"))?;
        let embeddings = XLMRobertaEmbeddings::load(vb.pp("embeddings"), cfg)?;
        Ok(Self {
            encoder,
            embeddings,
        })
    }
    /// Returns the last hidden states of shape `(batch, seq, hidden_size)`.
    ///
    /// `attention_mask` is a `(batch, seq)` tensor of 1s (attend) and 0s
    /// (ignore); it is expanded here to an additive 4d float mask before
    /// reaching the attention layers.
    pub fn forward(
        &self,
        input_ids: &Tensor,
        attention_mask: &Tensor,
        token_type_ids: &Tensor,
        past_key_value: Option<(&Tensor, &Tensor)>,
        encoder_hidden_states: Option<&Tensor>,
        encoder_attention_mask: Option<&Tensor>,
    ) -> Result<Tensor> {
        let hidden_states = self.embeddings.forward(input_ids, token_type_ids)?;
        let attention_mask = prepare_4d_attention_mask(attention_mask, DType::F32, None)?
            .to_device(hidden_states.device())?;
        let hidden_states = self.encoder.forward(
            &hidden_states,
            &attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
        )?;
        Ok(hidden_states)
    }
}
/// Masked-LM head: dense -> GELU -> LayerNorm -> projection onto the tied
/// word-embedding matrix.
struct XLMRobertaLMHead {
    dense: Linear,
    layer_norm: LayerNorm,
}
impl XLMRobertaLMHead {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
        let layer_norm =
            candle_nn::layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("layer_norm"))?;
        Ok(Self { dense, layer_norm })
    }
    /// `shared_embeddings` is the transposed word-embedding table (see
    /// `XLMRobertaForMaskedLM::forward`), so the final matmul produces
    /// vocabulary logits.
    fn forward(&self, hidden_states: &Tensor, shared_embeddings: &Tensor) -> Result<Tensor> {
        let hidden_states = self.dense.forward(hidden_states)?;
        let hidden_states = candle_nn::Activation::Gelu.forward(&hidden_states)?;
        let hidden_states = self.layer_norm.forward(&hidden_states)?;
        let hidden_states = hidden_states.broadcast_matmul(shared_embeddings)?;
        Ok(hidden_states)
    }
}
/// XLM-RoBERTa with a masked-language-modeling head on top.
pub struct XLMRobertaForMaskedLM {
    roberta: XLMRobertaModel,
    lm_head: XLMRobertaLMHead,
}
impl XLMRobertaForMaskedLM {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let roberta = XLMRobertaModel::new(cfg, vb.pp("roberta"))?;
        let lm_head = XLMRobertaLMHead::new(cfg, vb.pp("lm_head"))?;
        Ok(Self { roberta, lm_head })
    }
    /// Returns vocabulary logits of shape `(batch, seq, vocab_size)`.
    pub fn forward(
        &self,
        input_ids: &Tensor,
        attention_mask: &Tensor,
        token_type_ids: &Tensor,
        past_key_value: Option<(&Tensor, &Tensor)>,
        encoder_hidden_states: Option<&Tensor>,
        encoder_attention_mask: Option<&Tensor>,
    ) -> Result<Tensor> {
        let hidden_states = self.roberta.forward(
            input_ids,
            attention_mask,
            token_type_ids,
            past_key_value,
            encoder_hidden_states,
            encoder_attention_mask,
        )?;
        // The output projection is tied to the input word embeddings: the LM
        // head receives the transposed embedding table.
        let lm_logits = self.lm_head.forward(
            &hidden_states,
            &self
                .roberta
                .embeddings
                .word_embeddings
                .embeddings()
                .t()?
                .unsqueeze(0)?,
        )?;
        Ok(lm_logits)
    }
}
/// Sentence-classification head applied to the first (`<s>`/CLS) token.
struct XLMRobertaClassificationHead {
    dense: Linear,
    out_proj: Linear,
}
impl XLMRobertaClassificationHead {
    /// Builds the two projections; `out_proj` maps onto `num_labels` classes.
    fn new(num_labels: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            dense: linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?,
            out_proj: linear(cfg.hidden_size, num_labels, vb.pp("out_proj"))?,
        })
    }
    /// Pools the first token, applies dense + tanh, then projects to logits.
    ///
    /// The tanh activation matches the original implementation:
    /// https://github.com/huggingface/transformers/blob/6e3063422c4b1c014aa60c32b9254fd2902f0f28/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py#L1454
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let cls_token = hidden_states.get_on_dim(1, 0)?.contiguous()?;
        let pooled = self.dense.forward(&cls_token)?.tanh()?;
        self.out_proj.forward(&pooled)
    }
}
/// XLM-RoBERTa with a sequence-classification head on top.
pub struct XLMRobertaForSequenceClassification {
    roberta: XLMRobertaModel,
    classifier: XLMRobertaClassificationHead,
}
impl XLMRobertaForSequenceClassification {
    pub fn new(num_labels: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        Ok(Self {
            roberta: XLMRobertaModel::new(cfg, vb.pp("roberta"))?,
            classifier: XLMRobertaClassificationHead::new(num_labels, cfg, vb.pp("classifier"))?,
        })
    }
    /// Encodes the input and returns classification logits of shape
    /// `(batch, num_labels)`.
    pub fn forward(
        &self,
        input_ids: &Tensor,
        attention_mask: &Tensor,
        token_type_ids: &Tensor,
    ) -> Result<Tensor> {
        let encoded = self
            .roberta
            .forward(input_ids, attention_mask, token_type_ids, None, None, None)?;
        self.classifier.forward(&encoded)
    }
}
/// Expands a `(batch, seq)` 1/0 mask into an additive `(batch, 1, tgt, seq)`
/// mask: 0 for attended positions, the dtype's minimum for masked ones.
fn prepare_4d_attention_mask(
    mask: &Tensor,
    dtype: DType,
    tgt_len: Option<usize>,
) -> Result<Tensor> {
    let (bsz, src_len) = (mask.dim(0)?, mask.dim(1)?);
    // Default the target length to the source length (square mask).
    let tgt = tgt_len.unwrap_or(src_len);
    let expanded = mask
        .unsqueeze(1)?
        .unsqueeze(2)?
        .expand((bsz, 1, tgt, src_len))?
        .to_dtype(dtype)?;
    // Flip 1/0 to 0/1 so masked positions pick up the large negative bias.
    let inverted = (1.0 - expanded)?;
    (inverted * get_dtype_min_val(dtype))?.to_dtype(dtype)
}
/// Minimum finite value of the given float dtype, as an `f64`.
///
/// Panics for non-f32/f64 dtypes, which the mask construction above does not
/// support.
fn get_dtype_min_val(dtype: DType) -> f64 {
    match dtype {
        DType::F64 => f64::MIN,
        DType::F32 => f64::from(f32::MIN),
        _ => panic!("Unsupported data type"),
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/eva2.rs | candle-transformers/src/models/eva2.rs | //! EVA-2 inference implementation.
//!
//! EVA-02 is a computer vision model that can be used as an ImageNet classifier.
//! The model returns the probability for an image to belong to each of the 1000
//! ImageNet categories.
//!
//! - [Paper](https://arxiv.org/abs/2303.11331). EVA-02: A Visual Representation for Neon Genesis
//! - [Code](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/eva2.py)
//!
//! # Example
//!
//! ```bash
//! cargo run \
//! --example eva2 \
//! --release -- \
//! --image candle-examples/examples/yolo-v8/assets/bike.jpg
//!
//! > mountain bike, all-terrain bike, off-roader: 37.09%
//! > maillot : 8.30%
//! > alp : 2.13%
//! > bicycle-built-for-two, tandem bicycle, tandem: 0.84%
//! > crash helmet : 0.73%
//! ```
//!
//! <div align=center>
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width=640>
//! </div>
//!
use candle::{IndexOp, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder};
// Defaults for the EVA-02 checkpoints loaded here: 448x448 inputs cut into
// 14x14-pixel patches, classified over the 1000 ImageNet categories.
const IMG_SIZE: usize = 448;
const PATCH_SIZE: usize = 14;
const NUM_CLASSES: usize = 1000;
/// Builds a `Linear` layer from `vb`, with or without a bias term.
fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> {
    match bias {
        true => candle_nn::linear(in_dim, out_dim, vb),
        false => candle_nn::linear_no_bias(in_dim, out_dim, vb),
    }
}
/// Multi-head self-attention with rotary position embeddings applied to the
/// patch tokens.
#[derive(Debug)]
struct Attention {
    q: Linear,
    k: Linear,
    v: Linear,
    proj: Linear,
    // Concatenated sin/cos rotary table, shared across all blocks.
    rot_pos_embed: Tensor,
    num_heads: usize,
    // 1 / sqrt(head_dim), applied to the queries before the dot product.
    scale: f64,
}
impl Attention {
    fn new(
        vb: VarBuilder,
        dim: usize,
        num_heads: usize,
        qkv_bias: bool,
        proj_bias: bool,
        rot_pos_embed: &Tensor,
    ) -> Result<Self> {
        let q = linear(vb.pp("q_proj"), dim, dim, qkv_bias)?;
        let k = linear(vb.pp("k_proj"), dim, dim, false)?; // no bias for Key
        let v = linear(vb.pp("v_proj"), dim, dim, qkv_bias)?;
        let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?;
        let rot_pos_embed = rot_pos_embed.clone();
        let scale = 1. / ((dim / num_heads) as f64).sqrt();
        Ok(Self {
            q,
            k,
            v,
            proj,
            rot_pos_embed,
            num_heads,
            scale,
        })
    }
}
impl Attention {
    // See: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/pos_embed_sincos.py#L210
    /// Applies concatenated sin/cos rotary embeddings to `x`:
    /// `x * cos + rotate_half(x) * sin`.
    ///
    /// `emb` packs sin factors in columns 0..64 and cos factors in 64..128;
    /// the hard-coded 64 matches the head dim of the base/large configs built
    /// in this file (768/12 and 1024/16 are both 64).
    fn apply_rot_embed_cat(x: &Tensor, emb: &Tensor) -> Result<Tensor> {
        let cos_emb = emb.i((0.., 64..128))?; //.transpose(0, 1)?;
        let sin_emb = emb.i((0.., 0..64))?; //.transpose(0, 1)?;
        // Even / odd feature indices, used to build the "rotate half"
        // companion tensor (-x_odd interleaved with x_even).
        let index_even: [u32; 32] = (0u32..=63)
            .step_by(2)
            .collect::<Vec<_>>()
            .try_into()
            .expect("wrong size iterator");
        let index_odd: [u32; 32] = (1u32..=63)
            .step_by(2)
            .collect::<Vec<_>>()
            .try_into()
            .expect("wrong size iterator");
        let t_index_even = Tensor::new(&index_even, x.device())?;
        let t_index_odd = Tensor::new(&index_odd, x.device())?;
        let x_c = x.contiguous()?;
        let rot_x_even = x_c.index_select(&t_index_even, D::Minus1)?;
        let rot_x_odd_minus = (-1.0 * x_c.index_select(&t_index_odd, D::Minus1)?)?;
        let rot_x =
            Tensor::stack(&[&rot_x_odd_minus, &rot_x_even], D::Minus1)?.reshape(x.shape())?;
        x.broadcast_mul(&cos_emb)? + rot_x.broadcast_mul(&sin_emb)?
    }
}
impl Module for Attention {
    /// Standard softmax attention over `(b, n, c)` token sequences, with
    /// rotary embeddings applied to the non-prefix (patch) tokens only.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, n, c) = xs.dims3()?;
        // Stack q/k/v and reshape to (3, b, heads, n, head_dim).
        let qkv = Tensor::cat(
            &[
                &self.q.forward(xs)?,
                &self.k.forward(xs)?,
                &self.v.forward(xs)?,
            ],
            2,
        )?
        .reshape((b, n, 3, self.num_heads, c / self.num_heads))?
        .transpose(1, 2)? // 02134
        .transpose(0, 1)? // 20134
        .transpose(2, 3)?; // 20314
        let q = qkv.i(0)?;
        let k = qkv.i(1)?.contiguous()?;
        let v = qkv.i(2)?.contiguous()?;
        let npt = 1; // num_prefix_tokens = 1 for CLS token
        // The CLS token keeps its raw query/key; rotary embeddings only
        // rotate the patch tokens (index npt..).
        let q = Tensor::cat(
            &[
                &q.i((0.., 0.., ..npt, 0..))?,
                &Self::apply_rot_embed_cat(&q.i((0.., 0.., npt.., 0..))?, &self.rot_pos_embed)?,
            ],
            2,
        )?;
        let k = Tensor::cat(
            &[
                &k.i((0.., 0.., ..npt, 0..))?,
                &Self::apply_rot_embed_cat(&k.i((0.., 0.., npt.., 0..))?, &self.rot_pos_embed)?,
            ],
            2,
        )?;
        let q = (q * self.scale)?;
        let attn = &q.matmul(&k.t()?)?;
        let attn = candle_nn::ops::softmax(attn, D::Minus1)?;
        // Merge heads back to (b, n, c) and apply the output projection.
        let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?;
        self.proj.forward(&attn)
    }
}
/// Gated MLP: `fc2(norm(silu(fc1_g(x)) * fc1_x(x)))`.
#[derive(Debug)]
struct Mlp {
    // Gate branch, SiLU-activated.
    fc1_g: Linear,
    // Linear branch, multiplied by the gate.
    fc1_x: Linear,
    // LayerNorm applied to the gated hidden state before the down projection.
    norm: LayerNorm,
    fc2: Linear,
}
impl Mlp {
    fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> {
        let out_features = in_features;
        let fc1_g = linear(vb.pp("fc1_g"), in_features, hidden_features, bias)?;
        let fc1_x = linear(vb.pp("fc1_x"), in_features, hidden_features, bias)?;
        let norm = layer_norm(hidden_features, 1e-6, vb.pp("norm"))?;
        let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?;
        Ok(Self {
            fc1_g,
            fc1_x,
            norm,
            fc2,
        })
    }
}
impl Module for Mlp {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs_g = self.fc1_g.forward(xs)?.silu()?;
        let xs = self.fc1_x.forward(xs)?;
        let xs = self.norm.forward(&(xs_g.mul(&xs)?))?;
        self.fc2.forward(&xs)
    }
}
/// Pre-norm transformer block: LayerNorm -> attention -> residual, then
/// LayerNorm -> gated MLP -> residual.
#[derive(Debug)]
struct Block {
    norm1: LayerNorm,
    attn: Attention,
    norm2: LayerNorm,
    mlp: Mlp,
}
impl Block {
    fn new(vb: VarBuilder, dim: usize, num_heads: usize, rot_pos_embed: &Tensor) -> Result<Self> {
        let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?;
        let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true, rot_pos_embed)?;
        let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?;
        // Gated-MLP hidden width: 2/3 of the usual 4x expansion.
        let hidden_dim = dim * 4 * 2 / 3; // 768 * 4 * 2 / 3 = 3072 * 2 / 3 = 2048
        let mlp = Mlp::new(vb.pp("mlp"), dim, hidden_dim, true)?;
        Ok(Self {
            norm1,
            attn,
            norm2,
            mlp,
        })
    }
}
impl Module for Block {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let residual = xs;
        let xs = &self.attn.forward(&self.norm1.forward(xs)?)?;
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = &self.mlp.forward(&self.norm2.forward(&xs)?)?;
        xs + residual
    }
}
/// Splits an image into non-overlapping patches and linearly embeds each one.
#[derive(Debug)]
struct PatchEmbed {
    proj: candle_nn::Conv2d,
    patch_size: (usize, usize),
    // Patch count for the nominal `img_size` input.
    num_patches: usize,
}
impl PatchEmbed {
    fn new(
        vb: VarBuilder,
        img_size: usize,
        patch_size: usize,
        in_chans: usize,
        embed_dim: usize,
    ) -> Result<Self> {
        // A conv with kernel == stride == patch_size is equivalent to cutting
        // the image into patches and applying one shared linear projection.
        let config = candle_nn::Conv2dConfig {
            stride: patch_size,
            ..Default::default()
        };
        let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?;
        let num_patches = (img_size / patch_size) * (img_size / patch_size);
        Ok(Self {
            proj,
            patch_size: (patch_size, patch_size),
            num_patches,
        })
    }
}
impl Module for PatchEmbed {
    /// `(b, c, h, w)` image -> `(b, num_patches, embed_dim)` tokens.
    ///
    /// Errors when `h` or `w` is not a multiple of the patch size.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (_b, _c, h, w) = xs.dims4()?;
        let (patch_h, patch_w) = self.patch_size;
        if (h % patch_h) != 0 {
            candle::bail!("image height {h} is not a multiple of patch height {patch_h}")
        }
        if (w % patch_w) != 0 {
            candle::bail!("image width {w} is not a multiple of patch width {patch_w}")
        }
        let xs = self.proj.forward(xs)?;
        let (b, c, h, w) = xs.dims4()?;
        // flatten embeddings.
        xs.reshape((b, c, h * w))?.transpose(1, 2)
    }
}
/// EVA-02 vision transformer with rotary position embeddings and a linear
/// ImageNet classification head.
#[derive(Debug)]
pub struct EVA2VisionTransformer {
    patch_embed: PatchEmbed,
    // Learned CLS token, shape (1, 1, embed_dim).
    cls_token: Tensor,
    // Learned absolute position embeddings, shape (1, num_patches + 1, embed_dim).
    pos_embed: Tensor,
    blocks: Vec<Block>,
    norm: LayerNorm,
    head: Linear,
}
impl EVA2VisionTransformer {
    /// Builds the transformer: patch embedding, CLS token, learned absolute
    /// position embeddings, shared rotary embeddings, `depth` blocks, a final
    /// LayerNorm and the classification head.
    pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> {
        let patch_embed =
            PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?;
        let cls_token = vb.get((1, 1, embed_dim), "cls_token")?;
        let pos_embed = vb.get((1, patch_embed.num_patches + 1, embed_dim), "pos_embed")?;
        // Rotary sin/cos table, shared by every attention block.
        let rot_pos_embed = vb.get((patch_embed.num_patches, 128), "rot_pos_embed")?;
        let head = linear(vb.pp("head"), embed_dim, NUM_CLASSES, true)?;
        let norm = layer_norm(embed_dim, 1e-6, vb.pp("norm"))?;
        let vb_b = vb.pp("blocks");
        let blocks = (0..depth)
            .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads, &rot_pos_embed))
            .collect::<Result<Vec<_>>>()?;
        Ok(Self {
            patch_embed,
            cls_token,
            pos_embed,
            blocks,
            norm,
            head,
        })
    }
    /// Resizes the learned position embeddings when the token count in `xs`
    /// differs from the stored table. Prefix (CLS) tokens keep their
    /// embedding; patch embeddings are spatially interpolated.
    fn interpolate_pos_encoding(
        &self,
        xs: &Tensor,
        w: usize,
        h: usize,
        num_prefix_tokens: usize,
    ) -> Result<Tensor> {
        // Use `num_prefix_tokens` consistently instead of a hard-coded 1 so
        // the parameter actually governs the prefix size.
        let npatch = xs.dim(1)? - num_prefix_tokens;
        let n = self.pos_embed.dim(1)? - num_prefix_tokens;
        let sqrt_n = (n as f64).sqrt();
        if npatch == n && w == h {
            return Ok(self.pos_embed.clone());
        }
        // Interpolate only local tokens, i.e. those after the CLS token
        let prefix_tokens_pos_embed = self.pos_embed.i((0.., ..num_prefix_tokens, 0..))?.clone();
        let patch_pos_embed = &self.pos_embed.i((0.., num_prefix_tokens.., 0..))?;
        let dim = xs.dim(D::Minus1)?;
        // +0.1 guards against floating-point rounding when the division is
        // exact before the float->usize truncation below.
        let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1);
        let patch_pos_embed = patch_pos_embed
            .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))?
            .transpose(2, 3)?
            .transpose(1, 2)?;
        // This uses bicubic interpolation in the original implementation.
        let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?;
        let el_count = patch_pos_embed.shape().elem_count();
        let patch_pos_embed =
            patch_pos_embed
                .transpose(1, 2)?
                .transpose(2, 3)?
                .reshape((1, el_count / dim, dim))?;
        Tensor::cat(&[&prefix_tokens_pos_embed, &patch_pos_embed], 1)
    }
    /// Patch-embeds the image, prepends the CLS token and adds position
    /// embeddings.
    ///
    /// # Errors
    /// Fails when the input is not `IMG_SIZE` x `IMG_SIZE`. (The previous
    /// message claimed 518x518, which is the DINOv2 input size, not EVA-02's;
    /// it now reports the actual expected size and uses a recoverable error
    /// like `PatchEmbed::forward` instead of panicking.)
    fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> {
        let (_b, _nc, w, h) = xs.dims4()?;
        if (w != IMG_SIZE) || (h != IMG_SIZE) {
            candle::bail!(
                "the input tensor should have shape Bx3x{IMG_SIZE}x{IMG_SIZE}, got spatial dims {w}x{h}"
            )
        }
        let xs = self.patch_embed.forward(xs)?;
        let xs = Tensor::cat(&[&self.cls_token, &xs], 1)?;
        let xs = (&xs + &self.interpolate_pos_encoding(&xs, w, h, 1)?)?;
        Ok(xs)
    }
    /// Runs the backbone and collects the hidden states after each block
    /// listed in `blocks_to_take`.
    ///
    /// # Errors
    /// Fails when some requested block index is out of range.
    fn get_intermediate_layers_not_chunked(
        &self,
        xs: &Tensor,
        blocks_to_take: &[usize],
    ) -> Result<Vec<Tensor>> {
        let mut xs = self.prepare_tokens_with_mask(xs)?;
        let mut output = Vec::new();
        for (i, blk) in self.blocks.iter().enumerate() {
            xs = blk.forward(&xs)?;
            if blocks_to_take.contains(&i) {
                output.push(xs.clone());
            }
        }
        if output.len() != blocks_to_take.len() {
            candle::bail!(
                "only {} / {} blocks found",
                output.len(),
                blocks_to_take.len()
            );
        }
        Ok(output)
    }
    /// Returns the stacked intermediate features for `blocks_to_take`.
    ///
    /// * `reshape` — reshape patch tokens back to a `(b, c, h, w)` grid.
    /// * `return_class_token` — append the CLS token features to each output.
    /// * `norm` — apply the final LayerNorm to each intermediate output.
    pub fn get_intermediate_layers(
        &self,
        xs: &Tensor,
        blocks_to_take: &[usize],
        reshape: bool,
        return_class_token: bool,
        norm: bool,
    ) -> Result<Tensor> {
        let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?;
        let outputs = if norm {
            outputs
                .iter()
                .map(|out| self.norm.forward(out))
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        // Split CLS token (index 0) from the patch tokens (index 1..).
        let class_tokens = outputs
            .iter()
            .map(|out| out.i((.., 0)))
            .collect::<Result<Vec<_>>>()?;
        let outputs = outputs
            .iter()
            .map(|out| out.i((.., 1..)))
            .collect::<Result<Vec<_>>>()?;
        let outputs = if reshape {
            let (b, _c, w, h) = xs.dims4()?;
            let patch_size = self.patch_embed.patch_size.0;
            let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size));
            outputs
                .iter()
                .map(|out| {
                    out.reshape((b, w / patch_size, h / patch_size, num_channels))?
                        .transpose(2, 3)?
                        .transpose(1, 2)
                })
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        let outputs = if return_class_token {
            outputs
                .iter()
                .zip(class_tokens.iter())
                .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1))
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        Tensor::stack(&outputs[..], 0)
    }
}
impl Module for EVA2VisionTransformer {
    /// Full classification pass: returns `(batch, NUM_CLASSES)` logits.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = self.prepare_tokens_with_mask(xs)?;
        for blk in self.blocks.iter() {
            xs = blk.forward(&xs)?
        }
        // Pool by averaging the patch tokens (index 1..), skipping the CLS
        // token at index 0.
        let xs_moy_local_tokens = xs.i((.., 1..))?.mean(1)?;
        let xs_norm = self.norm.forward(&xs_moy_local_tokens)?;
        self.head.forward(&xs_norm)
    }
}
/// EVA-02 base: 12 blocks, 768-dim embeddings, 12 heads (head dim 64).
pub fn vit_base(vb: VarBuilder) -> Result<EVA2VisionTransformer> {
    EVA2VisionTransformer::new(vb, 12, 768, 12)
}
/// EVA-02 large: 24 blocks, 1024-dim embeddings, 16 heads (head dim 64).
pub fn vit_large(vb: VarBuilder) -> Result<EVA2VisionTransformer> {
    EVA2VisionTransformer::new(vb, 24, 1024, 16)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/efficientvit.rs | candle-transformers/src/models/efficientvit.rs | //! EfficientViT (MSRA) inference implementation based on timm.
//!
//! This crate provides an implementation of the EfficientViT model from Microsoft Research Asia
//! for efficient image classification. The model uses cascaded group attention modules
//! to achieve strong performance while maintaining low memory usage.
//!
//! The model was originally described in the paper:
//! ["EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention"](https://arxiv.org/abs/2305.07027)
//!
//! This implementation is based on the reference implementation from
//! [pytorch-image-models](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/efficientvit_msra.py).
//!
//! # Example Usage
//!
//! This candle implementation uses a pre-trained EfficientViT (from Microsoft Research Asia) network for inference.
//! The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes.
//!
//!
//! ```bash
//! cargo run
//! --example efficientvit \
//! --release -- \
//! --image candle-examples/examples/yolo-v8/assets/bike.jpg --which m1
//!
//! > loaded image Tensor[dims 3, 224, 224; f32]
//! > model built
//! > mountain bike, all-terrain bike, off-roader: 69.80%
//! > unicycle, monocycle : 13.03%
//! > bicycle-built-for-two, tandem bicycle, tandem: 9.28%
//! > crash helmet : 2.25%
//! > alp : 0.46%
//! ```
//!
//! <div align=center>
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width=640>
//! </div>
//!
use candle::{Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d, conv2d_no_bias, linear, ops::sigmoid, ops::softmax, Conv2dConfig, Func,
VarBuilder,
};
/// Per-variant hyper-parameters, one entry per stage (three stages).
#[derive(Clone)]
pub struct Config {
    // Channel width of each stage.
    channels: [usize; 3],
    // Number of EfficientViT blocks per stage.
    blocks: [usize; 3],
    // Attention head count per stage.
    heads: [usize; 3],
    // Per-head depthwise-conv kernel sizes (up to four heads).
    kernels: [usize; 4],
}
impl Config {
    /// EfficientViT-M0 hyper-parameters.
    pub fn m0() -> Self {
        Self {
            channels: [64, 128, 192],
            blocks: [1, 2, 3],
            heads: [4, 4, 4],
            kernels: [5, 5, 5, 5],
        }
    }
    /// EfficientViT-M1 hyper-parameters.
    pub fn m1() -> Self {
        Self {
            channels: [128, 144, 192],
            blocks: [1, 2, 3],
            heads: [2, 3, 3],
            kernels: [7, 5, 3, 3],
        }
    }
    /// EfficientViT-M2 hyper-parameters.
    pub fn m2() -> Self {
        Self {
            channels: [128, 192, 224],
            blocks: [1, 2, 3],
            heads: [4, 3, 2],
            kernels: [7, 5, 3, 3],
        }
    }
    /// EfficientViT-M3 hyper-parameters.
    pub fn m3() -> Self {
        Self {
            channels: [128, 240, 320],
            blocks: [1, 2, 3],
            heads: [4, 3, 4],
            kernels: [5, 5, 5, 5],
        }
    }
    /// EfficientViT-M4 hyper-parameters.
    pub fn m4() -> Self {
        Self {
            channels: [128, 256, 384],
            blocks: [1, 2, 3],
            heads: [4, 4, 4],
            kernels: [7, 5, 3, 3],
        }
    }
    /// EfficientViT-M5 hyper-parameters (the largest variant here).
    pub fn m5() -> Self {
        Self {
            channels: [192, 288, 384],
            blocks: [1, 3, 4],
            heads: [3, 3, 4],
            kernels: [7, 5, 3, 3],
        }
    }
}
// A single stem stage: stride-2 3x3 conv (no bias) followed by batch-norm in
// inference mode; halves the spatial resolution.
fn efficientvit_stemblock(
    in_channels: usize,
    out_channels: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let conv2d_cfg = Conv2dConfig {
        stride: 2,
        padding: 1,
        ..Default::default()
    };
    let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?;
    let conv = conv2d_no_bias(in_channels, out_channels, 3, conv2d_cfg, vb.pp("conv"))?;
    Ok(Func::new(move |xs| {
        let xs = xs.apply(&conv)?.apply_t(&bn, false)?;
        Ok(xs)
    }))
}
// Image stem: four stride-2 conv blocks taking 3 -> dim/8 -> dim/4 -> dim/2
// -> dim channels, with ReLU between blocks but not after the last one.
// Overall spatial downsampling factor: 16.
fn efficientvit_stem(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let conv1 = efficientvit_stemblock(3, dim / 8, vb.pp("conv1"))?;
    let conv2 = efficientvit_stemblock(dim / 8, dim / 4, vb.pp("conv2"))?;
    let conv3 = efficientvit_stemblock(dim / 4, dim / 2, vb.pp("conv3"))?;
    let conv4 = efficientvit_stemblock(dim / 2, dim, vb.pp("conv4"))?;
    Ok(Func::new(move |xs| {
        let xs = xs.apply(&conv1)?.relu()?;
        let xs = xs.apply(&conv2)?.relu()?;
        let xs = xs.apply(&conv3)?.relu()?;
        xs.apply(&conv4)
    }))
}
// Depthwise conv (one filter per channel, `groups == channels`) plus
// batch-norm in inference mode; used for local spatial mixing.
fn depthwise_conv(
    channels: usize,
    kernel: usize,
    stride: usize,
    padding: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let conv2d_cfg = Conv2dConfig {
        stride,
        padding,
        groups: channels,
        ..Default::default()
    };
    let bn = batch_norm(channels, 1e-5, vb.pp("bn"))?;
    let conv = conv2d_no_bias(channels, channels, kernel, conv2d_cfg, vb.pp("conv"))?;
    Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false)))
}
fn pointwise_conv(
in_channels: usize,
out_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?;
let conv = conv2d_no_bias(in_channels, out_channels, 1, conv2d_cfg, vb.pp("conv"))?;
Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false)))
}
// Convolutional MLP: expand to `out_channels` with a pointwise conv, ReLU,
// then project back to `in_channels`.
fn conv_mlp(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let pw1 = pointwise_conv(in_channels, out_channels, vb.pp("pw1"))?;
    let pw2 = pointwise_conv(out_channels, in_channels, vb.pp("pw2"))?;
    Ok(Func::new(move |xs| {
        let hidden = xs.apply(&pw1)?.relu()?;
        hidden.apply(&pw2)
    }))
}
// Fixed per-stage resolutions: the feature-map side length at each of the
// three stages (the stem downsamples by 16, each downsample layer roughly
// halves it) — presumably for the default 224x224 input; confirm if other
// input sizes are used.
const RESOLUTIONS: [usize; 3] = [14, 7, 4];
// Attention block
//
// When the stage resolution exceeds the fixed 7x7 window, the feature map is
// zero-padded to a multiple of 7, partitioned into non-overlapping 7x7
// windows, cascaded group attention runs per window, and the partition is
// reversed afterwards.
fn efficientvit_attn(
    cfg: &Config,
    stage: usize,
    in_channels: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let cga = cascaded_group_attn(cfg, stage, in_channels, vb)?;
    Ok(Func::new(move |xs| {
        let mut xs = xs.clone();
        let (b, c, h, w) = xs.dims4()?;
        let win_res = 7; // Fixed window resolution
        // Bottom/right padding needed to reach a multiple of the window size.
        let pad_b = (win_res - h % win_res) % win_res;
        let pad_r = (win_res - w % win_res) % win_res;
        let ph = h + pad_b;
        let pw = w + pad_r;
        let nh = ph / win_res;
        let nw = pw / win_res;
        if RESOLUTIONS[stage] > win_res {
            // Window partition: (b, c, h, w) -> (b*nh*nw, c, win, win).
            xs = xs.permute((0, 2, 3, 1))?;
            xs = xs.pad_with_zeros(D::Minus1, 0, pad_r)?;
            xs = xs.pad_with_zeros(D::Minus2, 0, pad_b)?;
            xs = xs
                .reshape((b, nh, win_res, nw, win_res, c))?
                .transpose(2, 3)?;
            xs = xs
                .reshape((b * nh * nw, win_res, win_res, c))?
                .permute((0, 3, 1, 2))?;
        }
        xs = xs.apply(&cga)?;
        if RESOLUTIONS[stage] > win_res {
            // Reverse the window partition back to (b, c, ph, pw).
            xs = xs
                .permute((0, 2, 3, 1))?
                .reshape((b, nh, nw, win_res, win_res, c))?;
            xs = xs.transpose(2, 3)?.reshape((b, ph, pw, c))?;
            xs = xs.permute((0, 3, 1, 2))?;
        }
        Ok(xs)
    }))
}
// Cascaded group attention
//
// Each head attends over its own channel chunk; head `i` additionally
// receives the output of head `i-1`, cascading information across heads
// while keeping per-head cost low.
fn cascaded_group_attn(
    cfg: &Config,
    stage: usize,
    in_channels: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let heads = cfg.heads[stage];
    // Queries/keys use a fixed 16-dim projection; values keep the full
    // per-head channel width.
    let key_dim = 16;
    let val_dim = in_channels / heads;
    let scale = (key_dim as f64).powf(-0.5);
    let mut dws = Vec::with_capacity(heads);
    let mut qkvs = Vec::with_capacity(heads);
    for i in 0..heads {
        // Per-head depthwise conv that spatially mixes the query before
        // attention; kernel size comes from the config.
        dws.push(depthwise_conv(
            key_dim,
            cfg.kernels[i],
            1,
            cfg.kernels[i] / 2,
            vb.pp(format!("dws.{i}")),
        )?);
        // One pointwise conv producing q, k and v stacked along channels.
        qkvs.push(pointwise_conv(
            in_channels / heads,
            in_channels / heads + 2 * key_dim,
            vb.pp(format!("qkvs.{i}")),
        )?);
    }
    let proj = pointwise_conv(in_channels, in_channels, vb.pp("proj.1"))?;
    Ok(Func::new(move |xs| {
        let (b, _, h, w) = xs.dims4()?;
        let feats_in = xs.chunk(heads, 1)?;
        let mut feats_out = Vec::with_capacity(heads);
        let mut feat = feats_in[0].clone();
        for i in 0..heads {
            if i > 0 {
                // Cascade: add the previous head's output to this head's chunk.
                feat = (&feat + &feats_in[i])?;
            }
            feat = feat.apply(&qkvs[i])?;
            let res = feat.reshape((b, (), h, w))?;
            // Split the stacked projection into q / k / v channel groups.
            let q = res.narrow(1, 0, key_dim)?;
            let k = res.narrow(1, key_dim, key_dim)?;
            let v = res.narrow(1, 2 * key_dim, val_dim)?;
            let q = q.apply(&dws[i])?;
            // Flatten the spatial dims so attention runs over h*w positions.
            let q = q.flatten_from(2)?;
            let k = k.flatten_from(2)?;
            let v = v.flatten_from(2)?;
            let q = (q * scale)?;
            let att = q.transpose(D::Minus2, D::Minus1)?.matmul(&k)?;
            let att = softmax(&att, D::Minus1)?;
            feat = v.matmul(&att.transpose(D::Minus2, D::Minus1)?)?;
            feat = feat.reshape((b, val_dim, h, w))?;
            feats_out.push(feat.clone());
        }
        let xs = Tensor::cat(&feats_out, 1)?;
        let xs = xs.relu()?.apply(&proj)?;
        Ok(xs)
    }))
}
// Used by the downsampling layer
// Squeeze-and-excitation: global average pooling followed by a two-layer
// channel-gating bottleneck whose sigmoid output rescales the input.
fn squeeze_and_excitation(
    in_channels: usize,
    squeeze_channels: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let conv2d_cfg = Conv2dConfig {
        ..Default::default()
    };
    let fc1 = conv2d(in_channels, squeeze_channels, 1, conv2d_cfg, vb.pp("fc1"))?;
    let fc2 = conv2d(squeeze_channels, in_channels, 1, conv2d_cfg, vb.pp("fc2"))?;
    Ok(Func::new(move |xs| {
        // Squeeze: average over both spatial dimensions, keeping dims for
        // broadcasting.
        let pooled = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?;
        // Excite: bottleneck producing per-channel gates in (0, 1).
        let gates = pooled.apply(&fc1)?.relu()?.apply(&fc2)?;
        let gates = sigmoid(&gates)?;
        // Rescale the original activations channel-wise.
        xs.broadcast_mul(&gates)
    }))
}
// Used by the downsampling layer
// Patch merging: expand channels 4x with a pointwise conv, downsample
// spatially with a stride-2 depthwise conv, gate with squeeze-and-excitation,
// then project to the target channel count.
fn patchmerge(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let dim = in_channels;
    let hid_dim = in_channels * 4;
    let conv1 = pointwise_conv(dim, hid_dim, vb.pp("conv1"))?;
    let conv2 = depthwise_conv(hid_dim, 3, 2, 1, vb.pp("conv2"))?;
    let conv3 = pointwise_conv(hid_dim, out_channels, vb.pp("conv3"))?;
    let se = squeeze_and_excitation(hid_dim, hid_dim / 4, vb.pp("se"))?;
    Ok(Func::new(move |xs| {
        let expanded = xs.apply(&conv1)?.relu()?;
        let downsampled = expanded.apply(&conv2)?.relu()?;
        let gated = downsampled.apply(&se)?;
        gated.apply(&conv3)
    }))
}
// Used by the downsampling layer
// Residual block: a depthwise conv and a conv-MLP, each added back to its
// input as a skip connection.
fn res(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let dw = depthwise_conv(dim, 3, 1, 1, vb.pp("0.m"))?;
    let mlp = conv_mlp(dim, dim * 2, vb.pp("1.m"))?;
    Ok(Func::new(move |xs| {
        let ys = (xs + xs.apply(&dw)?)?;
        let ys = (&ys + &ys.apply(&mlp)?)?;
        Ok(ys)
    }))
}
// Downsampling
// Downsampling layer between stages: a residual block, a patch merge
// (spatial reduction + channel change), then another residual block.
fn efficientvit_downsample(
    in_channels: usize,
    out_channels: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let res1 = res(in_channels, vb.pp("res1"))?;
    let res2 = res(out_channels, vb.pp("res2"))?;
    let patchmerge = patchmerge(in_channels, out_channels, vb.pp("patchmerge"))?;
    Ok(Func::new(move |xs| {
        xs.apply(&res1)?.apply(&patchmerge)?.apply(&res2)
    }))
}
// EfficientViT block: a "sandwich" of depthwise convs and FFNs around a
// cascaded-group-attention mixer, every sub-layer wrapped in a residual.
fn efficientvit_block(
    cfg: &Config,
    stage: usize,
    dim: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let dw0 = depthwise_conv(dim, 3, 1, 1, vb.pp("dw0.m"))?;
    let dw1 = depthwise_conv(dim, 3, 1, 1, vb.pp("dw1.m"))?;
    let ffn0 = conv_mlp(dim, dim * 2, vb.pp("ffn0.m"))?;
    let ffn1 = conv_mlp(dim, dim * 2, vb.pp("ffn1.m"))?;
    let attn = efficientvit_attn(cfg, stage, dim, vb.pp("mixer.m.attn"))?;
    Ok(Func::new(move |xs| {
        let mut xs = xs.clone();
        // Each sub-layer is residual: x = x + f(x).
        xs = (&xs + &xs.apply(&dw0)?)?;
        xs = (&xs + &xs.apply(&ffn0)?)?;
        xs = (&xs + &xs.apply(&attn)?)?;
        xs = (&xs + &xs.apply(&dw1)?)?;
        xs = (&xs + &xs.apply(&ffn1)?)?;
        Ok(xs)
    }))
}
// Each stage is made of blocks. There is a downsampling layer between stages.
fn efficientvit_stage(cfg: &Config, stage: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let nblocks = cfg.blocks[stage];
    // +1 slot for the optional downsampling layer prepended below.
    let mut blocks = Vec::with_capacity(nblocks + 1);
    let in_channels = if stage > 0 {
        cfg.channels[stage - 1]
    } else {
        cfg.channels[0]
    };
    let out_channels = cfg.channels[stage];
    // The first stage consumes the stem output directly; later stages start
    // with a downsampling layer that also changes the channel count.
    if stage > 0 {
        blocks.push(efficientvit_downsample(
            in_channels,
            out_channels,
            vb.pp("downsample"),
        )?);
    }
    for i in 0..nblocks {
        blocks.push(efficientvit_block(
            cfg,
            stage,
            out_channels,
            vb.pp(format!("blocks.{i}")),
        )?);
    }
    Ok(Func::new(move |xs| {
        let mut xs = xs.clone();
        for block in blocks.iter() {
            xs = xs.apply(block)?
        }
        Ok(xs)
    }))
}
// Classification head.
// Batch norm (in eval mode) followed by a linear classifier.
fn efficientvit_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let norm = batch_norm(outputs, 1e-6, vb.pp("bn"))?;
    let linear = linear(outputs, nclasses, vb.pp("linear"))?;
    Ok(Func::new(move |xs| {
        // `false`: batch norm uses its stored running statistics.
        let normed = xs.apply_t(&norm, false)?;
        normed.apply(&linear)
    }))
}
// Build a efficientvit model for a given configuration.
//
// When `nclasses` is `None` the classification head is omitted and the model
// returns the pooled backbone features instead of class logits.
fn efficientvit_model(
    config: &Config,
    nclasses: Option<usize>,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let cls = match nclasses {
        None => None,
        Some(nclasses) => {
            // The head operates on the channel count of the last stage.
            let outputs = config.channels[2];
            let head = efficientvit_head(outputs, nclasses, vb.pp("head"))?;
            Some(head)
        }
    };
    let stem_dim = config.channels[0];
    let stem = efficientvit_stem(stem_dim, vb.pp("patch_embed"))?;
    let vb = vb.pp("stages");
    let stage1 = efficientvit_stage(config, 0, vb.pp(0))?;
    let stage2 = efficientvit_stage(config, 1, vb.pp(1))?;
    let stage3 = efficientvit_stage(config, 2, vb.pp(2))?;
    Ok(Func::new(move |xs| {
        // Stem + 3 stages, then global average pooling over both spatial
        // dimensions.
        let xs = xs
            .apply(&stem)?
            .apply(&stage1)?
            .apply(&stage2)?
            .apply(&stage3)?
            .mean(D::Minus2)?
            .mean(D::Minus1)?;
        match &cls {
            None => Ok(xs),
            Some(cls) => xs.apply(cls),
        }
    }))
}
/// Builds an EfficientViT image classification model with a final linear head
/// producing `nclasses` logits.
pub fn efficientvit(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
    efficientvit_model(cfg, Some(nclasses), vb)
}

/// Builds an EfficientViT feature extractor: the same backbone but without
/// the classification head, returning pooled features.
pub fn efficientvit_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
    efficientvit_model(cfg, None, vb)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/csm.rs | candle-transformers/src/models/csm.rs | //! Implementation of the Conversational Speech Model (CSM) from Sesame
//!
//! See: CSM (Conversational Speech Model)
//!
/// CSM (Conversational Speech Model) is a speech generation model from Sesame that generates RVQ
/// audio codes from text and audio inputs. The model architecture employs a Llama backbone and a
/// smaller audio decoder that produces Mimi audio codes.
///
use crate::generation::LogitsProcessor;
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{embedding, linear_b, Embedding, Linear, RmsNorm, VarBuilder};
use std::sync::Arc;
/// Size variant of the Llama networks used by CSM.
#[derive(serde::Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub enum Flavor {
    #[serde(rename = "llama-1B")]
    Llama1B,
    #[serde(rename = "llama-100M")]
    Llama100M,
}

/// Top-level CSM configuration, deserializable from the model's JSON config.
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
    // Number of RVQ codebooks per audio frame.
    pub audio_num_codebooks: usize,
    // Vocabulary size of each audio codebook.
    pub audio_vocab_size: usize,
    // Flavor of the Llama backbone.
    pub backbone_flavor: Flavor,
    // Flavor of the smaller audio decoder.
    pub decoder_flavor: Flavor,
    // Vocabulary size of the text tokenizer.
    pub text_vocab_size: usize,
}
#[allow(unused)]
/// Hyper-parameters of a Llama transformer (shared by backbone and decoder).
#[derive(Debug, Clone)]
pub struct LlamaConfig {
    vocab_size: usize,
    num_layers: usize,
    num_heads: usize,
    num_kv_heads: usize,
    embed_dim: usize,
    max_seq_len: usize,
    intermediate_dim: usize,
    norm_eps: f64,
    rope_base: f32,
    // RoPE frequency scaling factor (see `RotaryEmbedding::new`).
    scale_factor: usize,
}

impl LlamaConfig {
    /// Returns the hard-coded hyper-parameters for the given model flavor.
    pub fn from_flavor(flavor: Flavor) -> Self {
        match flavor {
            Flavor::Llama1B => Self {
                vocab_size: 128256,
                num_layers: 16,
                num_heads: 32,
                num_kv_heads: 8,
                embed_dim: 2048,
                max_seq_len: 2048,
                intermediate_dim: 8192,
                norm_eps: 1e-5,
                rope_base: 500_000.,
                scale_factor: 32,
            },
            Flavor::Llama100M => Self {
                vocab_size: 128256,
                num_layers: 4,
                num_heads: 8,
                num_kv_heads: 2,
                embed_dim: 1024,
                max_seq_len: 2048,
                intermediate_dim: 8192,
                norm_eps: 1e-5,
                rope_base: 500_000.,
                scale_factor: 32,
            },
        }
    }
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    // Precomputed tables of shape (max_seq_len, head_dim / 2).
    sin: Tensor,
    cos: Tensor,
}

// Base inverse frequencies for RoPE: 1 / rope_base^(i / head_dim) for even i.
fn calculate_default_inv_freq(cfg: &LlamaConfig) -> Vec<f32> {
    let head_dim = cfg.embed_dim / cfg.num_heads;
    (0..head_dim)
        .step_by(2)
        .map(|i| 1f32 / cfg.rope_base.powf(i as f32 / head_dim as f32))
        .collect()
}

impl RotaryEmbedding {
    fn new(dtype: DType, cfg: &LlamaConfig, dev: &Device) -> Result<Self> {
        // Llama-3 style frequency rescaling: low-frequency components are
        // divided by `scale_factor`, high-frequency components are kept, with
        // a smooth interpolation for wavelengths in between.
        let low_freq_factor = 1.0;
        let high_freq_factor = 4.0;
        let original_max_position_embeddings = 8192;
        let scale_factor = cfg.scale_factor as f32;
        let theta = {
            let low_freq_wavelen = original_max_position_embeddings as f32 / low_freq_factor;
            let high_freq_wavelen = original_max_position_embeddings as f32 / high_freq_factor;
            calculate_default_inv_freq(cfg)
                .into_iter()
                .map(|freq| {
                    let wavelen = 2. * std::f32::consts::PI / freq;
                    if wavelen < high_freq_wavelen {
                        freq
                    } else if wavelen > low_freq_wavelen {
                        freq / scale_factor
                    } else {
                        let smooth = (original_max_position_embeddings as f32 / wavelen
                            - low_freq_factor)
                            / (high_freq_factor - low_freq_factor);
                        (1. - smooth) * freq / scale_factor + smooth * freq
                    }
                })
                .collect::<Vec<_>>()
        };
        let theta = Tensor::new(theta, dev)?;
        // Outer product position x frequency -> (max_seq_len, head_dim / 2).
        let idx_theta = Tensor::arange(0, cfg.max_seq_len as u32, dev)?
            .to_dtype(DType::F32)?
            .reshape((cfg.max_seq_len, 1))?
            .matmul(&theta.reshape((1, theta.elem_count()))?)?;
        // This is different from the paper, see:
        // https://github.com/huggingface/transformers/blob/6112b1c6442aaf7affd2b0676a1cd4eee30c45cf/src/transformers/models/llama/modeling_llama.py#L112
        let cos = idx_theta.cos()?.to_dtype(dtype)?;
        let sin = idx_theta.sin()?.to_dtype(dtype)?;
        Ok(Self { cos, sin })
    }

    // Rotates queries and keys using the (interleaved) rope variant, reading
    // the sin/cos tables starting at `seqlen_offset`.
    fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        seqlen_offset: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
        let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
        let q_embed = candle_nn::rotary_emb::rope_i(q, &cos, &sin)?;
        let k_embed = candle_nn::rotary_emb::rope_i(k, &cos, &sin)?;
        Ok((q_embed, k_embed))
    }
}
// Loads an RMSNorm whose weight tensor is stored under the `scale` key rather
// than the usual `weight`.
fn rms_norm(hidden_size: usize, eps: f64, vb: VarBuilder) -> Result<RmsNorm> {
    let weight = vb.get((hidden_size,), "scale")?;
    Ok(RmsNorm::new(weight, eps))
}
#[derive(Debug, Clone)]
struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    rotary_emb: Arc<RotaryEmbedding>,
    // Cached (key, value) tensors from previous decoding steps.
    kv_cache: Option<(Tensor, Tensor)>,
    num_heads: usize,
    head_dim: usize,
    num_kv_heads: usize,
    // Query heads per key/value head (grouped-query attention).
    num_kv_groups: usize,
}

impl Attention {
    fn new(cfg: &LlamaConfig, rotary_emb: Arc<RotaryEmbedding>, vb: VarBuilder) -> Result<Self> {
        let head_dim = cfg.embed_dim / cfg.num_heads;
        let kv_dim = cfg.num_kv_heads * head_dim;
        let q_proj = linear_b(cfg.embed_dim, cfg.embed_dim, false, vb.pp("q_proj"))?;
        let k_proj = linear_b(cfg.embed_dim, kv_dim, false, vb.pp("k_proj"))?;
        let v_proj = linear_b(cfg.embed_dim, kv_dim, false, vb.pp("v_proj"))?;
        let o_proj = linear_b(cfg.embed_dim, cfg.embed_dim, false, vb.pp("output_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            rotary_emb,
            kv_cache: None,
            num_heads: cfg.num_heads,
            num_kv_heads: cfg.num_kv_heads,
            num_kv_groups: cfg.num_heads / cfg.num_kv_heads,
            head_dim,
        })
    }

    // Multi-head attention over `xs` of shape (batch, seq, embed_dim); the
    // new keys/values are appended to the KV cache.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let (b_sz, q_len, _) = xs.dims3()?;
        let query_states = self.q_proj.forward(xs)?;
        let key_states = self.k_proj.forward(xs)?;
        let value_states = self.v_proj.forward(xs)?;
        // Split embed_dim into heads: (b, heads, seq, head_dim).
        let query_states = query_states
            .reshape((b_sz, q_len, self.num_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let key_states = key_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let value_states = value_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let (query_states, key_states) =
            self.rotary_emb
                .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
        // Prepend the cached keys/values along the sequence dimension.
        let (key_states, value_states) = match &self.kv_cache {
            None => (key_states, value_states),
            Some((prev_k, prev_v)) => {
                let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
                let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
                (key_states, value_states)
            }
        };
        self.kv_cache = Some((key_states.clone(), value_states.clone()));
        // Expand kv heads so they line up with the query heads (GQA).
        let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?;
        let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?;
        let attn_output = {
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
            // The mask is additive: 0 for visible positions, -inf elsewhere.
            let attn_weights = match attention_mask {
                None => attn_weights,
                Some(mask) => attn_weights.broadcast_add(mask)?,
            };
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&value_states)?
        };
        // Merge the heads back into a single embedding dimension.
        attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.num_heads * self.head_dim))?
            .apply(&self.o_proj)
    }

    fn clear_kv_cache(&mut self) {
        self.kv_cache = None
    }
}
#[derive(Debug, Clone)]
struct Mlp {
    // Gate (w1), down (w2) and up (w3) projections of a SwiGLU feed-forward.
    w1: Linear,
    w2: Linear,
    w3: Linear,
}

impl Mlp {
    fn new(cfg: &LlamaConfig, vb: VarBuilder) -> Result<Self> {
        let w1 = linear_b(cfg.embed_dim, cfg.intermediate_dim, false, vb.pp("w1"))?;
        let w2 = linear_b(cfg.intermediate_dim, cfg.embed_dim, false, vb.pp("w2"))?;
        let w3 = linear_b(cfg.embed_dim, cfg.intermediate_dim, false, vb.pp("w3"))?;
        Ok(Self { w1, w2, w3 })
    }
}
impl Module for Mlp {
    // SwiGLU feed-forward: w2(silu(w1(x)) * w3(x)).
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let gate = xs.apply(&self.w1)?.silu()?;
        let up = xs.apply(&self.w3)?;
        let hidden = (gate * up)?;
        hidden.apply(&self.w2)
    }
}
#[derive(Debug, Clone)]
struct Layer {
    mlp_norm: RmsNorm,
    sa_norm: RmsNorm,
    attn: Attention,
    mlp: Mlp,
}

impl Layer {
    fn new(cfg: &LlamaConfig, rotary_emb: Arc<RotaryEmbedding>, vb: VarBuilder) -> Result<Self> {
        let mlp_norm = rms_norm(cfg.embed_dim, cfg.norm_eps, vb.pp("mlp_norm"))?;
        let sa_norm = rms_norm(cfg.embed_dim, cfg.norm_eps, vb.pp("sa_norm"))?;
        let attn = Attention::new(cfg, rotary_emb, vb.pp("attn"))?;
        let mlp = Mlp::new(cfg, vb.pp("mlp"))?;
        Ok(Self {
            mlp_norm,
            sa_norm,
            attn,
            mlp,
        })
    }

    // Pre-norm transformer layer: x + attn(norm(x)), then x + mlp(norm(x)).
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let residual = xs;
        let xs = self.sa_norm.forward(xs)?;
        let xs = self.attn.forward(&xs, attention_mask, seqlen_offset)?;
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = xs.apply(&self.mlp_norm)?.apply(&self.mlp)?;
        residual + xs
    }

    fn clear_kv_cache(&mut self) {
        self.attn.clear_kv_cache()
    }
}
#[derive(Debug, Clone)]
pub struct LlamaModel {
    layers: Vec<Layer>,
    norm: RmsNorm,
    device: Device,
    dtype: DType,
}

impl LlamaModel {
    pub fn new(cfg: &LlamaConfig, vb: VarBuilder) -> Result<Self> {
        let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
        let mut layers = Vec::with_capacity(cfg.num_layers);
        let vb_l = vb.pp("layers");
        for layer_idx in 0..cfg.num_layers {
            let layer = Layer::new(cfg, rotary_emb.clone(), vb_l.pp(layer_idx))?;
            layers.push(layer);
        }
        let norm = rms_norm(cfg.embed_dim, cfg.norm_eps, vb.pp("norm"))?;
        Ok(Self {
            layers,
            norm,
            device: vb.device().clone(),
            dtype: vb.dtype(),
        })
    }

    pub fn clear_kv_cache(&mut self) {
        for layer in self.layers.iter_mut() {
            layer.clear_kv_cache()
        }
    }

    // Builds an additive causal mask of shape
    // (1, 1, tgt_len, tgt_len + seqlen_offset): 0 where attention is allowed,
    // -inf above the diagonal. Cached positions are fully visible.
    fn prepare_decoder_attention_mask(
        &self,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            // Zero columns for positions already in the KV cache.
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        mask.expand((1, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(self.dtype)
    }

    /// Runs the transformer over input embeddings `xs` of shape
    /// (batch, seq, embed_dim) and returns the normalized hidden state of the
    /// *last* position only, shape (batch, 1, embed_dim).
    pub fn forward(&mut self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (_b_size, seq_len, _embed_dim) = xs.dims3()?;
        // A single query position can attend to everything: no mask needed.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(seq_len, seqlen_offset)?;
            Some(mask)
        };
        let mut xs = xs.clone();
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?;
        }
        let ys = xs.narrow(1, seq_len - 1, 1)?.apply(&self.norm)?;
        Ok(ys)
    }
}
#[derive(Debug, Clone)]
pub struct Model {
    // Llama backbone over the interleaved text/audio token sequence.
    backbone: LlamaModel,
    // Smaller Llama decoder generating codebooks 1.. within a frame.
    decoder: LlamaModel,
    // Projects backbone hidden states to logits for codebook 0.
    codebook0_head: Linear,
    // Flat embedding table covering all codebooks
    // (audio_vocab_size * audio_num_codebooks rows).
    audio_embeddings: Embedding,
    text_embeddings: Embedding,
    // Maps the backbone hidden size to the decoder hidden size.
    projection: Linear,
    // Per-codebook output projections for codebooks 1.., shape
    // (audio_num_codebooks - 1, decoder_dim, audio_vocab_size).
    audio_head: Tensor,
    config: Config,
}

impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let backbone_cfg = LlamaConfig::from_flavor(cfg.backbone_flavor);
        let backbone = LlamaModel::new(&backbone_cfg, vb.pp("backbone"))?;
        let decoder_cfg = LlamaConfig::from_flavor(cfg.decoder_flavor);
        let decoder = LlamaModel::new(&decoder_cfg, vb.pp("decoder"))?;
        let backbone_dim = backbone_cfg.embed_dim;
        let decoder_dim = decoder_cfg.embed_dim;
        let audio_embeddings = embedding(
            cfg.audio_vocab_size * cfg.audio_num_codebooks,
            backbone_dim,
            vb.pp("audio_embeddings"),
        )?;
        let text_embeddings =
            embedding(cfg.text_vocab_size, backbone_dim, vb.pp("text_embeddings"))?;
        let projection = linear_b(backbone_dim, decoder_dim, false, vb.pp("projection"))?;
        let codebook0_head = linear_b(
            backbone_dim,
            cfg.audio_vocab_size,
            false,
            vb.pp("codebook0_head"),
        )?;
        let audio_head = vb.get(
            (
                cfg.audio_num_codebooks - 1,
                decoder_dim,
                cfg.audio_vocab_size,
            ),
            "audio_head",
        )?;
        Ok(Self {
            backbone,
            decoder,
            codebook0_head,
            audio_embeddings,
            text_embeddings,
            projection,
            audio_head,
            config: cfg.clone(),
        })
    }

    pub fn clear_kv_cache(&mut self) {
        self.backbone.clear_kv_cache();
        self.decoder.clear_kv_cache();
    }

    /// Generates one audio frame (one sampled id per codebook) from the token
    /// history.
    ///
    /// `tokens` / `tokens_mask` have shape (batch, seq, num_codebooks + 1):
    /// the first `num_codebooks` slots hold audio tokens and the last slot
    /// the text token.
    pub fn generate_frame(
        &mut self,
        tokens: &Tensor,
        tokens_mask: &Tensor,
        input_pos: usize,
        lp: &mut LogitsProcessor,
    ) -> Result<Vec<u32>> {
        let (b_sz, seq_len, _cb_plus_one) = tokens.dims3()?;
        let audio_tokens = tokens.narrow(2, 0, self.config.audio_num_codebooks)?;
        let text_tokens = tokens.narrow(2, self.config.audio_num_codebooks, 1)?;
        let text_embeds = self.text_embeddings.forward(&text_tokens)?;
        // Offset each codebook's ids into its own slice of the flat audio
        // embedding table.
        let arange = (Tensor::arange(
            0u32,
            self.config.audio_num_codebooks as u32,
            &self.decoder.device,
        )? * self.config.audio_vocab_size as f64)?;
        let audio_tokens = audio_tokens.broadcast_add(&arange.reshape((1, 1, ()))?)?;
        let audio_embeds = self.audio_embeddings.forward(&audio_tokens)?.reshape((
            b_sz,
            seq_len,
            self.config.audio_num_codebooks,
            (),
        ))?;
        // Zero out masked slots and sum audio + text embeddings per position.
        let embeds = Tensor::cat(&[&audio_embeds, &text_embeds], D::Minus2)?;
        let embeds = embeds.broadcast_mul(
            &tokens_mask
                .to_dtype(self.backbone.dtype)?
                .unsqueeze(D::Minus1)?,
        )?;
        let embeds = embeds.sum(2)?;
        let h = self.backbone.forward(&embeds, input_pos)?;
        // Codebook 0 is sampled directly from the backbone output.
        let c0_logits = h.apply(&self.codebook0_head)?;
        let c0_sample = lp.sample(&c0_logits.i((0, 0))?)?;
        let mut all_samples = vec![c0_sample];
        let c0_sample = Tensor::from_slice(&[c0_sample], (1, 1), &self.decoder.device)?;
        let c0_embed = self.audio_embeddings.forward(&c0_sample)?;
        let mut curr_h = Tensor::cat(&[h, c0_embed], 1)?;
        // The decoder runs a fresh autoregressive pass within each frame to
        // produce the remaining codebooks.
        self.decoder.clear_kv_cache();
        let mut decoder_pos = 0;
        for i in 1..self.config.audio_num_codebooks {
            let proj_h = curr_h.apply(&self.projection)?;
            let decoder_h = self.decoder.forward(&proj_h, decoder_pos)?;
            decoder_pos += curr_h.dim(1)?;
            let ci_logits = decoder_h.broadcast_matmul(&self.audio_head.get(i - 1)?)?;
            let ci_sample = lp.sample(&ci_logits.i((0, 0))?)?;
            all_samples.push(ci_sample);
            // Shift the sampled id into codebook i's slice of the flat
            // embedding table before feeding it back.
            let ci_sample = Tensor::from_slice(
                &[ci_sample + (i * self.config.audio_vocab_size) as u32],
                (1, 1),
                &self.decoder.device,
            )?;
            let ci_embed = self.audio_embeddings.forward(&ci_sample)?;
            curr_h = ci_embed
        }
        Ok(all_samples)
    }

    /// Packs one audio frame into (tokens, mask) tensors of shape
    /// (1, 1, num_codebooks + 1); the trailing text slot is zeroed and masked
    /// out.
    pub fn audio_tokens_and_mask(&self, mut frame: Vec<u32>) -> Result<(Tensor, Tensor)> {
        let cb = self.config.audio_num_codebooks;
        let device = &self.backbone.device;
        let mut mask = vec![1u8; cb];
        mask.push(0);
        let mask = Tensor::from_vec(mask, (1, 1, cb + 1), device)?;
        frame.push(0);
        let tokens = Tensor::from_vec(frame, (1, 1, cb + 1), device)?;
        Ok((tokens, mask))
    }

    /// Packs text token ids into (tokens, mask) tensors of shape
    /// (1, len, num_codebooks + 1); only the trailing text slot is active.
    pub fn text_tokens_and_mask(&self, ids: &[u32]) -> Result<(Tensor, Tensor)> {
        let cb = self.config.audio_num_codebooks;
        let device = &self.backbone.device;
        let mut tokens = vec![];
        let mut mask = vec![];
        for &v in ids.iter() {
            let mut token = vec![0; cb];
            token.push(v);
            let token = Tensor::from_vec(token, (1, 1, cb + 1), device)?;
            tokens.push(token);
            let mut m = vec![0u8; cb];
            m.push(1);
            let m = Tensor::from_vec(m, (1, 1, cb + 1), device)?;
            mask.push(m);
        }
        let tokens = Tensor::cat(&tokens, 1)?;
        let mask = Tensor::cat(&mask, 1)?;
        Ok((tokens, mask))
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/beit.rs | candle-transformers/src/models/beit.rs | //! Based on the BEIT vision-language model.
//!
//! See "BEIT: BERT Pre-Training of Image Transformers", Bao et al. 2021
//! - [Arxiv](https://arxiv.org/abs/2106.08254)
//! - [GitHub](https://github.com/microsoft/unilm/tree/master/beit)
//!
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder};
const IMG_SIZE: usize = 384;
const PATCH_SIZE: usize = 16;
// Number of output classes of the classification head (1000-way).
const NUM_CLASSES: usize = 1000;
const WINDOW_SIZE: usize = IMG_SIZE / PATCH_SIZE; // 384 / 16 = 24
const NB_TOKENS: usize = WINDOW_SIZE * WINDOW_SIZE + 1; // 24 * 24 + 1 = 577
// Builds a linear layer, with or without a bias term.
fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> {
    match bias {
        true => candle_nn::linear(in_dim, out_dim, vb),
        false => candle_nn::linear_no_bias(in_dim, out_dim, vb),
    }
}
#[derive(Debug)]
struct Attention {
    qkv: Linear,
    proj: Linear,
    // Learned bias table: one entry per relative distance, per head.
    relative_position_bias_table: Tensor,
    // Maps each (token, token) pair to a row of the bias table.
    relative_position_index: Tensor,
    num_heads: usize,
    // 1 / sqrt(head_dim), applied to the queries.
    scale: f64,
}

impl Attention {
    fn new(
        vb: VarBuilder,
        dim: usize,
        num_heads: usize,
        qkv_bias: bool,
        proj_bias: bool,
    ) -> Result<Self> {
        let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?;
        let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?;
        // num_relative_distance = token-token(47x47) + token-CLS(1) + CLS-token(1) + CLS-CLS(1) = 2212
        let num_relative_distance = (2 * WINDOW_SIZE - 1) * (2 * WINDOW_SIZE - 1) + 3;
        let relative_position_bias_table = vb.get(
            (num_relative_distance, num_heads),
            "relative_position_bias_table",
        )?;
        let relative_position_index =
            Self::gen_relative_position_index(relative_position_bias_table.device())?;
        let scale = 1. / ((dim / num_heads) as f64).sqrt();
        Ok(Self {
            qkv,
            proj,
            relative_position_bias_table,
            relative_position_index,
            num_heads,
            scale,
        })
    }
}
impl Attention {
    // See: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/beit.py#L61
    // Builds the (w_area + 1, w_area + 1) lookup table mapping every pair of
    // tokens (patches + CLS) to an index into the relative position bias
    // table.
    fn gen_relative_position_index(device: &Device) -> Result<Tensor> {
        let num_relative_distance = (2 * WINDOW_SIZE - 1) * (2 * WINDOW_SIZE - 1) + 3;
        let w_area = WINDOW_SIZE * WINDOW_SIZE;
        let t_arange: Tensor = Tensor::arange(0, WINDOW_SIZE as u32, device)?;
        let t_ndgrid = Tensor::meshgrid(&[&t_arange, &t_arange], false)?;
        let coords_flatten = Tensor::stack(&t_ndgrid, 0)?.flatten(1, 2)?;
        // Pairwise differences of patch coordinates.
        let tmp1 = coords_flatten
            .unsqueeze(2)?
            .broadcast_as((2, w_area, w_area))?
            .to_dtype(DType::I64)?;
        let tmp2 = coords_flatten
            .unsqueeze(1)?
            .broadcast_as((2, w_area, w_area))?
            .to_dtype(DType::I64)?;
        let relative_coords = (tmp1 - tmp2)?
            .transpose(0, 1)? // 102
            .transpose(1, 2)? // 120
            .contiguous()?;
        // Shift both coordinate axes so the differences start from 0.
        let relative_coords = relative_coords.slice_assign(
            &[0..w_area, 0..w_area, 0..1],
            &(relative_coords.i((0..w_area, 0..w_area, 0..1))? + (WINDOW_SIZE - 1) as f64)?,
        )?;
        let relative_coords = relative_coords.slice_assign(
            &[0..w_area, 0..w_area, 1..2],
            &(relative_coords.i((0..w_area, 0..w_area, 1..2))? + (WINDOW_SIZE - 1) as f64)?,
        )?;
        // Scale the first axis so (dy, dx) pairs flatten to unique indices.
        let relative_coords = relative_coords.slice_assign(
            &[0..w_area, 0..w_area, 0..1],
            &(relative_coords.i((.., .., 0..1))? * (2. * (WINDOW_SIZE as f64) - 1.))?,
        )?;
        // The last three table entries are reserved for the CLS token's row,
        // column and corner interactions.
        Tensor::zeros((w_area + 1, w_area + 1), DType::I64, device)?
            .slice_assign(&[1.., 1..], &relative_coords.sum(2)?)?
            .slice_assign(
                &[0..1, 0..(w_area + 1)],
                &(Tensor::ones((1, w_area + 1), DType::I64, device)?
                    * ((num_relative_distance - 3) as f64))?
                .to_dtype(DType::I64)?,
            )?
            .slice_assign(
                &[0..(w_area + 1), 0..1],
                &(Tensor::ones((w_area + 1, 1), DType::I64, device)?
                    * ((num_relative_distance - 2) as f64))?
                .to_dtype(DType::I64)?,
            )?
            .slice_assign(
                &[0..1, 0..1],
                &(Tensor::ones((1, 1), DType::I64, device)?
                    * ((num_relative_distance - 1) as f64))?
                .to_dtype(DType::I64)?,
            )
    }

    // Gathers the per-head relative position bias for all token pairs,
    // returning shape (1, num_heads, NB_TOKENS, NB_TOKENS).
    fn _get_rel_pos_bias(&self) -> Result<Tensor> {
        self.relative_position_bias_table
            .index_select(
                &self
                    .relative_position_index
                    .flatten_all()?
                    .to_dtype(DType::U32)?,
                0,
            )?
            .reshape((NB_TOKENS, NB_TOKENS, ()))?
            .transpose(0, 1)? // 102
            .transpose(0, 2)? // 201
            .contiguous()?
            .unsqueeze(0)
    }
}
impl Module for Attention {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, n, c) = xs.dims3()?;
        // Single projection to q, k, v, then split into heads; the transposes
        // yield shape (3, b, heads, n, head_dim).
        let qkv = self
            .qkv
            .forward(xs)?
            .reshape((b, n, 3, self.num_heads, c / self.num_heads))?
            .transpose(1, 2)? // 02134
            .transpose(0, 1)? // 20134
            .transpose(2, 3)?; // 20314
        let q = (qkv.i(0)? * self.scale)?;
        let k = qkv.i(1)?.contiguous()?;
        let v = qkv.i(2)?.contiguous()?;
        // Scaled dot-product attention plus the learned relative position
        // bias.
        let attn = (&q.matmul(&k.t()?)? + self._get_rel_pos_bias())?;
        let attn = candle_nn::ops::softmax(&attn, D::Minus1)?;
        let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?;
        self.proj.forward(&attn)
    }
}
#[derive(Debug)]
struct LayerScale {
    // Learned per-channel scale applied to a residual branch.
    gamma: Tensor,
}

impl LayerScale {
    fn new(vb: VarBuilder, dim: usize) -> Result<Self> {
        let gamma = vb.get(dim, "gamma")?;
        Ok(Self { gamma })
    }
}

impl Module for LayerScale {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.broadcast_mul(&self.gamma)
    }
}
#[derive(Debug)]
struct Mlp {
    fc1: Linear,
    fc2: Linear,
}

impl Mlp {
    fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> {
        // Output size always matches the input so the block can be residual.
        let out_features = in_features;
        let fc1 = linear(vb.pp("fc1"), in_features, hidden_features, bias)?;
        let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?;
        Ok(Self { fc1, fc2 })
    }
}

impl Module for Mlp {
    // Standard transformer MLP: linear -> GELU -> linear.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.fc1.forward(xs)?.gelu()?;
        self.fc2.forward(&xs)
    }
}
#[derive(Debug)]
struct Block {
    norm1: LayerNorm,
    attn: Attention,
    ls1: LayerScale,
    norm2: LayerNorm,
    mlp: Mlp,
    ls2: LayerScale,
}

impl Block {
    fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
        let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?;
        let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true)?;
        let ls1 = LayerScale::new(vb.pp("ls1"), dim)?;
        let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?;
        let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?;
        let ls2 = LayerScale::new(vb.pp("ls2"), dim)?;
        Ok(Self {
            norm1,
            attn,
            ls1,
            norm2,
            mlp,
            ls2,
        })
    }
}

impl Module for Block {
    // Pre-norm transformer block with layer-scaled residual branches:
    // x = x + ls1(attn(norm1(x))); x = x + ls2(mlp(norm2(x))).
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let residual = xs;
        let xs = self
            .ls1
            .forward(&self.attn.forward(&self.norm1.forward(xs)?)?)?;
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = self
            .ls2
            .forward(&self.mlp.forward(&self.norm2.forward(&xs)?)?)?;
        xs + residual
    }
}
#[derive(Debug)]
struct PatchEmbed {
    proj: candle_nn::Conv2d,
    patch_size: (usize, usize),
}

impl PatchEmbed {
    fn new(vb: VarBuilder, patch_size: usize, in_chans: usize, embed_dim: usize) -> Result<Self> {
        // A conv with kernel == stride == patch_size extracts non-overlapping
        // patch embeddings in a single pass.
        let config = candle_nn::Conv2dConfig {
            stride: patch_size,
            ..Default::default()
        };
        let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?;
        Ok(Self {
            proj,
            patch_size: (patch_size, patch_size),
        })
    }
}

impl Module for PatchEmbed {
    // Converts (b, c, h, w) images into (b, num_patches, embed_dim) tokens.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (_b, _c, h, w) = xs.dims4()?;
        let (patch_h, patch_w) = self.patch_size;
        if (h % patch_h) != 0 {
            candle::bail!("image height {h} is not a multiple of patch height {patch_h}")
        }
        if (w % patch_w) != 0 {
            candle::bail!("image width {w} is not a multiple of patch width {patch_w}")
        }
        let xs = self.proj.forward(xs)?;
        let (b, c, h, w) = xs.dims4()?;
        // flatten embeddings.
        xs.reshape((b, c, h * w))?.transpose(1, 2)
    }
}
#[derive(Debug)]
pub struct BeitVisionTransformer {
    patch_embed: PatchEmbed,
    cls_token: Tensor,
    blocks: Vec<Block>,
    norm: LayerNorm,
    head: Linear,
}

impl BeitVisionTransformer {
    pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> {
        let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), PATCH_SIZE, 3, embed_dim)?;
        let cls_token = vb.get((1, 1, embed_dim), "cls_token")?;
        let head = linear(vb.pp("head"), embed_dim, NUM_CLASSES, true)?;
        let norm = layer_norm(embed_dim, 1e-6, vb.pp("norm"))?;
        let vb_b = vb.pp("blocks");
        let blocks = (0..depth)
            .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads))
            .collect::<Result<Vec<_>>>()?;
        Ok(Self {
            patch_embed,
            cls_token,
            blocks,
            norm,
            head,
        })
    }

    // Patch-embeds the image and prepends the CLS token.
    // NOTE(review): `cls_token` has batch dimension 1, so this `cat` appears
    // to assume batch-size-1 inputs — confirm before batching.
    fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.patch_embed.forward(xs)?;
        Tensor::cat(&[&self.cls_token, &xs], 1)
    }

    // Runs all blocks, collecting the output of each index listed in
    // `blocks_to_take`.
    fn get_intermediate_layers_not_chunked(
        &self,
        xs: &Tensor,
        blocks_to_take: &[usize],
    ) -> Result<Vec<Tensor>> {
        let mut xs = self.prepare_tokens_with_mask(xs)?;
        let mut output = Vec::new();
        for (i, blk) in self.blocks.iter().enumerate() {
            xs = blk.forward(&xs)?;
            if blocks_to_take.contains(&i) {
                output.push(xs.clone());
            }
        }
        if output.len() != blocks_to_take.len() {
            candle::bail!(
                "only {} / {} blocks found",
                output.len(),
                blocks_to_take.len()
            );
        }
        Ok(output)
    }

    /// Returns the stacked outputs of the requested blocks, optionally
    /// layer-normalized (`norm`), reshaped into (b, c, h, w) feature maps
    /// (`reshape`), and/or with the class token concatenated
    /// (`return_class_token`).
    pub fn get_intermediate_layers(
        &self,
        xs: &Tensor,
        blocks_to_take: &[usize],
        reshape: bool,
        return_class_token: bool,
        norm: bool,
    ) -> Result<Tensor> {
        let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?;
        let outputs = if norm {
            outputs
                .iter()
                .map(|out| self.norm.forward(out))
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        // Token 0 is the CLS token; the rest are patch tokens.
        let class_tokens = outputs
            .iter()
            .map(|out| out.i((.., 0)))
            .collect::<Result<Vec<_>>>()?;
        let outputs = outputs
            .iter()
            .map(|out| out.i((.., 1..)))
            .collect::<Result<Vec<_>>>()?;
        let outputs = if reshape {
            let (b, _c, w, h) = xs.dims4()?;
            let patch_size = self.patch_embed.patch_size.0;
            // Infer the channel count from the total element count.
            let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size));
            outputs
                .iter()
                .map(|out| {
                    out.reshape((b, w / patch_size, h / patch_size, num_channels))?
                        .transpose(2, 3)?
                        .transpose(1, 2)
                })
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        let outputs = if return_class_token {
            outputs
                .iter()
                .zip(class_tokens.iter())
                .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1))
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        Tensor::stack(&outputs[..], 0)
    }
}
impl Module for BeitVisionTransformer {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = self.prepare_tokens_with_mask(xs)?;
        for blk in self.blocks.iter() {
            xs = blk.forward(&xs)?
        }
        // Average the patch tokens (excluding the CLS token at index 0), then
        // normalize and classify.
        let xs_moy_local_tokens = xs.i((.., 1..))?.mean(1)?;
        let xs_norm = self.norm.forward(&xs_moy_local_tokens)?;
        self.head.forward(&xs_norm)
    }
}

/// BEiT base: 12 blocks, 768-dim embeddings, 12 attention heads.
pub fn vit_base(vb: VarBuilder) -> Result<BeitVisionTransformer> {
    BeitVisionTransformer::new(vb, 12, 768, 12)
}

/// BEiT large: 24 blocks, 1024-dim embeddings, 16 attention heads.
pub fn vit_large(vb: VarBuilder) -> Result<BeitVisionTransformer> {
    BeitVisionTransformer::new(vb, 24, 1024, 16)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/quantized_qwen2.rs | candle-transformers/src/models/quantized_qwen2.rs | //! Qwen2 model implementation with quantization support.
//!
//! Qwen2 is a chat-optimized language model that supports 8-bit quantization
//! for reduced memory usage and faster inference.
//!
//! Key characteristics:
//! - Group Query Attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for 8-bit quantization
//!
//! References:
//! - [Model Card](https://huggingface.co/Qwen/Qwen2)
//!
use crate::{quantized_nn::RmsNorm, utils::repeat_kv};
use candle::{
quantized::{gguf_file, QMatMul},
DType, Device, IndexOp, Result, Tensor,
};
use candle_nn::{Embedding, Module};
use std::collections::HashMap;
#[derive(Debug, Clone)]
struct Mlp {
    // Gate (w1), down (w2) and up (w3) quantized projections.
    feed_forward_w1: QMatMul,
    feed_forward_w2: QMatMul,
    feed_forward_w3: QMatMul,
}

impl Module for Mlp {
    // Gated feed-forward: w2(silu(w1(x)) * w3(x)).
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let w1 = self.feed_forward_w1.forward(xs)?;
        let w3 = self.feed_forward_w3.forward(xs)?;
        self.feed_forward_w2
            .forward(&(candle_nn::ops::silu(&w1)? * w3)?)
    }
}
#[derive(Debug, Clone)]
struct LayerWeights {
    // Quantized q/k/v projections and their bias tensors.
    attention_wq: QMatMul,
    attention_wk: QMatMul,
    attention_wv: QMatMul,
    attention_bq: Tensor,
    attention_bk: Tensor,
    attention_bv: Tensor,
    attention_wo: QMatMul,
    attention_norm: RmsNorm,
    mlp: Mlp,
    ffn_norm: RmsNorm,
    n_head: usize,
    n_kv_head: usize,
    head_dim: usize,
    // Precomputed RoPE tables.
    cos: Tensor,
    sin: Tensor,
    // Scalar -inf tensor used to mask out attention scores.
    neg_inf: Tensor,
    kv_cache: Option<(Tensor, Tensor)>,
    span_attn: tracing::Span,
    span_rot: tracing::Span,
    span_mlp: tracing::Span,
}
// Replaces elements of `on_false` with the (broadcast) `on_true` value at
// positions where `mask` is non-zero.
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> {
    let on_true = on_true.broadcast_as(mask.shape().dims())?;
    mask.where_cond(&on_true, on_false)
}
impl LayerWeights {
    // Applies rotary position embeddings starting at position `index_pos`.
    fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
        let _enter = self.span_rot.enter();
        let (_b_sz, _n_head, seq_len, _n_embd) = x.dims4()?;
        let cos = self.cos.narrow(0, index_pos, seq_len)?;
        let sin = self.sin.narrow(0, index_pos, seq_len)?;
        candle_nn::rotary_emb::rope(&x.contiguous()?, &cos, &sin)
    }

    fn forward_attn(
        &mut self,
        x: &Tensor,
        mask: Option<&Tensor>,
        index_pos: usize,
    ) -> Result<Tensor> {
        let _enter = self.span_attn.enter();
        let (b_sz, seq_len, n_embd) = x.dims3()?;
        let q = self.attention_wq.forward(x)?;
        let k = self.attention_wk.forward(x)?;
        let v = self.attention_wv.forward(x)?;
        // The q/k/v projections carry bias terms.
        let q = q.broadcast_add(&self.attention_bq)?;
        let k = k.broadcast_add(&self.attention_bk)?;
        let v = v.broadcast_add(&self.attention_bv)?;
        // Split into heads: (b, heads, seq, head_dim).
        let q = q
            .reshape((b_sz, seq_len, self.n_head, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let k = k
            .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let v = v
            .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let q = self.apply_rotary_emb(&q, index_pos)?;
        let k = self.apply_rotary_emb(&k, index_pos)?;
        // `index_pos == 0` marks the start of a new sequence: drop any stale
        // cached keys/values instead of concatenating to them.
        let (k, v) = match &self.kv_cache {
            None => (k, v),
            Some((k_cache, v_cache)) => {
                if index_pos == 0 {
                    (k, v)
                } else {
                    let k = Tensor::cat(&[k_cache, &k], 2)?;
                    let v = Tensor::cat(&[v_cache, &v], 2)?;
                    (k, v)
                }
            }
        };
        self.kv_cache = Some((k.clone(), v.clone()));
        // Support for MQA, useful for 70B models and mistral.
        let k = repeat_kv(k, self.n_head / self.n_kv_head)?;
        let v = repeat_kv(v, self.n_head / self.n_kv_head)?;
        let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
        let att = match mask {
            None => att,
            Some(mask) => {
                let mask = mask.broadcast_as(att.shape())?;
                masked_fill(&att, &mask, &self.neg_inf)?
            }
        };
        let att = candle_nn::ops::softmax_last_dim(&att)?;
        // Convert to contiguous as matmul doesn't support strided vs for now.
        let y = att.matmul(&v.contiguous()?)?;
        // Merge heads back and apply the output projection.
        let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
        let y = self.attention_wo.forward(&y)?;
        Ok(y)
    }
}
/// Full quantized Qwen2 model: embeddings, transformer layers and LM head.
pub struct ModelWeights {
    tok_embeddings: Embedding,
    layers: Vec<LayerWeights>,
    // Final RMS norm before the output projection.
    norm: RmsNorm,
    // LM head; may share weights with `tok_embeddings` (tied embeddings).
    output: QMatMul,
    // Cache of causal masks keyed by sequence length.
    masks: HashMap<usize, Tensor>,
    span: tracing::Span,
    span_output: tracing::Span,
}
/// Precompute the RoPE cos/sin tables for all positions up to
/// `context_length`, each of shape (context_length, head_dim / 2).
fn precomput_freqs_cis(
    head_dim: usize,
    freq_base: f32,
    context_length: usize,
    device: &Device,
) -> Result<(Tensor, Tensor)> {
    // Inverse frequency for each pair of head dimensions.
    let inv_freqs: Vec<f32> = (0..head_dim)
        .step_by(2)
        .map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32))
        .collect();
    let inv_freqs = Tensor::new(inv_freqs.as_slice(), device)?;
    // Outer product of positions (t, 1) with frequencies (1, head_dim / 2).
    let positions = Tensor::arange(0, context_length as u32, device)?
        .to_dtype(DType::F32)?
        .reshape((context_length, 1))?;
    let freqs = positions.matmul(&inv_freqs.reshape((1, inv_freqs.elem_count()))?)?;
    Ok((freqs.cos()?, freqs.sin()?))
}
impl ModelWeights {
    /// Load a quantized Qwen2 model from a GGUF file.
    ///
    /// `ct` holds the parsed GGUF metadata/tensor index, `reader` streams the
    /// tensor data, and all tensors are placed on `device`. Fails if a
    /// required metadata key or tensor is missing.
    pub fn from_gguf<R: std::io::Seek + std::io::Read>(
        ct: gguf_file::Content,
        reader: &mut R,
        device: &Device,
    ) -> Result<Self> {
        // Helper to fetch a mandatory metadata entry with a clear error.
        let md_get = |s: &str| match ct.metadata.get(s) {
            None => candle::bail!("cannot find {s} in metadata"),
            Some(v) => Ok(v),
        };
        let head_count = md_get("qwen2.attention.head_count")?.to_u32()? as usize;
        let head_count_kv = md_get("qwen2.attention.head_count_kv")?.to_u32()? as usize;
        let embedding_length = md_get("qwen2.embedding_length")?.to_u32()? as usize;
        let context_length = md_get("qwen2.context_length")?.to_u32()? as usize;
        let block_count = md_get("qwen2.block_count")?.to_u32()? as usize;
        let rms_norm_eps = md_get("qwen2.attention.layer_norm_rms_epsilon")?.to_f32()? as f64;
        // Optional metadata: default RoPE base of 10000.
        let rope_freq_base = md_get("qwen2.rope.freq_base")
            .and_then(|m| m.to_f32())
            .unwrap_or(10000f32);
        let head_dim = embedding_length / head_count;
        let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?;
        // Token embeddings are dequantized for the embedding lookup.
        let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?;
        let tok_embeddings = tok_embeddings.dequantize(device)?;
        let norm = RmsNorm::from_qtensor(
            ct.tensor(reader, "output_norm.weight", device)?,
            rms_norm_eps,
        )?;
        // LM head: falls back to the token embedding weights when the model
        // ties input and output embeddings (no "output.weight" tensor).
        let output = match ct.tensor(reader, "output.weight", device) {
            Ok(v) => QMatMul::from_qtensor(v)?,
            _ => {
                // use tie_word_embeddings
                QMatMul::from_qtensor(ct.tensor(reader, "token_embd.weight", device)?)?
            }
        };
        let (cos, sin) = precomput_freqs_cis(head_dim, rope_freq_base, context_length, device)?;
        let mut layers = Vec::with_capacity(block_count)
;
        for layer_idx in 0..block_count {
            let prefix = format!("blk.{layer_idx}");
            let attention_wq = ct.tensor(reader, &format!("{prefix}.attn_q.weight"), device)?;
            let attention_wk = ct.tensor(reader, &format!("{prefix}.attn_k.weight"), device)?;
            let attention_wv = ct.tensor(reader, &format!("{prefix}.attn_v.weight"), device)?;
            let attention_bq = ct.tensor(reader, &format!("{prefix}.attn_q.bias"), device)?;
            let attention_bk = ct.tensor(reader, &format!("{prefix}.attn_k.bias"), device)?;
            let attention_bv = ct.tensor(reader, &format!("{prefix}.attn_v.bias"), device)?;
            let attention_wo =
                ct.tensor(reader, &format!("{prefix}.attn_output.weight"), device)?;
            let mlp = {
                let feed_forward_w1 =
                    ct.tensor(reader, &format!("{prefix}.ffn_gate.weight"), device)?;
                let feed_forward_w2 =
                    ct.tensor(reader, &format!("{prefix}.ffn_down.weight"), device)?;
                let feed_forward_w3 =
                    ct.tensor(reader, &format!("{prefix}.ffn_up.weight"), device)?;
                Mlp {
                    feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?,
                    feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?,
                    feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?,
                }
            };
            let attention_norm =
                ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?;
            let ffn_norm = ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?;
            let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
            let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
            let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp");
            layers.push(LayerWeights {
                attention_wq: QMatMul::from_qtensor(attention_wq)?,
                attention_wk: QMatMul::from_qtensor(attention_wk)?,
                attention_wv: QMatMul::from_qtensor(attention_wv)?,
                // Biases are dequantized since they are only broadcast-added.
                attention_bq: attention_bq.dequantize(device)?,
                attention_bk: attention_bk.dequantize(device)?,
                attention_bv: attention_bv.dequantize(device)?,
                attention_wo: QMatMul::from_qtensor(attention_wo)?,
                attention_norm: RmsNorm::from_qtensor(attention_norm, rms_norm_eps)?,
                cos: cos.clone(),
                sin: sin.clone(),
                mlp,
                ffn_norm: RmsNorm::from_qtensor(ffn_norm, rms_norm_eps)?,
                n_head: head_count,
                n_kv_head: head_count_kv,
                head_dim,
                neg_inf: neg_inf.clone(),
                kv_cache: None,
                span_attn,
                span_rot,
                span_mlp,
            });
        }
        let span = tracing::span!(tracing::Level::TRACE, "model");
        let span_output = tracing::span!(tracing::Level::TRACE, "output");
        Ok(Self {
            tok_embeddings: Embedding::new(tok_embeddings, embedding_length),
            layers,
            norm,
            output,
            masks: HashMap::new(),
            span,
            span_output,
        })
    }
    /// Return (and cache) the (t, t) upper-triangular causal mask where
    /// entry (i, j) is 1 iff j > i.
    fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> {
        if let Some(mask) = self.masks.get(&t) {
            Ok(mask.clone())
        } else {
            let mask: Vec<_> = (0..t)
                .flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
                .collect();
            let mask = Tensor::from_slice(&mask, (t, t), device)?;
            self.masks.insert(t, mask.clone());
            Ok(mask)
        }
    }
    /// Run the model on token ids `x` of shape (batch, seq) starting at
    /// position `index_pos`, returning the logits for the last position.
    pub fn forward(&mut self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
        let (_b_sz, seq_len) = x.dims2()?;
        // No causal mask is needed for single-token (incremental) decoding.
        let mask = if seq_len == 1 {
            None
        } else {
            Some(self.mask(seq_len, x.device())?)
        };
        let _enter = self.span.enter();
        let mut layer_in = self.tok_embeddings.forward(x)?;
        for layer in self.layers.iter_mut() {
            let x = layer_in;
            // Pre-norm attention block with residual connection.
            let residual = &x;
            let x = layer.attention_norm.forward(&x)?;
            let attn = layer.forward_attn(&x, mask.as_ref(), index_pos)?;
            let x = (attn + residual)?;
            // MLP
            let _enter = layer.span_mlp.enter();
            let residual = &x;
            let x = layer.ffn_norm.forward(&x)?;
            let x = layer.mlp.forward(&x)?;
            let x = (x + residual)?;
            layer_in = x
        }
        let x = self.norm.forward(&layer_in)?;
        // Only the last position's logits are needed for generation.
        let x = x.i((.., seq_len - 1, ..))?;
        let _enter = self.span_output.enter();
        self.output.forward(&x)
    }
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/repvgg.rs | candle-transformers/src/models/repvgg.rs | //! RepVGG inference implementation
//!
//! Key characteristics:
//! - Efficient inference architecture through structural reparameterization
//! - Single 3x3 conv layer after fusing 3x3 branch, 1x1 branch and identity branch
//! - Different configurations including a0-a2, b0-b3 and variants with group convolutions
//! - High accuracy with VGG-like plain architecture and training
//!
//! References:
//! - [RepVGG Paper](https://arxiv.org/abs/2101.03697). RepVGG: Making VGG-style ConvNets Great Again
//! - [Official Implementation](https://github.com/DingXiaoH/RepVGG)
//!
use candle::{Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d_no_bias, linear, BatchNorm, Conv2d, Conv2dConfig, Func, VarBuilder,
};
const CHANNELS_PER_STAGE: [usize; 5] = [64, 64, 128, 256, 512];
/// RepVGG variant configuration: width multipliers `a` (stages 0..=3)
/// and `b` (final stage), convolution group count, and the number of
/// layers in each of the four main stages.
#[derive(Clone)]
pub struct Config {
    a: f32,
    b: f32,
    groups: usize,
    stages: [usize; 4],
}
impl Config {
    /// Shared constructor used by all the named presets below.
    fn preset(a: f32, b: f32, groups: usize, stages: [usize; 4]) -> Self {
        Self {
            a,
            b,
            groups,
            stages,
        }
    }
    pub fn a0() -> Self {
        Self::preset(0.75, 2.5, 1, [2, 4, 14, 1])
    }
    pub fn a1() -> Self {
        Self::preset(1.0, 2.5, 1, [2, 4, 14, 1])
    }
    pub fn a2() -> Self {
        Self::preset(1.5, 2.75, 1, [2, 4, 14, 1])
    }
    pub fn b0() -> Self {
        Self::preset(1.0, 2.5, 1, [4, 6, 16, 1])
    }
    pub fn b1() -> Self {
        Self::preset(2.0, 4.0, 1, [4, 6, 16, 1])
    }
    pub fn b2() -> Self {
        Self::preset(2.5, 5.0, 1, [4, 6, 16, 1])
    }
    pub fn b3() -> Self {
        Self::preset(3.0, 5.0, 1, [4, 6, 16, 1])
    }
    pub fn b1g4() -> Self {
        Self::preset(2.0, 4.0, 4, [4, 6, 16, 1])
    }
    pub fn b2g4() -> Self {
        Self::preset(2.5, 5.0, 4, [4, 6, 16, 1])
    }
    pub fn b3g4() -> Self {
        Self::preset(3.0, 5.0, 4, [4, 6, 16, 1])
    }
}
// fuses a convolutional kernel and a batchnorm layer into a convolutional layer
// based on the _fuse_bn_tensor method in timm
// see https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L602
/// Fold a batchnorm layer into the preceding convolution's weights,
/// returning the equivalent (weights, bias) pair:
/// w' = w * gamma / sqrt(var + eps), b' = beta - mean * gamma / sqrt(var + eps).
/// NOTE(review): relies on candle's binary ops accepting `Result<Tensor>`
/// operands (e.g. `gamma / sigma` where `sigma` is a `Result`).
fn fuse_conv_bn(weights: &Tensor, bn: BatchNorm) -> Result<(Tensor, Tensor)> {
    // `unwrap` assumes the batchnorm was loaded with an affine transform
    // (weight and bias present) — true for the checkpoints loaded here.
    let (gamma, beta) = bn.weight_and_bias().unwrap();
    let mu = bn.running_mean();
    let sigma = (bn.running_var() + bn.eps())?.sqrt();
    let gps = (gamma / sigma)?;
    let bias = (beta - mu * &gps)?;
    // Scale each output channel of the conv kernel.
    let weights = weights.broadcast_mul(&gps.reshape(((), 1, 1, 1))?)?;
    Ok((weights, bias))
}
// A RepVGG layer has a different training time and inference time architecture.
// The latter is a simple and efficient equivalent transformation of the former
// realized by a structural reparameterization technique, where 3x3 and 1x1 convolutions
// along with identity branches and batchnorm layers are fused into a single 3x3 convolution.
/// Build a single reparameterized RepVGG layer: the training-time 3x3 conv,
/// 1x1 conv and (optional) identity branches — each followed by batchnorm —
/// are fused into one 3x3 convolution + ReLU for inference.
fn repvgg_layer(
    has_identity: bool,
    dim: usize,
    stride: usize,
    in_channels: usize,
    out_channels: usize,
    groups: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let conv2d_cfg = Conv2dConfig {
        stride,
        groups,
        padding: 1,
        ..Default::default()
    };
    // read and reparameterize the 1x1 conv and bn into w1 and b1
    // based on https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L543
    let conv1x1_bn = batch_norm(dim, 1e-5, vb.pp("conv_1x1.bn"))?;
    let conv1x1 = conv2d_no_bias(
        in_channels,
        out_channels,
        1,
        conv2d_cfg,
        vb.pp("conv_1x1.conv"),
    )?;
    let (mut w1, b1) = fuse_conv_bn(conv1x1.weight(), conv1x1_bn)?;
    // resize to 3x3: zero-pad the 1x1 kernel on both spatial dimensions.
    w1 = w1.pad_with_zeros(D::Minus1, 1, 1)?;
    w1 = w1.pad_with_zeros(D::Minus2, 1, 1)?;
    // read and reparameterize the 3x3 conv and bn into w3 and b3
    let convkxk_bn = batch_norm(dim, 1e-5, vb.pp("conv_kxk.bn"))?;
    let conv3x3 = conv2d_no_bias(
        in_channels,
        out_channels,
        3,
        conv2d_cfg,
        vb.pp("conv_kxk.conv"),
    )?;
    let (w3, b3) = fuse_conv_bn(conv3x3.weight(), convkxk_bn)?;
    // Sum of the 1x1 and 3x3 branches.
    let mut w = (w1 + w3)?;
    let mut b = (b1 + b3)?;
    // read and reparameterize the identity bn into wi and bi
    if has_identity {
        let identity_bn = batch_norm(dim, 1e-5, vb.pp("identity"))?;
        // create a 3x3 convolution equivalent to the identity branch:
        // a kernel that is 1 at its center for the matching channel.
        let mut weights: Vec<f32> = vec![0.0; conv3x3.weight().elem_count()];
        // https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L620
        let in_dim = in_channels / groups;
        for i in 0..in_channels {
            // Index 4 is the center of the flattened 3x3 kernel.
            weights[i * in_dim * 3 * 3 + (i % in_dim) * 3 * 3 + 4] = 1.0;
        }
        let weights = &Tensor::from_vec(weights, w.shape(), w.device())?;
        let (wi, bi) = fuse_conv_bn(weights, identity_bn)?;
        w = (w + wi)?;
        b = (b + bi)?;
    }
    // create the 3x3 conv equivalent to the sum of 3x3, 1x1 and identity branches
    let reparam_conv = Conv2d::new(w, Some(b), conv2d_cfg);
    Ok(Func::new(move |xs| {
        let xs = xs.apply(&reparam_conv)?.relu()?;
        Ok(xs)
    }))
}
// Get the number of output channels per stage taking into account the multipliers
/// Number of output channels for `stage`, applying the width multipliers:
/// `a` for all but the last stage (stage 0 additionally capped at 64
/// channels), `b` for the final stage.
fn output_channels_per_stage(a: f32, b: f32, stage: usize) -> usize {
    let base = CHANNELS_PER_STAGE[stage] as f32;
    let multiplier = if stage == 4 { b } else { a };
    let channels = (base * multiplier) as usize;
    if stage == 0 {
        channels.min(64)
    } else {
        channels
    }
}
// Each stage is made of layers. The first layer always downsamples with stride 2.
// All but the first layer have a residual connection.
// The G4 variants have a groupwise convolution instead of a dense one on odd layers
// counted across stage boundaries, so we keep track of which layer we are in the
// full model.
/// Build stage `idx` (1-based) of the model as a sequence of RepVGG layers.
/// The first layer of a stage downsamples (stride 2, no identity branch);
/// the remaining layers keep the resolution and add an identity branch.
/// Grouped convolutions are used on odd layers counted across the whole
/// network, hence the `prev_layers` offset.
fn repvgg_stage(cfg: &Config, idx: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let nlayers = cfg.stages[idx - 1];
    let mut layers = Vec::with_capacity(nlayers);
    // Layers in all previous stages, to keep the global odd/even count.
    let prev_layers: usize = cfg.stages[..idx - 1].iter().sum();
    let out_channels_prev = output_channels_per_stage(cfg.a, cfg.b, idx - 1);
    let out_channels = output_channels_per_stage(cfg.a, cfg.b, idx);
    for layer_idx in 0..nlayers {
        let (has_identity, stride, in_channels) = if layer_idx == 0 {
            // First layer: downsample from the previous stage's width.
            (false, 2, out_channels_prev)
        } else {
            (true, 1, out_channels)
        };
        // G4 variants: grouped conv on globally-odd layers only.
        let groups = if (prev_layers + layer_idx) % 2 == 1 {
            cfg.groups
        } else {
            1
        };
        layers.push(repvgg_layer(
            has_identity,
            out_channels,
            stride,
            in_channels,
            out_channels,
            groups,
            vb.pp(layer_idx),
        )?)
    }
    // Chain the layers sequentially.
    Ok(Func::new(move |xs| {
        let mut xs = xs.clone();
        for layer in layers.iter() {
            xs = xs.apply(layer)?
        }
        Ok(xs)
    }))
}
// Build a RepVGG model for a given configuration.
/// Build the full RepVGG network: stem, four stages, global average
/// pooling, and (when `nclasses` is given) a final linear classifier.
fn repvgg_model(config: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> {
    // Optional classification head.
    let cls = match nclasses {
        None => None,
        Some(nclasses) => {
            let outputs = output_channels_per_stage(config.a, config.b, 4);
            let linear = linear(outputs, nclasses, vb.pp("head.fc"))?;
            Some(linear)
        }
    };
    // Stem: a single downsampling layer from 3 (RGB) input channels.
    let stem_dim = output_channels_per_stage(config.a, config.b, 0);
    let stem = repvgg_layer(false, stem_dim, 2, 3, stem_dim, 1, vb.pp("stem"))?;
    let vb = vb.pp("stages");
    let stage1 = repvgg_stage(config, 1, vb.pp(0))?;
    let stage2 = repvgg_stage(config, 2, vb.pp(1))?;
    let stage3 = repvgg_stage(config, 3, vb.pp(2))?;
    let stage4 = repvgg_stage(config, 4, vb.pp(3))?;
    Ok(Func::new(move |xs| {
        // Two mean reductions over the last dims = global average pooling.
        let xs = xs
            .apply(&stem)?
            .apply(&stage1)?
            .apply(&stage2)?
            .apply(&stage3)?
            .apply(&stage4)?
            .mean(D::Minus1)?
            .mean(D::Minus1)?;
        match &cls {
            None => Ok(xs),
            Some(cls) => xs.apply(cls),
        }
    }))
}
/// Build a RepVGG classifier producing `nclasses` logits.
pub fn repvgg(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
    repvgg_model(cfg, Some(nclasses), vb)
}
/// Build a headless RepVGG (feature extractor): same network but without
/// the final linear classifier, returning pooled features.
pub fn repvgg_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
    repvgg_model(cfg, None, vb)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/mod.rs | candle-transformers/src/models/mod.rs | //! Candle implementations for various deep learning models
//!
//! This crate provides implementations of popular machine learning models and architectures for different modalities.
//!
//! - Large language models: [`llama`], [`phi3`], [`mamba`], [`mixtral`], [`bert`], ...
//! - Text to text models: [`t5`], ...
//! - Image to text models: [`blip`], ...
//! - Text to image models: [`stable_diffusion`] and [`wuerstchen`], ...
//! - Audio models: [`whisper`], [`encodec`], [`metavoice`], [`parler_tts`], ...
//! - Computer vision models: [`dinov2`], [`convmixer`], [`efficientnet`], ...
//!
//! Some of the models also have quantized variants, e.g. [`quantized_blip`], [`quantized_llama`] and [`quantized_qwen2`].
//!
//! The implementations aim to be readable while maintaining good performance. For more information
//! on each model see the model's module docs in the links below.
pub mod based;
pub mod beit;
pub mod bert;
pub mod bigcode;
pub mod blip;
pub mod blip_text;
pub mod chatglm;
pub mod chinese_clip;
pub mod clip;
pub mod codegeex4_9b;
pub mod colpali;
pub mod convmixer;
pub mod convnext;
pub mod csm;
pub mod dac;
pub mod debertav2;
pub mod deepseek2;
pub mod depth_anything_v2;
pub mod dinov2;
pub mod dinov2reg4;
pub mod distilbert;
pub mod efficientnet;
pub mod efficientvit;
pub mod encodec;
pub mod eva2;
pub mod falcon;
pub mod fastvit;
pub mod flux;
pub mod gemma;
pub mod gemma2;
pub mod gemma3;
pub mod glm4;
pub mod glm4_new;
pub mod granite;
pub mod granitemoehybrid;
pub mod helium;
pub mod hiera;
pub mod jina_bert;
pub mod llama;
pub mod llama2_c;
pub mod llama2_c_weights;
pub mod llava;
pub mod mamba;
pub mod marian;
pub mod metavoice;
pub mod mimi;
pub mod mistral;
pub mod mixformer;
pub mod mixtral;
pub mod mmdit;
pub mod mobileclip;
pub mod mobilenetv4;
pub mod mobileone;
pub mod modernbert;
pub mod moondream;
pub mod mpt;
pub mod nvembed_v2;
pub mod olmo;
pub mod olmo2;
pub mod openclip;
pub mod paligemma;
pub mod parler_tts;
pub mod persimmon;
pub mod phi;
pub mod phi3;
pub mod pixtral;
pub mod quantized_blip;
pub mod quantized_blip_text;
pub mod quantized_gemma3;
pub mod quantized_llama;
pub mod quantized_llama2_c;
pub mod quantized_metavoice;
pub mod quantized_mistral;
pub mod quantized_mixformer;
pub mod quantized_moondream;
pub mod quantized_mpt;
pub mod quantized_phi;
pub mod quantized_phi3;
pub mod quantized_qwen2;
pub mod quantized_qwen3;
pub mod quantized_qwen3_moe;
pub mod quantized_recurrent_gemma;
pub mod quantized_rwkv_v5;
pub mod quantized_rwkv_v6;
pub mod quantized_stable_lm;
pub mod quantized_t5;
pub mod qwen2;
pub mod qwen2_moe;
pub mod qwen3;
pub mod qwen3_moe;
pub mod qwen3_vl;
pub mod recurrent_gemma;
pub mod repvgg;
pub mod resnet;
pub mod rwkv_v5;
pub mod rwkv_v6;
pub mod segformer;
pub mod segment_anything;
pub mod siglip;
pub mod snac;
pub mod stable_diffusion;
pub mod stable_lm;
pub mod starcoder2;
pub mod stella_en_v5;
pub mod t5;
pub mod trocr;
pub mod vgg;
pub mod vit;
pub mod voxtral;
pub mod whisper;
pub mod with_tracing;
pub mod wuerstchen;
pub mod xlm_roberta;
pub mod yi;
pub mod z_image;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-transformers/src/models/debertav2.rs | candle-transformers/src/models/debertav2.rs | use std::collections::HashMap;
use candle::{bail, Context, DType, Device, Module, Result, Tensor, D};
use candle_nn::{
conv1d, embedding, layer_norm, Conv1d, Conv1dConfig, Embedding, LayerNorm, VarBuilder,
};
use serde::{Deserialize, Deserializer};
pub const DTYPE: DType = DType::F32;
// NOTE: HiddenAct and HiddenActLayer are both direct copies from bert.rs.
/// Hidden-layer activation selected by the model config
/// (deserialized from its lowercase name, e.g. "gelu").
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HiddenAct {
    // Exact (erf-based) GELU.
    Gelu,
    // Tanh-approximated GELU.
    GeluApproximate,
    Relu,
}
pub struct HiddenActLayer {
act: HiddenAct,
span: tracing::Span,
}
impl HiddenActLayer {
fn new(act: HiddenAct) -> Self {
let span = tracing::span!(tracing::Level::TRACE, "hidden-act");
Self { act, span }
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
match self.act {
// https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213
HiddenAct::Gelu => xs.gelu_erf(),
HiddenAct::GeluApproximate => xs.gelu(),
HiddenAct::Relu => xs.relu(),
}
}
}
/// Mapping from class id to human-readable label, as found in the model config.
pub type Id2Label = HashMap<u32, String>;
/// Inverse mapping from label to class id.
pub type Label2Id = HashMap<String, u32>;
/// DeBERTa-v2 model configuration, deserialized from the HF `config.json`.
/// Optional fields cover features that vary between checkpoints
/// (relative-attention buckets, conv head, classification head, ...).
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub intermediate_size: usize,
    pub hidden_act: HiddenAct,
    pub hidden_dropout_prob: f64,
    pub attention_probs_dropout_prob: f64,
    pub max_position_embeddings: usize,
    pub type_vocab_size: usize,
    pub initializer_range: f64,
    pub layer_norm_eps: f64,
    // Whether disentangled relative attention is enabled.
    pub relative_attention: bool,
    pub max_relative_positions: isize,
    pub pad_token_id: Option<usize>,
    pub position_biased_input: bool,
    // Accepts either a list or a '|'-separated string in the JSON config.
    #[serde(deserialize_with = "deserialize_pos_att_type")]
    pub pos_att_type: Vec<String>,
    pub position_buckets: Option<isize>,
    pub share_att_key: Option<bool>,
    pub attention_head_size: Option<usize>,
    pub embedding_size: Option<usize>,
    pub norm_rel_ebd: Option<String>,
    pub conv_kernel_size: Option<usize>,
    pub conv_groups: Option<usize>,
    pub conv_act: Option<String>,
    // Classification label maps (only present for classifier checkpoints).
    pub id2label: Option<Id2Label>,
    pub label2id: Option<Label2Id>,
    pub pooler_dropout: Option<f64>,
    pub pooler_hidden_act: Option<HiddenAct>,
    pub pooler_hidden_size: Option<usize>,
    pub cls_dropout: Option<f64>,
}
/// Deserialize `pos_att_type`, which appears in configs either as a list
/// of strings (e.g. `["c2p", "p2c"]`) or as a single '|'-separated string
/// (e.g. `"c2p|p2c"`); both forms are normalized to a `Vec<String>`.
fn deserialize_pos_att_type<'de, D>(deserializer: D) -> std::result::Result<Vec<String>, D::Error>
where
    D: Deserializer<'de>,
{
    // Untagged enum: serde tries each variant in order.
    #[derive(Deserialize, Debug)]
    #[serde(untagged)]
    enum StringOrVec {
        String(String),
        Vec(Vec<String>),
    }
    match StringOrVec::deserialize(deserializer)? {
        StringOrVec::String(s) => Ok(s.split('|').map(String::from).collect()),
        StringOrVec::Vec(v) => Ok(v),
    }
}
// NOTE: Dropout is probably not needed for now since this will primarily be used
// in inferencing. However, for training/fine-tuning it will be necessary.
/// Inference-only stand-in for DeBERTa's "stable dropout": behaves as the
/// identity. The fields mirror the training-time implementation and are
/// currently unused.
pub struct StableDropout {
    _drop_prob: f64,
    _count: usize,
}
impl StableDropout {
    pub fn new(drop_prob: f64) -> Self {
        StableDropout {
            _drop_prob: drop_prob,
            _count: 0,
        }
    }
    /// Identity at inference time: returns a clone of the input tensor.
    pub fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let out = x.clone();
        Ok(out)
    }
}
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L823
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L823
/// DeBERTa-v2 embedding module: word + (optional) position + (optional)
/// token-type embeddings, layer norm, and an optional projection when the
/// embedding size differs from the hidden size.
pub struct DebertaV2Embeddings {
    device: Device,
    word_embeddings: Embedding,
    // Present only when `position_biased_input` is set in the config.
    position_embeddings: Option<Embedding>,
    // Present only when `type_vocab_size > 0`.
    token_type_embeddings: Option<Embedding>,
    layer_norm: LayerNorm,
    dropout: StableDropout,
    // Precomputed [0, max_position_embeddings) ids, shape (1, max_pos).
    position_ids: Tensor,
    config: Config,
    embedding_size: usize,
    // Projects embeddings to hidden_size when the two sizes differ.
    embed_proj: Option<candle_nn::Linear>,
}
impl DebertaV2Embeddings {
    /// Load the embedding weights from `vb` according to `config`.
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let device = vb.device().clone();
        let config = config.clone();
        // Embedding width may differ from the hidden size (v2 "embedding_size").
        let embedding_size = config.embedding_size.unwrap_or(config.hidden_size);
        let word_embeddings =
            embedding(config.vocab_size, embedding_size, vb.pp("word_embeddings"))?;
        let position_embeddings = if config.position_biased_input {
            Some(embedding(
                config.max_position_embeddings,
                embedding_size,
                vb.pp("position_embeddings"),
            )?)
        } else {
            None
        };
        let token_type_embeddings: Option<Embedding> = if config.type_vocab_size > 0 {
            Some(candle_nn::embedding(
                config.type_vocab_size,
                config.hidden_size,
                vb.pp("token_type_embeddings"),
            )?)
        } else {
            None
        };
        // Projection to hidden_size, only needed when the sizes differ.
        let embed_proj: Option<candle_nn::Linear> = if embedding_size != config.hidden_size {
            Some(candle_nn::linear_no_bias(
                embedding_size,
                config.hidden_size,
                vb.pp("embed_proj"),
            )?)
        } else {
            None
        };
        let layer_norm = layer_norm(
            config.hidden_size,
            config.layer_norm_eps,
            vb.pp("LayerNorm"),
        )?;
        let dropout = StableDropout::new(config.hidden_dropout_prob);
        // Default position ids 0..max_position_embeddings, shape (1, max_pos).
        let position_ids =
            Tensor::arange(0, config.max_position_embeddings as u32, &device)?.unsqueeze(0)?;
        Ok(Self {
            word_embeddings,
            position_embeddings,
            token_type_embeddings,
            layer_norm,
            dropout,
            position_ids,
            device,
            config,
            embedding_size,
            embed_proj,
        })
    }
    /// Compute the input embeddings.
    ///
    /// Exactly one of `input_ids` or `inputs_embeds` must be provided;
    /// `position_ids` and `token_type_ids` default to 0.. and zeros
    /// respectively, and `mask` (if given) zeroes out padded positions
    /// after the layer norm.
    pub fn forward(
        &self,
        input_ids: Option<&Tensor>,
        token_type_ids: Option<&Tensor>,
        position_ids: Option<&Tensor>,
        mask: Option<&Tensor>,
        inputs_embeds: Option<&Tensor>,
    ) -> Result<Tensor> {
        let (input_shape, input_embeds) = match (input_ids, inputs_embeds) {
            (Some(ids), None) => {
                let embs = self.word_embeddings.forward(ids)?;
                (ids.dims(), embs)
            }
            (None, Some(e)) => (e.dims(), e.clone()),
            (None, None) => {
                bail!("Must specify either input_ids or inputs_embeds")
            }
            (Some(_), Some(_)) => {
                bail!("Can't specify both input_ids and inputs_embeds")
            }
        };
        let seq_length = match input_shape.last() {
            Some(v) => *v,
            None => bail!("DebertaV2Embeddings invalid input shape"),
        };
        // Default to the first `seq_length` precomputed position ids.
        let position_ids = match position_ids {
            Some(v) => v.clone(),
            None => self.position_ids.narrow(1, 0, seq_length)?,
        };
        let token_type_ids = match token_type_ids {
            Some(ids) => ids.clone(),
            None => Tensor::zeros(input_shape, DType::U32, &self.device)?,
        };
        let position_embeddings = match &self.position_embeddings {
            Some(emb) => emb.forward(&position_ids)?,
            None => Tensor::zeros_like(&input_embeds)?,
        };
        let mut embeddings = input_embeds;
        if self.config.position_biased_input {
            embeddings = embeddings.add(&position_embeddings)?;
        }
        if self.config.type_vocab_size > 0 {
            embeddings = self.token_type_embeddings.as_ref().map_or_else(
                || bail!("token_type_embeddings must be set when type_vocab_size > 0"),
                |token_type_embeddings| {
                    embeddings.add(&token_type_embeddings.forward(&token_type_ids)?)
                },
            )?;
        }
        // Project to hidden_size when the embedding width differs.
        if self.embedding_size != self.config.hidden_size {
            embeddings = if let Some(embed_proj) = &self.embed_proj {
                embed_proj.forward(&embeddings)?
            } else {
                bail!("embed_proj must exist if embedding_size != config.hidden_size");
            }
        }
        embeddings = self.layer_norm.forward(&embeddings)?;
        // Zero out padded positions if an attention mask is provided,
        // squeezing a 4-D broadcast mask down to (batch, seq) first.
        if let Some(mask) = mask {
            let mut mask = mask.clone();
            if mask.dims() != embeddings.dims() {
                if mask.dims().len() == 4 {
                    mask = mask.squeeze(1)?.squeeze(1)?;
                }
                mask = mask.unsqueeze(2)?;
            }
            mask = mask.to_dtype(embeddings.dtype())?;
            embeddings = embeddings.broadcast_mul(&mask)?;
        }
        // No-op at inference time (StableDropout).
        self.dropout.forward(&embeddings)
    }
}
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L72
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L72
/// Masked softmax: positions where `mask` is 0 are excluded from the
/// softmax (set to f32::MIN before, and to 0 after).
struct XSoftmax {}
impl XSoftmax {
    pub fn apply(input: &Tensor, mask: &Tensor, dim: D, device: &Device) -> Result<Tensor> {
        // NOTE: At the time of this writing, candle does not have a logical-not operator.
        // rmask = !mask, computed as (mask < 1) on an f32 copy.
        let mut rmask = mask.broadcast_as(input.shape())?.to_dtype(DType::F32)?;
        rmask = rmask
            .broadcast_lt(&Tensor::new(&[1.0_f32], device)?)?
            .to_dtype(DType::U8)?;
        // Push masked positions towards -inf so softmax assigns them ~0.
        let min_value_tensor = Tensor::new(&[f32::MIN], device)?.broadcast_as(input.shape())?;
        let mut output = rmask.where_cond(&min_value_tensor, input)?;
        output = candle_nn::ops::softmax(&output, dim)?;
        // Force masked positions to exactly 0 after the softmax.
        let t_zeroes = Tensor::new(&[0f32], device)?.broadcast_as(input.shape())?;
        output = rmask.where_cond(&t_zeroes, &output)?;
        Ok(output)
    }
}
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L605
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L605
/// DeBERTa-v2 disentangled self-attention: content-to-content attention
/// plus optional content-to-position ("c2p") and position-to-content
/// ("p2c") relative-attention terms.
pub struct DebertaV2DisentangledSelfAttention {
    config: Config,
    num_attention_heads: usize,
    query_proj: candle_nn::Linear,
    key_proj: candle_nn::Linear,
    value_proj: candle_nn::Linear,
    dropout: StableDropout,
    device: Device,
    // Whether relative attention is enabled; the fields below are only
    // populated in that case.
    relative_attention: bool,
    pos_dropout: Option<StableDropout>,
    // Number of relative-position buckets, or -1 when unbucketed.
    position_buckets: isize,
    max_relative_positions: isize,
    // Size of the relative position embedding span.
    pos_ebd_size: isize,
    // When true, c2p/p2c reuse query_proj/key_proj instead of the
    // dedicated pos_* projections below.
    share_att_key: bool,
    pos_key_proj: Option<candle_nn::Linear>,
    pos_query_proj: Option<candle_nn::Linear>,
}
impl DebertaV2DisentangledSelfAttention {
pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let config = config.clone();
let vb = vb.clone();
if !config
.hidden_size
.is_multiple_of(config.num_attention_heads)
{
return Err(candle::Error::Msg(format!(
"The hidden size {} is not a multiple of the number of attention heads {}",
config.hidden_size, config.num_attention_heads
)));
}
let num_attention_heads = config.num_attention_heads;
let attention_head_size = config
.attention_head_size
.unwrap_or(config.hidden_size / config.num_attention_heads);
let all_head_size = num_attention_heads * attention_head_size;
let query_proj = candle_nn::linear(config.hidden_size, all_head_size, vb.pp("query_proj"))?;
let key_proj = candle_nn::linear(config.hidden_size, all_head_size, vb.pp("key_proj"))?;
let value_proj = candle_nn::linear(config.hidden_size, all_head_size, vb.pp("value_proj"))?;
let share_att_key = config.share_att_key.unwrap_or(false);
let relative_attention = config.relative_attention;
let mut max_relative_positions = config.max_relative_positions;
let mut pos_ebd_size: isize = 0;
let position_buckets = config.position_buckets.unwrap_or(-1);
let mut pos_dropout: Option<StableDropout> = None;
let mut pos_key_proj: Option<candle_nn::Linear> = None;
let mut pos_query_proj: Option<candle_nn::Linear> = None;
if relative_attention {
if max_relative_positions < 1 {
max_relative_positions = config.max_position_embeddings as isize;
}
pos_ebd_size = max_relative_positions;
if position_buckets > 0 {
pos_ebd_size = position_buckets
}
pos_dropout = Some(StableDropout::new(config.hidden_dropout_prob));
if !share_att_key {
if config.pos_att_type.iter().any(|s| s == "c2p") {
pos_key_proj = Some(candle_nn::linear(
config.hidden_size,
all_head_size,
vb.pp("pos_key_proj"),
)?);
}
if config.pos_att_type.iter().any(|s| s == "p2c") {
pos_query_proj = Some(candle_nn::linear(
config.hidden_size,
all_head_size,
vb.pp("pos_query_proj"),
)?);
}
}
}
let dropout = StableDropout::new(config.attention_probs_dropout_prob);
let device = vb.device().clone();
Ok(Self {
config,
num_attention_heads,
query_proj,
key_proj,
value_proj,
dropout,
device,
relative_attention,
pos_dropout,
position_buckets,
max_relative_positions,
pos_ebd_size,
share_att_key,
pos_key_proj,
pos_query_proj,
})
}
pub fn forward(
&self,
hidden_states: &Tensor,
attention_mask: &Tensor,
query_states: Option<&Tensor>,
relative_pos: Option<&Tensor>,
rel_embeddings: Option<&Tensor>,
) -> Result<Tensor> {
let query_states = match query_states {
Some(qs) => qs,
None => hidden_states,
};
let query_layer = self.transpose_for_scores(&self.query_proj.forward(query_states)?)?;
let key_layer = self.transpose_for_scores(&self.key_proj.forward(query_states)?)?;
let value_layer = self.transpose_for_scores(&self.value_proj.forward(query_states)?)?;
let mut rel_att: Option<Tensor> = None;
let mut scale_factor: usize = 1;
if self.config.pos_att_type.iter().any(|s| s == "c2p") {
scale_factor += 1;
}
if self.config.pos_att_type.iter().any(|s| s == "p2c") {
scale_factor += 1;
}
let scale = {
let q_size = query_layer.dim(D::Minus1)?;
Tensor::new(&[(q_size * scale_factor) as f32], &self.device)?.sqrt()?
};
let mut attention_scores: Tensor = {
let key_layer_transposed = key_layer.t()?;
let div = key_layer_transposed
.broadcast_div(scale.to_dtype(query_layer.dtype())?.as_ref())?;
query_layer.matmul(&div)?
};
if self.relative_attention {
if let Some(rel_embeddings) = rel_embeddings {
let rel_embeddings = self
.pos_dropout
.as_ref()
.context("relative_attention requires pos_dropout")?
.forward(rel_embeddings)?;
rel_att = Some(self.disentangled_attention_bias(
query_layer,
key_layer,
relative_pos,
rel_embeddings,
scale_factor,
)?);
}
}
if let Some(rel_att) = rel_att {
attention_scores = attention_scores.broadcast_add(&rel_att)?;
}
attention_scores = attention_scores.reshape((
(),
self.num_attention_heads,
attention_scores.dim(D::Minus2)?,
attention_scores.dim(D::Minus1)?,
))?;
let mut attention_probs =
XSoftmax::apply(&attention_scores, attention_mask, D::Minus1, &self.device)?;
attention_probs = self.dropout.forward(&attention_probs)?;
let mut context_layer = attention_probs
.reshape((
(),
attention_probs.dim(D::Minus2)?,
attention_probs.dim(D::Minus1)?,
))?
.matmul(&value_layer)?;
context_layer = context_layer
.reshape((
(),
self.num_attention_heads,
context_layer.dim(D::Minus2)?,
context_layer.dim(D::Minus1)?,
))?
.permute((0, 2, 1, 3))?
.contiguous()?;
let dims = context_layer.dims();
context_layer = match dims.len() {
2 => context_layer.reshape(())?,
3 => context_layer.reshape((dims[0], ()))?,
4 => context_layer.reshape((dims[0], dims[1], ()))?,
5 => context_layer.reshape((dims[0], dims[1], dims[2], ()))?,
_ => {
bail!(
"Invalid shape for DisentabgledSelfAttention context layer: {:?}",
dims
)
}
};
Ok(context_layer)
}
fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
let dims = xs.dims().to_vec();
match dims.len() {
3 => {
let reshaped = xs.reshape((dims[0], dims[1], self.num_attention_heads, ()))?;
reshaped.transpose(1, 2)?.contiguous()?.reshape((
(),
reshaped.dim(1)?,
reshaped.dim(D::Minus1)?,
))
}
shape => {
bail!("Invalid shape for transpose_for_scores. Expected 3 dimensions, got {shape}")
}
}
}
fn disentangled_attention_bias(
&self,
query_layer: Tensor,
key_layer: Tensor,
relative_pos: Option<&Tensor>,
rel_embeddings: Tensor,
scale_factor: usize,
) -> Result<Tensor> {
let mut relative_pos = relative_pos.map_or(
build_relative_position(
query_layer.dim(D::Minus2)?,
key_layer.dim(D::Minus2)?,
&self.device,
Some(self.position_buckets),
Some(self.max_relative_positions),
)?,
|pos| pos.clone(),
);
relative_pos = match relative_pos.dims().len() {
2 => relative_pos.unsqueeze(0)?.unsqueeze(0)?,
3 => relative_pos.unsqueeze(1)?,
other => {
bail!("Relative position ids must be of dim 2 or 3 or 4. Got dim of size {other}")
}
};
let att_span = self.pos_ebd_size;
let rel_embeddings = rel_embeddings
.narrow(0, 0, (att_span * 2) as usize)?
.unsqueeze(0)?;
let mut pos_query_layer: Option<Tensor> = None;
let mut pos_key_layer: Option<Tensor> = None;
let repeat_with = query_layer.dim(0)? / self.num_attention_heads;
if self.share_att_key {
pos_query_layer = Some(
self.transpose_for_scores(&self.query_proj.forward(&rel_embeddings)?)?
.repeat(repeat_with)?,
);
pos_key_layer = Some(
self.transpose_for_scores(&self.key_proj.forward(&rel_embeddings)?)?
.repeat(repeat_with)?,
)
} else {
if self.config.pos_att_type.iter().any(|s| s == "c2p") {
pos_key_layer = Some(
self.transpose_for_scores(
&self
.pos_key_proj
.as_ref()
.context(
"Need pos_key_proj when share_att_key is false or not specified",
)?
.forward(&rel_embeddings)?,
)?
.repeat(repeat_with)?,
)
}
if self.config.pos_att_type.iter().any(|s| s == "p2c") {
pos_query_layer = Some(self.transpose_for_scores(&self
.pos_query_proj
.as_ref()
.context("Need a pos_query_proj when share_att_key is false or not specified")?
.forward(&rel_embeddings)?)?.repeat(repeat_with)?)
}
}
let mut score = Tensor::new(&[0 as f32], &self.device)?;
if self.config.pos_att_type.iter().any(|s| s == "c2p") {
let pos_key_layer = pos_key_layer.context("c2p without pos_key_layer")?;
let scale = Tensor::new(
&[(pos_key_layer.dim(D::Minus1)? * scale_factor) as f32],
&self.device,
)?
.sqrt()?;
let mut c2p_att = query_layer.matmul(&pos_key_layer.t()?)?;
let c2p_pos = relative_pos
.broadcast_add(&Tensor::new(&[att_span as i64], &self.device)?)?
.clamp(0 as f32, (att_span * 2 - 1) as f32)?;
c2p_att = c2p_att.gather(
&c2p_pos
.squeeze(0)?
.expand(&[
query_layer.dim(0)?,
query_layer.dim(1)?,
relative_pos.dim(D::Minus1)?,
])?
.contiguous()?,
D::Minus1,
)?;
score = score.broadcast_add(
&c2p_att.broadcast_div(scale.to_dtype(c2p_att.dtype())?.as_ref())?,
)?;
}
if self.config.pos_att_type.iter().any(|s| s == "p2c") {
let pos_query_layer = pos_query_layer.context("p2c without pos_key_layer")?;
let scale = Tensor::new(
&[(pos_query_layer.dim(D::Minus1)? * scale_factor) as f32],
&self.device,
)?
.sqrt()?;
let r_pos = {
if key_layer.dim(D::Minus2)? != query_layer.dim(D::Minus2)? {
build_relative_position(
key_layer.dim(D::Minus2)?,
key_layer.dim(D::Minus2)?,
&self.device,
Some(self.position_buckets),
Some(self.max_relative_positions),
)?
.unsqueeze(0)?
} else {
relative_pos
}
};
let p2c_pos = r_pos
.to_dtype(DType::F32)?
.neg()?
.broadcast_add(&Tensor::new(&[att_span as f32], &self.device)?)?
.clamp(0f32, (att_span * 2 - 1) as f32)?;
let p2c_att = key_layer
.matmul(&pos_query_layer.t()?)?
.gather(
&p2c_pos
.squeeze(0)?
.expand(&[
query_layer.dim(0)?,
key_layer.dim(D::Minus2)?,
key_layer.dim(D::Minus2)?,
])?
.contiguous()?
.to_dtype(DType::U32)?,
D::Minus1,
)?
.t()?;
score =
score.broadcast_add(&p2c_att.broadcast_div(&scale.to_dtype(p2c_att.dtype())?)?)?;
}
Ok(score)
}
}
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L270
/// Attention block: disentangled self-attention followed by the residual
/// output projection (dense + dropout + layer norm).
pub struct DebertaV2Attention {
    dsa: DebertaV2DisentangledSelfAttention, // disentangled self-attention
    output: DebertaV2SelfOutput, // projection applied on top of the attention result
}
impl DebertaV2Attention {
    /// Builds the block from the `attention.self` / `attention.output`
    /// weight prefixes of `vb`.
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            dsa: DebertaV2DisentangledSelfAttention::load(vb.pp("attention.self"), config)?,
            output: DebertaV2SelfOutput::load(vb.pp("attention.output"), config)?,
        })
    }

    /// Runs disentangled self-attention, then the output projection with a
    /// residual connection to `query_states` (or `hidden_states` when absent).
    fn forward(
        &self,
        hidden_states: &Tensor,
        attention_mask: &Tensor,
        query_states: Option<&Tensor>,
        relative_pos: Option<&Tensor>,
        rel_embeddings: Option<&Tensor>,
    ) -> Result<Tensor> {
        let attended = self.dsa.forward(
            hidden_states,
            attention_mask,
            query_states,
            relative_pos,
            rel_embeddings,
        )?;
        let residual = query_states.unwrap_or(hidden_states);
        self.output.forward(&attended, residual)
    }
}
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L255
/// Post-attention projection: dense layer, dropout, then layer norm over the
/// sum with the residual input.
pub struct DebertaV2SelfOutput {
    dense: candle_nn::Linear, // hidden_size -> hidden_size projection
    layer_norm: LayerNorm,
    dropout: StableDropout,
}
impl DebertaV2SelfOutput {
    /// Loads the dense / LayerNorm weights and configures dropout.
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            dense: candle_nn::linear(config.hidden_size, config.hidden_size, vb.pp("dense"))?,
            layer_norm: candle_nn::layer_norm(
                config.hidden_size,
                config.layer_norm_eps,
                vb.pp("LayerNorm"),
            )?,
            dropout: StableDropout::new(config.hidden_dropout_prob),
        })
    }

    /// Projects `hidden_states`, applies dropout, adds the residual
    /// `input_tensor` and normalizes the sum.
    pub fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
        let projected = self.dropout.forward(&self.dense.forward(hidden_states)?)?;
        self.layer_norm
            .forward(&projected.broadcast_add(input_tensor)?)
    }
}
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L307
/// Feed-forward expansion: dense layer to the intermediate size followed by
/// the configured hidden activation.
pub struct DebertaV2Intermediate {
    dense: candle_nn::Linear, // hidden_size -> intermediate_size
    intermediate_act: HiddenActLayer,
}
impl DebertaV2Intermediate {
    /// Loads the `intermediate.dense` weights and the activation layer.
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            dense: candle_nn::linear(
                config.hidden_size,
                config.intermediate_size,
                vb.pp("intermediate.dense"),
            )?,
            intermediate_act: HiddenActLayer::new(config.hidden_act),
        })
    }

    /// Expands `hidden_states` to the intermediate size and applies the
    /// activation.
    pub fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let expanded = self.dense.forward(hidden_states)?;
        self.intermediate_act.forward(&expanded)
    }
}
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L323
/// Feed-forward contraction: dense layer back to the hidden size, dropout,
/// then layer norm over the sum with the residual input.
pub struct DebertaV2Output {
    dense: candle_nn::Linear, // intermediate_size -> hidden_size
    layer_norm: LayerNorm,
    dropout: StableDropout,
}
impl DebertaV2Output {
    /// Loads the `output.dense` / `output.LayerNorm` weights and configures
    /// dropout.
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        Ok(Self {
            dense: candle_nn::linear(
                config.intermediate_size,
                config.hidden_size,
                vb.pp("output.dense"),
            )?,
            layer_norm: candle_nn::layer_norm(
                config.hidden_size,
                config.layer_norm_eps,
                vb.pp("output.LayerNorm"),
            )?,
            dropout: StableDropout::new(config.hidden_dropout_prob),
        })
    }

    /// Projects `hidden_states` back to the hidden size, applies dropout,
    /// adds the residual `input_tensor` and normalizes the sum.
    pub fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
        let projected = self.dropout.forward(&self.dense.forward(hidden_states)?)?;
        self.layer_norm
            .forward(&projected.broadcast_add(input_tensor)?)
    }
}
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L339
/// One transformer layer: attention block followed by the feed-forward
/// intermediate/output pair.
pub struct DebertaV2Layer {
    attention: DebertaV2Attention,
    intermediate: DebertaV2Intermediate,
    output: DebertaV2Output,
}
impl DebertaV2Layer {
    /// Loads the three sub-modules; they all resolve their own sub-prefixes
    /// under the same `vb` root.
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let attention = DebertaV2Attention::load(vb.clone(), config)?;
        let intermediate = DebertaV2Intermediate::load(vb.clone(), config)?;
        // Last use of `vb`: move it instead of cloning a third time.
        let output = DebertaV2Output::load(vb, config)?;
        Ok(Self {
            attention,
            intermediate,
            output,
        })
    }

    /// Attention, then feed-forward with a residual back to the attention
    /// output.
    fn forward(
        &self,
        hidden_states: &Tensor,
        attention_mask: &Tensor,
        query_states: Option<&Tensor>,
        relative_pos: Option<&Tensor>,
        rel_embeddings: Option<&Tensor>,
    ) -> Result<Tensor> {
        let attention_output = self.attention.forward(
            hidden_states,
            attention_mask,
            query_states,
            relative_pos,
            rel_embeddings,
        )?;
        let intermediate_output = self.intermediate.forward(&attention_output)?;
        self.output.forward(&intermediate_output, &attention_output)
    }
}
// TODO: In order to fully test ConvLayer a model needs to be found has a configuration where `conv_kernel_size` exists and is > 0
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L373
/// Optional convolutional layer used when `conv_kernel_size` is configured.
/// Fields are underscore-prefixed because `forward` is not implemented yet
/// (no known checkpoint exercises this path — see the TODO above).
pub struct ConvLayer {
    _conv_act: String, // activation name, defaults to "tanh"
    _conv: Conv1d,
    _layer_norm: LayerNorm,
    _dropout: StableDropout,
    _config: Config,
}
impl ConvLayer {
    /// Loads the conv / LayerNorm weights. Kernel size and groups fall back
    /// to 3 and 1 when unset, matching the reference implementation.
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let config = config.clone();
        let kernel_size = config.conv_kernel_size.unwrap_or(3);
        let groups = config.conv_groups.unwrap_or(1);
        // `unwrap_or_else` avoids allocating the default string when the
        // config already provides an activation name.
        let conv_act: String = config
            .conv_act
            .clone()
            .unwrap_or_else(|| "tanh".to_string());
        let conv_conf = Conv1dConfig {
            // "Same" padding for odd kernel sizes.
            padding: (kernel_size - 1) / 2,
            groups,
            ..Default::default()
        };
        let conv = conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size,
            conv_conf,
            vb.pp("conv"),
        )?;
        let layer_norm = layer_norm(
            config.hidden_size,
            config.layer_norm_eps,
            vb.pp("LayerNorm"),
        )?;
        let dropout = StableDropout::new(config.hidden_dropout_prob);
        Ok(Self {
            _conv_act: conv_act,
            _conv: conv,
            _layer_norm: layer_norm,
            _dropout: dropout,
            _config: config,
        })
    }

    /// Unimplemented: no known checkpoint configures a conv layer, so there
    /// is nothing to validate an implementation against (see TODO above the
    /// struct definition).
    pub fn forward(
        &self,
        _hidden_states: &Tensor,
        _residual_states: &Tensor,
        _input_mask: &Tensor,
    ) -> Result<Tensor> {
        todo!("Need a model that contains a conv layer to test against.")
    }
}
// https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/deberta_v2/modeling_deberta_v2.py#L409
/// Stack of transformer layers plus the shared relative-position machinery.
pub struct DebertaV2Encoder {
    layer: Vec<DebertaV2Layer>, // one entry per hidden layer
    relative_attention: bool, // whether relative attention is enabled
    max_relative_positions: isize,
    position_buckets: isize, // -1 when bucketing is disabled
    rel_embeddings: Option<Embedding>, // present only when relative_attention is true
    norm_rel_ebd: String, // "layer_norm" enables normalization of rel_embeddings
    layer_norm: Option<LayerNorm>,
    conv: Option<ConvLayer>,
    device: Device,
}
impl DebertaV2Encoder {
pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let layer = (0..config.num_hidden_layers)
.map(|index| DebertaV2Layer::load(vb.pp(format!("layer.{index}")), config))
.collect::<Result<Vec<_>>>()?;
let relative_attention = config.relative_attention;
let mut max_relative_positions = config.max_relative_positions;
let position_buckets = config.position_buckets.unwrap_or(-1);
let mut rel_embeddings: Option<Embedding> = None;
if relative_attention {
if max_relative_positions < 1 {
max_relative_positions = config.max_position_embeddings as isize;
}
let mut pos_ebd_size = max_relative_positions * 2;
if position_buckets > 0 {
pos_ebd_size = position_buckets * 2;
}
rel_embeddings = Some(embedding(
pos_ebd_size as usize,
config.hidden_size,
vb.pp("rel_embeddings"),
)?);
}
// NOTE: The Python code assumes that the config attribute "norm_rel_ebd" is an array of some kind, but most examples have it as a string.
// So it might need to be updated at some point.
let norm_rel_ebd = match config.norm_rel_ebd.as_ref() {
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.