| text | id | metadata | __index_level_0__ |
|---|---|---|---|
use crate::Result;
pub(super) fn nearest_int(v: f32) -> i32 {
v.round() as i32
}
/// Validates that the input and output are the right size and returns a vector pairing each
/// input region of `xs` with its corresponding output block in `ys`. Each input region is
/// guaranteed to be `T::BLCK_SIZE` long.
pub(super) fn group_for_quantization<'a, 'b, T: super::k_quants::GgmlType>(
xs: &'b [f32],
ys: &'a mut [T],
) -> Result<Vec<(&'a mut T, &'b [f32])>> {
let block_size = T::BLCK_SIZE;
let dtype = T::DTYPE;
let expected_blocks = xs.len() / block_size;
let actual_blocks = ys.len();
// Validate that the input is the right size
if expected_blocks != actual_blocks {
crate::bail!("quantize {dtype:?}: expected {expected_blocks} blocks but only {actual_blocks} were provided!")
}
Ok(ys.iter_mut().zip(xs.chunks_exact(block_size)).collect())
}
/// Validates that the input and output are the right size and returns a vector pairing each
/// input block of `xs` with its corresponding output region in `ys`. Each output region is
/// guaranteed to be `T::BLCK_SIZE` long.
pub(super) fn group_for_dequantization<'a, 'b, T: super::k_quants::GgmlType>(
xs: &'a [T],
ys: &'b mut [f32],
) -> Result<Vec<(&'a T, &'b mut [f32])>> {
let block_size = T::BLCK_SIZE;
let dtype = T::DTYPE;
let actual_output_len = ys.len();
let expected_output_len = xs.len() * block_size;
// Validate that the output is the right size
if expected_output_len != actual_output_len {
crate::bail!("dequantize {dtype:?}: ys (len = {actual_output_len}) does not match the expected length of {expected_output_len}!")
}
// Zip the blocks and outputs together
Ok(xs.iter().zip(ys.chunks_exact_mut(block_size)).collect())
}
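// Illustrative sketch (not part of the original crate): the same validate-then-zip pattern used
// by the two helpers above, written against a hypothetical block size of 4 instead of
// `T::BLCK_SIZE` so it runs on plain slices without the `GgmlType` machinery.
#[cfg(test)]
mod grouping_sketch {
    #[test]
    fn chunk_and_zip() {
        let block_size = 4usize; // hypothetical stand-in for T::BLCK_SIZE
        let xs: Vec<f32> = (0..8).map(|v| v as f32).collect();
        // One "block" summary per input chunk, mirroring `ys.len() == xs.len() / block_size`.
        let mut ys = vec![0f32; xs.len() / block_size];
        for (y, chunk) in ys.iter_mut().zip(xs.chunks_exact(block_size)) {
            // Stand-in for per-block quantization: keep the largest absolute value of the chunk.
            *y = chunk.iter().fold(0f32, |m, v| m.max(v.abs()));
        }
        assert_eq!(ys, vec![3.0, 7.0]);
    }
}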
/// Extracts the 6-bit scale (`d`) and 6-bit min (`m`) for sub-block `j` from the packed
/// 12-byte scales field used by the k-quant formats (e.g. Q4_K / Q5_K): the first four pairs
/// live in the low 6 bits of `q[0..4]` / `q[4..8]`, while the remaining four are reassembled
/// from the nibbles of `q[8..12]` and the top two bits of `q[0..8]`.
pub(super) fn get_scale_min_k4(j: usize, q: &[u8]) -> (u8, u8) {
if j < 4 {
let d = q[j] & 63;
let m = q[j + 4] & 63;
(d, m)
} else {
let d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
let m = (q[j + 4] >> 4) | ((q[j] >> 6) << 4);
(d, m)
}
}
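// Fits a single scale for `n` values so that `x[i] ≈ scale * l[i]`, with the integer levels
// `l[i]` clamped to `[-nmax, nmax - 1]`, and writes the shifted levels `l[i] + nmax` into `ls`.
// `rmse_type` selects the error weighting (odd values weight the squared error by `x^2`) and
// whether the extra refinement passes below are run. Callers must guarantee that `x` and `ls`
// point to at least `n` valid elements.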
pub(super) unsafe fn make_qx_quants(
n: usize,
nmax: i32,
x: *const f32,
ls: *mut i8,
rmse_type: i32,
) -> f32 {
let mut max = 0f32;
let mut amax = 0f32;
for i in 0..n {
let x = *x.add(i);
let ax = x.abs();
if ax > amax {
amax = ax;
max = x;
}
}
if amax == 0. {
// all zero
for i in 0..n {
*ls.add(i) = 0;
}
return 0.;
}
let mut iscale = -(nmax as f32) / max;
if rmse_type == 0 {
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
*ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8;
}
return 1.0 / iscale;
}
let weight_type = rmse_type % 2;
let mut sumlx = 0f32;
let mut suml2 = 0f32;
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
let l = l.clamp(-nmax, nmax - 1);
*ls.add(i) = (l + nmax) as i8;
let w = if weight_type == 1 { x * x } else { 1.0 };
let l = l as f32;
sumlx += w * x * l;
suml2 += w * l * l;
}
let mut scale = sumlx / suml2;
let mut best = scale * sumlx;
for _itry in 0..3 {
let iscale = 1.0 / scale;
let mut slx = 0f32;
let mut sl2 = 0f32;
let mut changed = false;
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
let l = l.clamp(-nmax, nmax - 1);
if l + nmax != *ls.add(i) as i32 {
changed = true;
}
let w = if weight_type == 1 { x * x } else { 1f32 };
let l = l as f32;
slx += w * x * l;
sl2 += w * l * l;
}
if !changed || sl2 == 0.0 || slx * slx <= best * sl2 {
break;
}
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
*ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8;
}
sumlx = slx;
suml2 = sl2;
scale = sumlx / suml2;
best = scale * sumlx;
}
for _itry in 0..5 {
let mut n_changed = 0;
for i in 0..n {
let x = *x.add(i);
let w = if weight_type == 1 { x * x } else { 1. };
let l = *ls.add(i) as i32 - nmax;
let mut slx = sumlx - w * x * l as f32;
if slx > 0. {
let mut sl2 = suml2 - w * l as f32 * l as f32;
let new_l = nearest_int(x * sl2 / slx);
let new_l = new_l.clamp(-nmax, nmax - 1);
if new_l != l {
slx += w * x * new_l as f32;
sl2 += w * new_l as f32 * new_l as f32;
if sl2 > 0. && slx * slx * suml2 > sumlx * sumlx * sl2 {
*ls.add(i) = (nmax + new_l) as i8;
sumlx = slx;
suml2 = sl2;
scale = sumlx / suml2;
best = scale * sumlx;
n_changed += 1;
}
}
}
}
if n_changed == 0 {
break;
}
}
if rmse_type < 3 {
return scale;
}
for is in -4..4 {
if is == 0 {
continue;
}
iscale = -(nmax as f32 + 0.1f32 * is as f32) / max;
let mut sumlx = 0.;
let mut suml2 = 0.;
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
let l = l.clamp(-nmax, nmax - 1);
let w = if weight_type == 1 { x * x } else { 1. };
let l = l as f32;
sumlx += w * x * l;
suml2 += w * l * l;
}
if suml2 > 0. && sumlx * sumlx > best * suml2 {
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
*ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8;
}
scale = sumlx / suml2;
best = scale * sumlx;
}
}
scale
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L224
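// Fits an affine mapping `x[i] ≈ scale * l[i] + min` with levels `l[i]` in `[0, nmax]`,
// alternating between re-quantizing the levels and re-estimating `min` for up to `ntry`
// rounds; returns `(scale, -min)` so the caller stores a non-negative "min" value.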
pub(super) fn make_qkx1_quants(nmax: i32, ntry: usize, x: &[f32]) -> (f32, f32) {
let n = x.len();
let mut l = vec![0; n];
// Get min/max
let min = *x
.iter()
.take(n)
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&x[0]);
let max = *x.iter().max_by(|a, b| a.total_cmp(b)).unwrap_or(&x[0]);
// If min == max, all values are the same => nothing to do here
if max == min {
return (0.0, 0.0);
}
// Ensure min <= 0.0
let mut min = min.min(0.);
// Compute scale and inverse scale
let mut iscale = nmax as f32 / (max - min);
let mut scale = 1.0 / iscale;
for _ in 0..ntry {
let mut sumlx = 0.0;
let mut suml2 = 0;
let mut did_change = false;
for (i, value) in x.iter().enumerate().take(n) {
let li = nearest_int(iscale * (value - min)).clamp(0, nmax);
let clamped_li = li as u8;
if clamped_li != l[i] {
l[i] = clamped_li;
did_change = true;
}
sumlx += (value - min) * li as f32;
suml2 += li * li;
}
scale = sumlx / suml2 as f32;
let sum: f32 = x
.iter()
.take(n)
.zip(l.iter().take(n))
.map(|(xi, &li)| xi - scale * li as f32)
.sum();
min = sum / n as f32;
if min > 0.0 {
min = 0.0;
}
iscale = 1.0 / scale;
if !did_change {
break;
}
}
(scale, -min)
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L165
pub(super) fn make_q3_quants(x: &[f32], nmax: i32, do_rmse: bool) -> f32 {
let n = x.len();
let mut l = vec![0i8; n];
let mut max = 0.0;
let mut amax = 0.0;
for &xi in x.iter().take(n) {
let ax = xi.abs();
if ax > amax {
amax = ax;
max = xi;
}
}
if amax == 0.0 {
return 0.0;
}
let iscale = -(nmax as f32) / max;
if do_rmse {
let mut sumlx = 0.0;
let mut suml2 = 0.0;
for i in 0..n {
let li = (iscale * x[i]).round() as i32;
let li = li.clamp(-nmax, nmax - 1);
l[i] = li as i8;
let w = x[i] * x[i];
sumlx += w * x[i] * li as f32;
suml2 += w * (li * li) as f32;
}
for _ in 0..5 {
let mut n_changed = 0;
for i in 0..n {
let w = x[i] * x[i];
let mut slx = sumlx - w * x[i] * l[i] as f32;
if slx > 0.0 {
let mut sl2 = suml2 - w * (l[i] as i32 * l[i] as i32) as f32;
let mut new_l = (x[i] * sl2 / slx).round() as i32;
new_l = new_l.clamp(-nmax, nmax - 1);
if new_l != l[i] as i32 {
slx += w * x[i] * new_l as f32;
sl2 += w * (new_l * new_l) as f32;
if sl2 > 0.0 && slx * slx * suml2 > sumlx * sumlx * sl2 {
l[i] = new_l as i8;
sumlx = slx;
suml2 = sl2;
n_changed += 1;
}
}
}
}
if n_changed == 0 {
break;
}
}
for li in l.iter_mut() {
*li += nmax as i8;
}
return sumlx / suml2;
}
for i in 0..n {
let li = (iscale * x[i]).round() as i32;
l[i] = (li.clamp(-nmax, nmax - 1) + nmax) as i8;
}
1.0 / iscale
}
| candle/candle-core/src/quantized/utils.rs/0 | {
"file_path": "candle/candle-core/src/quantized/utils.rs",
"repo_id": "candle",
"token_count": 5775
} | 27 |
[package]
name = "candle-datasets"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"
[dependencies]
byteorder = { workspace = true }
candle = { workspace = true }
candle-nn = { workspace = true }
hf-hub = { workspace = true}
intel-mkl-src = { workspace = true, optional = true }
memmap2 = { workspace = true }
tokenizers = { workspace = true, features = ["onig"] }
rand = { workspace = true }
thiserror = { workspace = true }
parquet = { workspace = true}
image = { workspace = true }
| candle/candle-datasets/Cargo.toml/0 | {
"file_path": "candle/candle-datasets/Cargo.toml",
"repo_id": "candle",
"token_count": 201
} | 28 |
# Colpali
[HuggingFace Model Card](https://huggingface.co/vidore/colpali-v1.2-merged)
```bash
wget https://arxiv.org/pdf/1706.03762.pdf
cargo run --features cuda,pdf2image --release --example colpali -- --prompt "What is Positional Encoding" --pdf "1706.03762.pdf"
```
```
Prompt: what is position encoding?
top 3 page numbers that contain similarity to the prompt
-----------------------------------
Page: 6
Page: 11
Page: 15
-----------------------------------
``` | candle/candle-examples/examples/colpali/README.md/0 | {
"file_path": "candle/candle-examples/examples/colpali/README.md",
"repo_id": "candle",
"token_count": 153
} | 29 |
# DeepSeek V2
DeepSeek V2 is an MoE model featuring MLA (Multi-head Latent Attention). There is a lite (16B) and a full (236B) model.
- Context length of **32k tokens** (Lite model), **128k tokens** (full model)
- 64 routed experts (Lite model), 160 routed experts (full model)
## Running the example
```bash
$ cargo run --example deepseekv2 --release --features metal -- --prompt "Recursive fibonacci code in Rust:" --which lite --sample-len 150
fn fibonacci(n: u32) -> u32 {
if n <= 1 {
return n;
} else {
return fibonacci(n - 1) + fibonacci(n - 2);
}
}
## Fibonacci code in Python:
def fibonacci(n):
if n <= 1:
return n
else:
return fibonacci(n-1) + fibonacci(n-2)
## Fibonacci code in JavaScript:
function fibonacci(n) {
if (n <= 1
```
| candle/candle-examples/examples/deepseekv2/README.md/0 | {
"file_path": "candle/candle-examples/examples/deepseekv2/README.md",
"repo_id": "candle",
"token_count": 316
} | 30 |
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use candle_transformers::models::glm4::{Config as ConfigOld, EosTokenId, Model as ModelOld};
use candle_transformers::models::glm4_new::{Config as ConfigNew, ModelForCausalLM as ModelNew};
use clap::Parser;
use hf_hub::{Repo, RepoType};
use tokenizers::Tokenizer;
enum Model {
Old(ModelOld),
New(ModelNew),
}
impl Model {
fn forward(&mut self, input_ids: &Tensor, pos: usize) -> candle::Result<Tensor> {
match self {
Self::Old(m) => m.forward(input_ids),
Self::New(m) => m.forward(input_ids, pos),
}
}
}
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Which {
#[value(name = "glm4-old")]
GLM4Old,
#[value(name = "glm4-new")]
GLM4New,
}
struct TextGeneration {
model: Model,
device: Device,
tokenizer: Tokenizer,
logits_processor: LogitsProcessor,
args: Args,
eos_tokens: Vec<u32>,
}
impl TextGeneration {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
args: Args,
device: &Device,
eos_tokens: Vec<u32>,
) -> Self {
let logits_processor =
LogitsProcessor::new(args.seed, Some(args.temperature), Some(args.top_p));
Self {
model,
tokenizer,
logits_processor,
args,
device: device.clone(),
eos_tokens,
}
}
fn run(&mut self) -> anyhow::Result<()> {
use std::io::Write;
let args = &self.args;
println!("starting the inference loop");
let prompt = format!("[gMASK]<sop><|user|>\n{}<|assistant|>", args.prompt);
let tokens = self.tokenizer.encode(prompt, true).expect("tokens error");
if tokens.is_empty() {
panic!("Empty prompts are not supported in the chatglm model.")
}
if args.verbose {
for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) {
let token = token.replace('▁', " ").replace("<0x0A>", "\n");
println!("{id:7} -> '{token}'");
}
} else {
print!("{}", &args.prompt);
std::io::stdout().flush()?;
}
let mut tokens = tokens.get_ids().to_vec();
let mut generated_tokens = 0usize;
std::io::stdout().flush().expect("output flush error");
let start_gen = std::time::Instant::now();
for index in 0..args.sample_len {
let context_size = if index > 0 { 1 } else { tokens.len() };
let start_pos = tokens.len().saturating_sub(context_size);
let ctxt = &tokens[start_pos..];
let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?;
let logits = self.model.forward(&input, start_pos)?;
let logits = match self.model {
Model::Old(_) => logits.squeeze(0)?.to_dtype(DType::F32)?,
Model::New(_) => logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?,
};
let logits = if args.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
args.repeat_penalty,
&tokens[start_at..],
)?
};
let next_token = self.logits_processor.sample(&logits)?;
tokens.push(next_token);
generated_tokens += 1;
if self.eos_tokens.contains(&next_token) {
break;
}
let token = self
.tokenizer
.decode(&[next_token], true)
.expect("token decode error");
if args.verbose {
println!(
"[Count: {generated_tokens}] [Raw Token: {next_token}] [Decode Token: {token}]"
);
} else {
print!("{token}");
std::io::stdout().flush()?;
}
}
let dt = start_gen.elapsed();
println!(
"\n{generated_tokens} tokens generated ({:.2} token/s)",
generated_tokens as f64 / dt.as_secs_f64(),
);
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
#[arg(name = "cache", short)]
cache_path: Option<String>,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// The prompt to run the model on.
#[arg(long)]
prompt: String,
/// Display the tokens for the specified prompt and outputs.
#[arg(long)]
verbose: bool,
/// The temperature used to generate samples.
#[arg(long, default_value_t = 0.8)]
temperature: f64,
/// Nucleus sampling probability cutoff.
#[arg(long, default_value_t = 0.8)]
top_p: f64,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The length of the sample to generate (in tokens).
#[arg(long, short = 'n', default_value_t = 8192)]
sample_len: usize,
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
#[arg(long)]
weight_path: Option<String>,
#[arg(long)]
tokenizer: Option<String>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.2)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
/// Specifies the model type (e.g., GLM4-Old or GLM4-New, such as GLM4-0414).
/// This argument is required because the two architectures are incompatible.
/// For example, if the user does not explicitly specify the model type (defaulting to "glm4-old"),
/// but provides a GLM4-New model ID, it can cause a runtime panic during model execution!
#[arg(long)]
which: Which,
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
println!(
"temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
args.temperature, args.repeat_penalty, args.repeat_last_n
);
let start = std::time::Instant::now();
let api = match args.cache_path.as_ref() {
None => hf_hub::api::sync::Api::new()?,
Some(path) => {
hf_hub::api::sync::ApiBuilder::from_cache(hf_hub::Cache::new(path.to_string().into()))
.build()
.map_err(anyhow::Error::msg)?
}
};
let model_id = match args.model_id.as_ref() {
Some(model_id) => model_id.to_string(),
None => match args.which {
Which::GLM4Old => "THUDM/glm-4-9b".to_string(),
Which::GLM4New => "THUDM/GLM-4-9B-0414".to_string(),
},
};
let revision = match args.revision.as_ref() {
Some(rev) => rev.to_string(),
None => "main".to_string(),
};
let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision));
let tokenizer_filename = match (args.weight_path.as_ref(), args.tokenizer.as_ref()) {
(Some(_), Some(file)) => std::path::PathBuf::from(file),
(None, Some(file)) => std::path::PathBuf::from(file),
(Some(path), None) => std::path::Path::new(path).join("tokenizer.json"),
(None, None) => repo.get("tokenizer.json")?,
};
let config_filename = match &args.weight_path {
Some(path) => std::path::Path::new(path).join("config.json"),
_ => repo.get("config.json")?,
};
let filenames = match &args.weight_path {
Some(path) => {
candle_examples::hub_load_local_safetensors(path, "model.safetensors.index.json")?
}
_ => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?,
};
println!("retrieved the files in {:?}", start.elapsed());
let tokenizer = Tokenizer::from_file(tokenizer_filename).expect("Tokenizer Error");
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let dtype = if device.is_cuda() {
DType::BF16
} else {
DType::F32
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? };
let (model, eos_token_id) = match args.which {
Which::GLM4Old => {
let config: ConfigOld = serde_json::from_slice(&std::fs::read(config_filename)?)?;
let model = ModelOld::new(&config, vb)?;
(Model::Old(model), config.eos_token_id)
}
Which::GLM4New => {
let config: ConfigNew = serde_json::from_slice(&std::fs::read(config_filename)?)?;
let model = ModelNew::new(&config, vb)?;
(Model::New(model), config.eos_token_id)
}
};
let mut eos_tokens = Vec::new();
match eos_token_id {
Some(EosTokenId::Single(eos)) => {
eos_tokens.push(eos);
}
Some(EosTokenId::Multiple(eos_vec)) => {
eos_tokens.extend(eos_vec);
}
_ => {
let eos_token = match args.which {
Which::GLM4Old => "<|endoftext|>",
Which::GLM4New => "<|user|>",
};
match tokenizer.get_vocab(true).get(eos_token) {
Some(token) => eos_tokens.push(*token),
None => panic!("cannot find the endoftext token"),
};
}
}
println!("loaded the model in {:?}", start.elapsed());
let mut pipeline = TextGeneration::new(model, tokenizer, args, &device, eos_tokens);
pipeline.run()?;
Ok(())
}
| candle/candle-examples/examples/glm4/main.rs/0 | {
"file_path": "candle/candle-examples/examples/glm4/main.rs",
"repo_id": "candle",
"token_count": 4770
} | 31 |
use candle::backend::BackendStorage;
use candle::{CpuStorage, CustomOp1, DType, Device, IndexOp, Layout, Result, Shape, Tensor, D};
use candle_nn::var_builder::ShardedVarBuilder as VarBuilder;
use candle_nn::{Embedding, Linear, Module, RmsNorm};
use cudarc::nccl::safe::{Comm, ReduceOp};
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use super::MAX_SEQ_LEN;
pub type Config = candle_transformers::models::llama::LlamaConfig;
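// Column-parallel linear layer: the weight is sharded along the output dimension (dim 0), so
// each rank computes a distinct slice of the output features and no cross-rank reduction is
// needed after the matmul.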
struct TensorParallelColumnLinear {
linear: Linear,
}
impl TensorParallelColumnLinear {
fn new(linear: Linear) -> Self {
Self { linear }
}
fn forward(&self, x: &Tensor) -> Result<Tensor> {
self.linear.forward(x)
}
}
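// Row-parallel linear layer: the weight is sharded along the input dimension (dim 1), so each
// rank only sees part of the input features and produces a partial sum; the partial outputs
// are combined across ranks with an NCCL all-reduce.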
struct TensorParallelRowLinear {
linear: Linear,
all_reduce: AllReduce,
}
struct AllReduce {
comm: Rc<Comm>,
}
/// This is actually not safe: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/threadsafety.html
/// But for the purposes of this example, it will work.
unsafe impl Sync for AllReduce {}
unsafe impl Send for AllReduce {}
impl CustomOp1 for AllReduce {
fn name(&self) -> &'static str {
"allreduce"
}
fn cpu_fwd(&self, _s: &CpuStorage, _l: &Layout) -> Result<(CpuStorage, Shape)> {
candle::bail!("AllReduce is never used on cpu")
}
#[cfg(feature = "cuda")]
fn cuda_fwd(
&self,
s: &candle::CudaStorage,
l: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
use candle::cuda_backend::WrapErr;
use cudarc::driver::DeviceSlice;
use half::{bf16, f16};
let elem_count = l.shape().elem_count();
let dev = s.device().clone();
let dst = match s.dtype() {
DType::BF16 => {
let s = s.as_cuda_slice::<bf16>()?;
let s = match l.contiguous_offsets() {
Some((0, l)) if l == s.len() => s,
Some(_) | None => candle::bail!("input has to be contiguous"),
};
let mut dst = unsafe { dev.alloc::<bf16>(elem_count) }.w()?;
self.comm
.all_reduce(s, &mut dst, &ReduceOp::Sum)
.map_err(candle::Error::debug)?;
candle::CudaStorage::wrap_cuda_slice(dst, dev)
}
DType::F16 => {
let s = s.as_cuda_slice::<f16>()?;
let s = match l.contiguous_offsets() {
Some((0, l)) if l == s.len() => s,
Some(_) | None => candle::bail!("input has to be contiguous"),
};
let mut dst = unsafe { dev.alloc::<f16>(elem_count) }.w()?;
self.comm
.all_reduce(s, &mut dst, &ReduceOp::Sum)
.map_err(candle::Error::debug)?;
candle::CudaStorage::wrap_cuda_slice(dst, dev)
}
dtype => candle::bail!("unsupported dtype {dtype:?}"),
};
Ok((dst, l.shape().clone()))
}
}
impl TensorParallelRowLinear {
fn new(linear: Linear, comm: Rc<Comm>) -> Self {
let all_reduce = AllReduce { comm };
Self { linear, all_reduce }
}
fn forward(&self, x: &Tensor) -> Result<Tensor> {
self.linear.forward(x)?.apply_op1_no_bwd(&self.all_reduce)
}
}
fn shard(dim: usize, rank: usize, world_size: usize) -> candle_nn::var_builder::Shard {
candle_nn::var_builder::Shard {
dim,
rank,
world_size,
}
}
impl TensorParallelColumnLinear {
fn load(vb: VarBuilder, comm: Rc<Comm>) -> Result<Self> {
let rank = comm.rank();
let size = comm.world_size();
let weight = vb.get_with_hints((), "weight", shard(0, rank, size))?;
Ok(Self::new(Linear::new(weight, None)))
}
fn load_multi(vb: VarBuilder, prefixes: &[&str], comm: Rc<Comm>) -> Result<Self> {
let rank = comm.rank();
let size = comm.world_size();
let weights: Vec<_> = prefixes
.iter()
.map(|p| vb.pp(p).get_with_hints((), "weight", shard(0, rank, size)))
.collect::<Result<Vec<_>>>()?;
let weight = Tensor::cat(&weights, 0)?;
Ok(Self::new(Linear::new(weight, None)))
}
}
impl TensorParallelRowLinear {
fn load(vb: VarBuilder, comm: Rc<Comm>) -> Result<Self> {
let rank = comm.rank();
let size = comm.world_size();
let weight = vb.get_with_hints((), "weight", shard(1, rank, size))?;
Ok(Self::new(Linear::new(weight, None), comm))
}
}
#[derive(Clone)]
pub struct Cache {
#[allow(clippy::type_complexity)]
kvs: Arc<Mutex<Vec<Option<(Tensor, Tensor)>>>>,
cos: Tensor,
sin: Tensor,
}
impl Cache {
pub fn new(dtype: DType, config: &Config, device: &Device) -> Result<Self> {
// precompute freqs_cis
let n_elem = config.hidden_size / config.num_attention_heads;
let theta: Vec<_> = (0..n_elem)
.step_by(2)
.map(|i| 1f32 / config.rope_theta.powf(i as f32 / n_elem as f32))
.collect();
let theta = Tensor::new(theta.as_slice(), device)?;
let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)?
.to_dtype(DType::F32)?
.reshape((MAX_SEQ_LEN, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
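// theta[i] = rope_theta^(-2i / head_dim) are the per-pair rotation frequencies, and
// idx_theta[p, i] = p * theta[i] is a (MAX_SEQ_LEN, head_dim / 2) table of rotation angles
// whose cos/sin are cached below and looked up by `apply_rotary_emb`.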
// This is different from the paper, see:
// https://github.com/huggingface/transformers/blob/6112b1c6442aaf7affd2b0676a1cd4eee30c45cf/src/transformers/models/llama/modeling_llama.py#L112
let cos = idx_theta.cos()?.to_dtype(dtype)?;
let sin = idx_theta.sin()?.to_dtype(dtype)?;
Ok(Self {
kvs: Arc::new(Mutex::new(vec![None; config.num_hidden_layers])),
cos,
sin,
})
}
}
fn silu(xs: &Tensor) -> Result<Tensor> {
xs / (xs.neg()?.exp()? + 1.0)?
}
fn linear(size1: usize, size2: usize, vb: VarBuilder) -> Result<Linear> {
let weight = vb.get((size2, size1), "weight")?;
Ok(Linear::new(weight, None))
}
fn embedding(cfg: &Config, vb: VarBuilder) -> Result<Embedding> {
let embeddings = vb.get((cfg.vocab_size, cfg.hidden_size), "weight")?;
Ok(Embedding::new(embeddings, cfg.hidden_size))
}
struct CausalSelfAttention {
qkv_proj: TensorParallelColumnLinear,
o_proj: TensorParallelRowLinear,
num_attention_heads: usize,
num_key_value_heads: usize,
head_dim: usize,
cache: Cache,
}
impl CausalSelfAttention {
fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
let (_b_sz, _, seq_len, _hidden_size) = x.shape().dims4()?;
let cos = self.cache.cos.narrow(0, index_pos, seq_len)?;
let sin = self.cache.sin.narrow(0, index_pos, seq_len)?;
candle_nn::rotary_emb::rope(x, &cos, &sin)
}
fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> {
let (b_sz, seq_len, _) = x.shape().dims3()?;
let qkv = self.qkv_proj.forward(x)?;
let hidden_size = self.num_attention_heads * self.head_dim;
let q = qkv.i((.., .., ..self.num_attention_heads * self.head_dim))?;
let k = qkv.i((
..,
..,
self.num_attention_heads * self.head_dim
..self.num_attention_heads * self.head_dim
+ self.num_key_value_heads * self.head_dim,
))?;
let v = qkv.i((
..,
..,
self.num_attention_heads * self.head_dim + self.num_key_value_heads * self.head_dim..,
))?;
// todo!("Q {:?} K {:?} V {:?} - x {:?}", q.shape(), k.shape(), v.shape(), x.shape());
let q = q
.reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let mut v = v
.reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let q = self.apply_rotary_emb(&q, index_pos)?;
let mut k = self.apply_rotary_emb(&k, index_pos)?;
let mut cache = self.cache.kvs.lock().unwrap();
if let Some((cache_k, cache_v)) = &cache[block_idx] {
k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?;
v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?;
let k_seq_len = k.dims()[1];
if k_seq_len > MAX_SEQ_LEN {
k = k
.narrow(D::Minus1, k_seq_len - MAX_SEQ_LEN, MAX_SEQ_LEN)?
.contiguous()?
}
let v_seq_len = v.dims()[1];
if v_seq_len > 2 * MAX_SEQ_LEN {
v = v
.narrow(D::Minus1, v_seq_len - MAX_SEQ_LEN, MAX_SEQ_LEN)?
.contiguous()?
}
}
cache[block_idx] = Some((k.clone(), v.clone()));
let k = self.repeat_kv(k)?;
let v = self.repeat_kv(v)?;
let q = q.transpose(1, 2)?;
let k = k.transpose(1, 2)?;
let v = v.transpose(1, 2)?;
let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
let y = candle_flash_attn::flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)?
.reshape((b_sz, seq_len, hidden_size))?;
let y = self.o_proj.forward(&y)?;
Ok(y)
}
fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
let n_rep = self.num_attention_heads / self.num_key_value_heads;
candle_transformers::utils::repeat_kv(x, n_rep)
}
fn load(vb: VarBuilder, cache: &Cache, cfg: &Config, comm: Rc<Comm>) -> Result<Self> {
let qkv_proj = TensorParallelColumnLinear::load_multi(
vb.clone(),
&["q_proj", "k_proj", "v_proj"],
comm.clone(),
)?;
let o_proj = TensorParallelRowLinear::load(vb.pp("o_proj"), comm.clone())?;
Ok(Self {
qkv_proj,
o_proj,
num_attention_heads: cfg.num_attention_heads / comm.world_size(),
num_key_value_heads: cfg.num_key_value_heads() / comm.world_size(),
head_dim: cfg.hidden_size / cfg.num_attention_heads,
cache: cache.clone(),
})
}
}
struct Mlp {
c_fc1: TensorParallelColumnLinear,
c_fc2: TensorParallelColumnLinear,
c_proj: TensorParallelRowLinear,
}
impl Mlp {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?;
self.c_proj.forward(&x)
}
fn load(vb: VarBuilder, _cfg: &Config, comm: Rc<Comm>) -> Result<Self> {
let c_fc1 = TensorParallelColumnLinear::load(vb.pp("gate_proj"), comm.clone())?;
let c_fc2 = TensorParallelColumnLinear::load(vb.pp("up_proj"), comm.clone())?;
let c_proj = TensorParallelRowLinear::load(vb.pp("down_proj"), comm)?;
Ok(Self {
c_fc1,
c_fc2,
c_proj,
})
}
}
struct Block {
rms_1: RmsNorm,
attn: CausalSelfAttention,
rms_2: RmsNorm,
mlp: Mlp,
}
fn rms_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<RmsNorm> {
let weight = vb.get_with_hints(size, "weight", shard(0, 0, 1))?;
Ok(RmsNorm::new(weight, eps))
}
impl Block {
fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self {
Self {
rms_1,
attn,
rms_2,
mlp,
}
}
fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> {
let residual = x;
let x = self.rms_1.forward(x)?;
let x = (self.attn.forward(&x, index_pos, block_idx)? + residual)?;
let residual = &x;
let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?;
Ok(x)
}
fn load(vb: VarBuilder, cache: &Cache, cfg: &Config, comm: Rc<Comm>) -> Result<Self> {
let attn = CausalSelfAttention::load(vb.pp("self_attn"), cache, cfg, comm.clone())?;
let mlp = Mlp::load(vb.pp("mlp"), cfg, comm)?;
let input_layernorm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("input_layernorm"))?;
let post_attention_layernorm =
rms_norm(cfg.hidden_size, 1e-5, vb.pp("post_attention_layernorm"))?;
Ok(Self::new(
input_layernorm,
attn,
post_attention_layernorm,
mlp,
))
}
}
pub struct Llama {
wte: Embedding,
blocks: Vec<Block>,
ln_f: RmsNorm,
lm_head: Linear,
}
impl Llama {
fn new(wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear) -> Self {
Self {
wte,
blocks,
ln_f,
lm_head,
}
}
pub fn forward(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
let (_b_sz, seq_len) = x.shape().dims2()?;
let mut x = self.wte.forward(x)?;
for (block_idx, block) in self.blocks.iter().enumerate() {
x = block.forward(&x, index_pos, block_idx)?;
}
let x = self.ln_f.forward(&x)?;
let x = x.i((.., seq_len - 1, ..))?;
let logits = self.lm_head.forward(&x)?;
logits.to_dtype(DType::F32)
}
pub fn load(vb: VarBuilder, cache: &Cache, cfg: &Config, comm: Rc<Comm>) -> Result<Self> {
let wte = embedding(cfg, vb.pp("model.embed_tokens"))?;
let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
let norm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("model.norm"))?;
let blocks: Vec<_> = (0..cfg.num_hidden_layers)
.map(|i| {
Block::load(
vb.pp(&format!("model.layers.{i}")),
cache,
cfg,
comm.clone(),
)
})
.collect::<Result<Vec<_>>>()?;
Ok(Self::new(wte, blocks, norm, lm_head))
}
}
| candle/candle-examples/examples/llama_multiprocess/model.rs/0 | {
"file_path": "candle/candle-examples/examples/llama_multiprocess/model.rs",
"repo_id": "candle",
"token_count": 7294
} | 32 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use clap::Parser;
use std::io::Write;
use candle_transformers::generation::LogitsProcessor;
use candle_transformers::models::encodec;
use candle_transformers::models::metavoice::{adapters, gpt, tokenizers, transformer};
use candle_transformers::models::quantized_metavoice::transformer as qtransformer;
use candle::{DType, IndexOp, Tensor};
use candle_nn::VarBuilder;
use hf_hub::api::sync::Api;
use rand::{distr::Distribution, SeedableRng};
pub const ENCODEC_NTOKENS: u32 = 1024;
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum ArgDType {
F32,
F16,
Bf16,
}
enum Transformer {
Normal(transformer::Model),
Quantized(qtransformer::Model),
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
prompt: String,
/// Use the quantized version of the model.
#[arg(long)]
quantized: bool,
/// The guidance scale.
#[arg(long, default_value_t = 3.0)]
guidance_scale: f64,
/// The temperature used to generate samples.
#[arg(long, default_value_t = 1.0)]
temperature: f64,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The maximum number of tokens to generate for the first stage.
#[arg(long, default_value_t = 2000)]
max_tokens: u64,
/// The output file using the wav format.
#[arg(long, default_value = "out.wav")]
out_file: String,
#[arg(long)]
first_stage_meta: Option<String>,
#[arg(long)]
first_stage_weights: Option<String>,
#[arg(long)]
second_stage_weights: Option<String>,
#[arg(long)]
encodec_weights: Option<String>,
#[arg(long)]
spk_emb: Option<String>,
#[arg(long, default_value = "f32")]
dtype: ArgDType,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
let device = candle_examples::device(args.cpu)?;
let api = Api::new()?;
let repo = api.model("lmz/candle-metavoice".to_string());
let first_stage_meta = match &args.first_stage_meta {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("first_stage.meta.json")?,
};
let first_stage_meta: serde_json::Value =
serde_json::from_reader(&std::fs::File::open(first_stage_meta)?)?;
let first_stage_tokenizer = match first_stage_meta.as_object() {
None => anyhow::bail!("not a json object"),
Some(j) => match j.get("tokenizer") {
None => anyhow::bail!("no tokenizer key"),
Some(j) => j,
},
};
let fs_tokenizer = tokenizers::BPE::from_json(first_stage_tokenizer, 512)?;
let second_stage_weights = match &args.second_stage_weights {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("second_stage.safetensors")?,
};
let encodec_weights = match args.encodec_weights {
Some(w) => std::path::PathBuf::from(w),
None => Api::new()?
.model("facebook/encodec_24khz".to_string())
.get("model.safetensors")?,
};
let dtype = match args.dtype {
ArgDType::F32 => DType::F32,
ArgDType::F16 => DType::F16,
ArgDType::Bf16 => DType::BF16,
};
let first_stage_config = transformer::Config::cfg1b_v0_1();
let mut first_stage_model = if args.quantized {
let filename = match &args.first_stage_weights {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("first_stage_q4k.gguf")?,
};
let vb =
candle_transformers::quantized_var_builder::VarBuilder::from_gguf(filename, &device)?;
let first_stage_model = qtransformer::Model::new(&first_stage_config, vb)?;
Transformer::Quantized(first_stage_model)
} else {
let first_stage_weights = match &args.first_stage_weights {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("first_stage.safetensors")?,
};
let first_stage_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[first_stage_weights], dtype, &device)? };
let first_stage_model = transformer::Model::new(&first_stage_config, first_stage_vb)?;
Transformer::Normal(first_stage_model)
};
let second_stage_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[second_stage_weights], dtype, &device)? };
let second_stage_config = gpt::Config::cfg1b_v0_1();
let second_stage_model = gpt::Model::new(second_stage_config.clone(), second_stage_vb)?;
let encodec_device = if device.is_metal() {
&candle::Device::Cpu
} else {
&device
};
let encodec_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[encodec_weights], dtype, encodec_device)? };
let encodec_config = encodec::Config::default();
let encodec_model = encodec::Model::new(&encodec_config, encodec_vb)?;
println!("prompt: '{}'", args.prompt);
let prompt_tokens = fs_tokenizer.encode(&args.prompt)?;
let mut tokens = prompt_tokens.clone();
println!("{tokens:?}");
let spk_emb_file = match &args.spk_emb {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("spk_emb.safetensors")?,
};
let spk_emb = candle::safetensors::load(&spk_emb_file, &candle::Device::Cpu)?;
let spk_emb = match spk_emb.get("spk_emb") {
None => anyhow::bail!("missing spk_emb tensor in {spk_emb_file:?}"),
Some(spk_emb) => spk_emb.to_dtype(dtype)?,
};
let spk_emb = spk_emb.to_device(&device)?;
let mut logits_processor = LogitsProcessor::new(args.seed, Some(args.temperature), Some(0.95));
// First stage generation.
for index in 0..args.max_tokens {
let context_size = if index > 0 { 1 } else { tokens.len() };
let start_pos = tokens.len().saturating_sub(context_size);
let ctxt = &tokens[start_pos..];
let input = Tensor::new(ctxt, &device)?;
let input = Tensor::stack(&[&input, &input], 0)?;
let logits = match &mut first_stage_model {
Transformer::Normal(m) => m.forward(&input, &spk_emb, tokens.len() - context_size)?,
Transformer::Quantized(m) => {
m.forward(&input, &spk_emb, tokens.len() - context_size)?
}
};
let logits0 = logits.i((0, 0))?;
let logits1 = logits.i((1, 0))?;
let logits = ((logits0 * args.guidance_scale)? + logits1 * (1. - args.guidance_scale))?;
let logits = logits.to_dtype(DType::F32)?;
let next_token = logits_processor.sample(&logits)?;
tokens.push(next_token);
print!(".");
std::io::stdout().flush()?;
if next_token == 2048 {
break;
}
}
println!();
let fie2c = adapters::FlattenedInterleavedEncodec2Codebook::new(ENCODEC_NTOKENS);
let (text_ids, ids1, ids2) = fie2c.decode(&tokens);
println!("text ids len: {}", text_ids.len());
let mut rng = rand::rngs::StdRng::seed_from_u64(args.seed + 1337);
// TODO: Use the config rather than hardcoding the offset here.
let encoded_text: Vec<_> = prompt_tokens.iter().map(|v| v - 1024).collect();
let mut hierarchies_in1 =
[encoded_text.as_slice(), ids1.as_slice(), &[ENCODEC_NTOKENS]].concat();
let mut hierarchies_in2 = [
vec![ENCODEC_NTOKENS; encoded_text.len()].as_slice(),
ids2.as_slice(),
&[ENCODEC_NTOKENS],
]
.concat();
hierarchies_in1.resize(second_stage_config.block_size, ENCODEC_NTOKENS);
hierarchies_in2.resize(second_stage_config.block_size, ENCODEC_NTOKENS);
let in_x1 = Tensor::new(hierarchies_in1, &device)?;
let in_x2 = Tensor::new(hierarchies_in2, &device)?;
let in_x = Tensor::stack(&[in_x1, in_x2], 0)?.unsqueeze(0)?;
let logits = second_stage_model.forward(&in_x)?;
println!("sampling from logits...");
let mut codes = vec![];
for logits in logits.iter() {
let logits = logits.squeeze(0)?;
let (seq_len, _) = logits.dims2()?;
let mut codes_ = Vec::with_capacity(seq_len);
for step in 0..seq_len {
let logits = logits.i(step)?.to_dtype(DType::F32)?;
let logits = &(&logits / 1.0)?;
let prs = candle_nn::ops::softmax_last_dim(logits)?.to_vec1::<f32>()?;
let distr = rand::distr::weighted::WeightedIndex::new(prs.as_slice())?;
let sample = distr.sample(&mut rng) as u32;
codes_.push(sample)
}
codes.push(codes_)
}
let codes = Tensor::new(codes, &device)?.unsqueeze(0)?;
let codes = Tensor::cat(&[in_x, codes], 1)?;
println!("codes: {codes}");
let tilted_encodec = adapters::TiltedEncodec::new(ENCODEC_NTOKENS);
let codes = codes.i(0)?.to_vec2::<u32>()?;
let (text_ids, audio_ids) = tilted_encodec.decode(&codes);
println!("text_ids len: {:?}", text_ids.len());
let audio_ids = Tensor::new(audio_ids, encodec_device)?.unsqueeze(0)?;
println!("audio_ids shape: {:?}", audio_ids.shape());
let pcm = encodec_model.decode(&audio_ids)?;
println!("output pcm shape: {:?}", pcm.shape());
let pcm = pcm.i(0)?.i(0)?.to_dtype(DType::F32)?;
let pcm = candle_examples::audio::normalize_loudness(&pcm, 24_000, true)?;
let pcm = pcm.to_vec1::<f32>()?;
let mut output = std::fs::File::create(&args.out_file)?;
candle_examples::wav::write_pcm_as_wav(&mut output, &pcm, 24_000)?;
Ok(())
}
| candle/candle-examples/examples/metavoice/main.rs/0 | {
"file_path": "candle/candle-examples/examples/metavoice/main.rs",
"repo_id": "candle",
"token_count": 4561
} | 33 |
# candle-modernbert
ModernBERT is a bidirectional encoder-only language model. In this example it is used for the fill-mask task:
## Usage
```bash
cargo run --example modernbert --release -- --model modern-bert-large --prompt 'The capital of France is [MASK].'
```
```markdown
Sentence: 1 : The capital of France is Paris.
```
| candle/candle-examples/examples/modernbert/README.md/0 | {
"file_path": "candle/candle-examples/examples/modernbert/README.md",
"repo_id": "candle",
"token_count": 102
} | 34 |
# Orpheus
Orpheus is a 3B text-to-speech model based on Llama.
- Weights on HuggingFace
[canopylabs/orpheus-3b-0.1-ft](https://huggingface.co/canopylabs/orpheus-3b-0.1-ft).
- Code on GitHub [canopyai/Orpheus-TTS](https://github.com/canopyai/Orpheus-TTS).
```bash
cargo run --example orpheus --features cuda -r
```
| candle/candle-examples/examples/orpheus/README.md/0 | {
"file_path": "candle/candle-examples/examples/orpheus/README.md",
"repo_id": "candle",
"token_count": 135
} | 35 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::{Parser, ValueEnum};
use std::io::Write;
use tokenizers::Tokenizer;
use candle::quantized::gguf_file;
use candle::Tensor;
use candle_transformers::generation::{LogitsProcessor, Sampling};
use candle_examples::token_output_stream::TokenOutputStream;
use candle_transformers::models::quantized_qwen2::ModelWeights as Qwen2;
const DEFAULT_PROMPT: &str = "Write a function to count prime numbers up to N. ";
#[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)]
enum Which {
#[value(name = "0.5b")]
W2_0_5b,
#[value(name = "1.5b")]
W2_1_5b,
#[value(name = "7b")]
W2_7b,
#[value(name = "72b")]
W2_72b,
#[value(name = "deepseekr1-qwen7b")]
DeepseekR1Qwen7B,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// GGUF file to load, typically a .gguf file generated by the quantize command from llama.cpp
#[arg(long)]
model: Option<String>,
/// The initial prompt to run the model on.
#[arg(long)]
prompt: Option<String>,
/// The length of the sample to generate (in tokens).
#[arg(short = 'n', long, default_value_t = 1000)]
sample_len: usize,
/// The tokenizer config in json format.
#[arg(long)]
tokenizer: Option<String>,
/// The temperature used to generate samples, use 0 for greedy sampling.
#[arg(long, default_value_t = 0.8)]
temperature: f64,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// Only sample among the top K samples.
#[arg(long)]
top_k: Option<usize>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// Process prompt elements separately.
#[arg(long)]
split_prompt: bool,
/// Run on CPU rather than GPU even if a GPU is available.
#[arg(long)]
cpu: bool,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
/// The model size to use.
#[arg(long, default_value = "0.5b")]
which: Which,
}
impl Args {
fn tokenizer(&self) -> anyhow::Result<Tokenizer> {
let tokenizer_path = match &self.tokenizer {
Some(config) => std::path::PathBuf::from(config),
None => {
let api = hf_hub::api::sync::Api::new()?;
let repo = match self.which {
Which::W2_0_5b => "Qwen/Qwen2-0.5B-Instruct",
Which::W2_1_5b => "Qwen/Qwen2-1.5B-Instruct",
Which::W2_7b => "Qwen/Qwen2-7B-Instruct",
Which::W2_72b => "Qwen/Qwen2-72B-Instruct",
Which::DeepseekR1Qwen7B => "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
};
let api = api.model(repo.to_string());
api.get("tokenizer.json")?
}
};
Tokenizer::from_file(tokenizer_path).map_err(anyhow::Error::msg)
}
fn model(&self) -> anyhow::Result<std::path::PathBuf> {
let model_path = match &self.model {
Some(config) => std::path::PathBuf::from(config),
None => {
let (repo, filename, revision) = match self.which {
Which::W2_0_5b => (
"Qwen/Qwen2-0.5B-Instruct-GGUF",
"qwen2-0_5b-instruct-q4_0.gguf",
"main",
),
Which::W2_1_5b => (
"Qwen/Qwen2-1.5B-Instruct-GGUF",
"qwen2-1_5b-instruct-q4_0.gguf",
"main",
),
Which::W2_7b => (
"Qwen/Qwen2-7B-Instruct-GGUF",
"qwen2-7b-instruct-q4_0.gguf",
"main",
),
Which::W2_72b => (
"Qwen/Qwen2-72B-Instruct-GGUF",
"qwen2-72b-instruct-q4_0.gguf",
"main",
),
Which::DeepseekR1Qwen7B => (
"unsloth/DeepSeek-R1-Distill-Qwen-7B-GGUF",
"DeepSeek-R1-Distill-Qwen-7B-Q4_K_M.gguf",
"main",
),
};
let api = hf_hub::api::sync::Api::new()?;
api.repo(hf_hub::Repo::with_revision(
repo.to_string(),
hf_hub::RepoType::Model,
revision.to_string(),
))
.get(filename)?
}
};
Ok(model_path)
}
}
fn format_size(size_in_bytes: usize) -> String {
if size_in_bytes < 1_000 {
format!("{size_in_bytes}B")
} else if size_in_bytes < 1_000_000 {
format!("{:.2}KB", size_in_bytes as f64 / 1e3)
} else if size_in_bytes < 1_000_000_000 {
format!("{:.2}MB", size_in_bytes as f64 / 1e6)
} else {
format!("{:.2}GB", size_in_bytes as f64 / 1e9)
}
}
fn main() -> anyhow::Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
println!(
"temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
args.temperature, args.repeat_penalty, args.repeat_last_n
);
let model_path = args.model()?;
let mut file = std::fs::File::open(&model_path)?;
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let mut model = {
let model = gguf_file::Content::read(&mut file).map_err(|e| e.with_path(model_path))?;
let mut total_size_in_bytes = 0;
for (_, tensor) in model.tensor_infos.iter() {
let elem_count = tensor.shape.elem_count();
total_size_in_bytes +=
elem_count * tensor.ggml_dtype.type_size() / tensor.ggml_dtype.block_size();
}
println!(
"loaded {:?} tensors ({}) in {:.2}s",
model.tensor_infos.len(),
&format_size(total_size_in_bytes),
start.elapsed().as_secs_f32(),
);
Qwen2::from_gguf(model, &mut file, &device)?
};
println!("model built");
let tokenizer = args.tokenizer()?;
let mut tos = TokenOutputStream::new(tokenizer);
let prompt_str = args
.prompt
.clone()
.unwrap_or_else(|| DEFAULT_PROMPT.to_string());
let prompt_str = match args.which {
Which::DeepseekR1Qwen7B => format!("<｜User｜>{prompt_str}<｜Assistant｜>"),
_ => format!("<|im_start|>user\n{prompt_str}<|im_end|>\n<|im_start|>assistant\n"),
};
print!("formatted instruct prompt: {}", &prompt_str);
let tokens = tos
.tokenizer()
.encode(prompt_str, true)
.map_err(anyhow::Error::msg)?;
let tokens = tokens.get_ids();
let to_sample = args.sample_len.saturating_sub(1);
let mut all_tokens = vec![];
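// Pick the sampling strategy: greedy arg-max when the temperature is zero (or negative),
// otherwise temperature sampling optionally restricted by top-k, top-p (nucleus), or
// top-k followed by top-p.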
let mut logits_processor = {
let temperature = args.temperature;
let sampling = if temperature <= 0. {
Sampling::ArgMax
} else {
match (args.top_k, args.top_p) {
(None, None) => Sampling::All { temperature },
(Some(k), None) => Sampling::TopK { k, temperature },
(None, Some(p)) => Sampling::TopP { p, temperature },
(Some(k), Some(p)) => Sampling::TopKThenTopP { k, p, temperature },
}
};
LogitsProcessor::from_sampling(args.seed, sampling)
};
let start_prompt_processing = std::time::Instant::now();
let mut next_token = if !args.split_prompt {
let input = Tensor::new(tokens, &device)?.unsqueeze(0)?;
let logits = model.forward(&input, 0)?;
let logits = logits.squeeze(0)?;
logits_processor.sample(&logits)?
} else {
let mut next_token = 0;
for (pos, token) in tokens.iter().enumerate() {
let input = Tensor::new(&[*token], &device)?.unsqueeze(0)?;
let logits = model.forward(&input, pos)?;
let logits = logits.squeeze(0)?;
next_token = logits_processor.sample(&logits)?
}
next_token
};
let prompt_dt = start_prompt_processing.elapsed();
all_tokens.push(next_token);
if let Some(t) = tos.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
let eos_token = match args.which {
Which::DeepseekR1Qwen7B => "<｜end▁of▁sentence｜>",
_ => "<|im_end|>",
};
let eos_token = *tos.tokenizer().get_vocab(true).get(eos_token).unwrap();
let start_post_prompt = std::time::Instant::now();
let mut sampled = 0;
for index in 0..to_sample {
let input = Tensor::new(&[next_token], &device)?.unsqueeze(0)?;
let logits = model.forward(&input, tokens.len() + index)?;
let logits = logits.squeeze(0)?;
let logits = if args.repeat_penalty == 1. {
logits
} else {
let start_at = all_tokens.len().saturating_sub(args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
args.repeat_penalty,
&all_tokens[start_at..],
)?
};
next_token = logits_processor.sample(&logits)?;
all_tokens.push(next_token);
if let Some(t) = tos.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
sampled += 1;
if next_token == eos_token {
break;
};
}
if let Some(rest) = tos.decode_rest().map_err(candle::Error::msg)? {
print!("{rest}");
}
std::io::stdout().flush()?;
let dt = start_post_prompt.elapsed();
println!(
"\n\n{:4} prompt tokens processed: {:.2} token/s",
tokens.len(),
tokens.len() as f64 / prompt_dt.as_secs_f64(),
);
println!(
"{sampled:4} tokens generated: {:.2} token/s",
sampled as f64 / dt.as_secs_f64(),
);
Ok(())
}
| candle/candle-examples/examples/quantized-qwen2-instruct/main.rs/0 | {
"file_path": "candle/candle-examples/examples/quantized-qwen2-instruct/main.rs",
"repo_id": "candle",
"token_count": 5570
} | 36 |
//! Wrappers around the Python API of Gymnasium (the new version of OpenAI gym)
use candle::{Device, Result, Tensor};
use pyo3::prelude::*;
use pyo3::types::PyDict;
/// The return value for a step.
#[derive(Debug)]
pub struct Step<A> {
pub state: Tensor,
pub action: A,
pub reward: f64,
pub terminated: bool,
pub truncated: bool,
}
impl<A: Copy> Step<A> {
/// Returns a copy of this step changing the observation tensor.
pub fn copy_with_obs(&self, state: &Tensor) -> Step<A> {
Step {
state: state.clone(),
action: self.action,
reward: self.reward,
terminated: self.terminated,
truncated: self.truncated,
}
}
}
/// An OpenAI Gym session.
pub struct GymEnv {
env: PyObject,
action_space: usize,
observation_space: Vec<usize>,
}
fn w(res: PyErr) -> candle::Error {
candle::Error::wrap(res)
}
impl GymEnv {
/// Creates a new session of the specified OpenAI Gym environment.
pub fn new(name: &str) -> Result<GymEnv> {
Python::with_gil(|py| {
let gym = py.import_bound("gymnasium")?;
let make = gym.getattr("make")?;
let env = make.call1((name,))?;
let action_space = env.getattr("action_space")?;
let action_space = if let Ok(val) = action_space.getattr("n") {
val.extract()?
} else {
let action_space: Vec<usize> = action_space.getattr("shape")?.extract()?;
action_space[0]
};
let observation_space = env.getattr("observation_space")?;
let observation_space = observation_space.getattr("shape")?.extract()?;
Ok(GymEnv {
env: env.into(),
action_space,
observation_space,
})
})
.map_err(w)
}
/// Resets the environment, returning the observation tensor.
pub fn reset(&self, seed: u64) -> Result<Tensor> {
let state: Vec<f32> = Python::with_gil(|py| {
let kwargs = PyDict::new_bound(py);
kwargs.set_item("seed", seed)?;
let state = self.env.call_method_bound(py, "reset", (), Some(&kwargs))?;
state.bind(py).get_item(0)?.extract()
})
.map_err(w)?;
Tensor::new(state, &Device::Cpu)
}
/// Applies an environment step using the specified action.
pub fn step<A: pyo3::IntoPy<pyo3::Py<pyo3::PyAny>> + Clone>(
&self,
action: A,
) -> Result<Step<A>> {
let (state, reward, terminated, truncated) = Python::with_gil(|py| {
let step = self
.env
.call_method_bound(py, "step", (action.clone(),), None)?;
let step = step.bind(py);
let state: Vec<f32> = step.get_item(0)?.extract()?;
let reward: f64 = step.get_item(1)?.extract()?;
let terminated: bool = step.get_item(2)?.extract()?;
let truncated: bool = step.get_item(3)?.extract()?;
Ok((state, reward, terminated, truncated))
})
.map_err(w)?;
let state = Tensor::new(state, &Device::Cpu)?;
Ok(Step {
state,
action,
reward,
terminated,
truncated,
})
}
/// Returns the number of allowed actions for this environment.
pub fn action_space(&self) -> usize {
self.action_space
}
/// Returns the shape of the observation tensors.
pub fn observation_space(&self) -> &[usize] {
&self.observation_space
}
}
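// Hedged usage sketch (not part of the original file; assumes the Python `gymnasium` package
// is importable from the current environment): create a CartPole session, reset it, and take
// a single step with action 0.
#[allow(dead_code)]
fn cartpole_smoke_test() -> Result<()> {
    let env = GymEnv::new("CartPole-v1")?;
    let obs = env.reset(42)?;
    println!("observation shape: {:?}", obs.shape());
    let step = env.step(0usize)?;
    println!("reward: {}, terminated: {}", step.reward, step.terminated);
    Ok(())
}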
| candle/candle-examples/examples/reinforcement-learning/gym_env.rs/0 | {
"file_path": "candle/candle-examples/examples/reinforcement-learning/gym_env.rs",
"repo_id": "candle",
"token_count": 1743
} | 37 |
# candle-segment-anything: Segment-Anything Model
This example is based on Meta AI [Segment-Anything
Model](https://github.com/facebookresearch/segment-anything). This model
provides a robust and fast image segmentation pipeline that can be tweaked via
some prompting (requesting some points to be in the target mask, requesting some
points to be part of the background so _not_ in the target mask, specifying some
bounding box).
The default backbone can be replaced by the smaller and faster TinyViT model
based on [MobileSAM](https://github.com/ChaoningZhang/MobileSAM).
## Running an example
```bash
cargo run --example segment-anything --release -- \
--image candle-examples/examples/yolo-v8/assets/bike.jpg \
--use-tiny \
--point 0.6,0.6 --point 0.6,0.55
```
Running this command generates a `sam_merged.jpg` file containing the original
image with a blue overlay of the selected mask. The red dots represent the prompt
specified by `--point 0.6,0.6 --point 0.6,0.55`, this prompt is assumed to be part
of the target mask.
The values used for `--point` should be a comma delimited pair of float values.
They are proportional to the image dimension, i.e. use 0.5 for the image center.
Original image:

Segment results by prompting with a single point `--point 0.6,0.55`:

Segment results by prompting with multiple points `--point 0.6,0.6 --point 0.6,0.55`:

### Command-line flags
- `--use-tiny`: use the TinyViT based MobileSAM backbone rather than the default
one.
- `--point`: specifies the location of the target points.
- `--threshold`: sets the threshold value to be part of the mask, a negative
value results in a larger mask and can be specified via `--threshold=-1.2`; a combined
invocation is shown below.
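For instance, the flags above can be combined on the same sample image to produce a larger mask
from the same two points:
```bash
cargo run --example segment-anything --release -- \
  --image candle-examples/examples/yolo-v8/assets/bike.jpg \
  --use-tiny \
  --point 0.6,0.6 --point 0.6,0.55 \
  --threshold=-1.2
```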
| candle/candle-examples/examples/segment-anything/README.md/0 | {
"file_path": "candle/candle-examples/examples/segment-anything/README.md",
"repo_id": "candle",
"token_count": 575
} | 38 |
mod clip;
mod sampling;
mod vae;
use candle::{DType, IndexOp, Tensor};
use candle_transformers::models::mmdit::model::{Config as MMDiTConfig, MMDiT};
use crate::clip::StableDiffusion3TripleClipWithTokenizer;
use crate::vae::{build_sd3_vae_autoencoder, sd3_vae_vb_rename};
use anyhow::{Ok, Result};
use clap::Parser;
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Which {
#[value(name = "3-medium")]
V3Medium,
#[value(name = "3.5-large")]
V3_5Large,
#[value(name = "3.5-large-turbo")]
V3_5LargeTurbo,
#[value(name = "3.5-medium")]
V3_5Medium,
}
impl Which {
fn is_3_5(&self) -> bool {
match self {
Self::V3Medium => false,
Self::V3_5Large | Self::V3_5LargeTurbo | Self::V3_5Medium => true,
}
}
}
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The prompt to be used for image generation.
#[arg(
long,
default_value = "A cute rusty robot holding a candle torch in its hand, \
with glowing neon text \"LETS GO RUSTY\" displayed on its chest, \
bright background, high quality, 4k"
)]
prompt: String,
#[arg(long, default_value = "")]
uncond_prompt: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// Use flash_attn to accelerate attention operation in the MMDiT.
#[arg(long)]
use_flash_attn: bool,
/// The height in pixels of the generated image.
#[arg(long, default_value_t = 1024)]
height: usize,
/// The width in pixels of the generated image.
#[arg(long, default_value_t = 1024)]
width: usize,
/// The model to use.
#[arg(long, default_value = "3-medium")]
which: Which,
/// The number of denoising steps to run.
#[arg(long)]
num_inference_steps: Option<usize>,
/// CFG scale.
#[arg(long)]
cfg_scale: Option<f64>,
/// Time shift factor (alpha).
#[arg(long, default_value_t = 3.0)]
time_shift: f64,
/// Use Skip Layer Guidance (SLG) for the sampling.
/// Currently only supports Stable Diffusion 3.5 Medium.
#[arg(long)]
use_slg: bool,
/// The seed to use when generating random samples.
#[arg(long)]
seed: Option<u64>,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let Args {
prompt,
uncond_prompt,
cpu,
tracing,
use_flash_attn,
height,
width,
num_inference_steps,
cfg_scale,
time_shift,
seed,
which,
use_slg,
} = Args::parse();
let _guard = if tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let device = candle_examples::device(cpu)?;
let default_inference_steps = match which {
Which::V3_5Large => 28,
Which::V3_5LargeTurbo => 4,
Which::V3_5Medium => 28,
Which::V3Medium => 28,
};
let num_inference_steps = num_inference_steps.unwrap_or(default_inference_steps);
let default_cfg_scale = match which {
Which::V3_5Large => 4.0,
Which::V3_5LargeTurbo => 1.0,
Which::V3_5Medium => 4.0,
Which::V3Medium => 4.0,
};
let cfg_scale = cfg_scale.unwrap_or(default_cfg_scale);
let api = hf_hub::api::sync::Api::new()?;
let (mmdit_config, mut triple, vb) = if which.is_3_5() {
let sai_repo_for_text_encoders = {
let name = match which {
Which::V3_5Large => "stabilityai/stable-diffusion-3.5-large",
Which::V3_5LargeTurbo => "stabilityai/stable-diffusion-3.5-large-turbo",
// Unfortunately, stabilityai/stable-diffusion-3.5-medium doesn't have the monolithic text encoders that are usually
// placed under the text_encoders directory, as is the case in stabilityai/stable-diffusion-3.5-large and -large-turbo.
// To make things worse, it currently only has partitioned model.fp16-00001-of-00002.safetensors and model.fp16-00002-of-00002.safetensors
// under the text_encoder_3 directory, for the t5xxl_fp16.safetensors model. This means that we need to merge the two partitions
// to get the monolithic text encoders. This is not a trivial task.
// Since the situation can change, we do not want to spend efforts to handle the uniqueness of stabilityai/stable-diffusion-3.5-medium,
// which involves different paths and merging the two partitions files for t5xxl_fp16.safetensors.
// so for now, we'll use the text encoder models from the stabilityai/stable-diffusion-3.5-large repository.
// TODO: Change to "stabilityai/stable-diffusion-3.5-medium" once the maintainers of the repository add back the monolithic text encoders.
Which::V3_5Medium => "stabilityai/stable-diffusion-3.5-large",
Which::V3Medium => unreachable!(),
};
api.repo(hf_hub::Repo::model(name.to_string()))
};
let sai_repo_for_mmdit = {
let name = match which {
Which::V3_5Large => "stabilityai/stable-diffusion-3.5-large",
Which::V3_5LargeTurbo => "stabilityai/stable-diffusion-3.5-large-turbo",
Which::V3_5Medium => "stabilityai/stable-diffusion-3.5-medium",
Which::V3Medium => unreachable!(),
};
api.repo(hf_hub::Repo::model(name.to_string()))
};
let clip_g_file = sai_repo_for_text_encoders.get("text_encoders/clip_g.safetensors")?;
let clip_l_file = sai_repo_for_text_encoders.get("text_encoders/clip_l.safetensors")?;
let t5xxl_file = sai_repo_for_text_encoders.get("text_encoders/t5xxl_fp16.safetensors")?;
let model_file = {
let model_file = match which {
Which::V3_5Large => "sd3.5_large.safetensors",
Which::V3_5LargeTurbo => "sd3.5_large_turbo.safetensors",
Which::V3_5Medium => "sd3.5_medium.safetensors",
Which::V3Medium => unreachable!(),
};
sai_repo_for_mmdit.get(model_file)?
};
let triple = StableDiffusion3TripleClipWithTokenizer::new_split(
&clip_g_file,
&clip_l_file,
&t5xxl_file,
&device,
)?;
let vb = unsafe {
candle_nn::VarBuilder::from_mmaped_safetensors(&[model_file], DType::F16, &device)?
};
match which {
Which::V3_5Large => (MMDiTConfig::sd3_5_large(), triple, vb),
Which::V3_5LargeTurbo => (MMDiTConfig::sd3_5_large(), triple, vb),
Which::V3_5Medium => (MMDiTConfig::sd3_5_medium(), triple, vb),
Which::V3Medium => unreachable!(),
}
} else {
let sai_repo = {
let name = "stabilityai/stable-diffusion-3-medium";
api.repo(hf_hub::Repo::model(name.to_string()))
};
let model_file = sai_repo.get("sd3_medium_incl_clips_t5xxlfp16.safetensors")?;
let vb = unsafe {
candle_nn::VarBuilder::from_mmaped_safetensors(&[&model_file], DType::F16, &device)?
};
let triple = StableDiffusion3TripleClipWithTokenizer::new(vb.pp("text_encoders"))?;
(MMDiTConfig::sd3_medium(), triple, vb)
};
let (context, y) = triple.encode_text_to_embedding(prompt.as_str(), &device)?;
let (context_uncond, y_uncond) =
triple.encode_text_to_embedding(uncond_prompt.as_str(), &device)?;
// Drop the text model early to avoid using too much memory.
drop(triple);
let context = Tensor::cat(&[context, context_uncond], 0)?;
let y = Tensor::cat(&[y, y_uncond], 0)?;
if let Some(seed) = seed {
device.set_seed(seed)?;
}
let slg_config = if use_slg {
match which {
// https://github.com/Stability-AI/sd3.5/blob/4e484e05308d83fb77ae6f680028e6c313f9da54/sd3_infer.py#L388-L394
Which::V3_5Medium => Some(sampling::SkipLayerGuidanceConfig {
scale: 2.5,
start: 0.01,
end: 0.2,
layers: vec![7, 8, 9],
}),
_ => anyhow::bail!("--use-slg can only be used with 3.5-medium"),
}
} else {
None
};
let start_time = std::time::Instant::now();
let x = {
let mmdit = MMDiT::new(
&mmdit_config,
use_flash_attn,
vb.pp("model.diffusion_model"),
)?;
sampling::euler_sample(
&mmdit,
&y,
&context,
num_inference_steps,
cfg_scale,
time_shift,
height,
width,
slg_config,
)?
};
let dt = start_time.elapsed().as_secs_f32();
println!(
"Sampling done. {num_inference_steps} steps. {:.2}s. Average rate: {:.2} iter/s",
dt,
num_inference_steps as f32 / dt
);
let img = {
let vb_vae = vb.rename_f(sd3_vae_vb_rename).pp("first_stage_model");
let autoencoder = build_sd3_vae_autoencoder(vb_vae)?;
// Apply the TAESD3 scale factor, which seems to significantly improve the quality of the image.
// https://github.com/comfyanonymous/ComfyUI/blob/3c60ecd7a83da43d694e26a77ca6b93106891251/nodes.py#L721-L723
autoencoder.decode(&((x / 1.5305)? + 0.0609)?)?
};
let img = ((img.clamp(-1f32, 1f32)? + 1.0)? * 127.5)?.to_dtype(candle::DType::U8)?;
candle_examples::save_image(&img.i(0)?, "out.jpg")?;
Ok(())
}
| candle/candle-examples/examples/stable-diffusion-3/main.rs/0 | {
"file_path": "candle/candle-examples/examples/stable-diffusion-3/main.rs",
"repo_id": "candle",
"token_count": 4715
} | 39 |
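The prompt and negative-prompt embeddings above are concatenated along the batch dimension so that a single MMDiT forward pass yields both the conditional and unconditional predictions. As a hedged sketch of how such a pair is usually combined under classifier-free guidance (the actual combination lives inside sampling::euler_sample and may differ in detail; the helper name apply_cfg and the exact tensor ops are assumptions for illustration):
// Sketch only, not the code used by euler_sample: combine the (cond, uncond)
// halves of a batched prediction with classifier-free guidance.
fn apply_cfg(noise_pred: &candle::Tensor, cfg_scale: f64) -> candle::Result<candle::Tensor> {
    // Batch layout follows Tensor::cat(&[cond, uncond], 0) used above.
    let chunks = noise_pred.chunk(2, 0)?;
    let (cond, uncond) = (&chunks[0], &chunks[1]);
    // guided = uncond + cfg_scale * (cond - uncond)
    uncond.add(&(cond.sub(uncond)? * cfg_scale)?)
}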
use image::{DynamicImage, ImageBuffer};
use serde::Deserialize;
use std::collections::HashMap;
use candle::{DType, Device, Result, Tensor};
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct ProcessorConfig {
do_resize: bool,
height: u32,
width: u32,
do_rescale: bool,
do_normalize: bool,
image_mean: Vec<f32>,
image_std: Vec<f32>,
}
impl Default for ProcessorConfig {
fn default() -> Self {
Self {
do_resize: true,
height: 384,
width: 384,
do_rescale: true,
do_normalize: true,
image_mean: vec![0.5, 0.5, 0.5],
image_std: vec![0.5, 0.5, 0.5],
}
}
}
pub struct ViTImageProcessor {
do_resize: bool,
height: u32,
width: u32,
do_normalize: bool,
image_mean: Vec<f32>,
image_std: Vec<f32>,
}
impl ViTImageProcessor {
pub fn new(config: &ProcessorConfig) -> Self {
Self {
do_resize: config.do_resize,
height: config.height,
width: config.width,
do_normalize: config.do_normalize,
image_mean: config.image_mean.clone(),
image_std: config.image_std.clone(),
}
}
pub fn preprocess(&self, images: Vec<&str>) -> Result<Tensor> {
let height = self.height as usize;
let width = self.width as usize;
let channels = 3;
let images = self.load_images(images)?;
let resized_images: Vec<DynamicImage> = if self.do_resize {
images
.iter()
.map(|image| self.resize(image.clone(), None).unwrap())
.collect()
} else {
images
};
let normalized_images: Vec<Tensor> = if self.do_normalize {
resized_images
.iter()
.map(|image| self.normalize(image.clone(), None, None).unwrap())
.collect()
} else {
let resized_images: Vec<ImageBuffer<image::Rgb<u8>, Vec<u8>>> =
resized_images.iter().map(|image| image.to_rgb8()).collect();
let data = resized_images
.into_iter()
.map(|image| image.into_raw())
.collect::<Vec<Vec<u8>>>();
data.iter()
.map(|image| {
Tensor::from_vec(image.clone(), (height, width, channels), &Device::Cpu)
.unwrap()
.permute((2, 0, 1))
.unwrap()
})
.collect::<Vec<Tensor>>()
};
Tensor::stack(&normalized_images, 0)
}
fn resize(
&self,
image: image::DynamicImage,
size: Option<HashMap<String, u32>>,
) -> Result<image::DynamicImage> {
let (height, width) = match &size {
Some(size) => (size.get("height").unwrap(), size.get("width").unwrap()),
None => (&self.height, &self.width),
};
let resized_image =
image.resize_exact(*width, *height, image::imageops::FilterType::Triangle);
Ok(resized_image)
}
fn normalize(
&self,
image: image::DynamicImage,
mean: Option<Vec<f32>>,
std: Option<Vec<f32>>,
) -> Result<Tensor> {
let mean = match mean {
Some(mean) => mean,
None => self.image_mean.clone(),
};
let std = match std {
Some(std) => std,
None => self.image_std.clone(),
};
let mean = Tensor::from_vec(mean, (3, 1, 1), &Device::Cpu)?;
let std = Tensor::from_vec(std, (3, 1, 1), &Device::Cpu)?;
let image = image.to_rgb8();
let data = image.into_raw();
let height = self.height as usize;
let width = self.width as usize;
let channels = 3;
let data =
Tensor::from_vec(data, &[height, width, channels], &Device::Cpu)?.permute((2, 0, 1))?;
(data.to_dtype(DType::F32)? / 255.)?
.broadcast_sub(&mean)?
.broadcast_div(&std)
}
pub fn load_images(&self, image_path: Vec<&str>) -> Result<Vec<image::DynamicImage>> {
let mut images: Vec<image::DynamicImage> = Vec::new();
for path in image_path {
let img = image::ImageReader::open(path)?.decode().unwrap();
images.push(img);
}
Ok(images)
}
}
| candle/candle-examples/examples/trocr/image_processor.rs/0 | {
"file_path": "candle/candle-examples/examples/trocr/image_processor.rs",
"repo_id": "candle",
"token_count": 2273
} | 40 |
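For context, a minimal usage sketch of the processor defined above; the image path is a placeholder and errors are simply propagated:
// Hypothetical usage of ViTImageProcessor; "assets/example.jpg" is a placeholder path.
fn preprocess_one() -> candle::Result<candle::Tensor> {
    let config = ProcessorConfig::default(); // 384x384, mean = std = 0.5
    let processor = ViTImageProcessor::new(&config);
    // Returns a stacked (batch, 3, height, width) tensor, normalized to roughly [-1, 1].
    processor.preprocess(vec!["assets/example.jpg"])
}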
# Get the checkpoint from
# https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt
import torch
from safetensors.torch import save_file
data = torch.load("tiny.en.pt")
weights = {}
for k, v in data["model_state_dict"].items():
weights[k] = v.contiguous()
print(k, v.shape, v.dtype)
save_file(weights, "tiny.en.safetensors")
print(data["dims"])
| candle/candle-examples/examples/whisper/extract_weights.py/0 | {
"file_path": "candle/candle-examples/examples/whisper/extract_weights.py",
"repo_id": "candle",
"token_count": 183
} | 41 |
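The safetensors file produced by the script above can then be memory-mapped from the Rust side. A hedged sketch, assuming an F32 load on CPU (dtype and device are choices made for illustration):
// Sketch only: load the converted whisper weights with candle's VarBuilder.
fn load_tiny_en() -> anyhow::Result<()> {
    let device = candle::Device::Cpu;
    let vb = unsafe {
        candle_nn::VarBuilder::from_mmaped_safetensors(&["tiny.en.safetensors"], candle::DType::F32, &device)?
    };
    // `vb` can now be handed to the whisper model constructor.
    let _ = vb;
    Ok(())
}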
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=16
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1
| candle/candle-examples/examples/yolo-v3/yolo-v3.cfg/0 | {
"file_path": "candle/candle-examples/examples/yolo-v3/yolo-v3.cfg",
"repo_id": "candle",
"token_count": 3586
} | 42 |
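Each [yolo] head in the config above selects its anchor boxes from the shared anchors list through its mask indices. A small illustrative sketch of that lookup (not part of the config or of the candle example code):
// The nine (width, height) anchor pairs from the `anchors` lines above.
const ANCHORS: [(u32, u32); 9] = [
    (10, 13), (16, 30), (33, 23),
    (30, 61), (62, 45), (59, 119),
    (116, 90), (156, 198), (373, 326),
];

// mask = 6,7,8 (the first head) selects the three largest anchors:
// [(116, 90), (156, 198), (373, 326)].
fn anchors_for_mask(mask: &[usize]) -> Vec<(u32, u32)> {
    mask.iter().map(|&i| ANCHORS[i]).collect()
}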
[package]
name = "candle-flash-attn"
version = "0.9.1"
edition = "2021"
description = "Flash attention layer for the candle ML framework."
repository = "https://github.com/huggingface/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT OR Apache-2.0"
readme = "README.md"
[dependencies]
candle = { path = "../candle-core", features = ["cuda"], package = "candle-core", version = "0.9.1" }
half = { version = "2.3.1", features = ["num-traits"] }
[build-dependencies]
bindgen_cuda = "0.1.1"
anyhow = { version = "1", features = ["backtrace"] }
[dev-dependencies]
anyhow = { version = "1", features = ["backtrace"] }
candle-nn = { path = "../candle-nn", features = ["cuda"] }
[features]
default = []
cudnn = ["candle/cudnn"]
| candle/candle-flash-attn/Cargo.toml/0 | {
"file_path": "candle/candle-flash-attn/Cargo.toml",
"repo_id": "candle",
"token_count": 288
} | 43 |
/******************************************************************************
* Copyright (c) 2024, Tri Dao.
******************************************************************************/
#pragma once
#include <cute/tensor.hpp>
namespace flash {
using namespace cute;
template <typename Engine, typename Layout>
__forceinline__ __device__ void apply_mask(Tensor<Engine, Layout> &tensor, const int max_seqlen_k,
const int col_idx_offset_ = 0) {
// tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N))
static_assert(Layout::rank == 2, "Only support 2D Tensor");
const int lane_id = threadIdx.x % 32;
const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2;
#pragma unroll
for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
const int col_idx_base = col_idx_offset + nj * 8;
#pragma unroll
for (int j = 0; j < size<1, 0>(tensor); ++j) {
const int col_idx = col_idx_base + j;
if (col_idx >= max_seqlen_k) {
// Without the "make_coord" we get wrong results
#pragma unroll
for (int mi = 0; mi < size<0>(tensor); ++mi) {
tensor(mi, make_coord(j, nj)) = -INFINITY;
}
}
}
}
}
template <bool HasWSLeft=true, typename Engine, typename Layout>
__forceinline__ __device__ void apply_mask_local(Tensor<Engine, Layout> &tensor, const int col_idx_offset_,
const int max_seqlen_k, const int row_idx_offset,
const int max_seqlen_q, const int warp_row_stride,
const int window_size_left, const int window_size_right) {
// tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N))
static_assert(Layout::rank == 2, "Only support 2D Tensor");
const int lane_id = threadIdx.x % 32;
const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2;
#pragma unroll
for (int mi = 0; mi < size<0, 1>(tensor); ++mi) {
const int row_idx_base = row_idx_offset + mi * warp_row_stride;
#pragma unroll
for (int i = 0; i < size<0, 0>(tensor); ++i) {
const int row_idx = row_idx_base + i * 8;
const int col_idx_limit_left = std::max(0, row_idx + max_seqlen_k - max_seqlen_q - window_size_left);
const int col_idx_limit_right = std::min(max_seqlen_k, row_idx + 1 + max_seqlen_k - max_seqlen_q + window_size_right);
#pragma unroll
for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
const int col_idx_base = col_idx_offset + nj * 8;
#pragma unroll
for (int j = 0; j < size<1, 0>(tensor); ++j) {
const int col_idx = col_idx_base + j;
if (col_idx >= col_idx_limit_right || (HasWSLeft && col_idx < col_idx_limit_left)) {
tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY;
}
}
}
// if (cute::thread0()) {
// printf("mi = %d, i = %d, row_idx = %d, max_seqlen_k = %d\n", mi, i, row_idx, max_seqlen_k);
// print(tensor(make_coord(i, mi), _));
// // print(tensor(_, j + nj * size<1, 0>(tensor)));
// }
}
}
}
template <typename Engine, typename Layout>
__forceinline__ __device__ void apply_mask_causal(Tensor<Engine, Layout> &tensor, const int col_idx_offset_,
const int max_seqlen_k, const int row_idx_offset,
const int max_seqlen_q, const int warp_row_stride) {
// Causal masking is equivalent to local masking with window_size_left = infinity and window_size_right = 0
apply_mask_local</*HasWSLeft=*/false>(tensor, col_idx_offset_, max_seqlen_k, row_idx_offset,
max_seqlen_q, warp_row_stride, -1, 0);
}
template <typename Engine0, typename Layout0, typename Engine1, typename Layout1>
__forceinline__ __device__ void apply_mask_causal_w_idx(
Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> const &idx_rowcol,
const int col_idx_offset_, const int max_seqlen_k, const int row_idx_offset)
{
// tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N))
static_assert(Layout0::rank == 2, "Only support 2D Tensor");
static_assert(Layout1::rank == 2, "Only support 2D Tensor");
CUTE_STATIC_ASSERT_V(size<0>(tensor) == size<0>(idx_rowcol));
CUTE_STATIC_ASSERT_V(size<1>(tensor) == size<1>(idx_rowcol));
#pragma unroll
for (int mi = 0; mi < size<0>(tensor); ++mi) {
const int col_idx_limit = std::min(max_seqlen_k, 1 + row_idx_offset + get<0>(idx_rowcol(mi, 0)));
#pragma unroll
for (int ni = 0; ni < size<1, 1>(tensor); ++ni) {
if (col_idx_offset_ + get<1>(idx_rowcol(0, ni)) >= col_idx_limit) {
tensor(mi, ni) = -INFINITY;
}
}
// if (cute::thread0()) {
// printf("ni = %d, j = %d, col_idx = %d, max_seqlen_k = %d\n", ni, j, col_idx, max_seqlen_k);
// print(tensor(_, make_coord(j, ni)));
// // print(tensor(_, j + ni * size<1, 0>(tensor)));
// }
}
}
template <bool Is_causal, bool Is_local, bool Has_alibi>
struct Mask {
const int max_seqlen_k, max_seqlen_q;
const int window_size_left, window_size_right;
const float alibi_slope;
__forceinline__ __device__ Mask(const int max_seqlen_k, const int max_seqlen_q,
const int window_size_left, const int window_size_right,
const float alibi_slope=0.f)
: max_seqlen_k(max_seqlen_k)
, max_seqlen_q(max_seqlen_q)
, window_size_left(window_size_left)
, window_size_right(window_size_right)
, alibi_slope(!Has_alibi ? 0.0 : alibi_slope) {
};
// Causal_mask: whether this particular iteration needs causal masking
template <bool Causal_mask=false, bool Is_even_MN=true, typename Engine, typename Layout>
__forceinline__ __device__ void apply_mask(Tensor<Engine, Layout> &tensor_,
const int col_idx_offset_,
const int row_idx_offset,
const int warp_row_stride) {
static_assert(!(Causal_mask && Is_local), "Cannot be both causal and local");
static_assert(Layout::rank == 3, "Only support 3D Tensor");
static_assert(decltype(size<0>(tensor_))::value == 4, "First dimension must be 4");
static constexpr bool Need_masking = Has_alibi || Causal_mask || Is_local || !Is_even_MN;
// if (cute::thread0()) { printf("Has_alibi = %d, Causal_mask=%d, Is_local=%d, Is_even_MN = %d, Need_masking = %d\n", Has_alibi, Causal_mask, Is_local, Is_even_MN, Need_masking); }
if constexpr (Need_masking) {
// Reshape tensor_ from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N))
Tensor tensor = make_tensor(tensor_.data(), flash::convert_layout_acc_rowcol(tensor_.layout()));
// Do we need both row and column indices, or just column indices?
static constexpr bool Col_idx_only = !(Has_alibi && !Is_causal) && !Is_local && !Causal_mask;
const int lane_id = threadIdx.x % 32;
const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2;
if constexpr (Col_idx_only) {
#pragma unroll
for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
const int col_idx_base = col_idx_offset + nj * 8;
#pragma unroll
for (int j = 0; j < size<1, 0>(tensor); ++j) {
const int col_idx = col_idx_base + j;
#pragma unroll
for (int mi = 0; mi < size<0>(tensor); ++mi) {
// No causal, no local
if constexpr (Has_alibi) {
tensor(mi, make_coord(j, nj)) += alibi_slope * col_idx;
}
if constexpr (!Is_even_MN) {
if (col_idx >= max_seqlen_k) { tensor(mi, make_coord(j, nj)) = -INFINITY; }
}
}
}
}
} else {
#pragma unroll
for (int mi = 0; mi < size<0, 1>(tensor); ++mi) {
const int row_idx_base = row_idx_offset + mi * warp_row_stride;
#pragma unroll
for (int i = 0; i < size<0, 0>(tensor); ++i) {
const int row_idx = row_idx_base + i * 8;
const int col_idx_limit_left = std::max(0, row_idx + max_seqlen_k - max_seqlen_q - window_size_left);
const int col_idx_limit_right = std::min(max_seqlen_k, row_idx + 1 + max_seqlen_k - max_seqlen_q + window_size_right);
#pragma unroll
for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
const int col_idx_base = col_idx_offset + nj * 8;
#pragma unroll
for (int j = 0; j < size<1, 0>(tensor); ++j) {
const int col_idx = col_idx_base + j;
if constexpr (Has_alibi) {
if constexpr (Is_causal) {
tensor(make_coord(i, mi), make_coord(j, nj)) += alibi_slope * col_idx;
} else {
tensor(make_coord(i, mi), make_coord(j, nj)) -= alibi_slope * abs(row_idx + max_seqlen_k - max_seqlen_q - col_idx);
}
}
if constexpr (Causal_mask) {
if (col_idx >= col_idx_limit_right) {
tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY;
}
}
if constexpr (Is_local) {
if (col_idx >= col_idx_limit_right || col_idx < col_idx_limit_left) {
tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY;
}
}
if constexpr (!Causal_mask && !Is_local && !Is_even_MN) {
// Causal and Local already handles MN masking
if (col_idx >= max_seqlen_k) {
tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY;
}
}
}
}
}
}
}
}
};
};
} // namespace flash
| candle/candle-flash-attn/kernels/mask.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/mask.h",
"repo_id": "candle",
"token_count": 6255
} | 44 |
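The local/causal masking above keeps, for each query row, only the key columns that fall inside a window. A scalar Rust sketch of the limits computed by apply_mask_local (the kernel evaluates them per thread across the MMA tiles; this is just the arithmetic):
// Illustration only: columns in [left, right) are kept, everything outside is
// set to -inf before the softmax. A negative window_size_left disables the
// left limit (causal masking uses window_size_left = -1, window_size_right = 0).
fn col_limits(
    row_idx: i32,
    max_seqlen_q: i32,
    max_seqlen_k: i32,
    window_size_left: i32,
    window_size_right: i32,
) -> (i32, i32) {
    let shift = max_seqlen_k - max_seqlen_q;
    let left = if window_size_left < 0 {
        0
    } else {
        (row_idx + shift - window_size_left).max(0)
    };
    let right = (row_idx + 1 + shift + window_size_right).min(max_seqlen_k);
    (left, right)
}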
#include "cuda_fp16.h"
#include "cuda_bf16.h"
#include "cuda_fp8.h"
// Table showing which features are supported on which compute capability
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/#features-and-technical-specifications
// FIXME: the minimum compute capabilities are just guesses since the table is not specific enough
#if (__CUDACC_VER_MAJOR__ < 12 || __CUDACC_VER_MINOR__ < 2) && __CUDA_ARCH__ < 800
__device__ __forceinline__ __half __hmax_nan(__half a, __half b) {
return __hisnan(a) ? a : (__hisnan(b) ? b : __hmax(a, b));
}
__device__ __forceinline__ __half __hmin_nan(__half a, __half b) {
return __hisnan(a) ? a : (__hisnan(b) ? b : __hmin(a, b));
}
#endif
#if __CUDA_ARCH__ < 600
// Copied from https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
#if __CUDA_ARCH__ < 700
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomicadd
// The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
// Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119
//__device__ __half atomicAdd(__half *address, __half val) {
// unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
// unsigned int old = *address_as_ui;
// unsigned int assumed;
// bool unaligned = (size_t) address & 2;
// do {
// assumed = old;
// unsigned int hsum;
// hsum = unaligned ? (old >> 16) : (old & 0xffff);
// hsum = __half_as_ushort(__ushort_as_half(hsum) + val);
// old = atomicCAS(address_as_ui, assumed,
// unaligned ? (old & 0xffff) | (hsum << 16) : (old & 0xffff0000) | hsum
// );
// } while (assumed != old);
// return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff));
//}
#endif
__device__ __forceinline__ __half atomicMaxf(__half* address, __half val) {
#if __CUDA_ARCH__ < 700
// On older GPUs we do not have access to atomicCAS for shorts, so we have to do some trickery.
// Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119
unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
unsigned int old = *address_as_ui;
unsigned int assumed;
bool unaligned = (size_t) address & 2;
do {
assumed = old;
unsigned int hmax;
hmax = unaligned ? (old >> 16) : (old & 0xffff);
hmax = __half_as_ushort(__hmax_nan(val, __ushort_as_half(hmax)));
old = atomicCAS(address_as_ui, assumed,
unaligned ? (old & 0xffff) | (hmax << 16) : (old & 0xffff0000) | hmax
);
} while (assumed != old);
return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff));
#else
// Based on https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions
unsigned short int* casted_address = (unsigned short int*)address;
unsigned short int old = *casted_address;
unsigned short int assumed;
do {
assumed = old;
old = atomicCAS(casted_address, assumed, __half_as_ushort(__hmax_nan(val, __ushort_as_half(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __ushort_as_half(old);
#endif
}
// atomicMax is not implemented for floats,
// solution copied from https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda
__device__ __forceinline__ float atomicMaxf(float * addr, float value) {
if (signbit(value)) {
return __uint_as_float(atomicMin((unsigned int *)addr, __float_as_uint(value)));
} else {
return __int_as_float(atomicMax((int *)addr, __float_as_int(value)));
}
}
__device__ __forceinline__ double atomicMaxf(double * addr, double value) {
if (signbit(value)) {
return __longlong_as_double(atomicMin((unsigned long long int *)addr, __double_as_longlong(value)));
} else {
return __longlong_as_double(atomicMax((long long int *)addr, __double_as_longlong(value)));
}
}
__device__ __forceinline__ __half atomicMinf(__half* address, __half val) {
#if __CUDA_ARCH__ < 700
// On older GPUs we do not have access to atomicCAS for shorts, so we have to do some trickery.
// Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119
unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
unsigned int old = *address_as_ui;
unsigned int assumed;
bool unaligned = (size_t) address & 2;
do {
assumed = old;
unsigned int hmin;
hmin = unaligned ? (old >> 16) : (old & 0xffff);
hmin = __half_as_ushort(__hmin_nan(val, __ushort_as_half(hmin)));
old = atomicCAS(address_as_ui, assumed,
unaligned ? (old & 0xffff) | (hmin << 16) : (old & 0xffff0000) | hmin
);
} while (assumed != old);
return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff));
#else
// Based on https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions
unsigned short int* casted_address = (unsigned short int*)address;
unsigned short int old = *casted_address;
unsigned short int assumed;
do {
assumed = old;
old = atomicCAS(casted_address, assumed, __half_as_ushort(__hmin_nan(val, __ushort_as_half(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __ushort_as_half(old);
#endif
}
// atomicMin is not implemented for floats,
// solution copied from https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda
__device__ __forceinline__ float atomicMinf(float * addr, float value) {
if (signbit(value)) {
return __uint_as_float(atomicMax((unsigned int *)addr, __float_as_uint(value)));
} else {
return __int_as_float(atomicMin((int *)addr, __float_as_int(value)));
}
}
__device__ __forceinline__ double atomicMinf(double * addr, double value) {
if (signbit(value)) {
return __longlong_as_double(atomicMax((unsigned long long int *)addr, __double_as_longlong(value)));
} else {
return __longlong_as_double(atomicMin((long long int *)addr, __double_as_longlong(value)));
}
}
| candle/candle-kernels/src/compatibility.cuh/0 | {
"file_path": "candle/candle-kernels/src/compatibility.cuh",
"repo_id": "candle",
"token_count": 2748
} | 45 |
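The double atomicAdd fallback above is a classic compare-and-swap retry loop on the raw bit pattern. The same pattern written in Rust for comparison (illustration only, not code used by candle):
use std::sync::atomic::{AtomicU64, Ordering};

// Rough Rust analogue of the CUDA atomicAdd(double*) fallback: retry a CAS on
// the f64 bit pattern until no other thread has raced in between.
fn atomic_add_f64(cell: &AtomicU64, val: f64) -> f64 {
    let mut old = cell.load(Ordering::Relaxed);
    loop {
        let new = (f64::from_bits(old) + val).to_bits();
        match cell.compare_exchange_weak(old, new, Ordering::Relaxed, Ordering::Relaxed) {
            // Like atomicAdd, return the value that was stored before the update.
            Ok(prev) => return f64::from_bits(prev),
            Err(prev) => old = prev,
        }
    }
}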
#include <metal_stdlib>
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
METAL_FUNC uint get_strided_index(
uint idx,
constant size_t &num_dims,
constant size_t *dims,
constant size_t *strides
) {
uint strided_i = 0;
for (uint d = 0; d < num_dims; d++) {
uint dim_idx = num_dims - 1 - d;
strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
idx /= dims[dim_idx];
}
return strided_i;
}
using namespace metal;
#define BINARY(FN, TYPENAME, OUT_TYPENAME, FN_NAME, FN_NAME_STRIDED) \
kernel void FN_NAME( \
constant size_t &dim, \
device const TYPENAME *left, \
device const TYPENAME *right, \
device OUT_TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
if (tid >= dim) { \
return; \
} \
TYPENAME x = left[tid]; \
TYPENAME y = right[tid]; \
output[tid] = OUT_TYPENAME(FN); \
}\
kernel void FN_NAME_STRIDED( \
constant size_t &dim, \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *left_strides, \
constant size_t *right_strides, \
device const TYPENAME *left, \
device const TYPENAME *right, \
device OUT_TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
if (tid >= dim) { \
return; \
} \
TYPENAME x = left[get_strided_index(tid, num_dims, dims, left_strides)]; \
TYPENAME y = right[get_strided_index(tid, num_dims, dims, right_strides)]; \
output[tid] = OUT_TYPENAME(FN); \
}
#define BINARY_OP(FN, NAME) \
BINARY(FN, float, float, NAME##_f32, NAME##_f32_strided); \
BINARY(FN, half, half, NAME##_f16, NAME##_f16_strided); \
BINARY(FN, uint32_t, uint32_t, NAME##_u32, NAME##_u32_strided); \
BINARY(FN, uint8_t, uint8_t, NAME##_u8, NAME##_u8_strided);
#define BINARY_OP_OUT(NAME, FN) \
BINARY(FN, float, uint8_t, NAME##_f32, NAME##_f32_strided); \
BINARY(FN, half, uint8_t, NAME##_f16, NAME##_f16_strided); \
BINARY(FN, uint32_t, uint8_t, NAME##_u32, NAME##_u32_strided); \
BINARY(FN, uint8_t, uint8_t, NAME##_u8, NAME##_u8_strided);
#define INT64_BINARY_OP(NAME, FN) \
BINARY(FN, int64_t, int64_t, NAME##_i64, NAME##_i64_strided);
#define INT64_BINARY_OP_OUT(NAME, FN) \
BINARY(FN, int64_t, uint8_t, NAME##_i64, NAME##_i64_strided);
#define BFLOAT_BINARY_OP(FN, NAME) \
BINARY(FN, bfloat, bfloat, NAME##_bf16, NAME##_bf16_strided);
#define BFLOAT_BINARY_OP_OUT(NAME, FN) \
BINARY(FN, bfloat, uint8_t, NAME##_bf16, NAME##_bf16_strided);
BINARY_OP(x + y, add)
BINARY_OP(x - y, sub)
BINARY_OP(x * y, mul)
BINARY_OP(x / y, div)
BINARY_OP(MIN(x, y), min)
BINARY_OP(MAX(x, y), max)
BINARY_OP_OUT(eq, x == y)
BINARY_OP_OUT(ne, x != y)
BINARY_OP_OUT(le, x <= y)
BINARY_OP_OUT(lt, x < y)
BINARY_OP_OUT(ge, x >= y)
BINARY_OP_OUT(gt, x > y)
#if __METAL_VERSION__ >= 220
INT64_BINARY_OP(add, x + y)
INT64_BINARY_OP(sub, x - y)
INT64_BINARY_OP(mul, x * y)
INT64_BINARY_OP(div, x / y)
INT64_BINARY_OP(min, MIN(x, y))
INT64_BINARY_OP(max, MAX(x, y))
INT64_BINARY_OP_OUT(eq, x == y)
INT64_BINARY_OP_OUT(ne, x != y)
INT64_BINARY_OP_OUT(le, x <= y)
INT64_BINARY_OP_OUT(lt, x < y)
INT64_BINARY_OP_OUT(ge, x >= y)
INT64_BINARY_OP_OUT(gt, x > y)
#endif
#if defined(__HAVE_BFLOAT__)
BFLOAT_BINARY_OP(x + y, add)
BFLOAT_BINARY_OP(x - y, sub)
BFLOAT_BINARY_OP(x * y, mul)
BFLOAT_BINARY_OP(x / y, div)
BFLOAT_BINARY_OP(MIN(x, y), min)
BFLOAT_BINARY_OP(MAX(x, y), max)
BFLOAT_BINARY_OP_OUT(eq, x == y)
BFLOAT_BINARY_OP_OUT(ne, x != y)
BFLOAT_BINARY_OP_OUT(le, x <= y)
BFLOAT_BINARY_OP_OUT(lt, x < y)
BFLOAT_BINARY_OP_OUT(ge, x >= y)
BFLOAT_BINARY_OP_OUT(gt, x > y)
#endif
| candle/candle-metal-kernels/src/binary.metal/0 | {
"file_path": "candle/candle-metal-kernels/src/binary.metal",
"repo_id": "candle",
"token_count": 1861
} | 46 |
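get_strided_index above turns a flat element index into the offset of that element in a non-contiguous layout by peeling off one dimension at a time, innermost first. The same computation in Rust for reference (illustration only):
// Mirrors the Metal helper: decompose `idx` over `dims` starting from the
// innermost dimension and accumulate the strided offset.
fn get_strided_index(mut idx: usize, dims: &[usize], strides: &[usize]) -> usize {
    let mut strided = 0;
    for d in (0..dims.len()).rev() {
        strided += (idx % dims[d]) * strides[d];
        idx /= dims[d];
    }
    strided
}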
use super::*;
use half::{bf16, f16};
use metal::{Buffer, Device, MTLResourceOptions};
use rand::prelude::SliceRandom;
use rand::thread_rng;
use rand::Rng;
fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> {
let ptr = buffer.contents() as *const T;
assert!(!ptr.is_null());
let slice = unsafe { std::slice::from_raw_parts(ptr, n) };
slice.to_vec()
}
fn new_buffer<T>(device: &Device, data: &[T]) -> Buffer {
let options = MTLResourceOptions::StorageModeManaged;
let ptr = data.as_ptr() as *const c_void;
let size = std::mem::size_of_val(data) as u64;
device.new_buffer_with_data(ptr, size, options)
}
fn device() -> Device {
Device::system_default().unwrap()
}
fn approx(v: Vec<f32>, digits: i32) -> Vec<f32> {
let b = 10f32.powi(digits);
v.iter().map(|t| f32::round(t * b) / b).collect()
}
fn approx_f16(v: Vec<f16>, digits: i32) -> Vec<f32> {
let b = 10f32.powi(digits);
v.iter().map(|t| f32::round(t.to_f32() * b) / b).collect()
}
fn approx_bf16(v: Vec<bf16>, digits: i32) -> Vec<f32> {
let b = 10f32.powi(digits);
v.iter().map(|t| f32::round(t.to_f32() * b) / b).collect()
}
fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
let input = BufferOffset {
buffer: &input,
offset_in_bytes: 0,
};
let output = new_buffer(&device, v);
call_unary_contiguous(
&device,
command_buffer,
&kernels,
name,
v.len(),
input,
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, v.len())
}
fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged;
let left = new_buffer(&device, x);
let right = new_buffer(&device, y);
let output = device.new_buffer(std::mem::size_of_val(x) as u64, options);
call_binary_contiguous(
&device,
command_buffer,
&kernels,
name,
x.len(),
BufferOffset::zero_offset(&left),
BufferOffset::zero_offset(&right),
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, x.len())
}
fn run_strided<T: Clone>(
v: &[T],
kernel: unary::strided::Kernel,
shape: &[usize],
strides: &[usize],
offset: usize,
) -> Vec<T> {
let device = device();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
let input = BufferOffset {
buffer: &input,
offset_in_bytes: offset,
};
let output_b = new_buffer(&device, v);
let output = BufferOffset {
buffer: &output_b,
offset_in_bytes: 0,
};
let kernels = Kernels::new();
call_unary_strided(
&device,
command_buffer,
&kernels,
kernel,
shape,
input,
strides,
output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output_b, v.len())
}
#[test]
fn cos_f32() {
let v = vec![1.0f32, 2.0, 3.0];
let results = run(&v, unary::contiguous::cos::FLOAT);
let expected: Vec<_> = v.iter().map(|v| v.cos()).collect();
assert_eq!(approx(results, 4), vec![0.5403, -0.4161, -0.99]);
assert_eq!(approx(expected, 4), vec![0.5403, -0.4161, -0.99]);
let v = vec![1.0f32; 10_000];
let results = run(&v, unary::contiguous::cos::FLOAT);
let expected: Vec<_> = v.iter().map(|v| v.cos()).collect();
assert_eq!(approx(results, 4), vec![0.5403; 10_000]);
assert_eq!(approx(expected, 4), vec![0.5403; 10_000]);
}
#[test]
fn cos_f32_strided() {
let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let shape = vec![6];
let strides = vec![1];
let offset = 0;
let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset);
let expected: Vec<_> = v.iter().map(|v| v.cos()).collect();
assert_eq!(
approx(results, 4),
vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602]
);
assert_eq!(
approx(expected, 4),
vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602]
);
// Contiguous
let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let shape = vec![3, 2];
let strides = vec![2, 1];
let offset = 0;
let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset);
let expected: Vec<_> = v.iter().map(|v| v.cos()).collect();
assert_eq!(
approx(results, 4),
vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602]
);
assert_eq!(
approx(expected, 4),
vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602]
);
// Transposed
let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let shape = vec![3, 2];
let strides = vec![1, 3];
let offset = 0;
let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset);
let expected: Vec<_> = v.iter().map(|v| v.cos()).collect();
assert_eq!(
approx(results, 4),
vec![0.5403, -0.6536, -0.4161, 0.2837, -0.99, 0.9602]
);
assert_eq!(
approx(expected, 4),
vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602]
);
// Very large
let v = vec![1.0f32; 10_000];
let shape = vec![2, 5_000];
let strides = vec![2, 1];
let offset = 0;
let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset);
let expected: Vec<_> = v.iter().map(|v| v.cos()).collect();
assert_eq!(approx(results, 4), vec![0.5403; 10_000]);
assert_eq!(approx(expected, 4), vec![0.5403; 10_000]);
}
#[test]
fn cos_strided_random() {
let v: Vec<_> = (0..10_000).map(|_| rand::random::<f32>()).collect();
let shape = vec![5_000, 2];
let strides = vec![1, 5_000];
let offset = 0;
let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset);
let expected: Vec<_> = v.iter().map(|v| v.cos()).collect();
assert_eq!(approx(vec![results[0]], 4), approx(vec![expected[0]], 4));
assert_eq!(
approx(vec![results[1]], 4),
approx(vec![expected[5_000]], 4)
);
assert_eq!(approx(vec![results[2]], 4), approx(vec![expected[1]], 4));
assert_eq!(
approx(vec![results[3]], 4),
approx(vec![expected[5_001]], 4)
);
assert_eq!(
approx(vec![results[5_000]], 4),
approx(vec![expected[2_500]], 4)
);
}
#[test]
fn gelu_f16() {
let v: Vec<f16> = [-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0]
.iter()
.map(|v| f16::from_f32(*v))
.collect();
let expected: Vec<f32> = vec![-0.0, -0.16, 0.0, 0.84, 1.96, 3.0, 10.0, 20.0];
let results = run(&v, unary::contiguous::gelu::HALF);
assert_eq!(approx_f16(results, 2), expected);
}
#[test]
fn gelu_f32() {
let v: Vec<f32> = vec![-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0];
let expected: Vec<f32> = vec![-0.0, -0.159, 0.0, 0.841, 1.955, 2.996, 10.0, 20.0];
let results = run(&v, unary::contiguous::gelu::FLOAT);
assert_eq!(approx(results, 3), expected);
}
#[test]
fn silu_f16() {
let v: Vec<f16> = [-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0]
.iter()
.map(|v| f16::from_f32(*v))
.collect();
let expected: Vec<f32> = vec![-0.0, -0.27, 0.0, 0.73, 1.76, 2.86, 10.0, 20.0];
let results = run(&v, unary::contiguous::silu::HALF);
assert_eq!(approx_f16(results, 2), expected);
}
#[test]
fn silu_f32() {
let v: Vec<f32> = vec![-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0];
let expected: Vec<f32> = vec![-0.0, -0.269, 0.0, 0.731, 1.762, 2.858, 10.0, 20.0];
let results = run(&v, unary::contiguous::silu::FLOAT);
assert_eq!(approx(results, 3), expected);
}
#[test]
fn binary_add_f32() {
let left = vec![1.0f32, 2.0, 3.0];
let right = vec![2.0f32, 3.1, 4.2];
let results = run_binary(&left, &right, binary::contiguous::add::FLOAT);
let expected: Vec<_> = left
.iter()
.zip(right.iter())
.map(|(&x, &y)| x + y)
.collect();
assert_eq!(approx(results, 4), vec![3.0f32, 5.1, 7.2]);
assert_eq!(approx(expected, 4), vec![3.0f32, 5.1, 7.2]);
}
#[test]
fn binary_ops_bf16() {
let lhs: Vec<bf16> = [1.1f32, 2.2, 3.3].into_iter().map(bf16::from_f32).collect();
let rhs: Vec<bf16> = [4.2f32, 5.5f32, 6.91f32]
.into_iter()
.map(bf16::from_f32)
.collect();
macro_rules! binary_op {
($opname:ident, $opexpr:expr) => {{
let results = run_binary(&lhs, &rhs, binary::contiguous::$opname::BFLOAT);
let expected: Vec<bf16> = lhs
.iter()
.zip(rhs.iter())
.map(|(x, y): (&bf16, &bf16)| $opexpr(*x, *y))
.collect();
assert_eq!(results, expected);
}};
}
binary_op!(add, |x, y| x + y);
binary_op!(sub, |x, y| x - y);
binary_op!(mul, |x, y| x * y);
binary_op!(div, |x, y| x / y);
binary_op!(min, |x: bf16, y| x.min(y));
binary_op!(max, |x: bf16, y| x.max(y));
}
fn run_cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
let options = MTLResourceOptions::StorageModeManaged;
let size = (v.len() * std::mem::size_of::<U>()) as u64;
let output = device.new_buffer(size, options);
call_cast_contiguous(
&device,
command_buffer,
&kernels,
name,
v.len(),
BufferOffset::zero_offset(&input),
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, v.len())
}
#[test]
fn cast_f32() {
let v_f64 = [1.0f64, 2.0, 3.0];
let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect();
let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect();
let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect();
let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect();
let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect();
let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect();
// f32 -> f16
let results: Vec<half::f16> = run_cast(&v_f32, "cast_f32_f16");
assert_eq!(results, v_f16);
// f32 -> bf16
let results: Vec<bf16> = run_cast(&v_f32, "cast_f32_bf16");
assert_eq!(results, v_bf16);
// f32 -> u32
let results: Vec<u32> = run_cast(&v_f32, "cast_f32_u32");
assert_eq!(results, v_u32);
// f32 -> u8
let results: Vec<u8> = run_cast(&v_f32, "cast_f32_u8");
assert_eq!(results, v_u8);
// f32 -> i64
let results: Vec<i64> = run_cast(&v_f32, "cast_f32_i64");
assert_eq!(results, v_i64);
}
#[test]
fn cast_f16() {
let v_f64 = [1.0f64, 2.0, 3.0];
let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect();
let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect();
let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect();
let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect();
let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect();
let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect();
// f16 -> f32
let results: Vec<f32> = run_cast(&v_f16, "cast_f16_f32");
assert_eq!(results, v_f32);
// f16 -> bf16
let results: Vec<bf16> = run_cast(&v_f16, "cast_f16_bf16");
assert_eq!(results, v_bf16);
// f16 -> u32
let results: Vec<u32> = run_cast(&v_f16, "cast_f16_u32");
assert_eq!(results, v_u32);
// f16 -> u8
let results: Vec<u8> = run_cast(&v_f16, "cast_f16_u8");
assert_eq!(results, v_u8);
// f16 -> i64
let results: Vec<i64> = run_cast(&v_f16, "cast_f16_i64");
assert_eq!(results, v_i64);
}
#[test]
fn cast_bf16() {
let v_f64 = [1.0f64, 2.0, 3.0];
let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect();
let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect();
let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect();
let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect();
let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect();
let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect();
// bf16 -> f32
let results: Vec<f32> = run_cast(&v_bf16, "cast_bf16_f32");
assert_eq!(results, v_f32);
// bf16 -> f16
let results: Vec<f16> = run_cast(&v_bf16, "cast_bf16_f16");
assert_eq!(results, v_f16);
// bf16 -> u32
let results: Vec<u32> = run_cast(&v_bf16, "cast_bf16_u32");
assert_eq!(results, v_u32);
// bf16 -> u8
let results: Vec<u8> = run_cast(&v_bf16, "cast_bf16_u8");
assert_eq!(results, v_u8);
// bf16 -> i64
let results: Vec<i64> = run_cast(&v_bf16, "cast_bf16_i64");
assert_eq!(results, v_i64);
}
#[test]
fn cast_u32() {
let v_f64 = [1.0f64, 2.0, 3.0];
let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect();
let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect();
let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect();
let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect();
let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect();
let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect();
// u32 -> f32
let results: Vec<f32> = run_cast(&v_u32, "cast_u32_f32");
assert_eq!(results, v_f32);
// u32 -> f16
let results: Vec<f16> = run_cast(&v_u32, "cast_u32_f16");
assert_eq!(results, v_f16);
// u32 -> bf16
let results: Vec<bf16> = run_cast(&v_u32, "cast_u32_bf16");
assert_eq!(results, v_bf16);
// u32 -> u8
let results: Vec<u8> = run_cast(&v_u32, "cast_u32_u8");
assert_eq!(results, v_u8);
// u32 -> i64
let results: Vec<i64> = run_cast(&v_u32, "cast_u32_i64");
assert_eq!(results, v_i64);
}
#[test]
fn cast_u8() {
let v_f64 = [1.0f64, 2.0, 3.0];
let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect();
let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect();
let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect();
let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect();
let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect();
let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect();
// u8 -> f32
let results: Vec<f32> = run_cast(&v_u8, "cast_u8_f32");
assert_eq!(results, v_f32);
// u8 -> f16
let results: Vec<f16> = run_cast(&v_u8, "cast_u8_f16");
assert_eq!(results, v_f16);
// u8 -> bf16
let results: Vec<bf16> = run_cast(&v_u8, "cast_u8_bf16");
assert_eq!(results, v_bf16);
// u8 -> u32
let results: Vec<u32> = run_cast(&v_u8, "cast_u8_u32");
assert_eq!(results, v_u32);
// u8 -> i64
let results: Vec<i64> = run_cast(&v_u8, "cast_u8_i64");
assert_eq!(results, v_i64);
}
#[test]
fn cast_i64() {
let v_f64 = [1.0f64, 2.0, 3.0];
let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect();
let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect();
let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect();
let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect();
let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect();
let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect();
// i64 -> f32
let results: Vec<f32> = run_cast(&v_i64, "cast_i64_f32");
assert_eq!(results, v_f32);
// i64 -> f16
let results: Vec<f16> = run_cast(&v_i64, "cast_i64_f16");
assert_eq!(results, v_f16);
// i64 -> bf16
let results: Vec<bf16> = run_cast(&v_i64, "cast_i64_bf16");
assert_eq!(results, v_bf16);
// i64 -> u32
let results: Vec<u32> = run_cast(&v_i64, "cast_i64_u32");
assert_eq!(results, v_u32);
// i64 -> u8
let results: Vec<u8> = run_cast(&v_i64, "cast_i64_u8");
assert_eq!(results, v_u8);
}
fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
let output = new_buffer(&device, v);
let size = v.len();
call_affine(
&device,
command_buffer,
&kernels,
"affine_f32",
size,
BufferOffset::zero_offset(&input),
&output,
mul as f32,
add as f32,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, v.len())
}
fn run_affine_strided<T: Clone>(
v: &[T],
shape: &[usize],
strides: &[usize],
mul: f64,
add: f64,
) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
let output = new_buffer(&device, v);
call_affine_strided(
&device,
command_buffer,
&kernels,
"affine_f32_strided",
shape,
BufferOffset::zero_offset(&input),
strides,
&output,
mul as f32,
add as f32,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
let len: usize = shape.iter().product();
read_to_vec(&output, len)
}
#[test]
fn affine() {
let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
let mul = 1.5;
let add = 1.1;
let result = run_affine(&input, mul, add);
assert_eq!(result, vec![2.6, 4.1, 5.6, 7.1, 8.6, 10.1, 11.6, 13.1]);
let input = [1.0f32; 40_000];
let mul = 1.5;
let add = 1.1;
let result = run_affine(&input, mul, add);
assert_eq!(result, vec![2.6; 40_000]);
}
#[test]
fn affine_strided() {
let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
let mul = 1.5;
let add = 1.1;
let shape = [4];
let strides = [2];
let result = run_affine_strided(&input, &shape, &strides, mul, add);
// 1 on 2
assert_eq!(result, vec![2.6, 5.6, 8.6, 11.6]);
}
fn run_mlx_sort<T: Clone>(v: &[T], ncols: usize) -> Vec<u32> {
let nrows = v.len() / ncols;
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
let indexes = vec![0u32; v.len()];
let output = new_buffer(&device, &indexes);
call_mlx_arg_sort(
&device,
command_buffer,
&kernels,
DType::F32,
nrows,
ncols,
BufferOffset::zero_offset(&input),
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, v.len())
}
#[test]
fn mlx_sort() {
use rand::SeedableRng;
use rand_distr::Distribution;
let input: Vec<_> = (0..8).map(|v| v as f32).collect();
let result = run_mlx_sort(&input, 4);
assert_eq!(result, [0, 1, 2, 3, 0, 1, 2, 3]);
let input: Vec<_> = (0..8).rev().map(|v| v as f32).collect();
let result = run_mlx_sort(&input, 4);
assert_eq!(result, [3, 2, 1, 0, 3, 2, 1, 0]);
let input: Vec<_> = (0..1000).rev().map(|v| v as f32).collect();
let result = run_mlx_sort(&input, 200);
let out: Vec<_> = (0..200).rev().collect();
assert_eq!(&result[..200], out);
assert_eq!(&result[200..400], out);
assert_eq!(&result[400..600], out);
assert_eq!(&result[600..800], out);
assert_eq!(&result[800..], out);
// Multi-block test
let ncols = 16000;
let mut rng = rand::rngs::StdRng::seed_from_u64(299792458);
let normal = rand_distr::Normal::new(0.0, 1.0).unwrap();
let input: Vec<f32> = (0..ncols * 16).map(|_| normal.sample(&mut rng)).collect();
let result = run_mlx_sort(&input, ncols);
for start in 0..16 {
let slice = &input[start * ncols..(start + 1) * ncols];
let result = &result[start * ncols..(start + 1) * ncols];
let mut perm: Vec<usize> = (0..ncols).collect();
perm.sort_by(|i1, i2| slice[*i1].total_cmp(&slice[*i2]));
let perm: Vec<_> = perm.into_iter().map(|v| v as u32).collect();
assert_eq!(perm, result);
}
}
#[test]
fn index_select() {
let embedding = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0];
let shape = [5, 2];
let stride = [2, 1];
let ids = [0u32, 4, 2];
let dim = 0;
let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_f32");
assert_eq!(result, vec![1.0f32, 2.0, 9.0, 10.0, 5.0, 6.0]);
let embedding = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0];
let shape = [2, 5];
let stride = [1, 2];
let ids = [0u32, 1, 0];
let dim = 0;
let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_f32");
assert_eq!(
result,
vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 1.0f32, 2.0, 3.0, 4.0, 5.0]
);
}
#[test]
fn index_select_strided() {
let embedding = (0..16).map(|x| x as f32).collect::<Vec<_>>();
let shape = [2, 2];
let stride = [2, 4];
let ids = [0u32];
let dim = 0;
let result = run_index_select_strided(&embedding, &shape, &stride, &ids, dim, "is_u32_f32");
assert_eq!(result, vec![0.0, 4.0]);
}
#[test]
fn index_select_f16() {
let embedding: Vec<_> = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
.into_iter()
.map(f16::from_f32)
.collect();
let shape = [5, 2];
let stride = [2, 1];
let ids = [0u32, 4, 2];
let dim = 0;
let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_f16");
assert_eq!(
approx_f16(result, 4),
vec![1.0f32, 2.0, 9.0, 10.0, 5.0, 6.0]
);
}
#[test]
fn index_select_is_u32_bf16() {
let embedding: Vec<bf16> = (1..=10).map(|x| bf16::from_f32(x as f32)).collect();
let shape = [5, 2];
let stride = [2, 1];
let ids = [0u32, 4, 2];
let dim = 0;
let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_bf16");
assert_eq!(
approx_bf16(result, 4),
vec![1.0f32, 2.0, 9.0, 10.0, 5.0, 6.0]
);
}
#[test]
fn index_select_is_u8_bf16() {
let embedding: Vec<bf16> = (1..=10).map(|x| bf16::from_f32(x as f32)).collect();
let shape = [5, 2];
let stride = [2, 1];
let ids = [0u8, 4, 2];
let dim = 0;
let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u8_bf16");
assert_eq!(
approx_bf16(result, 4),
vec![1.0f32, 2.0, 9.0, 10.0, 5.0, 6.0]
);
}
#[test]
fn index_select_dim1() {
let embedding = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0];
let shape = [5, 2];
let stride = [2, 1];
let ids = [0u32, 1, 0];
let dim = 1;
let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_f32");
assert_eq!(
result,
vec![1.0f32, 2.0, 1.0, 3.0, 4.0, 3.0, 5.0, 6.0, 5.0, 7.0, 8.0f32, 7.0, 9.0, 10.0, 9.0]
);
}
fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>(
embeddings: &[T],
shape: &[usize],
stride: &[usize],
ids: &[I],
dim: usize,
name: &'static str,
) -> Vec<T> {
let device = Device::system_default().expect("no device found");
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let embeddings_buffer = new_buffer(&device, embeddings);
let ids_buffer = new_buffer(&device, ids);
let left_size: usize = shape[..dim].iter().product();
let right_size: usize = shape[dim + 1..].iter().product();
let dst_el = ids.len() * left_size * right_size;
let dst_buffer = new_buffer(&device, &vec![0.0f32; dst_el]);
let kernels = Kernels::new();
call_index_select(
&device,
command_buffer,
&kernels,
name,
shape,
ids.len(),
dim,
true,
shape,
stride,
BufferOffset::zero_offset(&embeddings_buffer),
BufferOffset::zero_offset(&ids_buffer),
&dst_buffer,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&dst_buffer, dst_el)
}
fn run_index_select_strided<T: Clone, I: Clone + std::fmt::Debug>(
embeddings: &[T],
shape: &[usize],
stride: &[usize],
ids: &[I],
dim: usize,
name: &'static str,
) -> Vec<T> {
let device = Device::system_default().expect("no device found");
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let embeddings_buffer = new_buffer(&device, embeddings);
let ids_buffer = new_buffer(&device, ids);
let left_size: usize = shape[..dim].iter().product();
let right_size: usize = shape[dim + 1..].iter().product();
let dst_el = ids.len() * left_size * right_size;
let dst_buffer = new_buffer(&device, &vec![0.0f32; dst_el]);
let kernels = Kernels::new();
call_index_select(
&device,
command_buffer,
&kernels,
name,
shape,
ids.len(),
dim,
false,
shape,
stride,
BufferOffset::zero_offset(&embeddings_buffer),
BufferOffset::zero_offset(&ids_buffer),
&dst_buffer,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&dst_buffer, dst_el)
}
#[test]
fn cos_f16() {
let v: Vec<f16> = [1.0f32, 2.0, 3.0]
.iter()
.map(|v| f16::from_f32(*v))
.collect();
let results = run(&v, unary::contiguous::cos::HALF);
let expected: Vec<f16> = v.iter().map(|v| f16::from_f32(v.to_f32().cos())).collect();
assert_eq!(approx_f16(results, 2), vec![0.54, -0.42, -0.99]);
assert_eq!(approx_f16(expected, 2), vec![0.54, -0.42, -0.99]);
}
fn run_reduce<T, U: Clone>(
v: &[T],
in_length: usize,
out_length: usize,
name: &'static str,
) -> Vec<U> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
let options = MTLResourceOptions::StorageModeManaged;
let output = device.new_buffer((out_length * core::mem::size_of::<U>()) as u64, options);
let shape = vec![in_length];
match call_reduce_contiguous(
&device,
command_buffer,
&kernels,
name,
&shape,
out_length,
BufferOffset::zero_offset(&input),
&output,
) {
Ok(_) => {}
Err(e) => {
println!("{e}");
panic!();
}
}
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, out_length)
}
fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'static str) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
let output = new_buffer(&device, v);
call_last_softmax(
&device,
command_buffer,
&kernels,
name,
v.len(),
last_dim,
&input,
0,
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, v.len())
}
const fn create_array<const N: usize>() -> [f32; N] {
let mut array: [f32; N] = [0.0; N];
let mut i = 1;
while i <= N {
array[i - 1] = i as f32;
i += 1;
}
array
}
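// Reference result for the sum reduction: split the sequence 1..=N into D contiguous chunks and return
// the sum of each chunk.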
const fn correct_sum<const N: usize, const D: usize>() -> [f32; D] {
let mut sum = 0;
let mut results: [f32; D] = [0.0; D];
let mut i = 1;
let mut j = 1;
while i <= N {
sum += i;
i += 1;
if i > j * N / D {
results[j - 1] = sum as f32;
j += 1;
sum = 0;
}
}
results
}
const fn correct_max<const N: usize, const D: usize>() -> [f32; D] {
let mut results: [f32; D] = [0.0; D];
let mut i = 1;
let mut j = 1;
while i <= N {
i += 1;
if i > j * (N / D) {
results[j - 1] = (i - 1) as f32;
j += 1;
}
}
results
}
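// Reference result for the argmax reduction: for each of the D contiguous chunks, record the index
// (into the full input array) of its maximum element.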
fn correct_argmax<const N: usize, const D: usize>(arr: [f32; N]) -> [u32; D] {
let mut max = 0.0;
let mut max_index: u32 = 0;
let mut results: [u32; D] = [0; D];
let mut i = 0;
let mut j = 1;
while i <= N {
if i >= (j * N / D) {
results[j - 1] = max_index;
max = 0.0;
max_index = 0;
j += 1;
}
if i == N {
break;
}
if arr[i] > max {
max = arr[i];
max_index = i as u32;
}
i += 1;
}
results
}
fn reduce_sum_case<const N: usize, const D: usize>() {
let mut v = create_array::<N>();
if D == 1 {
// For a single output the reduction is order-independent, so shuffle the input to harden the test.
v.shuffle(&mut thread_rng());
}
let results = run_reduce(&v, N, D, "fast_sum_f32");
assert_eq!(approx(results, 4), correct_sum::<N, D>());
}
fn reduce_max_case<const N: usize, const D: usize>() {
let mut v = create_array::<N>();
if D == 1 {
// For a single output the reduction is order-independent, so shuffle the input to harden the test.
v.shuffle(&mut thread_rng());
}
let results = run_reduce(&v, N, D, "fast_max_f32");
assert_eq!(approx(results, 4), correct_max::<N, D>());
}
fn reduce_argmax_case<const N: usize, const D: usize>() {
let mut v = create_array::<N>();
if D == 1 {
// For a single output the reduction is order-independent, so shuffle the input to harden the test.
v.shuffle(&mut thread_rng());
}
let results: Vec<u32> = run_reduce(&v, N, D, "fast_argmax_f32");
assert_eq!(results, correct_argmax::<N, D>(v));
}
#[test]
fn reduce_sum1() {
reduce_sum_case::<9, 1>();
reduce_sum_case::<6, 1>();
reduce_sum_case::<10, 1>();
reduce_sum_case::<64, 1>();
reduce_sum_case::<128, 1>();
reduce_sum_case::<256, 1>();
reduce_sum_case::<512, 1>();
reduce_sum_case::<1024, 1>();
reduce_sum_case::<2048, 1>();
reduce_sum_case::<4096, 1>();
}
#[test]
fn reduce_sum2() {
reduce_sum_case::<6, 2>();
reduce_sum_case::<10, 2>();
reduce_sum_case::<64, 2>();
reduce_sum_case::<128, 2>();
reduce_sum_case::<256, 2>();
reduce_sum_case::<512, 2>();
reduce_sum_case::<1024, 2>();
reduce_sum_case::<2048, 2>();
reduce_sum_case::<4096, 2>();
}
#[test]
fn reduce_max() {
reduce_max_case::<6, 1>();
reduce_max_case::<9, 1>();
reduce_max_case::<10, 1>();
reduce_max_case::<64, 1>();
reduce_max_case::<128, 1>();
reduce_max_case::<256, 1>();
reduce_max_case::<512, 1>();
reduce_max_case::<1024, 1>();
reduce_max_case::<2048, 1>();
reduce_max_case::<4096, 1>();
reduce_max_case::<6, 2>();
reduce_max_case::<10, 2>();
reduce_max_case::<64, 2>();
reduce_max_case::<128, 2>();
reduce_max_case::<256, 2>();
reduce_max_case::<512, 2>();
reduce_max_case::<1024, 2>();
reduce_max_case::<2048, 2>();
reduce_max_case::<4096, 2>();
reduce_max_case::<6, 3>();
reduce_max_case::<10, 3>();
reduce_max_case::<64, 3>();
reduce_max_case::<128, 3>();
reduce_max_case::<256, 3>();
reduce_max_case::<512, 3>();
reduce_max_case::<1024, 3>();
reduce_max_case::<2048, 3>();
reduce_max_case::<4096, 3>();
}
#[test]
fn reduce_argmax() {
reduce_argmax_case::<6, 1>();
reduce_argmax_case::<9, 1>();
reduce_argmax_case::<10, 1>();
reduce_argmax_case::<64, 1>();
reduce_argmax_case::<128, 1>();
reduce_argmax_case::<256, 1>();
reduce_argmax_case::<512, 1>();
reduce_argmax_case::<1024, 1>();
reduce_argmax_case::<2048, 1>();
}
#[test]
fn reduce_argmax2() {
reduce_argmax_case::<6, 2>();
reduce_argmax_case::<10, 2>();
reduce_argmax_case::<64, 2>();
reduce_argmax_case::<128, 2>();
reduce_argmax_case::<256, 2>();
reduce_argmax_case::<512, 2>();
reduce_argmax_case::<1024, 2>();
reduce_argmax_case::<2048, 2>();
reduce_argmax_case::<4096, 2>();
}
#[test]
fn softmax() {
let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let last_dim = 6;
let results = run_softmax(&v, last_dim, "softmax_f32");
assert_eq!(
approx(results, 4),
vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337]
);
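// With a single large logit (20.0) at the start of each 4096-wide row, softmax should put essentially
// all of the probability mass on that element.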
let last_dim = 4096;
let n = 200;
let mut v = vec![0.0; n * last_dim];
for i in 0..n {
v[i * last_dim] = 20.0;
}
let results = run_softmax(&v, last_dim, "softmax_f32");
let results = approx(results, 4);
assert_eq!(
results.iter().map(|&s| s.round() as usize).sum::<usize>(),
n
);
assert_eq!(results[0], 1.0);
assert_eq!(results[1], 0.0);
assert_eq!(results[last_dim], 1.0);
assert_eq!(results[2 * last_dim], 1.0);
let v = vec![0.0f32, 1.0, 2.0, 3.0, 4.0, 5.0];
let last_dim = 6;
let results = run_softmax(&v, last_dim, "softmax_f32");
assert_eq!(
approx(results, 4),
vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337]
);
let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let last_dim = 3;
let results = run_softmax(&v, last_dim, "softmax_f32");
assert_eq!(
approx(results, 4),
vec![0.0900, 0.2447, 0.6652, 0.0900, 0.2447, 0.6652]
);
let v = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]
.iter()
.map(|v| f16::from_f32(*v))
.collect::<Vec<_>>();
let last_dim = 6;
let results = run_softmax(&v, last_dim, "softmax_f16");
assert_eq!(
approx_f16(results, 4),
vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2332, 0.6338]
);
let v = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]
.iter()
.map(|v| bf16::from_f32(*v))
.collect::<Vec<_>>();
let last_dim = 6;
let results = run_softmax(&v, last_dim, "softmax_bf16");
assert_eq!(
approx_bf16(results, 4),
vec![0.0043, 0.0116, 0.0315, 0.0859, 0.2324, 0.6328]
);
}
#[allow(clippy::too_many_arguments)]
fn run_where_cond<I: Clone, T: Clone>(
shape: &[usize],
cond: &[I],
(cond_stride, cond_offset): (Vec<usize>, usize),
left_true: &[T],
(left_stride, left_offset): (Vec<usize>, usize),
right_false: &[T],
(_right_stride, _right_offset): (Vec<usize>, usize),
name: &'static str,
) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged;
let length = cond.len();
let cond = device.new_buffer_with_data(
cond.as_ptr() as *const core::ffi::c_void,
std::mem::size_of_val(cond) as u64,
options,
);
let left = device.new_buffer_with_data(
left_true.as_ptr() as *const core::ffi::c_void,
(length * core::mem::size_of::<T>()) as u64,
options,
);
let right = device.new_buffer_with_data(
right_false.as_ptr() as *const core::ffi::c_void,
(length * core::mem::size_of::<T>()) as u64,
options,
);
let output = device.new_buffer((length * core::mem::size_of::<T>()) as u64, options);
let cond = BufferOffset {
buffer: &cond,
offset_in_bytes: cond_offset,
};
let left = BufferOffset {
buffer: &left,
offset_in_bytes: left_offset,
};
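// Note: the dedicated right-hand layout arguments are unused; the cond offset (and, in the call below,
// the cond stride) are reused for the right operand.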
let right = BufferOffset {
buffer: &right,
offset_in_bytes: cond_offset,
};
call_where_cond_strided(
&device,
command_buffer,
&kernels,
name,
shape,
cond,
&cond_stride,
left,
&left_stride,
right,
&cond_stride,
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, length)
}
#[test]
fn where_cond() {
let shape = vec![6];
let cond = vec![0u8, 1, 0, 0, 1, 1];
let cond_l = (vec![1], 0);
let left_true = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let left_l = (vec![1], 0);
let right_false = vec![-1.0f32, -2.0, -3.0, -4.0, -5.0, -6.0];
let right_l = (vec![1], 0);
let results = run_where_cond(
&shape,
&cond,
cond_l,
&left_true,
left_l,
&right_false,
right_l,
"where_u8_f32",
);
assert_eq!(approx(results, 4), vec![-1.0f32, 2.0, -3.0, -4.0, 5.0, 6.0]);
}
#[test]
fn where_cond_u32_f32() {
let shape = vec![6];
let cond = vec![0u32, 1, 0, 0, 1, 1];
let cond_l = (vec![1], 0);
let left_true = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let left_l = (vec![1], 0);
let right_false = vec![-1.0f32, -2.0, -3.0, -4.0, -5.0, -6.0];
let right_l = (vec![1], 0);
let results = run_where_cond(
&shape,
&cond,
cond_l,
&left_true,
left_l,
&right_false,
right_l,
"where_u32_f32",
);
assert_eq!(approx(results, 4), vec![-1.0f32, 2.0, -3.0, -4.0, 5.0, 6.0]);
}
#[allow(clippy::too_many_arguments)]
fn run_mlx_gemm<T: Clone>(
dtype: GemmDType,
(b, m, n, k): (usize, usize, usize, usize),
lhs: &[T],
lhs_stride: &[usize],
lhs_offset: usize,
rhs: &[T],
rhs_stride: &[usize],
rhs_offset: usize,
) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged;
let lhs = device.new_buffer_with_data(
lhs.as_ptr() as *const core::ffi::c_void,
std::mem::size_of_val(lhs) as u64,
options,
);
let rhs = device.new_buffer_with_data(
rhs.as_ptr() as *const core::ffi::c_void,
std::mem::size_of_val(rhs) as u64,
options,
);
let length = b * m * n;
let output = device.new_buffer((length * core::mem::size_of::<T>()) as u64, options);
call_mlx_gemm(
&device,
command_buffer,
&kernels,
dtype,
(b, m, n, k),
lhs_stride,
lhs_offset,
&lhs,
rhs_stride,
rhs_offset,
&rhs,
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, length)
}
#[test]
fn mlx_gemm() {
let (b, m, n, k) = (1, 2, 4, 3);
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
let results = run_mlx_gemm(
GemmDType::F32,
(b, m, n, k),
&lhs,
&[m * k, k, 1],
0,
&rhs,
&[n * k, n, 1],
0,
);
assert_eq!(
approx(results, 4),
vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0]
);
let (b, m, n, k) = (2, 2, 4, 3);
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
let results = run_mlx_gemm(
GemmDType::F32,
(b, m, n, k),
&lhs,
&[m * k, k, 1],
0,
&rhs,
&[n * k, n, 1],
0,
);
assert_eq!(
approx(results, 4),
vec![
20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0, 344.0, 365.0, 386.0, 407.0, 488.0,
518.0, 548.0, 578.0
]
);
// OFFSET
let (b, m, n, k) = (2, 2, 4, 3);
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
// Manually set batch_size=1 and offset the rhs by 12 elements * 4 bytes (the size of an f32), i.e. skip the first rhs batch.
let results = run_mlx_gemm(
GemmDType::F32,
(1, m, n, k),
&lhs,
&[m * k, k, 1],
0,
&rhs,
&[n * k, n, 1],
12 * 4,
);
assert_eq!(
approx(results, 4),
vec![56.0, 59.0, 62.0, 65.0, 200.0, 212.0, 224.0, 236.0]
);
// bgemm sanity test
{
let (b, m, n, k) = (1, 2, 4, 3);
let lhs: Vec<bf16> = (0..b * m * k).map(|f| bf16::from_f32(f as f32)).collect();
let rhs: Vec<bf16> = (0..b * n * k).map(|f| bf16::from_f32(f as f32)).collect();
let results = run_mlx_gemm(
GemmDType::BF16,
(b, m, n, k),
&lhs,
&[m * k, k, 1],
0,
&rhs,
&[n * k, n, 1],
0,
);
assert_eq!(
approx_bf16(results, 4),
vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0]
);
}
{
// hgemm sanity test
let (b, m, n, k) = (1, 2, 4, 3);
let lhs: Vec<f16> = (0..b * m * k).map(|f| f16::from_f32(f as f32)).collect();
let rhs: Vec<f16> = (0..b * n * k).map(|f| f16::from_f32(f as f32)).collect();
let results = run_mlx_gemm(
GemmDType::F16,
(b, m, n, k),
&lhs,
&[m * k, k, 1],
0,
&rhs,
&[n * k, n, 1],
0,
);
assert_eq!(
approx_f16(results, 4),
vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0]
);
}
}
fn run_random<T: Clone>(name: &'static str, seed: u32, length: usize, a: f32, b: f32) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged;
let output = device.new_buffer((length * core::mem::size_of::<T>()) as NSUInteger, options);
let seed = device.new_buffer_with_data(
&seed as *const u32 as *const core::ffi::c_void,
std::mem::size_of::<u32>() as NSUInteger,
options,
);
if name.starts_with("rand_uniform") {
call_random_uniform(
&device,
command_buffer,
&kernels,
name,
a,
b,
length,
&seed,
&output,
)
.unwrap();
} else {
call_random_normal(
&device,
command_buffer,
&kernels,
name,
a,
b,
length,
&seed,
&output,
)
.unwrap();
}
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, length)
}
#[test]
fn random() {
fn calc_mean(data: &[f32]) -> f32 {
let sum = data.iter().sum::<f32>();
let count = data.len();
assert!(count > 0);
sum / count as f32
}
fn calc_stddev(data: &[f32]) -> f32 {
let mean = calc_mean(data);
let count = data.len();
assert!(count > 0);
let variance = data
.iter()
.map(|value| {
let diff = mean - *value;
diff * diff
})
.sum::<f32>()
/ count as f32;
variance.sqrt()
}
let shape = [1024, 10];
let length = shape.iter().product::<usize>();
let seed = 299792458;
let min = -30.0;
let max = 30.0;
let mean = 100.0;
let stddev = 50.0;
macro_rules! validate_random {
($type:ty) => {
let results: Vec<f32> = run_random::<$type>(
concat!("rand_uniform_", stringify!($type)),
seed,
length,
min,
max,
)
.into_iter()
.map(f32::from)
.collect();
results.iter().for_each(|v| {
assert!(*v >= min && *v <= max);
});
assert!(calc_mean(&results) > -1.0 && calc_mean(&results) < 1.0);
let results: Vec<f32> = run_random::<$type>(
concat!("rand_normal_", stringify!($type)),
seed,
length,
mean,
stddev,
)
.into_iter()
.map(f32::from)
.collect();
assert!((calc_mean(&results) - mean).abs() < mean / 10.0);
assert!((calc_stddev(&results) - stddev).abs() < stddev / 10.0);
};
}
validate_random!(f32);
validate_random!(f16);
validate_random!(bf16);
}
fn run_scatter_add<T: Clone, I: Clone + std::fmt::Debug>(
input: &[T],
ids: &[I],
shape: &[usize],
dim: usize,
name: &'static str,
) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged;
let input_buffer = new_buffer(&device, input);
let ids_buffer = new_buffer(&device, ids);
let output = device.new_buffer(std::mem::size_of_val(input) as u64, options);
call_scatter(
&device,
command_buffer,
&kernels,
name,
shape,
shape,
dim,
BufferOffset::zero_offset(&input_buffer),
BufferOffset::zero_offset(&ids_buffer),
BufferOffset::zero_offset(&output),
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, input.len())
}
#[test]
fn scatter_add() {
let ids_u8 = [0u8, 0, 1, 0, 2, 2, 3, 3];
let ids_u32 = [0u32, 0, 1, 0, 2, 2, 3, 3];
let ids_i64 = [0i64, 0, 1, 0, 2, 2, 3, 3];
let input_f32 = [5.0f32, 1.0, 7.0, 2.0, 3.0, 2.0, 1.0, 3.0];
let input_f16 = input_f32
.iter()
.map(|v| f16::from_f32(*v))
.collect::<Vec<_>>();
let input_bf16 = input_f32
.iter()
.map(|v| bf16::from_f32(*v))
.collect::<Vec<_>>();
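// Expected results for scatter-add along dim 0 with shape [8]: each destination slot accumulates the
// inputs whose id equals its index, e.g. slot 0 receives 5.0 + 1.0 + 2.0 = 8.0.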
let output_dim1_f32 = vec![8.0, 7.0, 5.0, 4.0, 0.0, 0.0, 0.0, 0.0];
let output_dim1_f16 = output_dim1_f32
.iter()
.map(|v| f16::from_f32(*v))
.collect::<Vec<_>>();
let output_dim1_bf16 = output_dim1_f32
.iter()
.map(|v| bf16::from_f32(*v))
.collect::<Vec<_>>();
let output_dim2_f32 = vec![5.0, 3.0, 7.0, 0.0, 3.0, 2.0, 1.0, 3.0];
let output_dim2_f16 = output_dim2_f32
.iter()
.map(|v| f16::from_f32(*v))
.collect::<Vec<_>>();
let output_dim2_bf16 = output_dim2_f32
.iter()
.map(|v| bf16::from_f32(*v))
.collect::<Vec<_>>();
for (shape, output_f32, output_f16, output_bf16) in [
(vec![8], output_dim1_f32, output_dim1_f16, output_dim1_bf16),
(
vec![4, 2],
output_dim2_f32,
output_dim2_f16,
output_dim2_bf16,
),
] {
for results in [
run_scatter_add(&input_f32, &ids_u8, &shape, 0, "sa_u8_f32"),
run_scatter_add(&input_f32, &ids_u32, &shape, 0, "sa_u32_f32"),
run_scatter_add(&input_f32, &ids_i64, &shape, 0, "sa_i64_f32"),
] {
assert_eq!(results, output_f32);
}
for results in [
run_scatter_add(&input_f16, &ids_u8, &shape, 0, "sa_u8_f16"),
run_scatter_add(&input_f16, &ids_u32, &shape, 0, "sa_u32_f16"),
run_scatter_add(&input_f16, &ids_i64, &shape, 0, "sa_i64_f16"),
] {
assert_eq!(results, output_f16);
}
for results in [
run_scatter_add(&input_bf16, &ids_u8, &shape, 0, "sa_u8_bf16"),
run_scatter_add(&input_bf16, &ids_u32, &shape, 0, "sa_u32_bf16"),
run_scatter_add(&input_bf16, &ids_i64, &shape, 0, "sa_i64_bf16"),
] {
assert_eq!(results, output_bf16);
}
}
}
fn run_index_add<T: Clone, I: Clone + std::fmt::Debug>(
left: &[T],
right: &[T],
indices: &[I],
shape: &[usize],
dim: usize,
name: &'static str,
) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input_buffer = new_buffer(&device, right);
let output = new_buffer(&device, left);
let indices_buffer = new_buffer(&device, indices);
call_index_add(
&device,
command_buffer,
&kernels,
name,
shape,
shape,
shape,
dim,
BufferOffset::zero_offset(&input_buffer),
BufferOffset::zero_offset(&indices_buffer),
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, left.len())
}
#[test]
fn index_add() {
let left = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let right = vec![1.0f32, 1.0, 1.0, 1.0, 1.0, 1.0];
let indices = vec![0u32, 1, 0, 1, 0, 1];
let shape = vec![6];
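// index_add along dim 0: three of the 1.0 values from `right` are added at index 0 and three at index 1,
// so only the first two entries of `left` change.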
// u32, f32
{
let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u32_f32");
assert_eq!(results, vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]);
}
// u32, f16
{
let left = left.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>();
let right = right.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>();
let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u32_f16");
assert_eq!(approx_f16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]);
}
// u32, bf16
{
let left = left.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>();
let right = right.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>();
let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u32_bf16");
assert_eq!(approx_bf16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]);
}
// u8, f32
{
let indices = indices.iter().map(|v| *v as u8).collect::<Vec<_>>();
let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u8_f32");
assert_eq!(results, vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]);
}
// u8, f16
{
let indices = indices.iter().map(|v| *v as u8).collect::<Vec<_>>();
let left = left.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>();
let right = right.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>();
let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u8_f16");
assert_eq!(approx_f16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]);
}
// u8, bf16
{
let indices = indices.iter().map(|v| *v as u8).collect::<Vec<_>>();
let left = left.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>();
let right = right.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>();
let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u8_bf16");
assert_eq!(approx_bf16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]);
}
// i64, f32
{
let indices = indices.iter().map(|v| *v as i64).collect::<Vec<_>>();
let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_i64_f32");
assert_eq!(results, vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]);
}
// i64, f16
{
let indices = indices.iter().map(|v| *v as i64).collect::<Vec<_>>();
let left = left.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>();
let right = right.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>();
let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_i64_f16");
assert_eq!(approx_f16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]);
}
// i64, bf16
{
let indices = indices.iter().map(|v| *v as i64).collect::<Vec<_>>();
let left = left.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>();
let right = right.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>();
let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_i64_bf16");
assert_eq!(approx_bf16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]);
}
}
fn run_pool2d<T: Clone>(
v: &[T],
(w_k, h_k): (usize, usize),
(w_stride, h_stride): (usize, usize),
shape: &[usize],
strides: &[usize],
name: &'static str,
) -> Vec<T> {
let device = device();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let out_w = (shape[2] - w_k) / w_stride + 1;
let out_h = (shape[3] - h_k) / h_stride + 1;
let dst_el = out_w * out_h * shape[0] * shape[1];
let input = new_buffer(&device, v);
let output = new_buffer(&device, &vec![0.0f32; dst_el]);
let kernels = Kernels::new();
call_pool2d(
&device,
command_buffer,
&kernels,
name,
shape,
strides,
out_w,
out_h,
w_k,
h_k,
w_stride,
h_stride,
&input,
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, dst_el)
}
#[test]
fn max_pool2d_f32() {
// kernel 2 stride 1
let v: Vec<f32> = (0..16).map(|v| v as f32).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
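// The input is the 4x4 grid 0..=15; with a 2x2 kernel and stride 1 the max of each window is its
// bottom-right element, giving a 3x3 output.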
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_f32",
);
let expected = vec![5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 13.0, 14.0, 15.0];
assert_eq!(results, expected);
// kernel 2 stride 2
let v: Vec<f32> = (0..16).map(|v| v as f32).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 2;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_f32",
);
let expected = vec![5.0, 7.0, 13.0, 15.0];
assert_eq!(results, expected);
}
#[test]
fn max_pool2d_f16() {
// kernel 2 stride 1
let v: Vec<half::f16> = (0..16).map(|v| half::f16::from_f32(v as f32)).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_f16",
);
let expected = [5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 13.0, 14.0, 15.0]
.iter()
.map(|v| half::f16::from_f32(*v))
.collect::<Vec<_>>();
assert_eq!(results, expected);
// kernel 2 stride 2
let v: Vec<half::f16> = (0..16).map(|v| half::f16::from_f32(v as f32)).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 2;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_f16",
);
let expected = [5.0, 7.0, 13.0, 15.0]
.iter()
.map(|v| half::f16::from_f32(*v))
.collect::<Vec<_>>();
assert_eq!(results, expected);
}
#[test]
fn max_pool2d_bf16() {
// kernel 2 stride 1
let v: Vec<half::bf16> = (0..16).map(|v| half::bf16::from_f32(v as f32)).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_bf16",
);
let expected = [5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 13.0, 14.0, 15.0]
.iter()
.map(|v| half::bf16::from_f32(*v))
.collect::<Vec<_>>();
assert_eq!(results, expected);
// kernel 2 stride 2
let v: Vec<half::bf16> = (0..16).map(|v| half::bf16::from_f32(v as f32)).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 2;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_bf16",
);
let expected = [5.0, 7.0, 13.0, 15.0]
.iter()
.map(|v| half::bf16::from_f32(*v))
.collect::<Vec<_>>();
assert_eq!(results, expected);
}
#[test]
fn max_pool2d_u8() {
// kernel 2 stride 1
let v: Vec<u8> = (0..16).map(|v| v as u8).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_u8",
);
let expected = vec![5, 6, 7, 9, 10, 11, 13, 14, 15];
assert_eq!(results, expected);
// kernel 2 stride 2
let v: Vec<u8> = (0..16).map(|v| v as u8).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 2;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_u8",
);
let expected = vec![5, 7, 13, 15];
assert_eq!(results, expected);
}
#[test]
fn max_pool2d_u32() {
// kernel 2 stride 1
let v: Vec<u32> = (0..16).map(|v| v as u32).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_u32",
);
let expected = vec![5, 6, 7, 9, 10, 11, 13, 14, 15];
assert_eq!(results, expected);
// kernel 2 stride 2
let v: Vec<u32> = (0..16).map(|v| v as u32).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 2;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"max_pool2d_u32",
);
let expected = vec![5, 7, 13, 15];
assert_eq!(results, expected);
}
#[test]
fn avg_pool2d_f32() {
// kernel 2 stride 1
let v: Vec<f32> = (0..16).map(|v| v as f32).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"avg_pool2d_f32",
);
let expected = vec![
2.5000, 3.5000, 4.5000, 6.5000, 7.5000, 8.5000, 10.5000, 11.5000, 12.5000,
];
assert_eq!(results, expected);
}
#[test]
fn avg_pool2d_f16() {
// kernel 2 stride 1
let v: Vec<f16> = (0..16).map(|v| f16::from_f32(v as f32)).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"avg_pool2d_f16",
);
let expected = [
2.5000, 3.5000, 4.5000, 6.5000, 7.5000, 8.5000, 10.5000, 11.5000, 12.5000,
]
.iter()
.map(|v| f16::from_f32(*v))
.collect::<Vec<_>>();
assert_eq!(results, expected);
}
#[test]
fn avg_pool2d_bf16() {
// kernel 2 stride 1
let v: Vec<bf16> = (0..16).map(|v| bf16::from_f32(v as f32)).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"avg_pool2d_bf16",
);
let expected = [
2.5000, 3.5000, 4.5000, 6.5000, 7.5000, 8.5000, 10.5000, 11.5000, 12.5000,
]
.iter()
.map(|v| bf16::from_f32(*v))
.collect::<Vec<_>>();
assert_eq!(results, expected);
}
#[test]
fn avg_pool2d_u8() {
// kernel 2 stride 1
let v: Vec<u8> = (0..16).map(|v| v as u8).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"avg_pool2d_u8",
);
let expected = vec![2, 3, 4, 6, 7, 8, 10, 11, 12];
assert_eq!(results, expected);
}
#[test]
fn avg_pool2d_u32() {
// kernel 2 stride 1
let v: Vec<u32> = (0..16).map(|v| v as u32).collect();
let shape = vec![1, 1, 4, 4];
let strides = vec![16, 16, 4, 1];
let kernel = 2;
let stride = 1;
let results = run_pool2d(
&v,
(kernel, kernel),
(stride, stride),
&shape,
&strides,
"avg_pool2d_u32",
);
let expected = vec![2, 3, 4, 6, 7, 8, 10, 11, 12];
assert_eq!(results, expected);
}
#[allow(clippy::too_many_arguments)]
fn run_conv_transpose1d<T: Clone>(
input: &[T],
input_shape: &[usize],
input_stride: &[usize],
kernel: &[T],
kernel_shape: &[usize],
kernel_stride: &[usize],
dilation: usize,
stride: usize,
padding: usize,
out_padding: usize,
name: &'static str,
) -> Vec<T> {
let device = device();
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let c_out = kernel_shape[1];
let k_size = kernel_shape[2];
let b_size = input_shape[0];
let l_in = input_shape[2];
let l_out = (l_in - 1) * stride - 2 * padding + dilation * (k_size - 1) + out_padding + 1;
let dst_el = c_out * l_out * b_size;
let input = new_buffer(&device, input);
let kernel = new_buffer(&device, kernel);
let output = new_buffer(&device, &vec![0.0f32; dst_el]);
let kernels = Kernels::new();
call_conv_transpose1d(
&device,
command_buffer,
&kernels,
name,
dilation,
stride,
padding,
out_padding,
c_out,
l_out,
b_size,
input_shape,
input_stride,
kernel_shape,
kernel_stride,
&input,
0,
&kernel,
0,
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, dst_el)
}
#[test]
fn conv_transpose1d_f32() {
let input = vec![1.0f32, 2.0, 3.0, 4.0];
let input_shape = &[1, 1, 4];
let input_stride = &[4, 4, 1];
let kernel = vec![1.0f32, 2.0, 3.0, 4.0];
let kernel_shape = &[1, 1, 4];
let kernel_stride = &[4, 4, 1];
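// With stride 1, no padding and unit dilation, this is the full 1-D convolution of [1, 2, 3, 4] with
// itself, which has length 7.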
let results = run_conv_transpose1d(
&input,
input_shape,
input_stride,
&kernel,
kernel_shape,
kernel_stride,
1,
1,
0,
0,
"conv_transpose1d_f32",
);
let expected = vec![1., 4., 10., 20., 25., 24., 16.];
assert_eq!(results, expected);
}
#[test]
fn conv_transpose1d_f16() {
let input: Vec<f16> = [1.0, 2.0, 3.0, 4.0]
.iter()
.map(|v| f16::from_f32(*v))
.collect();
let input_shape = &[1, 1, 4];
let input_stride = &[4, 4, 1];
let kernel: Vec<f16> = [1.0, 2.0, 3.0, 4.0]
.iter()
.map(|v| f16::from_f32(*v))
.collect();
let kernel_shape = &[1, 1, 4];
let kernel_stride = &[4, 4, 1];
let results = run_conv_transpose1d(
&input,
input_shape,
input_stride,
&kernel,
kernel_shape,
kernel_stride,
1,
1,
0,
0,
"conv_transpose1d_f16",
);
let expected = [1., 4., 10., 20., 25., 24., 16.]
.iter()
.map(|v| f16::from_f32(*v))
.collect::<Vec<_>>();
assert_eq!(results, expected);
}
#[test]
fn conv_transpose1d_bf16() {
let input: Vec<bf16> = [1.0, 2.0, 3.0, 4.0]
.iter()
.map(|v| bf16::from_f32(*v))
.collect();
let input_shape = &[1, 1, 4];
let input_stride = &[4, 4, 1];
let kernel: Vec<bf16> = [1.0, 2.0, 3.0, 4.0]
.iter()
.map(|v| bf16::from_f32(*v))
.collect();
let kernel_shape = &[1, 1, 4];
let kernel_stride = &[4, 4, 1];
let results = run_conv_transpose1d(
&input,
input_shape,
input_stride,
&kernel,
kernel_shape,
kernel_stride,
1,
1,
0,
0,
"conv_transpose1d_bf16",
);
let expected = [1., 4., 10., 20., 25., 24., 16.]
.iter()
.map(|v| bf16::from_f32(*v))
.collect::<Vec<_>>();
assert_eq!(results, expected);
}
#[test]
fn conv_transpose1d_u8() {
let input: Vec<u8> = vec![1, 2, 3, 4];
let input_shape = &[1, 1, 4];
let input_stride = &[4, 4, 1];
let kernel: Vec<u8> = vec![1, 2, 3, 4];
let kernel_shape = &[1, 1, 4];
let kernel_stride = &[4, 4, 1];
let results = run_conv_transpose1d(
&input,
input_shape,
input_stride,
&kernel,
kernel_shape,
kernel_stride,
1,
1,
0,
0,
"conv_transpose1d_u8",
);
let expected = vec![1, 4, 10, 20, 25, 24, 16];
assert_eq!(results, expected);
}
#[test]
fn conv_transpose1d_u32() {
let input: Vec<u32> = vec![1, 2, 3, 4];
let input_shape = &[1, 1, 4];
let input_stride = &[4, 4, 1];
let kernel: Vec<u32> = vec![1, 2, 3, 4];
let kernel_shape = &[1, 1, 4];
let kernel_stride = &[4, 4, 1];
let results = run_conv_transpose1d(
&input,
input_shape,
input_stride,
&kernel,
kernel_shape,
kernel_stride,
1,
1,
0,
0,
"conv_transpose1d_u32",
);
let expected = vec![1, 4, 10, 20, 25, 24, 16];
assert_eq!(results, expected);
}
#[test]
fn const_fill() {
fn constant_fill<T: Clone + EncoderParam>(name: &'static str, len: usize, value: T) -> Vec<T> {
let dev = device();
let kernels = Kernels::new();
let command_queue = dev.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let buffer = dev.new_buffer(
(len * std::mem::size_of::<T>()) as u64,
MTLResourceOptions::StorageModePrivate,
);
call_const_fill(&dev, command_buffer, &kernels, name, len, &buffer, value).unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec::<T>(&buffer, len)
}
fn test<T: Clone + Copy + EncoderParam + PartialEq + std::fmt::Debug, F: FnOnce(f32) -> T>(
name: &'static str,
f: F,
) {
let len = rand::thread_rng().gen_range(2..16) * rand::thread_rng().gen_range(4..16);
let value = rand::thread_rng().gen_range(1. ..19.);
let value = f(value);
let v = constant_fill::<T>(name, len, value);
assert_eq!(v, vec![value; len])
}
test::<u8, _>("fill_u8", |v| v as u8);
test::<u32, _>("fill_u32", |v| v as u32);
test::<i64, _>("fill_i64", |v| v as i64);
test::<f16, _>("fill_f16", f16::from_f32);
test::<bf16, _>("fill_bf16", bf16::from_f32);
test::<f32, _>("fill_f32", |v| v);
}
| candle/candle-metal-kernels/src/tests.rs/0 | {
"file_path": "candle/candle-metal-kernels/src/tests.rs",
"repo_id": "candle",
"token_count": 35346
} | 47 |
/// This example contains some simple benchmarks so that it's easy to run them in perf etc.
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::quantized::GgmlType;
use candle::{CpuStorage, Device, Layout, Module, Result, Shape, Tensor, D};
use clap::{Parser, Subcommand};
const CHECK_CONV2D: bool = false;
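// Each benchmark builds its inputs once in `preprocess`; `run_one` is then timed ITERS times (or
// `--iters` when given) by the `run` helper below, with `black_box` preventing the optimizer from
// eliding the work.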
trait Benchmark {
type PreProcessData;
type RunResult;
fn preprocess() -> Result<Self::PreProcessData>;
fn run_one(_: &Self::PreProcessData) -> Result<Self::RunResult>;
const ITERS: usize;
}
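// Im2Col unfolds the (b, c, h, w) input into a (b * h_out * w_out, c * h_k * w_k) matrix so that a 2D
// convolution can be computed as a single matmul (see `Conv2dIm2Col` below).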
struct Im2Col {
h_k: usize,
w_k: usize,
stride: usize,
dilation: usize,
padding: usize,
}
impl Im2Col {
fn hw_out(&self, h: usize, w: usize) -> (usize, usize) {
let h_out = (h + 2 * self.padding - self.dilation * (self.h_k - 1) - 1) / self.stride + 1;
let w_out = (w + 2 * self.padding - self.dilation * (self.w_k - 1) - 1) / self.stride + 1;
(h_out, w_out)
}
}
impl candle::CustomOp1 for Im2Col {
fn name(&self) -> &'static str {
"im2col"
}
fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
let &Self {
h_k,
w_k,
stride,
dilation,
padding,
} = self;
let (b, c, h, w) = layout.shape().dims4()?;
let (h_out, w_out) = self.hw_out(h, w);
let slice = storage.as_slice::<f32>()?;
let src = &slice[layout.start_offset()..];
let mut dst = vec![0f32; b * h_out * w_out * c * h_k * w_k];
let (src_s0, src_s1, src_s2, src_s3) = {
let s = layout.stride();
(s[0], s[1], s[2], s[3])
};
// TODO: provide specialized kernels for the common use cases.
// - h_k = w_k = 1
// - padding = 0
// - stride = 1
// - dilation = 1
for b_idx in 0..b {
let src_idx = b_idx * src_s0;
let dst_idx = b_idx * h_out * w_out * c * h_k * w_k;
for h_idx in 0..h_out {
let dst_idx = dst_idx + h_idx * w_out * c * h_k * w_k;
for w_idx in 0..w_out {
let dst_idx = dst_idx + w_idx * c * h_k * w_k;
for c_idx in 0..c {
let dst_idx = dst_idx + c_idx * h_k * w_k;
let src_idx = c_idx * src_s1 + src_idx;
for h_k_idx in 0..h_k {
let src_h = h_idx * stride + h_k_idx * dilation;
if padding != 0 && (src_h < padding || src_h >= h + padding) {
continue;
}
let src_h = src_h - padding;
let src_idx = src_idx + src_h * src_s2;
let dst_idx = dst_idx + h_k_idx * w_k;
for w_k_idx in 0..w_k {
let src_w = w_idx * stride + w_k_idx * dilation;
if padding != 0 && (src_w < padding || src_w >= w + padding) {
continue;
}
let src_w = src_w - padding;
let src_idx = src_idx + src_w * src_s3;
let dst_idx = dst_idx + w_k_idx;
dst[dst_idx] = src[src_idx]
}
}
}
}
}
}
let storage = candle::WithDType::to_cpu_storage_owned(dst);
Ok((storage, (b * h_out * w_out, c * h_k * w_k).into()))
}
}
// Conv1d example as used in whisper.
struct Conv1d;
impl Benchmark for Conv1d {
type PreProcessData = (Tensor, Tensor);
type RunResult = Tensor;
fn preprocess() -> Result<Self::PreProcessData> {
let inp = Tensor::randn(0f32, 1., (1, 384, 3000), &Device::Cpu)?;
let w = Tensor::randn(0f32, 1., (384, 384, 3), &Device::Cpu)?;
Ok((inp, w))
}
fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
d.0.conv1d(&d.1, 0, 1, 1, 1)
}
const ITERS: usize = 5;
}
// Conv2d example as used in stable-diffusion.
struct Conv2d;
impl Benchmark for Conv2d {
type PreProcessData = (Tensor, Tensor);
type RunResult = Tensor;
fn preprocess() -> Result<Self::PreProcessData> {
let inp = Tensor::randn(0f32, 1., (2, 320, 96, 96), &Device::Cpu)?;
let w = Tensor::randn(0f32, 1., (320, 320, 3, 3), &Device::Cpu)?;
Ok((inp, w))
}
fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
d.0.conv2d(&d.1, 0, 1, 1, 1)
}
const ITERS: usize = 5;
}
// Conv2d example as used in stable-diffusion, im2col implementation.
struct Conv2dIm2Col;
impl Benchmark for Conv2dIm2Col {
type PreProcessData = (Tensor, Tensor);
type RunResult = Tensor;
fn preprocess() -> Result<Self::PreProcessData> {
let inp = Tensor::randn(0f32, 1., (2, 320, 96, 96), &Device::Cpu)?;
let w = Tensor::randn(0f32, 1., (320, 320, 3, 3), &Device::Cpu)?;
Ok((inp, w))
}
fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
// d.0.conv2d(&d.1, 0, 1, 1, 1)
let (b, _, h, w) = d.0.dims4()?;
let (_, _, h_k, w_k) = d.1.dims4()?;
let op = Im2Col {
h_k,
w_k,
stride: 1,
dilation: 1,
padding: 0,
};
let (h_out, w_out) = op.hw_out(h, w);
let col = d.0.apply_op1_no_bwd(&op)?;
let res = col.matmul(&d.1.flatten_from(1)?.t()?)?;
let res = res
.reshape((b, h_out, w_out, ()))?
.permute((0, 3, 1, 2))?
.contiguous()?;
if CHECK_CONV2D {
let res2 = d.0.conv2d(&d.1, op.padding, op.stride, op.dilation, 1);
let diff = (&res - res2)?.sqr()?.mean_all()?;
println!("{diff}");
}
Ok(res)
}
const ITERS: usize = 5;
}
struct MatMul;
impl Benchmark for MatMul {
type PreProcessData = (Tensor, Tensor);
type RunResult = Tensor;
fn preprocess() -> Result<Self::PreProcessData> {
let lhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?;
let rhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?;
Ok((lhs, rhs))
}
fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
d.0.matmul(&d.1)
}
const ITERS: usize = 100;
}
struct MatVec;
impl Benchmark for MatVec {
type PreProcessData = (Tensor, Tensor);
type RunResult = Tensor;
fn preprocess() -> Result<Self::PreProcessData> {
let lhs = Tensor::randn(0f32, 1., (1024 * 4, 1024 * 4), &Device::Cpu)?;
let rhs = Tensor::randn(0f32, 1., (1024 * 4, 1), &Device::Cpu)?;
Ok((lhs, rhs))
}
fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
d.0.matmul(&d.1)
}
const ITERS: usize = 100;
}
// This benchmark is similar to:
// https://github.com/ggerganov/llama.cpp/blob/master/examples/benchmark/benchmark-matmult.cpp
struct QMatMul;
impl Benchmark for QMatMul {
type PreProcessData = (candle::quantized::QMatMul, Tensor);
type RunResult = Tensor;
fn preprocess() -> Result<Self::PreProcessData> {
let zeros = vec![candle::quantized::k_quants::BlockQ4_0::zeros(); 4096 * 11008 / 32];
let mm = candle::quantized::QTensor::new(
candle::quantized::QStorage::Cpu(Box::new(zeros)),
(4096, 11008),
)?;
let mm = candle::quantized::QMatMul::from_qtensor(mm)?;
let arg = Tensor::randn(0f32, 1., (128, 11008), &Device::Cpu)?;
Ok((mm, arg))
}
fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
d.0.forward(&d.1)
}
const ITERS: usize = 100;
}
struct Cat;
impl Benchmark for Cat {
type PreProcessData = (Tensor, Tensor);
type RunResult = Tensor;
fn preprocess() -> Result<Self::PreProcessData> {
let lhs = Tensor::randn(0f32, 1., (1, 32, 2000, 128), &Device::Cpu)?;
let rhs = Tensor::randn(0f32, 1., (1, 32, 1, 128), &Device::Cpu)?;
Ok((lhs, rhs))
}
fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
Tensor::cat(&[&d.0, &d.1], 2)
}
const ITERS: usize = 1000;
}
struct Softmax;
impl Benchmark for Softmax {
type PreProcessData = Tensor;
type RunResult = Tensor;
fn preprocess() -> Result<Self::PreProcessData> {
// Typical whisper tiny size.
let x = Tensor::randn(0f32, 1., (1, 6, 200, 1500), &Device::Cpu)?;
Ok(x)
}
fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
candle_nn::ops::softmax(d, D::Minus1)
}
const ITERS: usize = 100;
}
struct SoftmaxLastDim;
impl Benchmark for SoftmaxLastDim {
type PreProcessData = Tensor;
type RunResult = Tensor;
fn preprocess() -> Result<Self::PreProcessData> {
// Typical whisper tiny size.
let x = Tensor::randn(0f32, 1., (1, 6, 200, 1500), &Device::Cpu)?;
Ok(x)
}
fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
candle_nn::ops::softmax_last_dim(d)
}
const ITERS: usize = 100;
}
fn run<B: Benchmark>(iters: Option<usize>) -> Result<()> {
use std::hint::black_box;
let iters = iters.unwrap_or(B::ITERS);
let d = B::preprocess()?;
let start = std::time::Instant::now();
for _iter in 0..iters {
let _res = black_box(B::run_one(black_box(&d))?);
}
println!("{:?}", start.elapsed() / iters as u32);
Ok(())
}
#[derive(Subcommand, Debug, Clone)]
enum Task {
Conv1d,
Conv2d,
Conv2dIm2Col,
Matmul,
Matvec,
Qmatmul,
Softmax,
SoftmaxLastDim,
Cat,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
/// The benchmark to be run.
#[command(subcommand)]
task: Task,
#[arg(long)]
iters: Option<usize>,
}
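// Example invocation (a sketch; clap derives the subcommand names from the `Task` variants, so check
// `--help` for the exact spellings):
//   cargo run --release --example cpu_benchmarks -- matmul --iters 200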
fn main() -> Result<()> {
let args = Args::parse();
match args.task {
Task::Conv1d => run::<Conv1d>(args.iters)?,
Task::Conv2d => run::<Conv2d>(args.iters)?,
Task::Conv2dIm2Col => run::<Conv2dIm2Col>(args.iters)?,
Task::Matmul => run::<MatMul>(args.iters)?,
Task::Matvec => run::<MatVec>(args.iters)?,
Task::Softmax => run::<Softmax>(args.iters)?,
Task::SoftmaxLastDim => run::<SoftmaxLastDim>(args.iters)?,
Task::Qmatmul => run::<QMatMul>(args.iters)?,
Task::Cat => run::<Cat>(args.iters)?,
}
Ok(())
}
| candle/candle-nn/examples/cpu_benchmarks.rs/0 | {
"file_path": "candle/candle-nn/examples/cpu_benchmarks.rs",
"repo_id": "candle",
"token_count": 5543
} | 48 |
//! Recurrent Neural Networks
use candle::{DType, Device, IndexOp, Result, Tensor};
/// Trait for Recurrent Neural Networks.
#[allow(clippy::upper_case_acronyms)]
pub trait RNN {
type State: Clone;
/// A zero state from which the recurrent network is usually initialized.
fn zero_state(&self, batch_dim: usize) -> Result<Self::State>;
/// Applies a single step of the recurrent network.
///
/// The input should have dimensions [batch_size, features].
fn step(&self, input: &Tensor, state: &Self::State) -> Result<Self::State>;
/// Applies multiple steps of the recurrent network.
///
/// The input should have dimensions [batch_size, seq_len, features].
/// The initial state is the result of applying zero_state.
fn seq(&self, input: &Tensor) -> Result<Vec<Self::State>> {
let batch_dim = input.dim(0)?;
let state = self.zero_state(batch_dim)?;
self.seq_init(input, &state)
}
/// Applies multiple steps of the recurrent network.
///
/// The input should have dimensions [batch_size, seq_len, features].
fn seq_init(&self, input: &Tensor, init_state: &Self::State) -> Result<Vec<Self::State>> {
let (_b_size, seq_len, _features) = input.dims3()?;
let mut output = Vec::with_capacity(seq_len);
for seq_index in 0..seq_len {
let input = input.i((.., seq_index, ..))?.contiguous()?;
let state = if seq_index == 0 {
self.step(&input, init_state)?
} else {
self.step(&input, &output[seq_index - 1])?
};
output.push(state);
}
Ok(output)
}
/// Converts a sequence of state to a tensor.
fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor>;
}
/// The state for a LSTM network, this contains two tensors.
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone)]
pub struct LSTMState {
pub h: Tensor,
pub c: Tensor,
}
impl LSTMState {
pub fn new(h: Tensor, c: Tensor) -> Self {
LSTMState { h, c }
}
/// The hidden state vector, which is also the output of the LSTM.
pub fn h(&self) -> &Tensor {
&self.h
}
/// The cell state vector.
pub fn c(&self) -> &Tensor {
&self.c
}
}
#[derive(Debug, Clone, Copy)]
pub enum Direction {
Forward,
Backward,
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone, Copy)]
pub struct LSTMConfig {
pub w_ih_init: super::Init,
pub w_hh_init: super::Init,
pub b_ih_init: Option<super::Init>,
pub b_hh_init: Option<super::Init>,
pub layer_idx: usize,
pub direction: Direction,
}
impl Default for LSTMConfig {
fn default() -> Self {
Self {
w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM,
w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM,
b_ih_init: Some(super::Init::Const(0.)),
b_hh_init: Some(super::Init::Const(0.)),
layer_idx: 0,
direction: Direction::Forward,
}
}
}
impl LSTMConfig {
pub fn default_no_bias() -> Self {
Self {
w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM,
w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM,
b_ih_init: None,
b_hh_init: None,
layer_idx: 0,
direction: Direction::Forward,
}
}
}
/// A Long Short-Term Memory (LSTM) layer.
///
/// <https://en.wikipedia.org/wiki/Long_short-term_memory>
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub struct LSTM {
w_ih: Tensor,
w_hh: Tensor,
b_ih: Option<Tensor>,
b_hh: Option<Tensor>,
hidden_dim: usize,
config: LSTMConfig,
device: Device,
dtype: DType,
}
impl LSTM {
/// Creates an LSTM layer.
pub fn new(
in_dim: usize,
hidden_dim: usize,
config: LSTMConfig,
vb: crate::VarBuilder,
) -> Result<Self> {
let layer_idx = config.layer_idx;
let direction_str = match config.direction {
Direction::Forward => "",
Direction::Backward => "_reverse",
};
let w_ih = vb.get_with_hints(
(4 * hidden_dim, in_dim),
&format!("weight_ih_l{layer_idx}{direction_str}"), // Only a single layer is supported.
config.w_ih_init,
)?;
let w_hh = vb.get_with_hints(
(4 * hidden_dim, hidden_dim),
&format!("weight_hh_l{layer_idx}{direction_str}"), // Only a single layer is supported.
config.w_hh_init,
)?;
let b_ih = match config.b_ih_init {
Some(init) => Some(vb.get_with_hints(
4 * hidden_dim,
&format!("bias_ih_l{layer_idx}{direction_str}"),
init,
)?),
None => None,
};
let b_hh = match config.b_hh_init {
Some(init) => Some(vb.get_with_hints(
4 * hidden_dim,
&format!("bias_hh_l{layer_idx}{direction_str}"),
init,
)?),
None => None,
};
Ok(Self {
w_ih,
w_hh,
b_ih,
b_hh,
hidden_dim,
config,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
pub fn config(&self) -> &LSTMConfig {
&self.config
}
}
/// Creates an LSTM layer.
pub fn lstm(
in_dim: usize,
hidden_dim: usize,
config: LSTMConfig,
vb: crate::VarBuilder,
) -> Result<LSTM> {
LSTM::new(in_dim, hidden_dim, config, vb)
}
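// Minimal usage sketch; the dimensions and the `vb` VarBuilder below are illustrative assumptions:
//   let lstm = lstm(16, 32, LSTMConfig::default(), vb)?;
//   let input = Tensor::zeros((8, 10, 16), DType::F32, &Device::Cpu)?; // (batch, seq_len, features)
//   let states = lstm.seq(&input)?;
//   let output = lstm.states_to_tensor(&states)?; // (batch, seq_len, hidden_dim)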
impl RNN for LSTM {
type State = LSTMState;
fn zero_state(&self, batch_dim: usize) -> Result<Self::State> {
let zeros =
Tensor::zeros((batch_dim, self.hidden_dim), self.dtype, &self.device)?.contiguous()?;
Ok(Self::State {
h: zeros.clone(),
c: zeros.clone(),
})
}
fn step(&self, input: &Tensor, in_state: &Self::State) -> Result<Self::State> {
let w_ih = input.matmul(&self.w_ih.t()?)?;
let w_hh = in_state.h.matmul(&self.w_hh.t()?)?;
let w_ih = match &self.b_ih {
None => w_ih,
Some(b_ih) => w_ih.broadcast_add(b_ih)?,
};
let w_hh = match &self.b_hh {
None => w_hh,
Some(b_hh) => w_hh.broadcast_add(b_hh)?,
};
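// The combined 4 * hidden_dim projection is split into the input, forget, cell and output gates, in that order.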
let chunks = (&w_ih + &w_hh)?.chunk(4, 1)?;
let in_gate = crate::ops::sigmoid(&chunks[0])?;
let forget_gate = crate::ops::sigmoid(&chunks[1])?;
let cell_gate = chunks[2].tanh()?;
let out_gate = crate::ops::sigmoid(&chunks[3])?;
let next_c = ((forget_gate * &in_state.c)? + (in_gate * cell_gate)?)?;
let next_h = (out_gate * next_c.tanh()?)?;
Ok(LSTMState {
c: next_c,
h: next_h,
})
}
fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor> {
let states = states.iter().map(|s| s.h.clone()).collect::<Vec<_>>();
Tensor::stack(&states, 1)
}
}
/// The state for a GRU network, this contains a single tensor.
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone)]
pub struct GRUState {
pub h: Tensor,
}
impl GRUState {
/// The hidden state vector, which is also the output of the GRU.
pub fn h(&self) -> &Tensor {
&self.h
}
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone, Copy)]
pub struct GRUConfig {
pub w_ih_init: super::Init,
pub w_hh_init: super::Init,
pub b_ih_init: Option<super::Init>,
pub b_hh_init: Option<super::Init>,
}
impl Default for GRUConfig {
fn default() -> Self {
Self {
w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM,
w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM,
b_ih_init: Some(super::Init::Const(0.)),
b_hh_init: Some(super::Init::Const(0.)),
}
}
}
impl GRUConfig {
pub fn default_no_bias() -> Self {
Self {
w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM,
w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM,
b_ih_init: None,
b_hh_init: None,
}
}
}
/// A Gated Recurrent Unit (GRU) layer.
///
/// <https://en.wikipedia.org/wiki/Gated_recurrent_unit>
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub struct GRU {
w_ih: Tensor,
w_hh: Tensor,
b_ih: Option<Tensor>,
b_hh: Option<Tensor>,
hidden_dim: usize,
config: GRUConfig,
device: Device,
dtype: DType,
}
impl GRU {
/// Creates a GRU layer.
pub fn new(
in_dim: usize,
hidden_dim: usize,
config: GRUConfig,
vb: crate::VarBuilder,
) -> Result<Self> {
let w_ih = vb.get_with_hints(
(3 * hidden_dim, in_dim),
"weight_ih_l0", // Only a single layer is supported.
config.w_ih_init,
)?;
let w_hh = vb.get_with_hints(
(3 * hidden_dim, hidden_dim),
"weight_hh_l0", // Only a single layer is supported.
config.w_hh_init,
)?;
let b_ih = match config.b_ih_init {
Some(init) => Some(vb.get_with_hints(3 * hidden_dim, "bias_ih_l0", init)?),
None => None,
};
let b_hh = match config.b_hh_init {
Some(init) => Some(vb.get_with_hints(3 * hidden_dim, "bias_hh_l0", init)?),
None => None,
};
Ok(Self {
w_ih,
w_hh,
b_ih,
b_hh,
hidden_dim,
config,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
pub fn config(&self) -> &GRUConfig {
&self.config
}
}
pub fn gru(
in_dim: usize,
hidden_dim: usize,
config: GRUConfig,
vb: crate::VarBuilder,
) -> Result<GRU> {
GRU::new(in_dim, hidden_dim, config, vb)
}
impl RNN for GRU {
type State = GRUState;
fn zero_state(&self, batch_dim: usize) -> Result<Self::State> {
let h =
Tensor::zeros((batch_dim, self.hidden_dim), self.dtype, &self.device)?.contiguous()?;
Ok(Self::State { h })
}
fn step(&self, input: &Tensor, in_state: &Self::State) -> Result<Self::State> {
let w_ih = input.matmul(&self.w_ih.t()?)?;
let w_hh = in_state.h.matmul(&self.w_hh.t()?)?;
let w_ih = match &self.b_ih {
None => w_ih,
Some(b_ih) => w_ih.broadcast_add(b_ih)?,
};
let w_hh = match &self.b_hh {
None => w_hh,
Some(b_hh) => w_hh.broadcast_add(b_hh)?,
};
let chunks_ih = w_ih.chunk(3, 1)?;
let chunks_hh = w_hh.chunk(3, 1)?;
let r_gate = crate::ops::sigmoid(&(&chunks_ih[0] + &chunks_hh[0])?)?;
let z_gate = crate::ops::sigmoid(&(&chunks_ih[1] + &chunks_hh[1])?)?;
let n_gate = (&chunks_ih[2] + (r_gate * &chunks_hh[2])?)?.tanh();
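// h' = z * h + (1 - z) * n, written below as z * h - (z - 1) * n.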
let next_h = ((&z_gate * &in_state.h)? - ((&z_gate - 1.)? * n_gate)?)?;
Ok(GRUState { h: next_h })
}
fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor> {
let states = states.iter().map(|s| s.h.clone()).collect::<Vec<_>>();
Tensor::cat(&states, 1)
}
}
| candle/candle-nn/src/rnn.rs/0 | {
"file_path": "candle/candle-nn/src/rnn.rs",
"repo_id": "candle",
"token_count": 5697
} | 49 |
[package]
name = "candle-onnx"
version = "0.9.1"
edition = "2021"
description = "ONNX support for Candle"
repository = "https://github.com/huggingface/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT OR Apache-2.0"
[dependencies]
candle = { path = "../candle-core", package = "candle-core", version = "0.9.1" }
candle-nn = { path = "../candle-nn", version = "0.9.1" }
prost = "0.12.1"
[build-dependencies]
prost-build = "0.12.1"
[dev-dependencies]
anyhow = { version = "1", features = ["backtrace"] }
clap = { version = "4.2.4", features = ["derive"] }
| candle/candle-onnx/Cargo.toml/0 | {
"file_path": "candle/candle-onnx/Cargo.toml",
"repo_id": "candle",
"token_count": 242
} | 50 |
# Generated content DO NOT EDIT
from .. import functional
avg_pool2d = functional.avg_pool2d
gelu = functional.gelu
max_pool2d = functional.max_pool2d
relu = functional.relu
silu = functional.silu
softmax = functional.softmax
tanh = functional.tanh
| candle/candle-pyo3/py_src/candle/functional/__init__.py/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/functional/__init__.py",
"repo_id": "candle",
"token_count": 84
} | 51 |
# Generated content DO NOT EDIT
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device, Scalar, Index, Shape
from candle import Tensor, DType, QTensor
@staticmethod
def cuda_is_available() -> bool:
"""
Returns true if the 'cuda' backend is available.
"""
pass
@staticmethod
def get_num_threads() -> int:
"""
Returns the number of threads used by candle.
"""
pass
@staticmethod
def has_accelerate() -> bool:
"""
Returns true if candle was compiled with 'accelerate' support.
"""
pass
@staticmethod
def has_mkl() -> bool:
"""
Returns true if candle was compiled with MKL support.
"""
pass
@staticmethod
def load_ggml(path, device=None) -> Tuple[Dict[str, QTensor], Dict[str, Any], List[str]]:
"""
Load a GGML file. Returns a tuple of three objects: a dictionary mapping tensor names to tensors,
a dictionary mapping hyperparameter names to hyperparameter values, and a vocabulary.
"""
pass
@staticmethod
def load_gguf(path, device=None) -> Tuple[Dict[str, QTensor], Dict[str, Any]]:
"""
Loads a GGUF file. Returns a tuple of two dictionaries: the first maps tensor names to tensors,
and the second maps metadata keys to metadata values.
"""
pass
@staticmethod
def load_safetensors(path: Union[str, PathLike]) -> Dict[str, Tensor]:
"""
Loads a safetensors file. Returns a dictionary mapping tensor names to tensors.
"""
pass
@staticmethod
def save_gguf(path, tensors, metadata):
"""
Save quantized tensors and metadata to a GGUF file.
"""
pass
@staticmethod
def save_safetensors(path: Union[str, PathLike], tensors: Dict[str, Tensor]) -> None:
"""
Saves a dictionary of tensors to a safetensors file.
"""
pass
| candle/candle-pyo3/py_src/candle/utils/__init__.pyi/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/utils/__init__.pyi",
"repo_id": "candle",
"token_count": 654
} | 52 |
import candle
from candle import Tensor, QTensor
from candle.utils import load_safetensors, save_gguf, load_gguf, save_safetensors
from pathlib import Path
TEST_DIR = Path(__file__).parent.parent / "_workdir"
TEST_DIR.mkdir(exist_ok=True)
def test_can_roundtrip_safetensors():
tensors = {
"a": candle.randn((16, 256)),
"b": candle.randn((16, 16)),
}
file = str(TEST_DIR / "test.safetensors")
save_safetensors(file, tensors)
loaded_tensors = load_safetensors(file)
assert set(tensors.keys()) == set(loaded_tensors.keys())
for key in tensors.keys():
assert tensors[key].values() == loaded_tensors[key].values(), "Values are not equal"
assert tensors[key].shape == loaded_tensors[key].shape, "Shapes are not equal"
assert str(tensors[key].dtype) == str(loaded_tensors[key].dtype), "Dtypes are not equal"
def test_can_roundtrip_gguf():
metadata = {
"a": 1,
"b": "foo",
"c": [1, 2, 3],
"d": [[1, 2], [3, 4]],
}
tensors = {
"a": candle.randn((16, 256)).quantize("q4_0"),
"b": candle.randn((16, 16)).quantize("f32"),
}
file = str(TEST_DIR / "test.gguf")
save_gguf(file, tensors, metadata)
loaded_tensors, loaded_metadata = load_gguf(file)
assert set(metadata.keys()) == set(loaded_metadata.keys())
for key in metadata.keys():
assert metadata[key] == loaded_metadata[key]
assert set(tensors.keys()) == set(loaded_tensors.keys())
for key in tensors.keys():
assert tensors[key].dequantize().values() == loaded_tensors[key].dequantize().values(), "Values are not equal"
assert tensors[key].shape == loaded_tensors[key].shape, "Shapes are not equal"
assert str(tensors[key].ggml_dtype) == str(loaded_tensors[key].ggml_dtype), "Dtypes are not equal"
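def test_quantized_tensor_preserves_shape():
    # A minimal sketch (not part of the original test suite): quantization keeps the
    # tensor shape, and dequantization returns a float tensor of the same shape.
    t = candle.randn((16, 256))
    q = t.quantize("q4_0")
    assert q.shape == t.shape
    assert q.dequantize().shape == t.shape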
| candle/candle-pyo3/tests/native/test_utils.py/0 | {
"file_path": "candle/candle-pyo3/tests/native/test_utils.py",
"repo_id": "candle",
"token_count": 774
} | 53 |
//! Contrastive Language-Image Pre-Training
//!
//! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - [GH](https://github.com/openai/CLIP)
//! - [Code](https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip)
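//!
//! A rough usage sketch for the text tower defined below (weight loading is
//! illustrative only and not part of this file):
//!
//! ```ignore
//! let cfg = ClipTextConfig::vit_base_patch32();
//! let model = ClipTextTransformer::new(vb, &cfg)?;
//! let text_features = model.forward(&token_ids)?; // token_ids: (batch, seq_len) of u32
//! ```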
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn as nn;
use candle_nn::Module;
use super::EncoderConfig;
#[derive(Debug, Clone, Copy)]
pub enum Activation {
QuickGelu,
}
impl Module for Activation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Activation::QuickGelu => xs * nn::ops::sigmoid(&(xs * 1.702f64)?)?,
}
}
}
#[derive(Debug, Clone)]
pub struct ClipTextConfig {
pub vocab_size: usize,
pub embed_dim: usize,
pub activation: Activation,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub pad_with: Option<String>,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
#[allow(dead_code)]
pub projection_dim: usize,
}
impl ClipTextConfig {
// The config details can be found in the "text_config" section of this json file:
// https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json
pub fn vit_base_patch32() -> Self {
Self {
vocab_size: 49408,
embed_dim: 512,
intermediate_size: 2048,
max_position_embeddings: 77,
pad_with: None,
num_hidden_layers: 12,
num_attention_heads: 8,
projection_dim: 512,
activation: Activation::QuickGelu,
}
}
}
// ClipTextEmbeddings mostly based on the existing implementation in the stable diffusion model.
// TODO rewrite to be more similar to https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L142
#[derive(Clone, Debug)]
struct ClipTextEmbeddings {
token_embedding: candle_nn::Embedding,
position_embedding: candle_nn::Embedding,
position_ids: Tensor,
}
impl ClipTextEmbeddings {
fn new(vs: candle_nn::VarBuilder, c: &ClipTextConfig) -> Result<Self> {
let token_embedding =
candle_nn::embedding(c.vocab_size, c.embed_dim, vs.pp("token_embedding"))?;
let position_embedding: nn::Embedding = candle_nn::embedding(
c.max_position_embeddings,
c.embed_dim,
vs.pp("position_embedding"),
)?;
let position_ids =
Tensor::arange(0u32, c.max_position_embeddings as u32, vs.device())?.unsqueeze(0)?;
Ok(Self {
token_embedding,
position_embedding,
position_ids,
})
}
}
impl Module for ClipTextEmbeddings {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let seq_length = input_ids.dim(D::Minus1)?;
let inputs_embeds = self.token_embedding.forward(input_ids)?;
let position_ids = self.position_ids.narrow(1, 0, seq_length)?;
let position_embedding = self.position_embedding.forward(&position_ids)?;
inputs_embeds.broadcast_add(&position_embedding)
}
}
#[derive(Clone, Debug)]
struct ClipAttention {
k_proj: candle_nn::Linear,
v_proj: candle_nn::Linear,
q_proj: candle_nn::Linear,
out_proj: candle_nn::Linear,
head_dim: usize,
scale: f64,
num_attention_heads: usize,
}
impl ClipAttention {
fn new(vs: candle_nn::VarBuilder, c: &EncoderConfig) -> Result<Self> {
let embed_dim = c.embed_dim();
let num_attention_heads = c.num_attention_heads();
let k_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("k_proj"))?;
let v_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("v_proj"))?;
let q_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("q_proj"))?;
let out_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("out_proj"))?;
let head_dim = embed_dim / num_attention_heads;
let scale = (head_dim as f64).powf(-0.5);
Ok(ClipAttention {
k_proj,
v_proj,
q_proj,
out_proj,
head_dim,
scale,
num_attention_heads,
})
}
fn shape(&self, xs: &Tensor, seq_len: usize, bsz: usize) -> Result<Tensor> {
xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()
}
fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> {
let in_dtype = xs.dtype();
let (bsz, seq_len, embed_dim) = xs.dims3()?;
let query_states = (self.q_proj.forward(xs)? * self.scale)?;
let proj_shape = (bsz * self.num_attention_heads, seq_len, self.head_dim);
let query_states = self
.shape(&query_states, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let key_states = self
.shape(&self.k_proj.forward(xs)?, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let value_states = self
.shape(&self.v_proj.forward(xs)?, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?;
let src_len = key_states.dim(1)?;
let attn_weights = if let Some(causal_attention_mask) = causal_attention_mask {
attn_weights
.reshape((bsz, self.num_attention_heads, seq_len, src_len))?
.broadcast_add(causal_attention_mask)?
.reshape((bsz * self.num_attention_heads, seq_len, src_len))?
} else {
attn_weights
};
let attn_weights = candle_nn::ops::softmax(&attn_weights, D::Minus1)?;
let attn_output = attn_weights.matmul(&value_states)?.to_dtype(in_dtype)?;
let attn_output = attn_output
.reshape((bsz, self.num_attention_heads, seq_len, self.head_dim))?
.transpose(1, 2)?
.reshape((bsz, seq_len, embed_dim))?;
self.out_proj.forward(&attn_output)
}
}
#[derive(Clone, Debug)]
struct ClipMlp {
fc1: candle_nn::Linear,
fc2: candle_nn::Linear,
activation: Activation,
}
impl ClipMlp {
fn new(vs: candle_nn::VarBuilder, c: &EncoderConfig) -> Result<Self> {
let fc1 = candle_nn::linear(c.embed_dim(), c.intermediate_size(), vs.pp("fc1"))?;
let fc2 = candle_nn::linear(c.intermediate_size(), c.embed_dim(), vs.pp("fc2"))?;
Ok(ClipMlp {
fc1,
fc2,
activation: c.activation(),
})
}
}
impl ClipMlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.fc1.forward(xs)?;
self.fc2.forward(&self.activation.forward(&xs)?)
}
}
#[derive(Clone, Debug)]
struct ClipEncoderLayer {
self_attn: ClipAttention,
layer_norm1: candle_nn::LayerNorm,
mlp: ClipMlp,
layer_norm2: candle_nn::LayerNorm,
}
impl ClipEncoderLayer {
fn new(vs: candle_nn::VarBuilder, c: &EncoderConfig) -> Result<Self> {
let self_attn = ClipAttention::new(vs.pp("self_attn"), c)?;
let layer_norm1 = candle_nn::layer_norm(c.embed_dim(), 1e-5, vs.pp("layer_norm1"))?;
let mlp = ClipMlp::new(vs.pp("mlp"), c)?;
let layer_norm2 = candle_nn::layer_norm(c.embed_dim(), 1e-5, vs.pp("layer_norm2"))?;
Ok(ClipEncoderLayer {
self_attn,
layer_norm1,
mlp,
layer_norm2,
})
}
fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> {
let residual = xs;
let xs = self.layer_norm1.forward(xs)?;
let xs = self.self_attn.forward(&xs, causal_attention_mask)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self.layer_norm2.forward(&xs)?;
let xs = self.mlp.forward(&xs)?;
xs + residual
}
}
#[derive(Clone, Debug)]
pub struct ClipEncoder {
layers: Vec<ClipEncoderLayer>,
}
impl ClipEncoder {
pub fn new(vs: candle_nn::VarBuilder, c: &EncoderConfig) -> Result<Self> {
let vs = vs.pp("layers");
let mut layers: Vec<ClipEncoderLayer> = Vec::new();
for index in 0..c.num_hidden_layers() {
let layer = ClipEncoderLayer::new(vs.pp(index.to_string()), c)?;
layers.push(layer)
}
Ok(ClipEncoder { layers })
}
pub fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs, causal_attention_mask)?;
}
Ok(xs)
}
// required by LLaVA
pub fn output_hidden_states(
&self,
xs: &Tensor,
causal_attention_mask: Option<&Tensor>,
) -> Result<Vec<Tensor>> {
let mut xs = xs.clone();
let mut hidden_states = Vec::new();
for layer in self.layers.iter() {
xs = layer.forward(&xs, causal_attention_mask)?;
hidden_states.push(xs.clone());
}
Ok(hidden_states)
}
}
/// A CLIP transformer based model.
#[derive(Clone, Debug)]
pub struct ClipTextTransformer {
embeddings: ClipTextEmbeddings,
encoder: ClipEncoder,
final_layer_norm: candle_nn::LayerNorm,
}
impl ClipTextTransformer {
pub fn new(vs: candle_nn::VarBuilder, c: &ClipTextConfig) -> Result<Self> {
let embeddings = ClipTextEmbeddings::new(vs.pp("embeddings"), c)?;
let encoder = ClipEncoder::new(vs.pp("encoder"), &EncoderConfig::Text(c.clone()))?;
let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("final_layer_norm"))?;
Ok(ClipTextTransformer {
embeddings,
encoder,
final_layer_norm,
})
}
// TODO: rewrite to newer version
fn build_causal_attention_mask(
bsz: usize,
seq_len: usize,
mask_after: usize,
device: &Device,
) -> Result<Tensor> {
let mask: Vec<_> = (0..seq_len)
.flat_map(|i| {
(0..seq_len).map(move |j| {
if j > i || j > mask_after {
f32::MIN
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (seq_len, seq_len), device)?;
mask.broadcast_as((bsz, 1, seq_len, seq_len))
}
pub fn forward_with_mask(&self, input_ids: &Tensor, mask_after: usize) -> Result<Tensor> {
let (bsz, seq_len) = input_ids.dims2()?;
let input_ids = self.embeddings.forward(input_ids)?;
let causal_attention_mask =
Self::build_causal_attention_mask(bsz, seq_len, mask_after, input_ids.device())?;
let input_ids = self
.encoder
.forward(&input_ids, Some(&causal_attention_mask))?;
self.final_layer_norm.forward(&input_ids)
}
}
impl Module for ClipTextTransformer {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let output = self.forward_with_mask(input_ids, usize::MAX)?;
let sequence_max_indices = input_ids.argmax(D::Minus1)?.to_dtype(DType::I64)?;
let mut indices = Vec::new();
for (batch_idx, &seq_idx) in sequence_max_indices.to_vec1::<i64>()?.iter().enumerate() {
let index = output.i((batch_idx, seq_idx as usize))?.unsqueeze(0)?;
indices.push(index);
}
Tensor::cat(&indices, 0)
}
}
| candle/candle-transformers/src/models/clip/text_model.rs/0 | {
"file_path": "candle/candle-transformers/src/models/clip/text_model.rs",
"repo_id": "candle",
"token_count": 5660
} | 54 |
//! EnCodec neural audio codec based on the Encodec implementation.
//!
//! See ["High Fidelity Neural Audio Compression"](https://arxiv.org/abs/2210.13438)
//!
//! Based on implementation from [huggingface/transformers](https://github.com/huggingface/transformers/blob/main/src/transformers/models/encodec/modeling_encodec.py)
use candle::{DType, IndexOp, Layout, Module, Result, Shape, Tensor, D};
use candle_nn::{conv1d, Conv1d, ConvTranspose1d, VarBuilder};
// Encodec Model
// https://github.com/huggingface/transformers/blob/main/src/transformers/models/encodec/modeling_encodec.py
#[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Deserialize)]
pub enum NormType {
WeightNorm,
TimeGroupNorm,
None,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Deserialize)]
pub enum PadMode {
Constant,
Reflect,
Replicate,
}
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
pub target_bandwidths: Vec<f64>,
pub sampling_rate: usize,
pub audio_channels: usize,
pub normalize: bool,
pub chunk_length_s: Option<usize>,
pub overlap: Option<usize>,
pub hidden_size: usize,
pub num_filters: usize,
pub num_residual_layers: usize,
pub upsampling_ratios: Vec<usize>,
pub norm_type: NormType,
pub kernel_size: usize,
pub last_kernel_size: usize,
pub residual_kernel_size: usize,
pub dilation_growth_rate: usize,
pub use_causal_conv: bool,
pub pad_mode: PadMode,
pub compress: usize,
pub num_lstm_layers: usize,
pub trim_right_ratio: f64,
pub codebook_size: usize,
pub codebook_dim: Option<usize>,
pub use_conv_shortcut: bool,
}
impl Default for Config {
fn default() -> Self {
Self {
target_bandwidths: vec![1.5, 3.0, 6.0, 12.0, 24.0],
sampling_rate: 24_000,
audio_channels: 1,
normalize: false,
chunk_length_s: None,
overlap: None,
hidden_size: 128,
num_filters: 32,
num_residual_layers: 1,
upsampling_ratios: vec![8, 5, 4, 2],
norm_type: NormType::WeightNorm,
kernel_size: 7,
last_kernel_size: 7,
residual_kernel_size: 3,
dilation_growth_rate: 2,
use_causal_conv: true,
// This should be PadMode::Reflect which is currently unsupported in candle.
pad_mode: PadMode::Replicate,
compress: 2,
num_lstm_layers: 2,
trim_right_ratio: 1.0,
codebook_size: 1024,
codebook_dim: None,
use_conv_shortcut: true,
}
}
}
impl Config {
fn codebook_dim(&self) -> usize {
self.codebook_dim.unwrap_or(self.hidden_size)
}
fn frame_rate(&self) -> usize {
let hop_length: usize = self.upsampling_ratios.iter().product();
self.sampling_rate.div_ceil(hop_length)
}
fn num_quantizers(&self) -> usize {
let num = 1000f64
* self
.target_bandwidths
.last()
.expect("empty target_bandwidths");
(num as usize) / (self.frame_rate() * 10)
}
}
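// Worked example with the default config above: hop_length = 8 * 5 * 4 * 2 = 320,
// frame_rate = ceil(24_000 / 320) = 75, and num_quantizers = (1000 * 24.0) as usize
// / (75 * 10) = 32.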
fn get_extra_padding_for_conv1d(
xs: &Tensor,
k_size: usize,
stride: usize,
padding_total: usize,
) -> Result<usize> {
let len = xs.dim(D::Minus1)?;
let n_frames = (len + padding_total).saturating_sub(k_size) as f64 / stride as f64 + 1.0;
let ideal_len =
((n_frames.ceil() as usize - 1) * stride + k_size).saturating_sub(padding_total);
Ok(ideal_len.saturating_sub(len))
}
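// Worked example: with len = 99, k_size = 8, stride = 4 and padding_total = 4 we get
// n_frames = (99 + 4 - 8) / 4 + 1 = 24.75, so ideal_len = (25 - 1) * 4 + 8 - 4 = 100
// and one extra padding step is required.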
fn pad1d(xs: &Tensor, pad_l: usize, pad_r: usize, mode: PadMode) -> Result<Tensor> {
match mode {
PadMode::Constant => xs.pad_with_zeros(D::Minus1, pad_l, pad_r),
PadMode::Reflect => candle::bail!("pad-mode 'reflect' is not supported"),
PadMode::Replicate => xs.pad_with_same(D::Minus1, pad_l, pad_r),
}
}
// Applies weight norm for inference by recomputing the weight tensor. This
// does not apply to training.
// https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html
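// i.e. weight = weight_g * weight_v / ||weight_v||, with the L2 norm taken per output
// channel (dims 1 and 2 of the (out_c, in_c, kernel_size) weight tensor).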
pub fn conv1d_weight_norm(
in_c: usize,
out_c: usize,
kernel_size: usize,
config: candle_nn::Conv1dConfig,
vb: VarBuilder,
) -> Result<Conv1d> {
let weight_g = vb.get((out_c, 1, 1), "weight_g")?;
let weight_v = vb.get((out_c, in_c, kernel_size), "weight_v")?;
let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?;
let bias = vb.get(out_c, "bias")?;
Ok(Conv1d::new(weight, Some(bias), config))
}
pub fn conv1d_weight_norm_no_bias(
in_c: usize,
out_c: usize,
kernel_size: usize,
config: candle_nn::Conv1dConfig,
vb: VarBuilder,
) -> Result<Conv1d> {
let weight_g = vb.get((out_c, 1, 1), "weight_g")?;
let weight_v = vb.get((out_c, in_c, kernel_size), "weight_v")?;
let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?;
Ok(Conv1d::new(weight, None, config))
}
pub fn conv_transpose1d_weight_norm(
in_c: usize,
out_c: usize,
kernel_size: usize,
bias: bool,
config: candle_nn::ConvTranspose1dConfig,
vb: VarBuilder,
) -> Result<ConvTranspose1d> {
let weight_g = vb.get((in_c, 1, 1), "weight_g")?;
let weight_v = vb.get((in_c, out_c, kernel_size), "weight_v")?;
let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?;
let bias = if bias {
Some(vb.get(out_c, "bias")?)
} else {
None
};
Ok(ConvTranspose1d::new(weight, bias, config))
}
struct CodebookEncode;
impl candle::CustomOp2 for CodebookEncode {
fn name(&self) -> &'static str {
"cb"
}
fn cpu_fwd(
&self,
lhs_storage: &candle::CpuStorage,
lhs_layout: &Layout,
rhs_storage: &candle::CpuStorage,
rhs_layout: &Layout,
) -> Result<(candle::CpuStorage, Shape)> {
use rayon::prelude::*;
let (lhs_dim1, lhs_dim2) = lhs_layout.shape().dims2()?;
let (rhs_dim1, rhs_dim2) = rhs_layout.shape().dims2()?;
if lhs_dim2 != rhs_dim2 {
candle::bail!("CodebookEncode, mismatch on last dim, {lhs_layout:?} {rhs_layout:?}");
}
if lhs_dim2 == 0 {
candle::bail!("CodebookEncode, empty last dim {lhs_layout:?}")
}
let lhs = match lhs_layout.contiguous_offsets() {
None => candle::bail!("CodebookEncode, lhs has to be contiguous, got {lhs_layout:?}"),
Some((o1, o2)) => {
let slice = lhs_storage.as_slice::<f32>()?;
&slice[o1..o2]
}
};
let rhs = match rhs_layout.contiguous_offsets() {
None => candle::bail!("CodebookEncode, rhs has to be contiguous, got {rhs_layout:?}"),
Some((o1, o2)) => {
let slice = rhs_storage.as_slice::<f32>()?;
&slice[o1..o2]
}
};
let dst = (0..lhs_dim1)
.into_par_iter()
.map(|idx1| {
let mut where_min = 0;
let mut min_dist = f32::INFINITY;
let lhs = &lhs[idx1 * lhs_dim2..(idx1 + 1) * lhs_dim2];
for idx2 in 0..rhs_dim1 {
let rhs = &rhs[idx2 * rhs_dim2..(idx2 + 1) * rhs_dim2];
let mut dist = 0f32;
for (a, b) in lhs.iter().zip(rhs.iter()) {
dist += (a - b) * (a - b)
}
if dist < min_dist {
min_dist = dist;
where_min = idx2;
}
}
where_min as u32
})
.collect();
let storage = candle::WithDType::to_cpu_storage_owned(dst);
Ok((storage, (lhs_dim1,).into()))
}
}
// https://github.com/huggingface/transformers/blob/abaca9f9432a84cfaa95531de4c72334f38a42f2/src/transformers/models/encodec/modeling_encodec.py#L340
#[allow(unused)]
#[derive(Clone, Debug)]
pub struct EuclideanCodebook {
inited: Tensor,
cluster_size: Tensor,
embed: candle_nn::Embedding,
embed_avg: Tensor,
c2: Tensor,
}
impl EuclideanCodebook {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let inited = vb.get(1, "inited")?;
let cluster_size = vb.get(cfg.codebook_size, "cluster_size")?;
let e_shape = (cfg.codebook_size, cfg.codebook_dim());
let embed = vb.get(e_shape, "embed")?;
let c2 = ((&embed * &embed)?.sum(D::Minus1)? / 2.0)?;
let embed_avg = vb.get(e_shape, "embed_avg")?;
Ok(Self {
inited,
cluster_size,
embed: candle_nn::Embedding::new(embed, cfg.codebook_dim()),
embed_avg,
c2,
})
}
pub fn encode_slow(&self, xs: &Tensor) -> Result<Tensor> {
let mut target_shape = xs.dims().to_vec();
target_shape.pop();
let xs = xs.flatten_to(D::Minus2)?;
let _ = xs.dims2()?;
let dot_prod = xs.matmul(&self.embed.embeddings().t()?)?;
let codes = self.c2.broadcast_sub(&dot_prod)?.argmin(D::Minus1)?;
codes.reshape(target_shape)
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let mut target_shape = xs.dims().to_vec();
target_shape.pop();
let xs = xs.flatten_to(D::Minus2)?;
let _ = xs.dims2()?;
let codes = Tensor::apply_op2(&xs, self.embed.embeddings(), CodebookEncode)?;
codes.reshape(target_shape)
}
pub fn decode(&self, embed_ind: &Tensor) -> Result<Tensor> {
let quantize = self.embed.forward(embed_ind)?;
Ok(quantize)
}
}
#[derive(Clone, Debug)]
pub struct VectorQuantization {
codebook: EuclideanCodebook,
}
impl VectorQuantization {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let codebook = EuclideanCodebook::new(cfg, vb.pp("codebook"))?;
Ok(Self { codebook })
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.transpose(1, 2)?;
self.codebook.encode_slow(&xs)
}
pub fn decode(&self, embed_ind: &Tensor) -> Result<Tensor> {
let quantize = self.codebook.decode(embed_ind)?;
let quantize = quantize.transpose(1, 2)?;
Ok(quantize)
}
}
#[derive(Clone, Debug)]
pub struct ResidualVectorQuantizer {
layers: Vec<VectorQuantization>,
dtype: DType,
}
impl ResidualVectorQuantizer {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = &vb.pp("layers");
let layers = (0..cfg.num_quantizers())
.map(|i| VectorQuantization::new(cfg, vb.pp(i)))
.collect::<Result<Vec<_>>>()?;
Ok(Self {
layers,
dtype: vb.dtype(),
})
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let mut codes = Vec::with_capacity(self.layers.len());
let mut residual = xs.clone();
for layer in self.layers.iter() {
let indices = layer.encode(&residual)?;
let quantized = layer.decode(&indices)?;
residual = (residual - quantized)?;
codes.push(indices)
}
Tensor::stack(&codes, 0)
}
pub fn decode(&self, codes: &Tensor) -> Result<Tensor> {
let mut quantized_out = Tensor::zeros((), self.dtype, codes.device())?;
let ncodes = codes.dim(0)?;
if ncodes > self.layers.len() {
candle::bail!(
"codes shape {:?} does not match the number of quantization layers {}",
codes.shape(),
self.layers.len()
)
}
for (i, layer) in self.layers.iter().take(ncodes).enumerate() {
let quantized = layer.decode(&codes.i(i)?)?;
quantized_out = quantized.broadcast_add(&quantized_out)?;
}
Ok(quantized_out)
}
}
// https://github.com/huggingface/transformers/blob/abaca9f9432a84cfaa95531de4c72334f38a42f2/src/transformers/models/encodec/modeling_encodec.py#L226
#[derive(Clone, Debug)]
pub struct EncodecLSTM {
layers: Vec<candle_nn::LSTM>,
}
impl EncodecLSTM {
pub fn new(dim: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = &vb.pp("lstm");
let mut layers = vec![];
for layer_idx in 0..cfg.num_lstm_layers {
let config = candle_nn::LSTMConfig {
layer_idx,
..Default::default()
};
let lstm = candle_nn::lstm(dim, dim, config, vb.clone())?;
layers.push(lstm)
}
Ok(Self { layers })
}
}
impl Module for EncodecLSTM {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
use candle_nn::RNN;
// This is different from the Python transformers version as candle LSTM is batch first.
let xs = xs.t()?;
let residual = &xs;
let mut xs = xs.clone();
for layer in self.layers.iter() {
let states = layer.seq(&xs)?;
xs = layer.states_to_tensor(&states)?;
}
let xs = (xs + residual)?.t()?;
Ok(xs)
}
}
#[derive(Clone, Debug)]
pub struct EncodecConvTranspose1d {
conv: ConvTranspose1d,
}
impl EncodecConvTranspose1d {
fn new(
in_c: usize,
out_c: usize,
k: usize,
stride: usize,
_cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let cfg = candle_nn::ConvTranspose1dConfig {
stride,
..Default::default()
};
let conv = conv_transpose1d_weight_norm(in_c, out_c, k, true, cfg, vb.pp("conv"))?;
Ok(Self { conv })
}
}
impl Module for EncodecConvTranspose1d {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.conv)
}
}
#[derive(Clone, Debug)]
pub struct EncodecConv1d {
causal: bool,
conv: Conv1d,
norm: Option<candle_nn::GroupNorm>,
pad_mode: PadMode,
}
impl EncodecConv1d {
pub fn new(
in_c: usize,
out_c: usize,
kernel_size: usize,
stride: usize,
dilation: usize,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let conv = match cfg.norm_type {
NormType::WeightNorm => conv1d_weight_norm(
in_c,
out_c,
kernel_size,
candle_nn::Conv1dConfig {
stride,
dilation,
..Default::default()
},
vb.pp("conv"),
)?,
NormType::None | NormType::TimeGroupNorm => conv1d(
in_c,
out_c,
kernel_size,
candle_nn::Conv1dConfig {
padding: 0,
stride,
groups: 1,
dilation: 1,
cudnn_fwd_algo: None,
},
vb.pp("conv"),
)?,
};
let norm = match cfg.norm_type {
NormType::None | NormType::WeightNorm => None,
NormType::TimeGroupNorm => {
let gn = candle_nn::group_norm(1, out_c, 1e-5, vb.pp("norm"))?;
Some(gn)
}
};
Ok(Self {
causal: cfg.use_causal_conv,
conv,
norm,
pad_mode: cfg.pad_mode,
})
}
}
impl Module for EncodecConv1d {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (_b, _t, _c) = xs.dims3()?;
let k_size = self.conv.weight().dim(D::Minus1)?;
let conv_cfg = self.conv.config();
// Effective kernel size with dilations.
let k_size = (k_size - 1) * conv_cfg.dilation + 1;
let padding_total = k_size - conv_cfg.stride;
let extra_padding =
get_extra_padding_for_conv1d(xs, k_size, conv_cfg.stride, padding_total)?;
let xs = if self.causal {
pad1d(xs, padding_total, extra_padding, self.pad_mode)?
} else {
let padding_right = padding_total / 2;
let padding_left = padding_total - padding_right;
pad1d(
xs,
padding_left,
padding_right + extra_padding,
self.pad_mode,
)?
};
let xs = self.conv.forward(&xs)?;
match &self.norm {
None => Ok(xs),
Some(norm) => xs.apply(norm),
}
}
}
#[derive(Clone, Debug)]
pub struct EncodecResnetBlock {
block_conv1: EncodecConv1d,
block_conv2: EncodecConv1d,
shortcut: Option<EncodecConv1d>,
}
impl EncodecResnetBlock {
pub fn new(
dim: usize,
(dilation1, dilation2): (usize, usize),
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let h = dim / cfg.compress;
let mut layer = Layer::new(vb.pp("block"));
// TODO: Apply dilations!
layer.inc();
let block_conv1 = EncodecConv1d::new(
dim,
h,
cfg.residual_kernel_size,
1,
dilation1,
cfg,
layer.next(),
)?;
layer.inc();
let block_conv2 = EncodecConv1d::new(h, dim, 1, 1, dilation2, cfg, layer.next())?;
let shortcut = if cfg.use_conv_shortcut {
let conv = EncodecConv1d::new(dim, dim, 1, 1, 1, cfg, vb.pp("shortcut"))?;
Some(conv)
} else {
None
};
Ok(Self {
block_conv1,
block_conv2,
shortcut,
})
}
}
impl Module for EncodecResnetBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let residual = xs.clone();
let xs = xs.elu(1.)?;
let xs = self.block_conv1.forward(&xs)?;
let xs = xs.elu(1.)?;
let xs = self.block_conv2.forward(&xs)?;
let xs = match &self.shortcut {
None => (xs + residual)?,
Some(shortcut) => xs.add(&shortcut.forward(&residual)?)?,
};
Ok(xs)
}
}
struct Layer<'a> {
vb: VarBuilder<'a>,
cnt: usize,
}
impl<'a> Layer<'a> {
fn new(vb: VarBuilder<'a>) -> Self {
Self { vb, cnt: 0 }
}
fn inc(&mut self) {
self.cnt += 1;
}
fn next(&mut self) -> VarBuilder<'_> {
let vb = self.vb.pp(self.cnt.to_string());
self.cnt += 1;
vb
}
}
#[derive(Clone, Debug)]
pub struct Encoder {
init_conv: EncodecConv1d,
sampling_layers: Vec<(Vec<EncodecResnetBlock>, EncodecConv1d)>,
final_lstm: EncodecLSTM,
final_conv: EncodecConv1d,
}
impl Encoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let mut layer = Layer::new(vb.pp("layers"));
let init_conv = EncodecConv1d::new(
cfg.audio_channels,
cfg.num_filters,
cfg.kernel_size,
1,
1,
cfg,
layer.next(),
)?;
let mut sampling_layers = vec![];
let mut scaling = 1;
for &ratio in cfg.upsampling_ratios.iter().rev() {
let current_scale = scaling * cfg.num_filters;
let mut resnets = vec![];
for j in 0..(cfg.num_residual_layers as u32) {
let resnet = EncodecResnetBlock::new(
current_scale,
(cfg.dilation_growth_rate.pow(j), 1),
cfg,
layer.next(),
)?;
resnets.push(resnet)
}
layer.inc(); // ELU
let conv1d = EncodecConv1d::new(
current_scale,
current_scale * 2,
ratio * 2,
ratio,
1,
cfg,
layer.next(),
)?;
sampling_layers.push((resnets, conv1d));
scaling *= 2;
}
let final_lstm = EncodecLSTM::new(cfg.num_filters * scaling, cfg, layer.next())?;
layer.inc(); // ELU
let final_conv = EncodecConv1d::new(
cfg.num_filters * scaling,
cfg.hidden_size,
cfg.last_kernel_size,
1,
1,
cfg,
layer.next(),
)?;
Ok(Self {
init_conv,
sampling_layers,
final_conv,
final_lstm,
})
}
}
impl Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.init_conv)?;
for (resnets, conv) in self.sampling_layers.iter() {
for resnet in resnets.iter() {
xs = xs.apply(resnet)?;
}
xs = xs.elu(1.0)?.apply(conv)?;
}
xs.apply(&self.final_lstm)?
.elu(1.0)?
.apply(&self.final_conv)
}
}
#[derive(Clone, Debug)]
pub struct Decoder {
init_conv: EncodecConv1d,
init_lstm: EncodecLSTM,
sampling_layers: Vec<(EncodecConvTranspose1d, Vec<EncodecResnetBlock>)>,
final_conv: EncodecConv1d,
}
impl Decoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let mut layer = Layer::new(vb.pp("layers"));
let mut scaling = usize::pow(2, cfg.upsampling_ratios.len() as u32);
let init_conv = EncodecConv1d::new(
cfg.hidden_size,
cfg.num_filters * scaling,
cfg.last_kernel_size,
1,
1,
cfg,
layer.next(),
)?;
let init_lstm = EncodecLSTM::new(cfg.num_filters * scaling, cfg, layer.next())?;
let mut sampling_layers = vec![];
for &ratio in cfg.upsampling_ratios.iter() {
let current_scale = scaling * cfg.num_filters;
layer.inc(); // ELU
let conv1d = EncodecConvTranspose1d::new(
current_scale,
current_scale / 2,
ratio * 2,
ratio,
cfg,
layer.next(),
)?;
let mut resnets = vec![];
for j in 0..(cfg.num_residual_layers as u32) {
let resnet = EncodecResnetBlock::new(
current_scale / 2,
(cfg.dilation_growth_rate.pow(j), 1),
cfg,
layer.next(),
)?;
resnets.push(resnet)
}
sampling_layers.push((conv1d, resnets));
scaling /= 2;
}
layer.inc(); // ELU
let final_conv = EncodecConv1d::new(
cfg.num_filters,
cfg.audio_channels,
cfg.last_kernel_size,
1,
1,
cfg,
layer.next(),
)?;
Ok(Self {
init_conv,
init_lstm,
sampling_layers,
final_conv,
})
}
}
impl Module for Decoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.init_conv)?.apply(&self.init_lstm)?;
for (conv, resnets) in self.sampling_layers.iter() {
xs = xs.elu(1.)?.apply(conv)?;
for resnet in resnets.iter() {
xs = xs.apply(resnet)?
}
}
xs.elu(1.)?.apply(&self.final_conv)
}
}
#[derive(Debug)]
pub struct Model {
encoder: Encoder,
decoder: Decoder,
quantizer: ResidualVectorQuantizer,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let decoder = Decoder::new(cfg, vb.pp("decoder"))?;
let quantizer = ResidualVectorQuantizer::new(cfg, vb.pp("quantizer"))?;
Ok(Self {
encoder,
decoder,
quantizer,
})
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.encoder.forward(xs)?;
let codes = self.quantizer.encode(&xs)?;
codes.transpose(0, 1)
}
pub fn decode(&self, codes: &Tensor) -> Result<Tensor> {
let (_b_sz, _codebooks, _seqlen) = codes.dims3()?;
let codes = codes.transpose(0, 1)?;
let embeddings = self.quantizer.decode(&codes)?;
let outputs = self.decoder.forward(&embeddings)?;
Ok(outputs)
}
}
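// A rough usage sketch (the weight path and shapes are illustrative only):
// let vb = unsafe {
//     VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)?
// };
// let model = Model::new(&Config::default(), vb)?;
// let codes = model.encode(&pcm)?;     // pcm: (batch, channels, samples)
// let decoded = model.decode(&codes)?; // codes: (batch, codebooks, frames)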
| candle/candle-transformers/src/models/encodec.rs/0 | {
"file_path": "candle/candle-transformers/src/models/encodec.rs",
"repo_id": "candle",
"token_count": 13015
} | 55 |
//! Hiera inference implementation based on timm.
//!
//!
//! - 💻 [Hiera](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/hiera.py)
//! - 📝 [Paper](https://arxiv.org/abs/2306.00989). Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles
use candle::{Result, D};
use candle_nn::{conv2d, layer_norm, linear, ops::softmax, Conv2dConfig, Func, VarBuilder};
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
channels: usize,
heads: usize,
stages: [usize; 4],
}
impl Config {
pub fn tiny() -> Self {
Self {
channels: 96,
heads: 1,
stages: [1, 2, 7, 2],
}
}
pub fn small() -> Self {
Self {
channels: 96,
heads: 1,
stages: [1, 2, 11, 2],
}
}
pub fn base() -> Self {
Self {
channels: 96,
heads: 1,
stages: [2, 3, 16, 3],
}
}
pub fn base_plus() -> Self {
Self {
channels: 112,
heads: 2,
stages: [2, 3, 16, 3],
}
}
pub fn large() -> Self {
Self {
channels: 144,
heads: 2,
stages: [2, 6, 36, 4],
}
}
pub fn huge() -> Self {
Self {
channels: 256,
heads: 4,
stages: [2, 6, 36, 4],
}
}
}
const NUM_TOKENS: usize = 56 * 56;
fn hiera_embeddings(channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv_cfg = Conv2dConfig {
stride: 4,
padding: 3,
..Default::default()
};
let proj = conv2d(3, channels, 7, conv_cfg, vb.pp("patch_embed.proj"))?;
let pos_embed = vb.get((1, NUM_TOKENS, channels), "pos_embed")?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&proj)?;
let (b, c, _, _) = xs.dims4()?;
let xs = xs.reshape((b, c, ()))?.transpose(1, 2)?;
let xs = xs.broadcast_add(&pos_embed)?;
Ok(xs)
}))
}
fn hiera_unroll() -> Result<Func<'static>> {
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
let (mut b, _, c) = xs.dims3()?;
let mut size = 56;
xs = xs.reshape((b, size, size, c))?;
for _ in 0..3 {
size /= 2;
let new_shape = &[b, size, 2, size, 2, c];
xs = xs.reshape(new_shape)?;
xs = xs.permute((0, 2, 4, 1, 3, 5))?;
xs = xs.flatten(0, 2)?;
b *= 4;
}
xs = xs.reshape(((), NUM_TOKENS, c))?;
Ok(xs)
}))
}
fn hiera_mlp(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let fc1 = linear(in_channels, out_channels, vb.pp("fc1"))?;
let fc2 = linear(out_channels, in_channels, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&fc1)?.gelu()?.apply(&fc2)?;
Ok(xs)
}))
}
fn hiera_attention(
in_channels: usize,
out_channels: usize,
heads: usize,
q_stride: usize,
window_size: usize,
use_mask_attention: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let head_dim = out_channels / heads;
let scale = (head_dim as f64).powf(-0.5);
let proj = linear(out_channels, out_channels, vb.pp("proj"))?;
let qkv = linear(in_channels, out_channels * 3, vb.pp("qkv"))?;
Ok(Func::new(move |xs| {
let (b, n, _) = xs.dims3()?;
let num_windows = if use_mask_attention {
n / (q_stride * window_size)
} else {
1
};
let qkv = xs.apply(&qkv)?;
let ec = qkv.elem_count();
let s = ec / (b * num_windows * 3 * heads * head_dim);
let qkv = qkv
.reshape((b, s, num_windows, 3, heads, head_dim))?
.permute((3, 0, 4, 2, 1, 5))?;
let mut q = qkv.get(0)?;
let k = qkv.get(1)?;
let v = qkv.get(2)?;
if q_stride > 1 {
let ec = q.elem_count();
let s = ec / (b * num_windows * q_stride * heads * head_dim);
q = q
.reshape((b, heads, num_windows, q_stride, s, head_dim))?
.max(3)?;
}
let q = (q * scale)?;
// Q, K and V are 6 dimensional with the first dimension being 1.
// Squeeze them for the attention calculation since 6 dimensional matmuls are not supported.
let att = q
.squeeze(0)?
.matmul(&k.squeeze(0)?.transpose(D::Minus2, D::Minus1)?)?;
let att = softmax(&att, D::Minus1)?;
let xs = att.matmul(&v.squeeze(0)?)?.unsqueeze(0)?;
let xs = xs.transpose(1, 3)?.reshape((b, (), out_channels))?;
let xs = xs.apply(&proj)?;
Ok(xs)
}))
}
fn hiera_block(
heads: usize,
in_channels: usize,
out_channels: usize,
q_stride: usize,
window_size: usize,
use_mask_attention: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let norm1 = layer_norm(in_channels, 1e-6, vb.pp("norm1"))?;
let norm2 = layer_norm(out_channels, 1e-6, vb.pp("norm2"))?;
let proj = linear(in_channels, out_channels, vb.pp("proj"));
let stride = 4;
let mlp = hiera_mlp(out_channels, out_channels * 4, vb.pp("mlp"))?;
let attn = hiera_attention(
in_channels,
out_channels,
heads,
q_stride,
window_size,
use_mask_attention,
vb.pp("attn"),
)?;
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
let xs_norm = xs.apply_t(&norm1, false)?;
if let Ok(p) = &proj {
xs = xs_norm.apply(p)?;
let (a, _, d) = xs.dims3()?;
xs = xs.reshape((a, stride, (), d))?.max(1)?;
}
let xs = (xs + &xs_norm.apply(&attn)?)?;
let xs = (&xs + &xs.apply_t(&norm2, false)?.apply(&mlp)?)?;
Ok(xs)
}))
}
fn hiera_blocks(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
let nblocks = cfg.stages.iter().sum();
let mut blocks = Vec::with_capacity(nblocks);
let mut out_channels = cfg.channels;
let mut in_channels = out_channels;
let mut heads = cfg.heads;
let mut b = 0;
let mut q_stride = 1;
let mut window_size = 64;
for s in 0..4 {
let use_mask_attention = s < 2;
for _ in 0..cfg.stages[s] {
blocks.push(hiera_block(
heads,
in_channels,
out_channels,
q_stride,
window_size,
use_mask_attention,
vb.pp(b),
)?);
b += 1;
in_channels = out_channels;
q_stride = 1;
}
q_stride = 4;
out_channels *= 2;
heads *= 2;
window_size /= 4;
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for block in blocks.iter() {
xs = xs.apply(block)?
}
Ok(xs)
}))
}
fn hiera_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm(outputs, 1e-6, vb.pp("norm"))?;
let linear = linear(outputs, nclasses, vb.pp("fc"))?;
Ok(Func::new(move |xs| {
xs.apply_t(&norm, false)?.apply(&linear)
}))
}
// Build a hiera model for a given configuration.
fn hiera_model(cfg: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let outputs = cfg.channels * 8;
let head = hiera_head(outputs, nclasses, vb.pp("head"))?;
Some(head)
}
};
let embeddings = hiera_embeddings(cfg.channels, vb.clone())?;
let unroll = hiera_unroll()?;
let blocks = hiera_blocks(cfg, vb.pp("blocks"))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&embeddings)?
.apply(&unroll)?
.apply(&blocks)?
.mean(1)?;
match &cls {
None => Ok(xs),
Some(cls) => xs.apply(cls),
}
}))
}
pub fn hiera(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
hiera_model(cfg, Some(nclasses), vb)
}
pub fn hiera_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
hiera_model(cfg, None, vb)
}
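// A rough usage sketch (identifiers from this file; the input tensor is illustrative only):
// let model = hiera(&Config::tiny(), 1000, vb)?;
// let logits = images.apply(&model)?; // images: (batch, 3, 224, 224)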
| candle/candle-transformers/src/models/hiera.rs/0 | {
"file_path": "candle/candle-transformers/src/models/hiera.rs",
"repo_id": "candle",
"token_count": 4445
} | 56 |
// Copyright (c) Kyutai, all rights reserved.
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
use candle::{DType, Device, IndexOp, Module, Result, StreamTensor, StreamingModule, Tensor, D};
use candle_nn::{linear_no_bias, Linear, VarBuilder};
use std::sync::Arc;
fn linear(in_d: usize, out_d: usize, bias: bool, vb: VarBuilder) -> Result<Linear> {
if bias {
candle_nn::linear(in_d, out_d, vb)
} else {
linear_no_bias(in_d, out_d, vb)
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum PositionalEmbedding {
Rope,
Sin,
None,
}
#[derive(Debug, Clone)]
pub struct Config {
pub d_model: usize,
pub num_heads: usize,
pub num_layers: usize,
pub causal: bool,
pub norm_first: bool,
pub bias_ff: bool,
pub bias_attn: bool,
pub layer_scale: Option<f64>,
pub positional_embedding: PositionalEmbedding,
pub use_conv_block: bool,
pub cross_attention: bool,
pub conv_kernel_size: usize,
pub use_conv_bias: bool,
pub gating: Option<candle_nn::Activation>,
pub norm: super::NormType,
pub context: usize,
pub max_period: usize,
pub max_seq_len: usize,
pub kv_repeat: usize,
pub dim_feedforward: usize,
pub conv_layout: bool,
}
#[derive(Debug, Clone)]
pub struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
span: tracing::Span,
}
impl RotaryEmbedding {
pub fn new(dim: usize, max_seq_len: usize, theta: f32, dev: &Device) -> Result<Self> {
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / theta.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
span: tracing::span!(tracing::Level::TRACE, "rot"),
})
}
pub fn apply_rotary_emb(&self, qk: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_size, _nheads, seqlen, _headdim) = qk.dims4()?;
let qk_dtype = qk.dtype();
let c = self.cos.narrow(0, seqlen_offset, seqlen)?;
let s = self.sin.narrow(0, seqlen_offset, seqlen)?;
candle_nn::rotary_emb::rope_i(&qk.to_dtype(DType::F32)?, &c, &s)?.to_dtype(qk_dtype)
}
}
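// `rope_i` rotates interleaved pairs: for a pair (x0, x1) at position p with inverse
// frequency f, the output is (x0 * cos(p f) - x1 * sin(p f), x0 * sin(p f) + x1 * cos(p f)).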
#[derive(Debug, Clone)]
pub struct LayerScale {
scale: Tensor,
}
impl LayerScale {
pub fn new(d_model: usize, _init: f64, vb: VarBuilder) -> Result<Self> {
let scale = vb.get(d_model, "scale")?;
Ok(Self { scale })
}
}
impl Module for LayerScale {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.broadcast_mul(&self.scale)
}
}
#[derive(Debug, Clone)]
pub struct StreamingMultiheadAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
out_proj: Linear,
kv_repeat: usize,
num_heads: usize,
context: usize,
neg_inf: Tensor,
rope: Option<Arc<RotaryEmbedding>>,
kv_cache: candle_nn::kv_cache::RotatingKvCache,
pos: usize,
use_flash_attn: bool,
span: tracing::Span,
}
impl StreamingMultiheadAttention {
pub fn new(rope: &Option<Arc<RotaryEmbedding>>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_dim = cfg.d_model;
let num_kv = cfg.num_heads / cfg.kv_repeat;
let kv_dim = num_kv * (embed_dim / cfg.num_heads);
let q_proj = linear(embed_dim, embed_dim, cfg.bias_attn, vb.pp("q_proj"))?;
let k_proj = linear(embed_dim, kv_dim, cfg.bias_attn, vb.pp("k_proj"))?;
let v_proj = linear(embed_dim, kv_dim, cfg.bias_attn, vb.pp("v_proj"))?;
let out_proj = linear(embed_dim, embed_dim, cfg.bias_attn, vb.pp("o_proj"))?;
let neg_inf = Tensor::new(f32::NEG_INFINITY, vb.device())?.to_dtype(vb.dtype())?;
Ok(Self {
q_proj,
k_proj,
v_proj,
out_proj,
rope: rope.clone(),
kv_repeat: cfg.kv_repeat,
num_heads: cfg.num_heads,
context: cfg.context,
neg_inf,
kv_cache: candle_nn::kv_cache::RotatingKvCache::new(2, cfg.context),
pos: 0,
use_flash_attn: false,
span: tracing::span!(tracing::Level::TRACE, "mha"),
})
}
pub fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
if self.kv_repeat != 1 {
candle::bail!("only kv-repeat = 1 is supported")
}
let (b, t, hd) = xs.dims3()?;
let head_dim = hd / self.num_heads;
let q = xs
.apply(&self.q_proj)?
.reshape((b, t, self.num_heads, head_dim))?;
let k = xs
.apply(&self.k_proj)?
.reshape((b, t, self.num_heads, head_dim))?;
let v = xs
.apply(&self.v_proj)?
.reshape((b, t, self.num_heads, head_dim))?;
// qk_layer_norm = None
// kv_repeat = 1, otherwise we would need repeat_kv
let mut q = q.transpose(1, 2)?.contiguous()?; // b,h,t,d
let mut k = k.transpose(1, 2)?.contiguous()?; // b,h,k,d
let v = v.transpose(1, 2)?.contiguous()?; // b,h,k,d
if let Some(rope) = &self.rope {
q = rope.apply_rotary_emb(&q, self.pos)?;
k = rope.apply_rotary_emb(&k, self.pos)?;
}
let (k, v) = {
self.pos += k.dim(2)?;
self.kv_cache.append(&k.contiguous()?, &v.contiguous()?)?
};
// The KV cache keeps all the data at the moment, we want to trim
// down the part that comes from the cache to at most context to
// be coherent with the mask shape we provide.
let k_len = k.dim(2)?;
let k_target_len = t + usize::min(self.context, k_len - t);
let (k, v) = if k_target_len < k_len {
let k = k.narrow(2, k_len - k_target_len, k_target_len)?;
let v = v.narrow(2, k_len - k_target_len, k_target_len)?;
(k, v)
} else {
(k.clone(), v.clone())
};
let xs = if q.dtype() == DType::BF16 && self.use_flash_attn {
let q = q.transpose(1, 2)?;
let k = k.transpose(1, 2)?;
let v = v.transpose(1, 2)?;
let softmax_scale = 1f32 / (head_dim as f32).sqrt();
flash_attn(&q, &k, &v, softmax_scale, t > 1)?.transpose(1, 2)?
} else {
let pre_ws = q.matmul(&k.t()?)?; // b,h,t,k
let pre_ws = (pre_ws * (head_dim as f64).powf(-0.5))?;
let pre_ws = match mask {
None => pre_ws,
Some(mask) => {
let mask = mask.broadcast_left((b, self.num_heads))?;
let neg_inf = self.neg_inf.broadcast_as(pre_ws.shape())?;
mask.where_cond(&neg_inf, &pre_ws)?
}
};
let ws = candle_nn::ops::softmax_last_dim(&pre_ws)?; // b,h,t,k
ws.matmul(&v)? // b,h,t,d
};
let xs = xs
.transpose(1, 2)? // b,t,h,d
.reshape((b, t, hd))?
.apply(&self.out_proj)?;
Ok(xs)
}
pub fn reset_kv_cache(&mut self) {
self.kv_cache.reset()
}
pub fn set_kv_cache(&mut self, kv_cache: candle_nn::kv_cache::RotatingKvCache) {
self.kv_cache = kv_cache
}
}
#[derive(Debug, Clone)]
pub struct StreamingMultiheadCrossAttention {
in_proj_q: Linear,
in_proj_k: Linear,
in_proj_v: Linear,
out_proj: Linear,
kv_repeat: usize,
num_heads: usize,
neg_inf: Tensor,
span: tracing::Span,
}
impl StreamingMultiheadCrossAttention {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_dim = cfg.d_model;
let num_kv = cfg.num_heads / cfg.kv_repeat;
let kv_dim = num_kv * (embed_dim / cfg.num_heads);
let out_dim = embed_dim + 2 * kv_dim;
let in_proj_weight = vb.get((out_dim, embed_dim), "in_proj_weight")?;
let in_proj_weight_q = in_proj_weight.narrow(0, 0, embed_dim)?;
let in_proj_weight_k = in_proj_weight.narrow(0, embed_dim, kv_dim)?;
let in_proj_weight_v = in_proj_weight.narrow(0, embed_dim + kv_dim, kv_dim)?;
let (in_proj_bias_q, in_proj_bias_k, in_proj_bias_v) = if cfg.bias_attn {
let b = vb.get(out_dim, "in_proj_bias")?;
let q = b.narrow(0, 0, embed_dim)?;
let k = b.narrow(0, embed_dim, kv_dim)?;
let v = b.narrow(0, embed_dim + kv_dim, kv_dim)?;
(Some(q), Some(k), Some(v))
} else {
(None, None, None)
};
let in_proj_q = Linear::new(in_proj_weight_q, in_proj_bias_q);
let in_proj_k = Linear::new(in_proj_weight_k, in_proj_bias_k);
let in_proj_v = Linear::new(in_proj_weight_v, in_proj_bias_v);
let out_proj = linear(embed_dim, embed_dim, cfg.bias_attn, vb.pp("out_proj"))?;
let neg_inf = Tensor::new(f32::NEG_INFINITY, vb.device())?.to_dtype(vb.dtype())?;
Ok(Self {
in_proj_q,
in_proj_k,
in_proj_v,
out_proj,
kv_repeat: cfg.kv_repeat,
num_heads: cfg.num_heads,
neg_inf,
span: tracing::span!(tracing::Level::TRACE, "mhca"),
})
}
pub fn forward(&self, xs: &Tensor, ca_src: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
if self.kv_repeat != 1 {
candle::bail!("only kv-repeat = 1 is supported")
}
let (b, t, hd) = xs.dims3()?;
let head_dim = hd / self.num_heads;
// time_dim = 1, layout: b,t,h,d
let q = xs.apply(&self.in_proj_q)?;
let k = ca_src.apply(&self.in_proj_k)?;
let v = ca_src.apply(&self.in_proj_v)?;
let (ca_b, ca_t, ca_dim) = k.dims3()?;
let q = q.reshape((b, t, self.num_heads, head_dim))?;
let k = k.reshape((ca_b, ca_t, ca_dim / head_dim, head_dim))?;
let v = v.reshape((ca_b, ca_t, ca_dim / head_dim, head_dim))?;
// qk_layer_norm = None
// kv_repeat = 1, otherwise we would need repeat_kv
let q = q.transpose(1, 2)?.contiguous()?; // b,h,t,d
let k = k.transpose(1, 2)?.contiguous()?; // b,h,k,d
let v = v.transpose(1, 2)?.contiguous()?; // b,h,k,d
let pre_ws = q.matmul(&k.t()?)?; // b,h,t,k
let pre_ws = (pre_ws * (head_dim as f64).powf(-0.5))?;
let pre_ws = match mask {
None => pre_ws,
Some(mask) => {
let mask = mask.broadcast_left((b, self.num_heads))?;
let neg_inf = self.neg_inf.broadcast_as(pre_ws.shape())?;
mask.where_cond(&neg_inf, &pre_ws)?
}
};
let ws = candle_nn::ops::softmax_last_dim(&pre_ws)?; // b,h,t,k
let xs = ws.matmul(&v)?; // b,h,t,d
let xs = xs
.transpose(1, 2)? // b,t,h,d
.reshape((b, t, hd))?
.apply(&self.out_proj)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
pub enum Mlp {
NoGating {
span1: tracing::Span,
linear1: Linear,
span2: tracing::Span,
linear2: Linear,
span: tracing::Span,
},
Gating {
linear_in: Linear,
linear_out: Linear,
activation: candle_nn::Activation,
span: tracing::Span,
},
}
impl Mlp {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let d_model = cfg.d_model;
let span = tracing::span!(tracing::Level::TRACE, "mlp");
match cfg.gating {
None => {
let span1 = tracing::span!(tracing::Level::TRACE, "lin1");
let span2 = tracing::span!(tracing::Level::TRACE, "lin2");
let linear1 = linear(d_model, cfg.dim_feedforward, cfg.bias_ff, vb.pp("mlp.fc1"))?;
let linear2 = linear(cfg.dim_feedforward, d_model, cfg.bias_ff, vb.pp("mlp.fc2"))?;
Ok(Self::NoGating {
linear1,
linear2,
span,
span1,
span2,
})
}
Some(activation) => {
let vb = vb.pp("gating");
let hidden = if cfg.dim_feedforward == 4 * d_model {
11 * d_model / 4
} else {
2 * cfg.dim_feedforward / 3
};
// TODO: Maybe use bias_ff here?
let linear_in = linear(d_model, 2 * hidden, false, vb.pp("linear_in"))?;
let linear_out = linear(hidden, d_model, false, vb.pp("linear_out"))?;
Ok(Self::Gating {
linear_in,
linear_out,
activation,
span,
})
}
}
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::NoGating {
linear1,
linear2,
span,
span1,
span2,
} => {
let _enter = span.enter();
let xs = {
let _enter = span1.enter();
xs.apply(linear1)?
};
let xs = xs.gelu_erf()?;
{
let _enter = span2.enter();
xs.apply(linear2)
}
}
Self::Gating {
linear_in,
linear_out,
activation,
span,
} => {
let _enter = span.enter();
let xs = xs.apply(linear_in)?;
let (b, t, _) = xs.dims3()?;
let xs = xs.reshape((b, t, 2, ()))?;
let xs = (xs.i((.., .., 0))?.apply(activation)? * xs.i((.., .., 1))?)?;
xs.apply(linear_out)
}
}
}
}
#[derive(Debug, Clone)]
pub struct RmsNorm {
pub(crate) alpha: Tensor,
pub(crate) eps: f32,
}
impl RmsNorm {
pub fn new(d_model: usize, eps: f32, vb: VarBuilder) -> Result<Self> {
let alpha = vb.get((1, 1, d_model), "alpha")?.reshape(d_model)?;
Ok(Self { alpha, eps })
}
}
impl Module for RmsNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
candle_nn::ops::rms_norm(xs, &self.alpha, self.eps)
}
}
#[derive(Debug, Clone)]
pub enum Norm {
LayerNorm(candle_nn::LayerNorm),
RmsNorm(RmsNorm),
}
impl Norm {
pub fn new(d_model: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let norm = match cfg.norm {
super::NormType::LayerNorm => {
let norm = candle_nn::layer_norm(d_model, 1e-5, vb)?;
Self::LayerNorm(norm)
}
super::NormType::RmsNorm => {
let norm = RmsNorm::new(d_model, 1e-8, vb)?;
Self::RmsNorm(norm)
}
};
Ok(norm)
}
}
impl Module for Norm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::LayerNorm(m) => m.forward(xs),
Self::RmsNorm(m) => m.forward(xs),
}
}
}
#[derive(Debug, Clone)]
pub struct StreamingTransformerLayer {
self_attn: StreamingMultiheadAttention,
mlp: Mlp,
norm1: Norm,
norm2: Norm,
layer_scale_1: Option<LayerScale>,
layer_scale_2: Option<LayerScale>,
cross_attn: Option<(candle_nn::LayerNorm, StreamingMultiheadCrossAttention)>,
norm_first: bool,
span: tracing::Span,
}
impl StreamingTransformerLayer {
pub fn new(rope: &Option<Arc<RotaryEmbedding>>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
if cfg.use_conv_block {
candle::bail!("conv-block is not supported")
}
let d_model = cfg.d_model;
let mlp = Mlp::new(cfg, vb.clone())?;
let (norm1, norm2) = match cfg.norm {
super::NormType::LayerNorm => {
let norm1 = candle_nn::layer_norm(d_model, 1e-5, vb.pp("input_layernorm"))?;
let norm2 =
candle_nn::layer_norm(d_model, 1e-5, vb.pp("post_attention_layernorm"))?;
(Norm::LayerNorm(norm1), Norm::LayerNorm(norm2))
}
super::NormType::RmsNorm => {
let norm1 = RmsNorm::new(d_model, 1e-8, vb.pp("input_rmsnorm"))?;
let norm2 = RmsNorm::new(d_model, 1e-8, vb.pp("post_attention_rmsnorm"))?;
(Norm::RmsNorm(norm1), Norm::RmsNorm(norm2))
}
};
let layer_scale_1 = match cfg.layer_scale {
None => None,
Some(ls) => {
let ls = LayerScale::new(d_model, ls, vb.pp("self_attn_layer_scale"))?;
Some(ls)
}
};
let layer_scale_2 = match cfg.layer_scale {
None => None,
Some(ls) => {
let ls = LayerScale::new(d_model, ls, vb.pp("mlp_layer_scale"))?;
Some(ls)
}
};
let self_attn = StreamingMultiheadAttention::new(rope, cfg, vb.pp("self_attn"))?;
let cross_attn = if cfg.cross_attention {
let norm_cross = candle_nn::layer_norm(cfg.d_model, 1e-5, vb.pp("norm_cross"))?;
let cross_attn = StreamingMultiheadCrossAttention::new(cfg, vb.pp("cross_attention"))?;
Some((norm_cross, cross_attn))
} else {
None
};
Ok(Self {
self_attn,
mlp,
norm1,
norm2,
layer_scale_1,
layer_scale_2,
cross_attn,
norm_first: cfg.norm_first,
span: tracing::span!(tracing::Level::TRACE, "transformer-layer"),
})
}
pub fn forward(
&mut self,
xs: &Tensor,
ca_src: Option<&Tensor>,
mask: Option<&Tensor>,
) -> Result<Tensor> {
let _enter = self.span.enter();
if !self.norm_first {
candle::bail!("only norm_first = true is supported")
}
let norm1 = xs.apply(&self.norm1)?;
let xs = (xs
+ self
.self_attn
.forward(&norm1, mask)?
.apply(&self.layer_scale_1.as_ref())?)?;
let xs = match (&self.cross_attn, ca_src) {
(Some((norm_cross, cross_attn)), Some(ca_src)) => {
let residual = &xs;
let xs = xs.apply(norm_cross)?;
(residual + cross_attn.forward(&xs, ca_src, None)?)?
}
_ => xs,
};
let xs = (&xs
+ xs.apply(&self.norm2)?
.apply(&self.mlp)?
.apply(&self.layer_scale_2.as_ref()))?;
Ok(xs)
}
pub fn reset_kv_cache(&mut self) {
self.self_attn.reset_kv_cache()
}
pub fn set_kv_cache(&mut self, kv_cache: candle_nn::kv_cache::RotatingKvCache) {
self.self_attn.set_kv_cache(kv_cache)
}
}
#[derive(Debug, Clone)]
pub struct StreamingTransformer {
layers: Vec<StreamingTransformerLayer>,
positional_embedding: PositionalEmbedding,
max_period: usize,
}
impl StreamingTransformer {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_l = vb.pp("layers");
let rope = match cfg.positional_embedding {
PositionalEmbedding::Rope => {
let rope = RotaryEmbedding::new(
cfg.d_model / cfg.num_heads,
cfg.max_seq_len,
cfg.max_period as f32,
vb.device(),
)?;
Some(Arc::new(rope))
}
PositionalEmbedding::Sin | PositionalEmbedding::None => None,
};
let mut layers = Vec::with_capacity(cfg.num_layers);
for layer_idx in 0..cfg.num_layers {
let layer = StreamingTransformerLayer::new(&rope, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
Ok(Self {
layers,
positional_embedding: cfg.positional_embedding,
max_period: cfg.max_period,
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
self.forward_ca(xs, None)
}
pub fn forward_ca(&mut self, xs: &Tensor, ca_src: Option<&Tensor>) -> Result<Tensor> {
let (_b, t, c) = xs.dims3()?;
let pos = self.layers[0].self_attn.kv_cache.current_seq_len();
let mask = self.layers[0]
.self_attn
.kv_cache
.attn_mask(t, xs.device())?;
let mut xs = match self.positional_embedding {
PositionalEmbedding::Rope | PositionalEmbedding::None => xs.clone(),
PositionalEmbedding::Sin => {
let dev = xs.device();
let theta = self.max_period as f32;
let half_dim = c / 2;
let positions = Tensor::arange(pos as u32, (pos + t) as u32, dev)?
.unsqueeze(1)?
.to_dtype(DType::F32)?;
let inv_freq: Vec<_> = (0..half_dim)
.map(|i| 1f32 / theta.powf(i as f32 / (half_dim - 1) as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let freqs = positions.broadcast_mul(&inv_freq)?;
let pos_emb =
Tensor::cat(&[freqs.cos()?, freqs.sin()?], D::Minus1)?.to_dtype(xs.dtype())?;
xs.broadcast_add(&pos_emb)?
}
};
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, ca_src, mask.as_ref())?;
}
Ok(xs)
}
pub fn copy_state(&mut self, from: &Self) -> Result<()> {
if self.layers.len() != from.layers.len() {
candle::bail!("cannot copy kv-caches as the transformers have different depths")
}
self.layers
.iter_mut()
.zip(from.layers.iter())
.for_each(|(v, w)| v.set_kv_cache(w.self_attn.kv_cache.clone()));
Ok(())
}
}
impl StreamingModule for StreamingTransformer {
fn reset_state(&mut self) {
self.layers.iter_mut().for_each(|v| v.reset_kv_cache())
}
fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> {
match xs.as_option() {
None => Ok(StreamTensor::empty()),
Some(xs) => Ok(StreamTensor::from_tensor(self.forward(xs)?)),
}
}
}
#[derive(Debug, Clone)]
pub struct ProjectedTransformer {
transformer: StreamingTransformer,
input_proj: Option<Linear>,
output_projs: Vec<Option<Linear>>,
conv_layout: bool,
span: tracing::Span,
}
impl ProjectedTransformer {
pub fn new(
input_dim: usize,
output_dims: &[usize],
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let transformer = StreamingTransformer::new(cfg, vb.clone())?;
let input_proj = if input_dim == cfg.d_model {
None
} else {
let l = linear_no_bias(input_dim, cfg.d_model, vb.pp("input_proj"))?;
Some(l)
};
let mut output_projs = Vec::with_capacity(output_dims.len());
let vb_o = vb.pp("output_projs");
for (i, &output_dim) in output_dims.iter().enumerate() {
let output_proj = if output_dim == cfg.d_model {
None
} else {
let l = linear_no_bias(cfg.d_model, output_dim, vb_o.pp(i))?;
Some(l)
};
output_projs.push(output_proj)
}
Ok(Self {
transformer,
input_proj,
output_projs,
conv_layout: cfg.conv_layout,
span: tracing::span!(tracing::Level::TRACE, "proj-transformer"),
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Vec<Tensor>> {
let _enter = self.span.enter();
let xs = if self.conv_layout {
xs.transpose(1, 2)?
} else {
xs.clone()
};
let xs = xs.apply(&self.input_proj.as_ref())?;
let xs = self.transformer.forward(&xs)?;
let mut ys = Vec::with_capacity(self.output_projs.len());
for output_proj in self.output_projs.iter() {
let ys_ = xs.apply(&output_proj.as_ref())?;
let ys_ = if self.conv_layout {
ys_.transpose(1, 2)?
} else {
ys_
};
ys.push(ys_)
}
Ok(ys)
}
}
impl StreamingModule for ProjectedTransformer {
fn reset_state(&mut self) {
self.transformer.reset_state()
}
fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> {
let xs = xs.apply(&|x: &Tensor| {
if self.conv_layout {
x.transpose(1, 2)
} else {
Ok(x.clone())
}
})?;
let xs = xs.apply(&self.input_proj.as_ref())?;
let xs = self.transformer.step(&xs)?;
let ys = xs.apply(&self.output_projs[0].as_ref())?;
ys.apply(&|y: &Tensor| {
if self.conv_layout {
y.transpose(1, 2)
} else {
Ok(y.clone())
}
})
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
| candle/candle-transformers/src/models/mimi/transformer.rs/0 | {
"file_path": "candle/candle-transformers/src/models/mimi/transformer.rs",
"repo_id": "candle",
"token_count": 14210
} | 57 |
/// Mistral LLM, https://github.com/mistralai/mistral-src
use crate::models::{
mistral::Config,
with_tracing::{linear_no_bias, Linear, RmsNorm},
};
use crate::utils::repeat_kv;
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let rope_theta = cfg.rope_theta as f32;
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(q, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(k, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let key_states = repeat_kv(key_states, self.num_kv_groups)?;
let value_states = repeat_kv(value_states, self.num_kv_groups)?;
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&value_states)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
pub cfg: Config,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("norm"))?;
Ok(Self {
embed_tokens,
layers,
norm,
cfg: cfg.clone(),
})
}
// Attn mask used to mask out padding tokens
pub fn forward(
&mut self,
attn_mask: &Tensor,
input_ids: &Tensor,
dtype: DType,
) -> Result<Tensor> {
let mut xs = self.embed_tokens.forward(input_ids)?;
// Expand to 4d mask for sdpa
let attn_mask = prepare_4d_attention_mask(attn_mask, dtype, None)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, Some(&attn_mask), 0)?;
}
// Return hiddens instead of logits
xs.apply(&self.norm)
}
}
fn prepare_4d_attention_mask(
mask: &Tensor,
dtype: DType,
tgt_len: Option<usize>,
) -> Result<Tensor> {
let bsz = mask.dims()[0];
let src_len = mask.dims()[1];
let tgt_len = tgt_len.unwrap_or(src_len);
let expanded_mask = mask
.unsqueeze(1)?
.unsqueeze(2)?
.expand((bsz, 1, tgt_len, src_len))?
.to_dtype(dtype)?;
let inverted_mask = (1.0 - expanded_mask)?;
(inverted_mask * get_dtype_min_val(dtype))?.to_dtype(dtype)
}
fn get_dtype_min_val(dtype: DType) -> f64 {
match dtype {
DType::F32 => f32::MIN as f64,
DType::F64 => f64::MIN,
_ => panic!("Unsupported data type"),
}
}
| candle/candle-transformers/src/models/nvembed_v2/embedding.rs/0 | {
"file_path": "candle/candle-transformers/src/models/nvembed_v2/embedding.rs",
"repo_id": "candle",
"token_count": 4768
} | 58 |
use candle::{DType, IndexOp, Result, Tensor};
use candle_nn::{Module, VarBuilder};
use super::image_encoder::ImageEncoderViT;
use super::mask_decoder::MaskDecoder;
use super::prompt_encoder::PromptEncoder;
use super::tiny_vit::{tiny_vit_5m, TinyViT};
const PROMPT_EMBED_DIM: usize = 256;
pub const IMAGE_SIZE: usize = 1024;
const VIT_PATCH_SIZE: usize = 16;
const PRED_IOU_THRESH: f32 = 0.88;
const STABILITY_SCORE_OFFSET: f32 = 1.0;
const STABILITY_SCORE_THRESHOLD: f32 = 0.95;
const MODEL_MASK_THRESHOLD: f32 = 0.0;
const CROP_NMS_THRESH: f32 = 0.7;
#[derive(Debug)]
enum ImageEncoder {
Original(Box<ImageEncoderViT>),
TinyViT(Box<TinyViT>),
}
impl Module for ImageEncoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::Original(vit) => vit.forward(xs),
Self::TinyViT(vit) => vit.forward(xs),
}
}
}
#[derive(Debug)]
pub struct Sam {
image_encoder: ImageEncoder,
prompt_encoder: PromptEncoder,
mask_decoder: MaskDecoder,
pixel_mean: Tensor,
pixel_std: Tensor,
}
impl Sam {
pub fn new(
encoder_embed_dim: usize,
encoder_depth: usize,
encoder_num_heads: usize,
encoder_global_attn_indexes: &[usize],
vb: VarBuilder,
) -> Result<Self> {
let image_embedding_size = IMAGE_SIZE / VIT_PATCH_SIZE;
let image_encoder = ImageEncoderViT::new(
IMAGE_SIZE,
VIT_PATCH_SIZE,
3,
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
PROMPT_EMBED_DIM,
/* qkv_bias */ true,
/* use_rel_pos */ true,
/* use_abs_pos */ true,
/* window_size */ 14,
/* global_attn_indexes */ encoder_global_attn_indexes,
vb.pp("image_encoder"),
)?;
let prompt_encoder = PromptEncoder::new(
PROMPT_EMBED_DIM,
(image_embedding_size, image_embedding_size),
(IMAGE_SIZE, IMAGE_SIZE),
16,
vb.pp("prompt_encoder"),
)?;
let mask_decoder = MaskDecoder::new(
PROMPT_EMBED_DIM,
/* num_multitask_outputs */ 3,
/* iou_head_depth */ 3,
/* iou_head_hidden_dim */ 256,
vb.pp("mask_decoder"),
)?;
let pixel_mean =
Tensor::new(&[123.675f32, 116.28, 103.53], vb.device())?.reshape((3, 1, 1))?;
let pixel_std =
Tensor::new(&[58.395f32, 57.12, 57.375], vb.device())?.reshape((3, 1, 1))?;
Ok(Self {
image_encoder: ImageEncoder::Original(image_encoder.into()),
prompt_encoder,
mask_decoder,
pixel_std,
pixel_mean,
})
}
pub fn new_tiny(vb: VarBuilder) -> Result<Self> {
let image_embedding_size = IMAGE_SIZE / VIT_PATCH_SIZE;
let image_encoder = tiny_vit_5m(vb.pp("image_encoder"))?;
let prompt_encoder = PromptEncoder::new(
PROMPT_EMBED_DIM,
(image_embedding_size, image_embedding_size),
(IMAGE_SIZE, IMAGE_SIZE),
16,
vb.pp("prompt_encoder"),
)?;
let mask_decoder = MaskDecoder::new(
PROMPT_EMBED_DIM,
/* num_multitask_outputs */ 3,
/* iou_head_depth */ 3,
/* iou_head_hidden_dim */ 256,
vb.pp("mask_decoder"),
)?;
let pixel_mean =
Tensor::new(&[123.675f32, 116.28, 103.53], vb.device())?.reshape((3, 1, 1))?;
let pixel_std =
Tensor::new(&[58.395f32, 57.12, 57.375], vb.device())?.reshape((3, 1, 1))?;
Ok(Self {
image_encoder: ImageEncoder::TinyViT(image_encoder.into()),
prompt_encoder,
mask_decoder,
pixel_std,
pixel_mean,
})
}
pub fn embeddings(&self, img: &Tensor) -> Result<Tensor> {
let img = self.preprocess(img)?.unsqueeze(0)?;
self.image_encoder.forward(&img)
}
pub fn forward(
&self,
img: &Tensor,
points: &[(f64, f64, bool)],
multimask_output: bool,
) -> Result<(Tensor, Tensor)> {
let (_c, original_h, original_w) = img.dims3()?;
let img = self.preprocess(img)?.unsqueeze(0)?;
let img_embeddings = self.image_encoder.forward(&img)?;
let (low_res_mask, iou) = self.forward_for_embeddings(
&img_embeddings,
original_h,
original_w,
points,
multimask_output,
)?;
let mask = low_res_mask
.upsample_nearest2d(IMAGE_SIZE, IMAGE_SIZE)?
.get(0)?
.i((.., ..original_h, ..original_w))?;
Ok((mask, iou))
}
/// Generate the mask and IOU predictions from some image embeddings and prompt.
///
/// The prompt is specified as a list of points `(x, y, b)`. `x` and `y` are the point
/// coordinates (between 0 and 1) and `b` is `true` for points that should be part of the mask
/// and `false` for points that should be part of the background and so excluded from the mask.
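///
/// A minimal usage sketch (setup omitted; the point values and the `original_h`/`original_w`
/// bindings below are illustrative assumptions, not values from this module):
///
/// ```ignore
/// let img_embeddings = sam.embeddings(&img)?;
/// // One foreground point near the center and one background point near a corner.
/// let points = [(0.5, 0.5, true), (0.05, 0.05, false)];
/// let (low_res_mask, iou) = sam.forward_for_embeddings(
///     &img_embeddings,
///     original_h,
///     original_w,
///     &points,
///     /* multimask_output */ false,
/// )?;
/// ```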
pub fn forward_for_embeddings(
&self,
img_embeddings: &Tensor,
original_h: usize,
original_w: usize,
points: &[(f64, f64, bool)],
multimask_output: bool,
) -> Result<(Tensor, Tensor)> {
let image_pe = self.prompt_encoder.get_dense_pe()?;
let points = if points.is_empty() {
None
} else {
let n_points = points.len();
let xys = points
.iter()
.flat_map(|(x, y, _b)| {
let x = (*x as f32) * (original_w as f32);
let y = (*y as f32) * (original_h as f32);
[x, y]
})
.collect::<Vec<_>>();
let labels = points
.iter()
.map(|(_x, _y, b)| if *b { 1f32 } else { 0f32 })
.collect::<Vec<_>>();
let points = Tensor::from_vec(xys, (1, n_points, 2), img_embeddings.device())?;
let labels = Tensor::from_vec(labels, (1, n_points), img_embeddings.device())?;
Some((points, labels))
};
let points = points.as_ref().map(|xy| (&xy.0, &xy.1));
let (sparse_prompt_embeddings, dense_prompt_embeddings) =
self.prompt_encoder.forward(points, None, None)?;
self.mask_decoder.forward(
img_embeddings,
&image_pe,
&sparse_prompt_embeddings,
&dense_prompt_embeddings,
multimask_output,
)
}
pub fn unpreprocess(&self, img: &Tensor) -> Result<Tensor> {
let img = img
.broadcast_mul(&self.pixel_std)?
.broadcast_add(&self.pixel_mean)?;
img.maximum(&img.zeros_like()?)?
.minimum(&(img.ones_like()? * 255.)?)
}
pub fn preprocess(&self, img: &Tensor) -> Result<Tensor> {
let (_c, h, w) = img.dims3()?;
let img = img
.to_dtype(DType::F32)?
.broadcast_sub(&self.pixel_mean)?
.broadcast_div(&self.pixel_std)?;
if h > IMAGE_SIZE || w > IMAGE_SIZE {
candle::bail!("image is too large ({w}, {h}), maximum size {IMAGE_SIZE}")
}
let img = img.pad_with_zeros(1, 0, IMAGE_SIZE - h)?;
img.pad_with_zeros(2, 0, IMAGE_SIZE - w)
}
fn process_crop(
&self,
img: &Tensor,
cb: CropBox,
point_grids: &[(f64, f64)],
) -> Result<Vec<crate::object_detection::Bbox<Tensor>>> {
// Crop the image and calculate embeddings.
let img = img.i((.., cb.y0..cb.y1, cb.x0..cb.x1))?;
let img = self.preprocess(&img)?.unsqueeze(0)?;
let img_embeddings = self.image_encoder.forward(&img)?;
let crop_w = cb.x1 - cb.x0;
let crop_h = cb.y1 - cb.y0;
// Generate masks for this crop.
let image_pe = self.prompt_encoder.get_dense_pe()?;
let points = point_grids
.iter()
.map(|&(x, y)| vec![x as f32 * crop_w as f32, y as f32 * crop_h as f32])
.collect::<Vec<_>>();
let mut bboxes = Vec::new();
for points in points.chunks(64) {
// Run the model on this batch.
let points_len = points.len();
let in_points = Tensor::new(points.to_vec(), img.device())?.unsqueeze(1)?;
let in_labels = Tensor::ones((points_len, 1), DType::F32, img.device())?;
let (sparse_prompt_embeddings, dense_prompt_embeddings) =
self.prompt_encoder
.forward(Some((&in_points, &in_labels)), None, None)?;
let (low_res_mask, iou_predictions) = self.mask_decoder.forward(
&img_embeddings,
&image_pe,
&sparse_prompt_embeddings,
&dense_prompt_embeddings,
/* multimask_output */ true,
)?;
let low_res_mask = low_res_mask.flatten(0, 1)?;
let iou_predictions = iou_predictions.flatten(0, 1)?.to_vec1::<f32>()?;
let dev = low_res_mask.device();
for (i, iou) in iou_predictions.iter().enumerate() {
// Filter by predicted IoU.
if *iou < PRED_IOU_THRESH {
continue;
}
let low_res_mask = low_res_mask.get(i)?;
// Calculate stability score.
let bound = Tensor::new(MODEL_MASK_THRESHOLD + STABILITY_SCORE_OFFSET, dev)?
.broadcast_as(low_res_mask.shape())?;
let intersections = low_res_mask
.ge(&bound)?
.to_dtype(DType::F32)?
.sum_all()?
.to_vec0::<f32>()?;
let bound = Tensor::new(MODEL_MASK_THRESHOLD - STABILITY_SCORE_OFFSET, dev)?
.broadcast_as(low_res_mask.shape())?;
let unions = low_res_mask
.ge(&bound)?
.to_dtype(DType::F32)?
.sum_all()?
.to_vec0::<f32>()?;
let stability_score = intersections / unions;
if stability_score < STABILITY_SCORE_THRESHOLD {
continue;
}
// Threshold masks and calculate boxes.
let low_res_mask = low_res_mask
.ge(&Tensor::new(0f32, dev)?.broadcast_as(low_res_mask.shape())?)?
.to_dtype(DType::U32)?;
let low_res_mask_per_x = low_res_mask.sum(0)?.to_vec1::<u32>()?;
let low_res_mask_per_y = low_res_mask.sum(1)?.to_vec1::<u32>()?;
let min_max_x = min_max_indexes(&low_res_mask_per_x);
let min_max_y = min_max_indexes(&low_res_mask_per_y);
if let Some(((x0, x1), (y0, y1))) = min_max_x.zip(min_max_y) {
let bbox = crate::object_detection::Bbox {
xmin: x0 as f32,
ymin: y0 as f32,
xmax: x1 as f32,
ymax: y1 as f32,
confidence: *iou,
data: low_res_mask,
};
bboxes.push(bbox);
}
// TODO:
// Filter boxes that touch crop boundaries
// Compress to RLE.
}
}
let mut bboxes = vec![bboxes];
// Remove duplicates within this crop.
crate::object_detection::non_maximum_suppression(&mut bboxes, CROP_NMS_THRESH);
// TODO: Return to the original image frame.
Ok(bboxes.remove(0))
}
pub fn generate_masks(
&self,
img: &Tensor,
points_per_side: usize,
crop_n_layer: usize,
crop_overlap_ratio: f64,
crop_n_points_downscale_factor: usize,
) -> Result<Vec<crate::object_detection::Bbox<Tensor>>> {
let (_c, h, w) = img.dims3()?;
let point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layer,
crop_n_points_downscale_factor,
);
let crop_boxes = generate_crop_boxes((h, w), crop_n_layer, crop_overlap_ratio);
let mut bboxes = Vec::new();
for crop_box in crop_boxes.into_iter() {
let layer_idx = crop_box.layer_idx;
let b = self.process_crop(img, crop_box, &point_grids[layer_idx])?;
bboxes.extend(b)
}
// TODO: remove duplicates
Ok(bboxes)
}
}
// Return the first and last indexes i for which values[i] > 0
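// e.g. for the input [0, 3, 0, 7, 2, 0] this returns Some((1, 4)).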
fn min_max_indexes(values: &[u32]) -> Option<(usize, usize)> {
let (mut min_i, mut max_i) = (usize::MAX, usize::MIN);
for (i, &s) in values.iter().enumerate() {
if s == 0 {
continue;
}
min_i = usize::min(i, min_i);
max_i = usize::max(i, max_i);
}
if max_i < min_i {
None
} else {
Some((min_i, max_i))
}
}
#[derive(Debug)]
struct CropBox {
x0: usize,
y0: usize,
x1: usize,
y1: usize,
layer_idx: usize,
}
impl CropBox {
fn new(x0: usize, y0: usize, x1: usize, y1: usize, layer_idx: usize) -> Self {
Self {
x0,
y0,
x1,
y1,
layer_idx,
}
}
}
fn generate_crop_boxes(
(im_h, im_w): (usize, usize),
n_layers: usize,
overlap_ratio: f64,
) -> Vec<CropBox> {
fn crop_len(orig_len: usize, n_crops: usize, overlap: usize) -> usize {
f64::ceil((overlap * (n_crops - 1) + orig_len) as f64 / n_crops as f64) as usize
}
let short_side = usize::min(im_h, im_w);
let mut crop_boxes = Vec::new();
// Original image.
crop_boxes.push(CropBox::new(0, 0, im_w, im_h, 0));
for layer_idx in 1..=n_layers {
let n_crops_per_side = 1 << layer_idx;
let overlap = (overlap_ratio * short_side as f64 * 2. / n_crops_per_side as f64) as usize;
let crop_w = crop_len(im_w, n_crops_per_side, overlap);
let crop_h = crop_len(im_h, n_crops_per_side, overlap);
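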
for i_x in 0..n_crops_per_side {
let x0 = (crop_w - overlap) * i_x;
for i_y in 0..n_crops_per_side {
let y0 = (crop_h - overlap) * i_y;
let x1 = usize::min(im_w, x0 + crop_w);
let y1 = usize::min(im_h, y0 + crop_h);
crop_boxes.push(CropBox::new(x0, y0, x1, y1, layer_idx));
}
}
}
crop_boxes
}
// Generates a 2D grid of points evenly spaced in [0,1]x[0,1].
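// For example, with n_per_side = 2 the offset is 0.25 and the grid is
// (0.25, 0.25), (0.25, 0.75), (0.75, 0.25), (0.75, 0.75).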
fn build_point_grid(n_per_side: usize) -> Vec<(f64, f64)> {
let offset = 1f64 / (2 * n_per_side) as f64;
let mut points = Vec::with_capacity(n_per_side * n_per_side);
for i_x in 0..n_per_side {
let x = offset + i_x as f64 / n_per_side as f64;
for i_y in 0..n_per_side {
let y = offset + i_y as f64 / n_per_side as f64;
points.push((x, y))
}
}
points
}
fn build_all_layer_point_grids(
n_per_side: usize,
n_layers: usize,
scale_per_layer: usize,
) -> Vec<Vec<(f64, f64)>> {
let mut points_by_layer = Vec::with_capacity(n_layers + 1);
for i in 0..=n_layers {
let n_points = n_per_side / scale_per_layer.pow(i as u32);
points_by_layer.push(build_point_grid(n_points))
}
points_by_layer
}
| candle/candle-transformers/src/models/segment_anything/sam.rs/0 | {
"file_path": "candle/candle-transformers/src/models/segment_anything/sam.rs",
"repo_id": "candle",
"token_count": 8456
} | 59 |
//! # UniPC Scheduler
//!
//! UniPC is a training-free framework designed for the fast sampling of diffusion models, which consists of a
//! corrector (UniC) and a predictor (UniP) that share a unified analytical form and support arbitrary orders.
//!
//! UniPC is by design model-agnostic, supporting pixel-space/latent-space DPMs on unconditional/conditional
//! sampling. It can also be applied to both noise prediction and data prediction models. Compared with prior
//! methods, UniPC converges faster thanks to the increased order of accuracy. Both quantitative and qualitative
//! results show UniPC can improve sampling quality, especially at very low step counts (5~10).
//!
//! For more information, see the original publication:
//! UniPC: A Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models, W. Zhao et al, 2023.
//! https://arxiv.org/abs/2302.04867
//!
//! This work is based largely on UniPC implementation from the diffusers python package:
//! https://raw.githubusercontent.com/huggingface/diffusers/e8aacda762e311505ba05ae340af23b149e37af3/src/diffusers/schedulers/scheduling_unipc_multistep.py
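//!
//! A minimal construction sketch (the import paths are assumptions based on this module's
//! location in the crate, and 30 is an arbitrary example number of inference steps):
//!
//! ```ignore
//! use candle_transformers::models::stable_diffusion::schedulers::SchedulerConfig;
//! use candle_transformers::models::stable_diffusion::uni_pc::UniPCSchedulerConfig;
//!
//! // Build a boxed scheduler with the default UniPC settings for 30 inference steps.
//! let cfg = UniPCSchedulerConfig::default();
//! let mut scheduler = cfg.build(30)?;
//! // During sampling, each denoising iteration then calls:
//! // let sample = scheduler.step(&model_output, timestep, &sample)?;
//! ```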
use std::collections::HashSet;
use std::ops::Neg;
use super::schedulers::PredictionType;
use super::{
schedulers::{Scheduler, SchedulerConfig},
utils::{interp, linspace},
};
use candle::{Error, IndexOp, Result, Tensor};
#[derive(Debug, Clone, Copy)]
pub enum SigmaSchedule {
Karras(KarrasSigmaSchedule),
Exponential(ExponentialSigmaSchedule),
}
impl SigmaSchedule {
fn sigma_t(&self, t: f64) -> f64 {
match self {
Self::Karras(x) => x.sigma_t(t),
Self::Exponential(x) => x.sigma_t(t),
}
}
}
impl Default for SigmaSchedule {
fn default() -> Self {
Self::Karras(KarrasSigmaSchedule::default())
}
}
#[derive(Debug, Clone, Copy)]
pub struct KarrasSigmaSchedule {
pub sigma_min: f64,
pub sigma_max: f64,
pub rho: f64,
}
impl KarrasSigmaSchedule {
fn sigma_t(&self, t: f64) -> f64 {
let (min_inv_rho, max_inv_rho) = (
self.sigma_min.powf(1.0 / self.rho),
self.sigma_max.powf(1.0 / self.rho),
);
(max_inv_rho + ((1.0 - t) * (min_inv_rho - max_inv_rho))).powf(self.rho)
}
}
impl Default for KarrasSigmaSchedule {
fn default() -> Self {
Self {
sigma_max: 10.0,
sigma_min: 0.1,
rho: 4.0,
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct ExponentialSigmaSchedule {
sigma_min: f64,
sigma_max: f64,
}
impl ExponentialSigmaSchedule {
fn sigma_t(&self, t: f64) -> f64 {
(t * (self.sigma_max.ln() - self.sigma_min.ln()) + self.sigma_min.ln()).exp()
}
}
impl Default for ExponentialSigmaSchedule {
fn default() -> Self {
Self {
sigma_max: 80.0,
sigma_min: 0.1,
}
}
}
#[derive(Debug, Default, Clone, Copy)]
pub enum SolverType {
#[default]
Bh1,
Bh2,
}
#[derive(Debug, Default, Clone, Copy)]
pub enum AlgorithmType {
#[default]
DpmSolverPlusPlus,
SdeDpmSolverPlusPlus,
}
#[derive(Debug, Default, Clone, Copy)]
pub enum FinalSigmasType {
#[default]
Zero,
SigmaMin,
}
#[derive(Debug, Clone)]
pub enum TimestepSchedule {
/// Timesteps will be determined by interpolation of sigmas
FromSigmas,
/// Timesteps will be separated by regular intervals
Linspace,
}
impl TimestepSchedule {
fn timesteps(
&self,
sigma_schedule: &SigmaSchedule,
num_inference_steps: usize,
num_training_steps: usize,
) -> Result<Vec<usize>> {
match self {
Self::FromSigmas => {
let sigmas: Tensor = linspace(1., 0., num_inference_steps)?
.to_vec1()?
.into_iter()
.map(|t| sigma_schedule.sigma_t(t))
.collect::<Vec<f64>>()
.try_into()?;
let log_sigmas = sigmas.log()?.to_vec1::<f64>()?;
let timesteps = interp(
&log_sigmas.iter().copied().rev().collect::<Vec<_>>(),
&linspace(
log_sigmas[log_sigmas.len() - 1] - 0.001,
log_sigmas[0] + 0.001,
num_inference_steps,
)?
.to_vec1::<f64>()?,
&linspace(0., num_training_steps as f64, num_inference_steps)?
.to_vec1::<f64>()?,
)
.into_iter()
.map(|f| (num_training_steps - 1) - (f as usize))
.collect::<Vec<_>>();
Ok(timesteps)
}
Self::Linspace => {
Ok(
linspace((num_training_steps - 1) as f64, 0., num_inference_steps)?
.to_vec1::<f64>()?
.into_iter()
.map(|f| f as usize)
.collect(),
)
}
}
}
}
#[derive(Debug, Clone)]
pub enum CorrectorConfiguration {
Disabled,
Enabled { skip_steps: HashSet<usize> },
}
impl Default for CorrectorConfiguration {
fn default() -> Self {
Self::Enabled {
skip_steps: [0, 1, 2].into_iter().collect(),
}
}
}
impl CorrectorConfiguration {
pub fn new(disabled_steps: impl IntoIterator<Item = usize>) -> Self {
Self::Enabled {
skip_steps: disabled_steps.into_iter().collect(),
}
}
}
#[derive(Debug, Clone)]
pub struct UniPCSchedulerConfig {
/// Configure the UniC corrector. It is enabled by default, with the first few steps skipped.
pub corrector: CorrectorConfiguration,
/// Determines how sigma relates to a given timestep
pub sigma_schedule: SigmaSchedule,
/// Determines the timesteps at which the denoising model is evaluated during sampling
pub timestep_schedule: TimestepSchedule,
/// The solver order which can be `1` or higher. It is recommended to use `solver_order=2` for guided
/// sampling, and `solver_order=3` for unconditional sampling.
pub solver_order: usize,
/// Prediction type of the scheduler function
pub prediction_type: PredictionType,
pub num_training_timesteps: usize,
/// Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
/// as Stable Diffusion.
pub thresholding: bool,
/// The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
pub dynamic_thresholding_ratio: f64,
/// The threshold value for dynamic thresholding.
pub sample_max_value: f64,
pub solver_type: SolverType,
/// Whether to use lower-order solvers in the final steps.
pub lower_order_final: bool,
}
impl Default for UniPCSchedulerConfig {
fn default() -> Self {
Self {
corrector: Default::default(),
timestep_schedule: TimestepSchedule::FromSigmas,
sigma_schedule: SigmaSchedule::Karras(Default::default()),
prediction_type: PredictionType::Epsilon,
num_training_timesteps: 1000,
solver_order: 2,
thresholding: false,
dynamic_thresholding_ratio: 0.995,
sample_max_value: 1.0,
solver_type: SolverType::Bh1,
lower_order_final: true,
}
}
}
impl SchedulerConfig for UniPCSchedulerConfig {
fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>> {
Ok(Box::new(EdmDpmMultistepScheduler::new(
self.clone(),
inference_steps,
)?))
}
}
struct State {
model_outputs: Vec<Option<Tensor>>,
lower_order_nums: usize,
order: usize,
last_sample: Option<Tensor>,
}
impl State {
fn new(solver_order: usize) -> Self {
Self {
model_outputs: vec![None; solver_order],
lower_order_nums: 0,
order: 0,
last_sample: None,
}
}
fn lower_order_nums(&self) -> usize {
self.lower_order_nums
}
fn update_lower_order_nums(&mut self, n: usize) {
self.lower_order_nums = n;
}
fn model_outputs(&self) -> &[Option<Tensor>] {
self.model_outputs.as_slice()
}
fn update_model_output(&mut self, idx: usize, output: Option<Tensor>) {
self.model_outputs[idx] = output;
}
fn last_sample(&self) -> Option<&Tensor> {
self.last_sample.as_ref()
}
fn update_last_sample(&mut self, sample: Tensor) {
let _ = self.last_sample.replace(sample);
}
fn order(&self) -> usize {
self.order
}
fn update_order(&mut self, order: usize) {
self.order = order;
}
}
pub struct EdmDpmMultistepScheduler {
schedule: Schedule,
config: UniPCSchedulerConfig,
state: State,
}
impl EdmDpmMultistepScheduler {
pub fn new(config: UniPCSchedulerConfig, num_inference_steps: usize) -> Result<Self> {
let schedule = Schedule::new(
config.timestep_schedule.clone(),
config.sigma_schedule,
num_inference_steps,
config.num_training_timesteps,
)?;
Ok(Self {
schedule,
state: State::new(config.solver_order),
config,
})
}
fn step_index(&self, timestep: usize) -> usize {
let index_candidates = self
.schedule
.timesteps()
.iter()
.enumerate()
.filter(|(_, t)| (*t == &timestep))
.map(|(i, _)| i)
.collect::<Vec<_>>();
match index_candidates.len() {
0 => 0,
1 => index_candidates[0],
_ => index_candidates[1],
}
}
fn timestep(&self, step_idx: usize) -> usize {
self.schedule
.timesteps()
.get(step_idx)
.copied()
.unwrap_or(0)
}
fn convert_model_output(
&self,
model_output: &Tensor,
sample: &Tensor,
timestep: usize,
) -> Result<Tensor> {
let (alpha_t, sigma_t) = (
self.schedule.alpha_t(timestep),
self.schedule.sigma_t(timestep),
);
let x0_pred = match self.config.prediction_type {
PredictionType::Epsilon => ((sample - (model_output * sigma_t))? / alpha_t)?,
PredictionType::Sample => model_output.clone(),
PredictionType::VPrediction => ((alpha_t * sample)? - (sigma_t * model_output)?)?,
};
if self.config.thresholding {
self.threshold_sample(x0_pred)
} else {
Ok(x0_pred)
}
}
fn threshold_sample(&self, sample: Tensor) -> Result<Tensor> {
let shape = sample.shape().clone().into_dims();
let v = sample
.abs()?
.reshape((shape[0], shape[1] * shape[2..].iter().product::<usize>()))?
.to_dtype(candle::DType::F64)?
.to_vec2::<f64>()?;
let q = stats::Quantile::new(self.config.dynamic_thresholding_ratio)
.with_samples(v.into_iter().flatten());
let (threshold, max) = (q.quantile().max(self.config.sample_max_value), q.max());
sample.clamp(-threshold, threshold)? / (threshold / max).sqrt().min(1.)
}
fn multistep_uni_p_bh_update(&self, sample: &Tensor, timestep: usize) -> Result<Tensor> {
let step_index = self.step_index(timestep);
let ns = &self.schedule;
let model_outputs = self.state.model_outputs();
let Some(m0) = &model_outputs[model_outputs.len() - 1] else {
return Err(Error::Msg(
"Expected model output for predictor update".to_string(),
));
};
let (t0, tt) = (timestep, self.timestep(self.step_index(timestep) + 1));
let (sigma_t, sigma_s0) = (ns.sigma_t(tt), ns.sigma_t(t0));
let (alpha_t, _alpha_s0) = (ns.alpha_t(tt), ns.alpha_t(t0));
let (lambda_t, lambda_s0) = (ns.lambda_t(tt), ns.lambda_t(t0));
let h = lambda_t - lambda_s0;
let device = sample.device();
let (mut rks, mut d1s) = (vec![], vec![]);
for i in 1..self.state.order() {
let ti = self.timestep(step_index.saturating_sub(i + 1));
let Some(mi) = model_outputs
.get(model_outputs.len().saturating_sub(i + 1))
.into_iter()
.flatten()
.next()
else {
return Err(Error::Msg(
"Expected model output for predictor update".to_string(),
));
};
let (alpha_si, sigma_si) = (ns.alpha_t(ti), ns.sigma_t(ti));
let lambda_si = alpha_si.ln() - sigma_si.ln();
let rk = (lambda_si - lambda_s0) / h;
rks.push(rk);
d1s.push(((mi - m0)? / rk)?);
}
rks.push(1.0);
let rks = Tensor::new(rks, device)?;
let (mut r, mut b) = (vec![], vec![]);
let hh = h.neg();
let h_phi_1 = hh.exp_m1();
let mut h_phi_k = h_phi_1 / hh - 1.;
let mut factorial_i = 1.;
let b_h = match self.config.solver_type {
SolverType::Bh1 => hh,
SolverType::Bh2 => hh.exp_m1(),
};
for i in 1..self.state.order() + 1 {
r.push(rks.powf(i as f64 - 1.)?);
b.push(h_phi_k * factorial_i / b_h);
factorial_i = i as f64 + 1.;
h_phi_k = h_phi_k / hh - 1. / factorial_i;
}
let (r, b) = (Tensor::stack(&r, 0)?, Tensor::new(b, device)?);
let (d1s, rhos_p) = match d1s.len() {
0 => (None, None),
_ => {
let rhos_p = match self.state.order() {
2 => Tensor::new(&[0.5f64], m0.device())?.to_dtype(m0.dtype())?,
_ => {
let ((r1, r2), b1) = (r.dims2()?, b.dims1()?);
let inverse = linalg::inverse(&r.i((..(r1 - 1), ..(r2 - 1)))?)?;
let b = b.i(..(b1 - 1))?;
b.broadcast_mul(&inverse)?.sum(1)?.to_dtype(m0.dtype())?
}
};
(Some(Tensor::stack(&d1s, 1)?), Some(rhos_p))
}
};
let x_t_ = ((sigma_t / sigma_s0 * sample)? - (alpha_t * h_phi_1 * m0)?)?;
if let (Some(d1s), Some(rhos_p)) = (d1s, rhos_p) {
use linalg::{Permutation, TensordotFixedPosition, TensordotGeneral};
let output_shape = m0.shape().clone();
let pred_res = TensordotGeneral {
lhs_permutation: Permutation { dims: vec![0] },
rhs_permutation: Permutation {
dims: vec![1, 0, 2, 3, 4],
},
tensordot_fixed_position: TensordotFixedPosition {
len_uncontracted_lhs: 1,
len_uncontracted_rhs: output_shape.dims().iter().product::<usize>(),
len_contracted_axes: d1s.dim(1)?,
output_shape,
},
output_permutation: Permutation {
dims: vec![0, 1, 2, 3],
},
}
.eval(&rhos_p, &d1s)?;
x_t_ - (alpha_t * b_h * pred_res)?
} else {
Ok(x_t_)
}
}
fn multistep_uni_c_bh_update(
&self,
model_output: &Tensor,
model_outputs: &[Option<Tensor>],
last_sample: &Tensor,
sample: &Tensor,
timestep: usize,
) -> Result<Tensor> {
let step_index = self.step_index(timestep);
let Some(m0) = model_outputs.last().into_iter().flatten().next() else {
return Err(Error::Msg(
"Expected model output for corrector update".to_string(),
));
};
let model_t = model_output;
let (x, _xt) = (last_sample, sample);
let (t0, tt, ns) = (
self.timestep(self.step_index(timestep) - 1),
timestep,
&self.schedule,
);
let (sigma_t, sigma_s0) = (ns.sigma_t(tt), ns.sigma_t(t0));
let (alpha_t, _alpha_s0) = (ns.alpha_t(tt), ns.alpha_t(t0));
let (lambda_t, lambda_s0) = (ns.lambda_t(tt), ns.lambda_t(t0));
let h = lambda_t - lambda_s0;
let device = sample.device();
let (mut rks, mut d1s) = (vec![], vec![]);
for i in 1..self.state.order() {
let ti = self.timestep(step_index.saturating_sub(i + 1));
let Some(mi) = model_outputs
.get(model_outputs.len().saturating_sub(i + 1))
.into_iter()
.flatten()
.next()
else {
return Err(Error::Msg(
"Expected model output for corrector update".to_string(),
));
};
let (alpha_si, sigma_si) = (ns.alpha_t(ti), ns.sigma_t(ti));
let lambda_si = alpha_si.ln() - sigma_si.ln();
let rk = (lambda_si - lambda_s0) / h;
rks.push(rk);
d1s.push(((mi - m0)? / rk)?);
}
rks.push(1.0);
let rks = Tensor::new(rks, device)?;
let (mut r, mut b) = (vec![], vec![]);
let hh = h.neg();
let h_phi_1 = hh.exp_m1();
let mut h_phi_k = h_phi_1 / hh - 1.;
let mut factorial_i = 1.;
let b_h = match self.config.solver_type {
SolverType::Bh1 => hh,
SolverType::Bh2 => hh.exp_m1(),
};
for i in 1..self.state.order() + 1 {
r.push(rks.powf(i as f64 - 1.)?);
b.push(h_phi_k * factorial_i / b_h);
factorial_i = i as f64 + 1.;
h_phi_k = h_phi_k / hh - 1. / factorial_i;
}
let (r, b) = (Tensor::stack(&r, 0)?, Tensor::new(b, device)?);
let d1s = match d1s.len() {
0 => None,
_ => Some(Tensor::stack(&d1s, 1)?),
};
let rhos_c = match self.state.order() {
1 => Tensor::new(&[0.5f64], m0.device())?.to_dtype(m0.dtype())?,
_ => {
let inverse = linalg::inverse(&r)?;
b.broadcast_mul(&inverse)?.sum(1)?.to_dtype(m0.dtype())?
}
};
let x_t_ = ((sigma_t / sigma_s0 * x)? - (alpha_t * h_phi_1 * m0)?)?;
let corr_res = d1s
.map(|d1s| {
use linalg::{Permutation, TensordotFixedPosition, TensordotGeneral};
let output_shape = x_t_.shape().clone();
TensordotGeneral {
lhs_permutation: Permutation { dims: vec![0] },
rhs_permutation: Permutation {
dims: vec![1, 0, 2, 3, 4],
},
tensordot_fixed_position: TensordotFixedPosition {
len_uncontracted_lhs: 1,
len_uncontracted_rhs: output_shape.dims().iter().product::<usize>(),
len_contracted_axes: d1s.dim(1)?,
output_shape,
},
output_permutation: Permutation {
dims: vec![0, 1, 2, 3],
},
}
.eval(&rhos_c.i(..rhos_c.dims()[0] - 1)?, &d1s)
})
.unwrap_or_else(|| Tensor::zeros_like(m0))?;
let d1_t = (model_t - m0)?;
let x_t = (x_t_
- (alpha_t
* b_h
* (corr_res + rhos_c.i(rhos_c.dims()[0] - 1)?.broadcast_mul(&d1_t)?)?)?)?;
Ok(x_t)
}
}
impl Scheduler for EdmDpmMultistepScheduler {
fn step(&mut self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> {
let step_index = self.step_index(timestep);
let model_output_converted = &self.convert_model_output(model_output, sample, timestep)?;
let sample = match (&self.config.corrector, self.state.last_sample()) {
(CorrectorConfiguration::Enabled { skip_steps: s }, Some(last_sample))
if !s.contains(&step_index) && step_index > 0 =>
{
&self.multistep_uni_c_bh_update(
model_output_converted,
self.state.model_outputs(),
last_sample,
sample,
timestep,
)?
}
(CorrectorConfiguration::Enabled { .. }, _) | (CorrectorConfiguration::Disabled, _) => {
sample
}
};
let mut model_outputs = self.state.model_outputs().to_vec();
for i in 0..self.config.solver_order.saturating_sub(1) {
self.state
.update_model_output(i, model_outputs[i + 1].take());
}
self.state.update_model_output(
model_outputs.len() - 1,
Some(model_output_converted.clone()),
);
let mut this_order = self.config.solver_order;
if self.config.lower_order_final {
this_order = self
.config
.solver_order
.min(self.schedule.timesteps.len() - step_index);
}
self.state
.update_order(this_order.min(self.state.lower_order_nums() + 1));
self.state.update_last_sample(sample.clone());
let prev_sample = self.multistep_uni_p_bh_update(sample, timestep)?;
let lower_order_nums = self.state.lower_order_nums();
if lower_order_nums < self.config.solver_order {
self.state.update_lower_order_nums(lower_order_nums + 1);
}
Ok(prev_sample)
}
fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Result<Tensor> {
Ok(sample)
}
fn timesteps(&self) -> &[usize] {
&self.schedule.timesteps
}
fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor> {
let (alpha_t, sigma_t) = (
self.schedule.alpha_t(timestep),
self.schedule.sigma_t(timestep),
);
(alpha_t * original)? + (sigma_t * noise)?
}
fn init_noise_sigma(&self) -> f64 {
self.schedule.sigma_t(self.schedule.num_training_steps())
}
}
#[derive(Debug, Clone)]
struct Schedule {
timesteps: Vec<usize>,
num_training_steps: usize,
sigma_schedule: SigmaSchedule,
#[allow(unused)]
timestep_schedule: TimestepSchedule,
}
impl Schedule {
fn new(
timestep_schedule: TimestepSchedule,
sigma_schedule: SigmaSchedule,
num_inference_steps: usize,
num_training_steps: usize,
) -> Result<Self> {
Ok(Self {
timesteps: timestep_schedule.timesteps(
&sigma_schedule,
num_inference_steps,
num_training_steps,
)?,
timestep_schedule,
sigma_schedule,
num_training_steps,
})
}
fn timesteps(&self) -> &[usize] {
&self.timesteps
}
fn num_training_steps(&self) -> usize {
self.num_training_steps
}
fn t(&self, step: usize) -> f64 {
(step as f64 + 1.) / self.num_training_steps as f64
}
fn alpha_t(&self, t: usize) -> f64 {
(1. / (self.sigma_schedule.sigma_t(self.t(t)).powi(2) + 1.)).sqrt()
}
fn sigma_t(&self, t: usize) -> f64 {
self.sigma_schedule.sigma_t(self.t(t)) * self.alpha_t(t)
}
fn lambda_t(&self, t: usize) -> f64 {
self.alpha_t(t).ln() - self.sigma_t(t).ln()
}
}
mod stats {
//! This is a slightly modified form of the P² quantile implementation from https://github.com/vks/average.
//! Also see: http://www.cs.wustl.edu/~jain/papers/ftp/psqr.pdf
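//!
//! A small illustrative sketch of how this estimator is driven from `threshold_sample` above
//! (the sample values here are made up):
//!
//! ```ignore
//! let q = Quantile::new(0.995).with_samples([0.1f64, 0.4, 2.3, 0.7, 1.1]);
//! let (estimate, max) = (q.quantile(), q.max());
//! ```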
use num_traits::{Float, ToPrimitive};
#[derive(Debug, Clone)]
pub struct Quantile {
q: [f64; 5],
n: [i64; 5],
m: [f64; 5],
dm: [f64; 5],
max: Option<f64>,
}
impl Quantile {
pub fn new(p: f64) -> Quantile {
assert!((0. ..=1.).contains(&p));
Quantile {
q: [0.; 5],
n: [1, 2, 3, 4, 0],
m: [1., 1. + 2. * p, 1. + 4. * p, 3. + 2. * p, 5.],
dm: [0., p / 2., p, (1. + p) / 2., 1.],
max: None,
}
}
pub fn max(&self) -> f64 {
self.max.unwrap_or(f64::NAN)
}
fn p(&self) -> f64 {
self.dm[2]
}
fn parabolic(&self, i: usize, d: f64) -> f64 {
let s = d.round() as i64;
self.q[i]
+ d / (self.n[i + 1] - self.n[i - 1]).to_f64().unwrap()
* ((self.n[i] - self.n[i - 1] + s).to_f64().unwrap()
* (self.q[i + 1] - self.q[i])
/ (self.n[i + 1] - self.n[i]).to_f64().unwrap()
+ (self.n[i + 1] - self.n[i] - s).to_f64().unwrap()
* (self.q[i] - self.q[i - 1])
/ (self.n[i] - self.n[i - 1]).to_f64().unwrap())
}
fn linear(&self, i: usize, d: f64) -> f64 {
let sum = if d < 0. { i - 1 } else { i + 1 };
self.q[i] + d * (self.q[sum] - self.q[i]) / (self.n[sum] - self.n[i]).to_f64().unwrap()
}
pub fn quantile(&self) -> f64 {
if self.len() >= 5 {
return self.q[2];
}
if self.is_empty() {
return f64::NAN;
}
let mut heights: [f64; 4] = [self.q[0], self.q[1], self.q[2], self.q[3]];
let len = self.len() as usize;
debug_assert!(len < 5);
sort_floats(&mut heights[..len]);
let desired_index = (len as f64) * self.p() - 1.;
let mut index = desired_index.ceil();
if desired_index == index && index >= 0. {
let index = index.round() as usize;
debug_assert!(index < 5);
if index < len - 1 {
return 0.5 * self.q[index] + 0.5 * self.q[index + 1];
}
}
index = index.max(0.);
let mut index = index.round() as usize;
debug_assert!(index < 5);
index = index.min(len - 1);
self.q[index]
}
fn len(&self) -> u64 {
self.n[4] as u64
}
fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn add(&mut self, x: f64) {
self.max = self.max.map(|y| y.max(x)).or(Some(x));
if self.n[4] < 5 {
self.q[self.n[4] as usize] = x;
self.n[4] += 1;
if self.n[4] == 5 {
sort_floats(&mut self.q);
}
return;
}
let mut k: usize;
if x < self.q[0] {
self.q[0] = x;
k = 0;
} else {
k = 4;
for i in 1..5 {
if x < self.q[i] {
k = i;
break;
}
}
if self.q[4] < x {
self.q[4] = x;
}
};
for i in k..5 {
self.n[i] += 1;
}
for i in 0..5 {
self.m[i] += self.dm[i];
}
for i in 1..4 {
let d = self.m[i] - self.n[i].to_f64().unwrap();
if d >= 1. && self.n[i + 1] - self.n[i] > 1
|| d <= -1. && self.n[i - 1] - self.n[i] < -1
{
let d = Float::signum(d);
let q_new = self.parabolic(i, d);
if self.q[i - 1] < q_new && q_new < self.q[i + 1] {
self.q[i] = q_new;
} else {
self.q[i] = self.linear(i, d);
}
let delta = d.round() as i64;
debug_assert_eq!(delta.abs(), 1);
self.n[i] += delta;
}
}
}
pub fn with_samples(mut self, samples: impl IntoIterator<Item = f64>) -> Self {
for sample in samples {
self.add(sample);
}
self
}
}
fn sort_floats(v: &mut [f64]) {
v.sort_unstable_by(|a, b| a.total_cmp(b));
}
}
mod linalg {
use candle::{IndexOp, Result, Shape, Tensor};
pub fn inverse(m: &Tensor) -> Result<Tensor> {
adjoint(m)? / determinant(m)?.to_scalar::<f64>()?
}
pub fn adjoint(m: &Tensor) -> Result<Tensor> {
cofactor(m)?.transpose(0, 1)
}
pub fn cofactor(m: &Tensor) -> Result<Tensor> {
let s = m.shape().dim(0)?;
if s == 2 {
let mut v = vec![];
for i in 0..2 {
let mut x = vec![];
for j in 0..2 {
x.push((m.i((i, j))? * (-1.0f64).powi(i as i32 + j as i32))?)
}
v.push(Tensor::stack(&x, 0)?.unsqueeze(0)?);
}
return Tensor::stack(&v, 1)?.squeeze(0);
}
let minors = minors(m)?;
let mut v = vec![];
for i in 0..s {
let mut x = vec![];
for j in 0..s {
let det = (determinant(&minors.i((i, j))?)?
* ((-1.0f64).powi(i as i32) * (-1.0f64).powi(j as i32)))?;
x.push(det);
}
v.push(Tensor::stack(&x, 0)?.unsqueeze(0)?);
}
Tensor::stack(&v, 1)?.squeeze(0)
}
pub fn determinant(m: &Tensor) -> Result<Tensor> {
let s = m.shape().dim(0)?;
if s == 2 {
return (m.i((0, 0))? * m.i((1, 1))?)? - (m.i((0, 1))? * m.i((1, 0))?);
}
let cofactor = cofactor(m)?;
let m0 = m.i((0, 0))?;
let det = (0..s)
.map(|i| (m.i((0, i))? * cofactor.i((0, i))?))
.try_fold(m0.zeros_like()?, |acc, cur| (acc + cur?))?;
Ok(det)
}
pub fn minors(m: &Tensor) -> Result<Tensor> {
let s = m.shape().dim(0)?;
if s == 1 {
return m.i((0, 0));
}
let mut v = vec![];
for i in 0..s {
let msub = Tensor::cat(&[m.i((..i, ..))?, m.i(((i + 1).., ..))?], 0)?;
let mut x = vec![];
for j in 0..s {
let t = Tensor::cat(&[msub.i((.., ..j))?, msub.i((.., (j + 1)..))?], 1)?;
x.push(t);
}
v.push(Tensor::stack(&x, 0)?.unsqueeze(0)?);
}
Tensor::stack(&v, 1)?.squeeze(0)
}
#[derive(Debug)]
pub struct TensordotGeneral {
pub lhs_permutation: Permutation,
pub rhs_permutation: Permutation,
pub tensordot_fixed_position: TensordotFixedPosition,
pub output_permutation: Permutation,
}
impl TensordotGeneral {
pub fn eval(&self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor> {
let permuted_lhs = self.lhs_permutation.eval(lhs)?;
let permuted_rhs = self.rhs_permutation.eval(rhs)?;
let tensordotted = self
.tensordot_fixed_position
.eval(&permuted_lhs, &permuted_rhs)?;
self.output_permutation.eval(&tensordotted)
}
}
#[derive(Debug)]
pub struct TensordotFixedPosition {
pub len_uncontracted_lhs: usize,
pub len_uncontracted_rhs: usize,
pub len_contracted_axes: usize,
pub output_shape: Shape,
}
impl TensordotFixedPosition {
fn eval(&self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor> {
let lhs_view = lhs.reshape((self.len_uncontracted_lhs, self.len_contracted_axes))?;
let rhs_view = rhs.reshape((self.len_contracted_axes, self.len_uncontracted_rhs))?;
lhs_view.matmul(&rhs_view)?.reshape(&self.output_shape)
}
}
#[derive(Debug)]
pub struct Permutation {
pub dims: Vec<usize>,
}
impl Permutation {
fn eval(&self, tensor: &Tensor) -> Result<Tensor> {
tensor.permute(self.dims.as_slice())
}
}
}
| candle/candle-transformers/src/models/stable_diffusion/uni_pc.rs/0 | {
"file_path": "candle/candle-transformers/src/models/stable_diffusion/uni_pc.rs",
"repo_id": "candle",
"token_count": 17600
} | 60 |
use super::Config;
use crate::models::with_tracing::{linear, linear_no_bias, Linear};
use candle::{Device, IndexOp, Result, Tensor, D};
use candle_nn::{embedding, Conv1d, Conv1dConfig, Embedding, LayerNorm, Module, VarBuilder};
fn conv1d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
config: Conv1dConfig,
vb: VarBuilder,
) -> Result<Conv1d> {
let weight = vb.get((out_channels, in_channels, kernel_size), "weight")?;
let bias = vb.get(out_channels, "bias")?;
Ok(Conv1d::new(weight, Some(bias), config))
}
fn layer_norm(size: usize, vb: VarBuilder) -> Result<LayerNorm> {
let weight = vb.get(size, "weight")?;
let bias = vb.get(size, "bias")?;
Ok(LayerNorm::new(weight, bias, 1e-5))
}
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L62
#[derive(Debug, Clone)]
struct MultiHeadAttention {
query: Linear,
key: Linear,
value: Linear,
out: Linear,
n_head: usize,
span: tracing::Span,
softmax_span: tracing::Span,
matmul_span: tracing::Span,
kv_cache: Option<(Tensor, Tensor)>,
}
impl MultiHeadAttention {
fn load(n_state: usize, n_head: usize, vb: VarBuilder) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "multi-head-attn");
let softmax_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-softmax");
let matmul_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-matmul");
let query = linear(n_state, n_state, vb.pp("q_proj"))?;
let value = linear(n_state, n_state, vb.pp("v_proj"))?;
let key = linear_no_bias(n_state, n_state, vb.pp("k_proj"))?;
let out = linear(n_state, n_state, vb.pp("out_proj"))?;
Ok(Self {
query,
key,
value,
out,
n_head,
span,
softmax_span,
matmul_span,
kv_cache: None,
})
}
fn forward(
&mut self,
x: &Tensor,
xa: Option<&Tensor>,
mask: Option<&Tensor>,
flush_cache: bool,
) -> Result<Tensor> {
let _enter = self.span.enter();
let q = self.query.forward(x)?;
let (k, v) = match xa {
None => {
let k = self.key.forward(x)?;
let v = self.value.forward(x)?;
(k, v)
}
Some(x) => {
if flush_cache {
self.kv_cache = None;
}
if let Some((k, v)) = &self.kv_cache {
(k.clone(), v.clone())
} else {
let k = self.key.forward(x)?;
let v = self.value.forward(x)?;
self.kv_cache = Some((k.clone(), v.clone()));
(k, v)
}
}
};
let wv = self.qkv_attention(&q, &k, &v, mask)?;
let out = self.out.forward(&wv)?;
Ok(out)
}
fn reshape_head(&self, x: &Tensor) -> Result<Tensor> {
let (n_batch, n_ctx, n_state) = x.dims3()?;
let target_dims = &[n_batch, n_ctx, self.n_head, n_state / self.n_head];
x.reshape(target_dims)?.transpose(1, 2)
}
fn qkv_attention(
&self,
q: &Tensor,
k: &Tensor,
v: &Tensor,
mask: Option<&Tensor>,
) -> Result<Tensor> {
let (_, n_ctx, n_state) = q.dims3()?;
let scale = ((n_state / self.n_head) as f64).powf(-0.25);
let q = (self.reshape_head(q)? * scale)?;
let k = (self.reshape_head(k)?.transpose(2, 3)? * scale)?;
let v = self.reshape_head(v)?.contiguous()?;
let mut qk = {
let _enter = self.matmul_span.enter();
q.matmul(&k)?
};
if let Some(mask) = mask {
let mask = mask.i((0..n_ctx, 0..n_ctx))?;
qk = qk.broadcast_add(&mask)?
}
let w = {
let _enter = self.softmax_span.enter();
candle_nn::ops::softmax_last_dim(&qk)?
};
let wv = {
let _enter = self.matmul_span.enter();
w.matmul(&v)?
}
.transpose(1, 2)?
.flatten_from(2)?;
Ok(wv)
}
fn reset_kv_cache(&mut self) {
self.kv_cache = None;
}
}
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L111
#[derive(Debug, Clone)]
struct ResidualAttentionBlock {
attn: MultiHeadAttention,
attn_ln: LayerNorm,
cross_attn: Option<(MultiHeadAttention, LayerNorm)>,
mlp_linear1: Linear,
mlp_linear2: Linear,
mlp_ln: LayerNorm,
span: tracing::Span,
}
impl ResidualAttentionBlock {
fn load(n_state: usize, n_head: usize, ca: bool, vb: VarBuilder) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "residual-attn");
let attn = MultiHeadAttention::load(n_state, n_head, vb.pp("self_attn"))?;
let attn_ln = layer_norm(n_state, vb.pp("self_attn_layer_norm"))?;
let cross_attn = if ca {
let cross_attn = MultiHeadAttention::load(n_state, n_head, vb.pp("encoder_attn"))?;
let cross_attn_ln = layer_norm(n_state, vb.pp("encoder_attn_layer_norm"))?;
Some((cross_attn, cross_attn_ln))
} else {
None
};
let n_mlp = n_state * 4;
let mlp_linear1 = linear(n_state, n_mlp, vb.pp("fc1"))?;
let mlp_linear2 = linear(n_mlp, n_state, vb.pp("fc2"))?;
let mlp_ln = layer_norm(n_state, vb.pp("final_layer_norm"))?;
Ok(Self {
attn,
attn_ln,
cross_attn,
mlp_linear1,
mlp_linear2,
mlp_ln,
span,
})
}
fn forward(
&mut self,
x: &Tensor,
xa: Option<&Tensor>,
mask: Option<&Tensor>,
flush_kv_cache: bool,
) -> Result<Tensor> {
let _enter = self.span.enter();
let attn = self
.attn
.forward(&self.attn_ln.forward(x)?, None, mask, flush_kv_cache)?;
let mut x = (x + attn)?;
if let Some((attn, ln)) = &mut self.cross_attn {
x = (&x + attn.forward(&ln.forward(&x)?, xa, None, flush_kv_cache)?)?;
}
let mlp = self.mlp_linear2.forward(
&self
.mlp_linear1
.forward(&self.mlp_ln.forward(&x)?)?
.gelu()?,
)?;
x + mlp
}
fn reset_kv_cache(&mut self) {
self.attn.reset_kv_cache();
if let Some((attn, _)) = &mut self.cross_attn {
attn.reset_kv_cache();
}
}
}
fn sinusoids(length: usize, channels: usize, device: &Device) -> Result<Tensor> {
let max_timescale = 10000f32;
let log_timescale_increment = max_timescale.ln() / (channels / 2 - 1) as f32;
let inv_timescales: Vec<_> = (0..channels / 2)
.map(|i| (i as f32 * (-log_timescale_increment)).exp())
.collect();
let inv_timescales = Tensor::new(inv_timescales.as_slice(), device)?.unsqueeze(0)?;
let arange = Tensor::arange(0, length as u32, device)?
.to_dtype(candle::DType::F32)?
.unsqueeze(1)?;
let sh = (length, channels / 2);
let scaled_time = (arange.broadcast_as(sh)? * inv_timescales.broadcast_as(sh)?)?;
let sincos = Tensor::cat(&[scaled_time.sin()?, scaled_time.cos()?], 1)?;
Ok(sincos)
}
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L143
#[derive(Debug, Clone)]
pub struct AudioEncoder {
conv1: Conv1d,
conv2: Conv1d,
positional_embedding: Tensor,
blocks: Vec<ResidualAttentionBlock>,
ln_post: LayerNorm,
span: tracing::Span,
conv1_span: tracing::Span,
conv2_span: tracing::Span,
}
impl AudioEncoder {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "audio-encoder");
let conv1_span = tracing::span!(tracing::Level::TRACE, "conv1");
let conv2_span = tracing::span!(tracing::Level::TRACE, "conv2");
let n_state = cfg.d_model;
let n_head = cfg.encoder_attention_heads;
let n_ctx = cfg.max_source_positions;
let cfg1 = Conv1dConfig {
padding: 1,
stride: 1,
groups: 1,
dilation: 1,
cudnn_fwd_algo: None,
};
let cfg2 = Conv1dConfig {
padding: 1,
stride: 2,
groups: 1,
dilation: 1,
cudnn_fwd_algo: None,
};
let conv1 = conv1d(cfg.num_mel_bins, n_state, 3, cfg1, vb.pp("conv1"))?;
let conv2 = conv1d(n_state, n_state, 3, cfg2, vb.pp("conv2"))?;
let positional_embedding = sinusoids(n_ctx, n_state, vb.device())?;
let blocks = (0..cfg.encoder_layers)
.map(|i| {
ResidualAttentionBlock::load(n_state, n_head, false, vb.pp(format!("layers.{i}")))
})
.collect::<Result<Vec<_>>>()?;
let ln_post = layer_norm(n_state, vb.pp("layer_norm"))?;
Ok(Self {
conv1,
conv2,
positional_embedding,
blocks,
ln_post,
conv1_span,
conv2_span,
span,
})
}
pub fn forward(&mut self, x: &Tensor, flush_kv_cache: bool) -> Result<Tensor> {
let _enter = self.span.enter();
let x = {
let _enter = self.conv1_span.enter();
self.conv1.forward(x)?.gelu()?
};
let x = {
let _enter = self.conv2_span.enter();
self.conv2.forward(&x)?.gelu()?
};
let x = x.transpose(1, 2)?;
let (_bsize, seq_len, _hidden) = x.dims3()?;
let positional_embedding = self.positional_embedding.narrow(0, 0, seq_len)?;
let mut x = x.broadcast_add(&positional_embedding)?;
for block in self.blocks.iter_mut() {
x = block.forward(&x, None, None, flush_kv_cache)?
}
let x = self.ln_post.forward(&x)?;
Ok(x)
}
}
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L176
#[derive(Debug, Clone)]
pub struct TextDecoder {
token_embedding: Embedding,
positional_embedding: Tensor,
blocks: Vec<ResidualAttentionBlock>,
ln: LayerNorm,
mask: Tensor,
span: tracing::Span,
span_final: tracing::Span,
}
impl TextDecoder {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "text-decoder");
let span_final = tracing::span!(tracing::Level::TRACE, "text-decoder-final");
let n_state = cfg.d_model;
let n_head = cfg.decoder_attention_heads;
let n_ctx = cfg.max_target_positions;
let token_embedding = embedding(cfg.vocab_size, n_state, vb.pp("embed_tokens"))?;
let positional_embedding = vb.get((n_ctx, n_state), "embed_positions.weight")?;
let blocks = (0..cfg.decoder_layers)
.map(|i| {
ResidualAttentionBlock::load(n_state, n_head, true, vb.pp(format!("layers.{i}")))
})
.collect::<Result<Vec<_>>>()?;
let ln = layer_norm(n_state, vb.pp("layer_norm"))?;
let mask: Vec<_> = (0..n_ctx)
.flat_map(|i| (0..n_ctx).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 }))
.collect();
let mask = Tensor::from_vec(mask, (n_ctx, n_ctx), vb.device())?;
Ok(Self {
token_embedding,
positional_embedding,
blocks,
ln,
mask,
span,
span_final,
})
}
pub fn forward(&mut self, x: &Tensor, xa: &Tensor, flush_kv_cache: bool) -> Result<Tensor> {
let _enter = self.span.enter();
let last = x.dim(D::Minus1)?;
let token_embedding = self.token_embedding.forward(x)?;
let positional_embedding = self.positional_embedding.narrow(0, 0, last)?;
let mut x = token_embedding.broadcast_add(&positional_embedding)?;
for block in self.blocks.iter_mut() {
x = block.forward(&x, Some(xa), Some(&self.mask), flush_kv_cache)?;
}
self.ln.forward(&x)
}
pub fn final_linear(&self, x: &Tensor) -> Result<Tensor> {
let b_size = x.dim(0)?;
let w = self.token_embedding.embeddings().broadcast_left(b_size)?;
let logits = {
let _enter = self.span_final.enter();
x.matmul(&w.t()?)?
};
Ok(logits)
}
pub fn reset_kv_cache(&mut self) {
for block in self.blocks.iter_mut() {
block.reset_kv_cache();
}
}
}
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L221
#[derive(Debug, Clone)]
pub struct Whisper {
pub encoder: AudioEncoder,
pub decoder: TextDecoder,
pub config: Config,
}
impl Whisper {
pub fn load(vb: &VarBuilder, config: Config) -> Result<Self> {
let encoder = AudioEncoder::load(vb.pp("model.encoder"), &config)?;
let decoder = TextDecoder::load(vb.pp("model.decoder"), &config)?;
Ok(Self {
encoder,
decoder,
config,
})
}
pub fn reset_kv_cache(&mut self) {
self.encoder
.blocks
.iter_mut()
.for_each(|b| b.reset_kv_cache());
self.decoder.reset_kv_cache();
}
}
| candle/candle-transformers/src/models/whisper/model.rs/0 | {
"file_path": "candle/candle-transformers/src/models/whisper/model.rs",
"repo_id": "candle",
"token_count": 7098
} | 61 |
//! Varbuilder for Loading gguf files
//!
//! VarBuilder is a utility to store quantized tensors from a [GGUF model file](https://huggingface.co/docs/hub/gguf).
//! These tensors can be loaded from disk using `from_gguf` or from an in-memory
//! buffer using `from_gguf_buffer`.
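//!
//! A minimal usage sketch (the file name and tensor names below are placeholders):
//!
//! ```ignore
//! let device = candle::Device::Cpu;
//! // Load every tensor from the GGUF file into the builder.
//! let vb = VarBuilder::from_gguf("model-q4k.gguf", &device)?;
//! // Scope into a prefix and fetch a tensor by name, validating its shape.
//! let w = vb.pp("blk.0").pp("attn_q").get((4096, 4096), "weight")?;
//! ```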
use candle::quantized::QTensor;
use candle::{Device, Result, Shape};
use std::sync::Arc;
// VarBuilder specialized for QTensors
#[derive(Clone)]
pub struct VarBuilder {
data: Arc<std::collections::HashMap<String, Arc<QTensor>>>,
path: Vec<String>,
device: Device,
}
impl VarBuilder {
pub fn from_gguf<P: AsRef<std::path::Path>>(p: P, device: &Device) -> Result<Self> {
let mut file = std::fs::File::open(p)?;
let content = candle::quantized::gguf_file::Content::read(&mut file)?;
let mut data = std::collections::HashMap::new();
for tensor_name in content.tensor_infos.keys() {
let tensor = content.tensor(&mut file, tensor_name, device)?;
data.insert(tensor_name.to_string(), Arc::new(tensor));
}
Ok(Self {
data: Arc::new(data),
path: Vec::new(),
device: device.clone(),
})
}
pub fn from_gguf_buffer(buffer: &[u8], device: &Device) -> Result<Self> {
let mut cursor = std::io::Cursor::new(buffer);
let content = candle::quantized::gguf_file::Content::read(&mut cursor)?;
let mut data = std::collections::HashMap::new();
for tensor_name in content.tensor_infos.keys() {
let tensor = content.tensor(&mut cursor, tensor_name, device)?;
data.insert(tensor_name.to_string(), Arc::new(tensor));
}
Ok(Self {
data: Arc::new(data),
path: Vec::new(),
device: device.clone(),
})
}
pub fn pp<S: ToString>(&self, s: S) -> Self {
let mut path = self.path.clone();
path.push(s.to_string());
Self {
data: self.data.clone(),
path,
device: self.device.clone(),
}
}
fn path(&self, tensor_name: &str) -> String {
if self.path.is_empty() {
tensor_name.to_string()
} else {
[&self.path.join("."), tensor_name].join(".")
}
}
pub fn get<S: Into<Shape>>(&self, s: S, name: &str) -> Result<Arc<QTensor>> {
let path = self.path(name);
match self.data.get(&path) {
None => {
candle::bail!("cannot find tensor {path}")
}
Some(qtensor) => {
let shape = s.into();
if qtensor.shape() != &shape {
candle::bail!(
"shape mismatch for {name}, got {:?}, expected {shape:?}",
qtensor.shape()
)
}
Ok(qtensor.clone())
}
}
}
pub fn get_no_shape(&self, name: &str) -> Result<Arc<QTensor>> {
let path = self.path(name);
match self.data.get(&path) {
None => {
candle::bail!("cannot find tensor {name}")
}
Some(qtensor) => Ok(qtensor.clone()),
}
}
pub fn device(&self) -> &Device {
&self.device
}
pub fn contains_key(&self, key: &str) -> bool {
self.data.contains_key(key)
}
}
| candle/candle-transformers/src/quantized_var_builder.rs/0 | {
"file_path": "candle/candle-transformers/src/quantized_var_builder.rs",
"repo_id": "candle",
"token_count": 1649
} | 62 |
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<style>
@import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap");
html,
body {
font-family: "Source Sans 3", sans-serif;
}
</style>
<title>Candle Blip Image Captioning Demo</title>
<script src="https://cdn.tailwindcss.com"></script>
<script type="module" src="./code.js"></script>
<script type="module">
const MODELS = {
blip_image_quantized_q4k: {
base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/",
model: "blip-image-captioning-large-q4k.gguf",
config: "config.json",
tokenizer: "tokenizer.json",
quantized: true,
size: "271 MB",
},
blip_image_quantized_q80: {
base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/",
model: "blip-image-captioning-large-q80.gguf",
config: "config.json",
tokenizer: "tokenizer.json",
quantized: true,
size: "505 MB",
},
blip_image_large: {
base_url:
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/refs%2Fpr%2F18/",
model: "model.safetensors",
config: "config.json",
tokenizer: "tokenizer.json",
quantized: false,
size: "1.88 GB",
},
};
const blipWorker = new Worker("./blipWorker.js", {
type: "module",
});
const outputStatusEl = document.querySelector("#output-status");
const outputCaptionEl = document.querySelector("#output-caption");
const modelSelectEl = document.querySelector("#model");
const clearBtn = document.querySelector("#clear-btn");
const fileUpload = document.querySelector("#file-upload");
const dropArea = document.querySelector("#drop-area");
const imagesExamples = document.querySelector("#image-select");
const canvas = document.querySelector("#canvas");
const ctxCanvas = canvas.getContext("2d");
let isCaptioning = false;
let currentImageURL = null;
clearBtn.addEventListener("click", () => {
clearImageCanvas();
});
modelSelectEl.addEventListener("change", () => {
if (currentImageURL) {
runInference(currentImageURL);
}
});
      // add event listener to file input
fileUpload.addEventListener("input", async (e) => {
const target = e.target;
if (target.files.length > 0) {
const href = URL.createObjectURL(target.files[0]);
clearImageCanvas();
await drawImageCanvas(href);
runInference(href);
}
});
// add event listener to drop-area
dropArea.addEventListener("dragenter", (e) => {
e.preventDefault();
dropArea.classList.add("border-blue-700");
});
dropArea.addEventListener("dragleave", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
});
dropArea.addEventListener("dragover", (e) => {
e.preventDefault();
});
dropArea.addEventListener("drop", async (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
const url = e.dataTransfer.getData("text/uri-list");
const files = e.dataTransfer.files;
if (files.length > 0) {
const href = URL.createObjectURL(files[0]);
clearImageCanvas();
await drawImageCanvas(href);
runInference(href);
} else if (url) {
clearImageCanvas();
await drawImageCanvas(url);
runInference(url);
}
});
imagesExamples.addEventListener("click", async (e) => {
if (isCaptioning) {
return;
}
const target = e.target;
if (target.nodeName === "IMG") {
const href = target.src;
clearImageCanvas();
await drawImageCanvas(href);
runInference(href);
}
});
function clearImageCanvas() {
ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
isCaptioning = false;
clearBtn.disabled = true;
canvas.parentElement.style.height = "auto";
outputStatusEl.hidden = false;
outputCaptionEl.hidden = true;
outputStatusEl.innerText = "Please select an image";
currentImageURL = null;
}
async function drawImageCanvas(imgURL) {
if (!imgURL) {
throw new Error("No image URL provided");
}
return new Promise((resolve, reject) => {
        ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
canvas.width = img.width;
canvas.height = img.height;
ctxCanvas.drawImage(img, 0, 0);
canvas.parentElement.style.height = canvas.offsetHeight + "px";
clearBtn.disabled = false;
resolve(img);
        };
        img.onerror = () => reject(new Error("Failed to load image"));
        img.src = imgURL;
currentImageURL = imgURL;
});
}
document.addEventListener("DOMContentLoaded", () => {
for (const [id, model] of Object.entries(MODELS)) {
const option = document.createElement("option");
option.value = id;
option.innerText = `${id} (${model.size})`;
modelSelectEl.appendChild(option);
}
});
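      // Sends a single captioning request to the worker and resolves once the worker posts a
      // message with status "complete"; intermediate messages are forwarded to `updateStatus`,
      // and any message carrying an `error` field rejects the promise.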
async function getImageCaption(
worker,
weightsURL,
tokenizerURL,
configURL,
modelID,
imageURL,
quantized,
updateStatus = null
) {
return new Promise((resolve, reject) => {
worker.postMessage({
weightsURL,
tokenizerURL,
configURL,
modelID,
imageURL,
quantized,
});
function messageHandler(event) {
if ("error" in event.data) {
worker.removeEventListener("message", messageHandler);
reject(new Error(event.data.error));
}
if (event.data.status === "complete") {
worker.removeEventListener("message", messageHandler);
resolve(event.data);
}
if (updateStatus) updateStatus(event.data);
}
worker.addEventListener("message", messageHandler);
});
}
function updateStatus(data) {
if (data.status === "status") {
outputStatusEl.innerText = data.message;
}
}
async function runInference(imageURL) {
if (isCaptioning || !imageURL) {
alert("Please select an image first");
return;
}
outputStatusEl.hidden = false;
outputCaptionEl.hidden = true;
clearBtn.disabled = true;
modelSelectEl.disabled = true;
isCaptioning = true;
const selectedModel = modelSelectEl.value;
const model = MODELS[selectedModel];
const weightsURL = `${model.base_url}${model.model}`;
const tokenizerURL = `${model.base_url}${model.tokenizer}`;
const configURL = `${model.base_url}${model.config}`;
const quantized = model.quantized;
try {
const time = performance.now();
const caption = await getImageCaption(
blipWorker,
weightsURL,
tokenizerURL,
configURL,
selectedModel,
imageURL,
quantized,
updateStatus
);
outputStatusEl.hidden = true;
outputCaptionEl.hidden = false;
const totalTime = ((performance.now() - time)/1000).toFixed(2);
outputCaptionEl.innerHTML = `${
caption.output
}<br/><span class="text-xs">Inference time: ${totalTime} s</span>`;
} catch (err) {
console.error(err);
outputStatusEl.hidden = false;
outputCaptionEl.hidden = true;
outputStatusEl.innerText = err.message;
}
clearBtn.disabled = false;
modelSelectEl.disabled = false;
isCaptioning = false;
}
</script>
</head>
<body class="container max-w-4xl mx-auto p-4">
<main class="grid grid-cols-1 gap-5 relative">
      <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span>
<div>
<h1 class="text-5xl font-bold">Candle BLIP Image Captioning</h1>
<h2 class="text-2xl font-bold">Rust/WASM Demo</h2>
<p class="max-w-lg">
<a
href="https://huggingface.co/Salesforce/blip-image-captioning-large"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>BLIP Image Captioning
</a>
running in the browser using
<a
href="https://github.com/huggingface/candle/"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>Candle</a
>, a minimalist ML framework for Rust.
</p>
<p class="text-xs max-w-lg py-2">
<b>Note:</b>
        The image captioning on the smallest model takes about 50 seconds; it
        will vary depending on your machine and model size.
</p>
</div>
<div>
      <label for="model" class="font-medium block">Model Options: </label>
<select
id="model"
class="border-2 border-gray-500 rounded-md font-light interactive disabled:cursor-not-allowed w-full max-w-max"
></select>
</div>
<!-- drag and drop area -->
<div class="grid gap-4 sm:grid-cols-2 py-4">
<div class="relative max-w-lg">
<div
class="absolute w-full bottom-full flex justify-between items-center"
>
<div class="flex gap-2 w-full">
<button
id="clear-btn"
disabled
title="Clear Image"
class="ml-auto text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center"
>
<svg
class=""
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 13 12"
height="1em"
>
<path
d="M1.6.7 12 11.1M12 .7 1.6 11.1"
stroke="#2E3036"
stroke-width="2"
/>
</svg>
</button>
</div>
</div>
<div
id="drop-area"
class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative aspect-video w-full overflow-hidden"
>
<div
class="flex flex-col items-center justify-center space-y-1 text-center"
>
<svg
width="25"
height="25"
viewBox="0 0 25 25"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z"
fill="#000"
/>
</svg>
<div class="flex text-sm text-gray-600">
<label
for="file-upload"
class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700"
>
                <span>Drag and drop your image here</span>
<span class="block text-xs">or</span>
<span class="block text-xs">Click to upload</span>
</label>
</div>
<input
id="file-upload"
name="file-upload"
type="file"
class="sr-only"
/>
</div>
<canvas
id="canvas"
class="absolute pointer-events-none w-full"
></canvas>
</div>
</div>
<div class="">
<div
class="h-full bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2"
>
<p
id="output-caption"
class="m-auto text-xl text-center p-2"
hidden
></p>
<span id="output-status" class="m-auto font-light">
Please select an image
</span>
</div>
</div>
</div>
<div>
<div
class="flex gap-3 items-center overflow-x-scroll"
id="image-select"
>
<h3 class="font-medium">Examples:</h3>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg"
class="cursor-pointer w-24 h-24 object-cover"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg"
class="cursor-pointer w-24 h-24 object-cover"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg"
class="cursor-pointer w-24 h-24 object-cover"
/>
</div>
</div>
</main>
</body>
</html>
| candle/candle-wasm-examples/blip/index.html/0 | {
"file_path": "candle/candle-wasm-examples/blip/index.html",
"repo_id": "candle",
"token_count": 7164
} | 63 |
use crate::model::{Cache, Config, Llama};
use byteorder::{LittleEndian, ReadBytesExt};
use candle::{DType, Device, IndexOp, Result, Shape, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use serde::{Deserialize, Serialize};
use tokenizers::Tokenizer;
use wasm_bindgen::prelude::*;
use yew_agent::{HandlerId, Public, WorkerLink};
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
}
#[macro_export]
macro_rules! console_log {
// Note that this is using the `log` function imported above during
// `bare_bones`
($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string()))
}
// Communication to the worker happens through bincode, the model weights and configs are fetched
// on the main thread and transferred via the following structure.
#[derive(Serialize, Deserialize)]
pub struct ModelData {
pub tokenizer: Vec<u8>,
pub model: Vec<u8>,
}
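// Illustrative sketch (not part of this file): on the main thread the weights and tokenizer
// are fetched as raw bytes and wrapped in `ModelData` before being posted to the worker;
// `fetch_bytes` and `bridge` below are assumed helpers, not items defined here.
//
// let tokenizer = fetch_bytes("tokenizer.json").await?;
// let model = fetch_bytes("model.bin").await?;
// bridge.send(WorkerInput::ModelData(ModelData { tokenizer, model }));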
fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> {
let mut buf = [0u8; 4];
r.read_exact(&mut buf)?;
Ok(i32::from_le_bytes(buf))
}
fn read_tensor<R: std::io::Read, S: Into<Shape>>(
r: &mut R,
shape: S,
dev: &Device,
) -> Result<Tensor> {
let shape = shape.into();
let mut data_t = vec![0f32; shape.elem_count()];
r.read_f32_into::<LittleEndian>(&mut data_t)?;
let tensor = Tensor::from_vec(data_t, shape, dev)?;
Ok(tensor)
}
pub struct Model {
pub cache: Cache,
pub config: Config,
pub llama: Llama,
pub tokenizer: Tokenizer,
}
impl Model {
fn run(
&self,
link: &WorkerLink<Worker>,
id: HandlerId,
temp: f64,
top_p: f64,
prompt: String,
) -> Result<()> {
let dev = Device::Cpu;
let temp = if temp <= 0. { None } else { Some(temp) };
let top_p = if top_p <= 0. || top_p >= 1.0 {
None
} else {
Some(top_p)
};
console_log!("temp: {temp:?} top_p: {top_p:?} prompt: {prompt}");
let mut logits_processor = LogitsProcessor::new(299792458, temp, top_p);
let mut index_pos = 0;
let mut tokens = self
.tokenizer
.encode(prompt.to_string(), true)
.map_err(|m| candle::Error::Msg(m.to_string()))?
.get_ids()
.to_vec();
link.respond(id, Ok(WorkerOutput::Generated(prompt)));
for index in 0.. {
if tokens.len() >= self.config.seq_len {
break;
}
let context_size = if self.cache.use_kv_cache && index > 0 {
1
} else {
tokens.len()
};
let ctxt = &tokens[tokens.len().saturating_sub(context_size)..];
let input = Tensor::new(ctxt, &dev)?.unsqueeze(0)?;
let logits = self.llama.forward(&input, index_pos)?;
let logits = logits.squeeze(0)?;
index_pos += ctxt.len();
let next_token = logits_processor.sample(&logits)?;
tokens.push(next_token);
if let Some(text) = self.tokenizer.id_to_token(next_token) {
                let text = text.replace('▁', " ").replace("<0x0A>", "\n");
link.respond(id, Ok(WorkerOutput::Generated(text)));
}
}
Ok(())
}
}
impl Config {
fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> {
let dim = read_i32(r)? as usize;
let hidden_dim = read_i32(r)? as usize;
let n_layers = read_i32(r)? as usize;
let n_heads = read_i32(r)? as usize;
let n_kv_heads = read_i32(r)? as usize;
let vocab_size = read_i32(r)? as usize;
let seq_len = read_i32(r)? as usize;
Ok(Self {
dim,
hidden_dim,
n_layers,
n_heads,
n_kv_heads,
vocab_size,
seq_len,
norm_eps: 1e-5,
})
}
pub fn head_size(&self) -> usize {
self.dim / self.n_heads
}
}
struct TransformerWeights {
// token embedding table
token_embedding_table: Tensor, // (vocab_size, dim)
// weights for rmsnorms
rms_att_weight: Tensor, // (layer, dim) rmsnorm weights
rms_ffn_weight: Tensor, // (layer, dim)
// weights for matmuls
wq: Tensor, // (layer, dim, dim)
wk: Tensor, // (layer, dim, dim)
wv: Tensor, // (layer, dim, dim)
wo: Tensor, // (layer, dim, dim)
// weights for ffn
w1: Tensor, // (layer, hidden_dim, dim)
w2: Tensor, // (layer, dim, hidden_dim)
w3: Tensor, // (layer, hidden_dim, dim)
// final rmsnorm
rms_final_weight: Tensor, // (dim,)
// freq_cis for RoPE relatively positional embeddings
freq_cis_real: Tensor, // (seq_len, head_size/2)
freq_cis_imag: Tensor, // (seq_len, head_size/2)
}
impl TransformerWeights {
fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> {
let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?;
let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?;
let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?;
let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?;
let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?;
let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?;
let rms_final_weight = read_tensor(r, c.dim, dev)?;
let head_size = c.head_size();
let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?;
let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?;
Ok(Self {
token_embedding_table,
rms_att_weight,
wq,
wk,
wv,
wo,
rms_ffn_weight,
w1,
w2,
w3,
rms_final_weight,
freq_cis_real,
freq_cis_imag,
})
}
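    // Exposes the raw llama2.c weights under the HuggingFace-style parameter names expected
    // by the candle Llama implementation, so they can be consumed through a regular VarBuilder.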
fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder<'_>> {
let mut ws = std::collections::HashMap::new();
let mut insert = |name: &str, t: Tensor| {
ws.insert(name.to_string(), t);
};
insert("rot.freq_cis_real", self.freq_cis_real.clone());
insert("rot.freq_cis_imag", self.freq_cis_imag.clone());
insert(
"model.embed_tokens.weight",
self.token_embedding_table.clone(),
);
insert("lm_head.weight", self.token_embedding_table.clone());
insert("model.norm.weight", self.rms_final_weight.clone());
for layer in 0..cfg.n_layers {
ws.insert(
format!("model.layers.{layer}.self_attn.q_proj.weight"),
self.wq.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.self_attn.k_proj.weight"),
self.wk.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.self_attn.v_proj.weight"),
self.wv.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.self_attn.o_proj.weight"),
self.wo.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.mlp.gate_proj.weight"),
self.w1.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.mlp.down_proj.weight"),
self.w2.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.mlp.up_proj.weight"),
self.w3.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.input_layernorm.weight"),
self.rms_att_weight.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.post_attention_layernorm.weight"),
self.rms_ffn_weight.i(layer)?,
);
}
let vb = VarBuilder::from_tensors(ws, DType::F32, device);
Ok(vb)
}
}
impl Model {
pub fn load(md: ModelData) -> Result<Self> {
let dev = Device::Cpu;
let mut model = std::io::Cursor::new(md.model);
let config = Config::from_reader(&mut model)?;
let weights = TransformerWeights::from_reader(&mut model, &config, &dev)?;
let vb = weights.var_builder(&config, &dev)?;
let cache = Cache::new(true, &config, vb.pp("rot"))?;
let llama = Llama::load(vb, &cache, &config)?;
let tokenizer =
Tokenizer::from_bytes(&md.tokenizer).map_err(|m| candle::Error::Msg(m.to_string()))?;
Ok(Self {
cache,
config,
llama,
tokenizer,
})
}
}
pub struct Worker {
link: WorkerLink<Self>,
model: Option<Model>,
}
#[derive(Serialize, Deserialize)]
pub enum WorkerInput {
ModelData(ModelData),
Run(f64, f64, String),
}
#[derive(Serialize, Deserialize)]
pub enum WorkerOutput {
Generated(String),
GenerationDone(std::result::Result<(), String>),
WeightsLoaded,
}
impl yew_agent::Worker for Worker {
type Input = WorkerInput;
type Message = ();
type Output = std::result::Result<WorkerOutput, String>;
type Reach = Public<Self>;
fn create(link: WorkerLink<Self>) -> Self {
Self { link, model: None }
}
fn update(&mut self, _msg: Self::Message) {
// no messaging
}
fn handle_input(&mut self, msg: Self::Input, id: HandlerId) {
let output = match msg {
WorkerInput::ModelData(md) => match Model::load(md) {
Ok(model) => {
self.model = Some(model);
Ok(WorkerOutput::WeightsLoaded)
}
Err(err) => Err(format!("model creation error {err:?}")),
},
WorkerInput::Run(temp, top_p, prompt) => match &mut self.model {
None => Err("model has not been set yet".to_string()),
Some(model) => {
{
let mut cache = model.cache.kvs.lock().unwrap();
for elem in cache.iter_mut() {
*elem = None
}
}
let result = model
.run(&self.link, id, temp, top_p, prompt)
.map_err(|e| e.to_string());
Ok(WorkerOutput::GenerationDone(result))
}
},
};
self.link.respond(id, output);
}
fn name_of_resource() -> &'static str {
"worker.js"
}
fn resource_path_is_relative() -> bool {
true
}
}
| candle/candle-wasm-examples/llama2-c/src/worker.rs/0 | {
"file_path": "candle/candle-wasm-examples/llama2-c/src/worker.rs",
"repo_id": "candle",
"token_count": 5773
} | 64 |
export async function extractEmbeddings(
worker,
weightsURL,
tokenizerURL,
configURL,
modelID,
sentences,
updateStatus,
normalize_embeddings = true
) {
return new Promise((resolve, reject) => {
worker.postMessage({
weightsURL,
tokenizerURL,
configURL,
modelID,
sentences,
normalize_embeddings,
});
function messageHandler(event) {
if ("error" in event.data) {
worker.removeEventListener("message", messageHandler);
reject(new Error(event.data.error));
}
if (event.data.status === "complete") {
worker.removeEventListener("message", messageHandler);
resolve(event.data);
}
if (updateStatus) updateStatus(event.data);
}
worker.addEventListener("message", messageHandler);
});
}
export async function generateText(
worker,
weightsURL,
tokenizerURL,
configURL,
modelID,
prompt,
params,
updateStatus
) {
return new Promise((resolve, reject) => {
worker.postMessage({
weightsURL,
tokenizerURL,
configURL,
modelID,
prompt,
params,
});
function messageHandler(event) {
if ("error" in event.data) {
worker.removeEventListener("message", messageHandler);
reject(new Error(event.data.error));
}
if (event.data.status === "complete") {
worker.removeEventListener("message", messageHandler);
resolve(event.data);
}
if (updateStatus) updateStatus(event.data);
}
worker.addEventListener("message", messageHandler);
});
}
export const MODELS = {
t5_small_quantized: {
size: "64.4 MB",
base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/",
model: "model.gguf",
tokenizer: "tokenizer.json",
config: "config.json",
tasks: {
translation_en_to_de: {
prefix: "translate English to German: ",
max_length: 300,
},
translation_en_to_fr: {
prefix: "translate English to French: ",
max_length: 300,
},
translation_en_to_ro: {
prefix: "translate English to Romanian: ",
max_length: 300,
},
summarization: { prefix: "summarize: ", max_length: 200 },
},
},
t5_small: {
size: "242 MB",
base_url: "https://huggingface.co/t5-small/resolve/main/",
model: "model.safetensors",
tokenizer: "tokenizer.json",
config: "config.json",
tasks: {
translation_en_to_de: {
prefix: "translate English to German: ",
max_length: 300,
},
translation_en_to_fr: {
prefix: "translate English to French: ",
max_length: 300,
},
translation_en_to_ro: {
prefix: "translate English to Romanian: ",
max_length: 300,
},
summarization: { prefix: "summarize: ", max_length: 200 },
},
},
flan_t5_small: {
size: "308 MB",
base_url:
"https://huggingface.co/google/flan-t5-small/resolve/refs%2Fpr%2F14/",
model: "model.safetensors",
tokenizer: "tokenizer.json",
config: "config.json",
tasks: {
translation_en_to_de: {
prefix: "translate English to German: ",
max_length: 300,
},
translation_en_to_fr: {
prefix: "translate English to French: ",
max_length: 300,
},
translation_en_to_ro: {
prefix: "translate English to Romanian: ",
max_length: 300,
},
summarization: { prefix: "summarize: ", max_length: 200 },
},
},
flan_t5_base_quantized: {
size: "263 MB",
base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/",
model: "model-flan-t5-base.gguf",
tokenizer: "tokenizer.json",
config: "config-flan-t5-base.json",
tasks: {
translation_en_to_de: {
prefix: "translate English to German: ",
max_length: 300,
},
translation_en_to_fr: {
prefix: "translate English to French: ",
max_length: 300,
},
translation_en_to_ro: {
prefix: "translate English to Romanian: ",
max_length: 300,
},
summarization: { prefix: "summarize: ", max_length: 200 },
},
},
coedit_large_quantized: {
size: "643 MB",
base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
model: "model.gguf",
tokenizer: "tokenizer.json",
config: "config.json",
tasks: {
fluency: {
prefix: "Fix the grammar: ",
max_length: 300,
},
coherence: {
prefix: "Rewrite to make this easier to understand: ",
max_length: 300,
},
      simplification: {
        prefix: "Paraphrase this: ",
        max_length: 300,
      },
formalization: {
prefix: "Write this more formally: ",
max_length: 300,
},
neutralize: {
prefix: "Write in a more neutral way: ",
max_length: 300,
},
},
},
};
export function getModelInfo(id, taskID) {
const model = MODELS[id];
return {
modelURL: model.base_url + model.model,
configURL: model.base_url + model.config,
tokenizerURL: model.base_url + model.tokenizer,
maxLength: model.tasks[taskID].max_length,
};
}
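// Illustrative usage sketch (the `worker` instance and the exact shapes of `params` and of the
// resolved value are assumptions, not defined in this file):
//
// const modelID = "t5_small_quantized";
// const { modelURL, tokenizerURL, configURL, maxLength } = getModelInfo(modelID, "summarization");
// const result = await generateText(worker, modelURL, tokenizerURL, configURL, modelID,
//   "summarize: ...", { max_length: maxLength }, console.log);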
| candle/candle-wasm-examples/t5/utils.js/0 | {
"file_path": "candle/candle-wasm-examples/t5/utils.js",
"repo_id": "candle",
"token_count": 2339
} | 65 |
[package]
name = "candle-wasm-example-yolo"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
[dependencies]
candle = { workspace = true }
candle-nn = { workspace = true }
num-traits = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
image = { workspace = true }
# App crates.
anyhow = { workspace = true }
byteorder = { workspace = true }
log = { workspace = true }
rand = { workspace = true }
safetensors = { workspace = true }
# Wasm specific crates.
console_error_panic_hook = "0.1.7"
getrandom = { version = "0.2", features = ["js"] }
gloo = "0.11"
js-sys = "0.3.64"
wasm-bindgen = "0.2.87"
wasm-bindgen-futures = "0.4.37"
wasm-logger = "0.2"
yew-agent = "0.2.0"
yew = { version = "0.20.0", features = ["csr"] }
[dependencies.web-sys]
version = "=0.3.70"
features = [
'Blob',
'CanvasRenderingContext2d',
'Document',
'Element',
'HtmlElement',
'HtmlCanvasElement',
'HtmlImageElement',
'ImageData',
'Node',
'Window',
'Request',
'RequestCache',
'RequestInit',
'RequestMode',
'Response',
'Performance',
'TextMetrics',
]
| candle/candle-wasm-examples/yolo/Cargo.toml/0 | {
"file_path": "candle/candle-wasm-examples/yolo/Cargo.toml",
"repo_id": "candle",
"token_count": 464
} | 66 |
pub fn add(left: usize, right: usize) -> usize {
left + right
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
let result = add(2, 2);
assert_eq!(result, 4);
}
}
| candle/candle-wasm-tests/src/lib.rs/0 | {
"file_path": "candle/candle-wasm-tests/src/lib.rs",
"repo_id": "candle",
"token_count": 108
} | 67 |
apiVersion: v2
name: chat-ui
version: 0.0.1-latest
type: application
icon: https://huggingface.co/front/assets/huggingface_logo-noborder.svg
| chat-ui/chart/Chart.yaml/0 | {
"file_path": "chat-ui/chart/Chart.yaml",
"repo_id": "chat-ui",
"token_count": 55
} | 68 |
# Text Embedding Models
By default (for backward compatibility), when the `TEXT_EMBEDDING_MODELS` environment variable is not defined, [transformers.js](https://huggingface.co/docs/transformers.js) embedding models are used for embedding tasks, specifically the [Xenova/gte-small](https://huggingface.co/Xenova/gte-small) model.
You can customize the embedding model by setting `TEXT_EMBEDDING_MODELS` in your `.env.local` file where the required fields are `name`, `chunkCharLength` and `endpoints`.
Supported text embedding backends are: [`transformers.js`](https://huggingface.co/docs/transformers.js), [`TEI`](https://github.com/huggingface/text-embeddings-inference) and [`OpenAI`](https://platform.openai.com/docs/guides/embeddings). `transformers.js` models run locally as part of `chat-ui`, whereas `TEI` models run in a different environment and are accessed through an API endpoint. `openai` models are accessed through the [OpenAI API](https://platform.openai.com/docs/guides/embeddings).
When more than one embedding model is supplied in the `.env.local` file, the first one is used by default, and the others are only used by LLMs whose `embeddingModel` is configured to the name of that model.
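For example, here is a sketch of a two-model setup (the model names and URL are placeholders). The first entry is the default; a chat model whose `embeddingModel` is set to `"intfloat/e5-base-v2"` would use the second entry instead:

```ini
TEXT_EMBEDDING_MODELS=`[
  {
    "name": "Xenova/gte-small",
    "chunkCharLength": 512,
    "endpoints": [{ "type": "transformersjs" }]
  },
  {
    "name": "intfloat/e5-base-v2",
    "chunkCharLength": 512,
    "endpoints": [{ "type": "tei", "url": "http://127.0.0.1:8080/" }]
  }
]`

# Abbreviated chat model entry, showing only the relevant field:
MODELS=`[
  { "name": "my-chat-model", "embeddingModel": "intfloat/e5-base-v2" }
]`
```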
## Transformers.js
The Transformers.js backend runs embeddings on the local CPU, which can be quite slow. If you use web search frequently, consider using TEI or OpenAI embeddings instead, as performance will improve significantly.
```ini
TEXT_EMBEDDING_MODELS = `[
{
"name": "Xenova/gte-small",
"displayName": "Xenova/gte-small",
"description": "locally running embedding",
"chunkCharLength": 512,
"endpoints": [
{ "type": "transformersjs" }
]
}
]`
```
## Text Embeddings Inference (TEI)
> Text Embeddings Inference (TEI) is a comprehensive toolkit designed for efficient deployment and serving of open source text embeddings models. It enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE, and E5.
Some recommended models at the time of writing (May 2024) are `Snowflake/snowflake-arctic-embed-m` and `BAAI/bge-large-en-v1.5`. You may run TEI locally with GPU support via Docker:
`docker run --gpus all -p 8080:80 -v tei-data:/data --name tei ghcr.io/huggingface/text-embeddings-inference:1.2 --model-id YOUR/HF_MODEL`
You can then hook this up to your Chat UI instance with the following configuration.
```ini
TEXT_EMBEDDING_MODELS=`[
{
"name": "YOUR/HF_MODEL",
"displayName": "YOUR/HF_MODEL",
"preQuery": "Check the model documentation for the preQuery. Not all models have one",
"prePassage": "Check the model documentation for the prePassage. Not all models have one",
"chunkCharLength": 512,
"endpoints": [{
"type": "tei",
"url": "http://127.0.0.1:8080/"
}]
}
]`
```
Examples for `Snowflake/snowflake-arctic-embed-m` and `BAAI/bge-large-en-v1.5`:
```ini
TEXT_EMBEDDING_MODELS=`[
{
"name": "Snowflake/snowflake-arctic-embed-m",
"displayName": "Snowflake/snowflake-arctic-embed-m",
"preQuery": "Represent this sentence for searching relevant passages: ",
"chunkCharLength": 512,
"endpoints": [{
"type": "tei",
"url": "http://127.0.0.1:8080/"
}]
},{
"name": "BAAI/bge-large-en-v1.5",
"displayName": "BAAI/bge-large-en-v1.5",
"chunkCharLength": 512,
"endpoints": [{
"type": "tei",
"url": "http://127.0.0.1:8080/"
}]
}
]`
```
## OpenAI
It's also possible to host your own OpenAI API compatible embedding models. [`Infinity`](https://github.com/michaelfeil/infinity) is one example. You may run it locally with Docker:
`docker run -it --gpus all -v infinity-data:/app/.cache -p 7997:7997 michaelf34/infinity:latest v2 --model-id nomic-ai/nomic-embed-text-v1 --port 7997`
You can then hook this up to your Chat UI instance with the following configuration.
```ini
TEXT_EMBEDDING_MODELS=`[
{
"name": "nomic-ai/nomic-embed-text-v1",
"displayName": "nomic-ai/nomic-embed-text-v1",
"chunkCharLength": 512,
"model": {
"name": "nomic-ai/nomic-embed-text-v1"
},
"endpoints": [
{
"type": "openai",
"url": "https://127.0.0.1:7997/embeddings"
}
]
}
]`
```
| chat-ui/docs/source/configuration/embeddings.md/0 | {
"file_path": "chat-ui/docs/source/configuration/embeddings.md",
"repo_id": "chat-ui",
"token_count": 1568
} | 69 |
# Configuration Overview
Chat UI handles configuration with environment variables. The default config for Chat UI is stored in the `.env` file, which you may use as a reference. You will need to override some values to get Chat UI to run locally. This can be done in `.env.local` or via your environment. The bare minimum configuration to get Chat UI running is:
```ini
MONGODB_URL=mongodb://localhost:27017
HF_TOKEN=your_token
```
The following sections detail various sections of the app you may want to configure.
| chat-ui/docs/source/configuration/overview.md/0 | {
"file_path": "chat-ui/docs/source/configuration/overview.md",
"repo_id": "chat-ui",
"token_count": 130
} | 70 |
import readline from "readline";
import minimist from "minimist";
// @ts-expect-error: vite-node makes the var available but the typescript compiler doesn't see them
import { env } from "$env/dynamic/private";
import { faker } from "@faker-js/faker";
import { ObjectId } from "mongodb";
// @ts-expect-error: vite-node makes the var available but the typescript compiler doesn't see them
import { ready } from "$lib/server/config";
import { collections } from "$lib/server/database.ts";
import { models } from "../src/lib/server/models.ts";
import type { User } from "../src/lib/types/User";
import type { Assistant } from "../src/lib/types/Assistant";
import type { Conversation } from "../src/lib/types/Conversation";
import type { Settings } from "../src/lib/types/Settings";
import type { CommunityToolDB, ToolLogoColor, ToolLogoIcon } from "../src/lib/types/Tool";
import { defaultEmbeddingModel } from "../src/lib/server/embeddingModels.ts";
import { Message } from "../src/lib/types/Message.ts";
import { addChildren } from "../src/lib/utils/tree/addChildren.ts";
import { generateSearchTokens } from "../src/lib/utils/searchTokens.ts";
import { ReviewStatus } from "../src/lib/types/Review.ts";
import fs from "fs";
import path from "path";
import { MessageUpdateType } from "../src/lib/types/MessageUpdate.ts";
import { MessageReasoningUpdateType } from "../src/lib/types/MessageUpdate.ts";
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout,
});
await ready;
rl.on("close", function () {
process.exit(0);
});
const samples = fs.readFileSync(path.join(__dirname, "samples.txt"), "utf8").split("\n---\n");
const possibleFlags = ["reset", "all", "users", "settings", "assistants", "conversations", "tools"];
const argv = minimist(process.argv.slice(2));
const flags = argv["_"].filter((flag) => possibleFlags.includes(flag));
async function generateMessages(preprompt?: string): Promise<Message[]> {
const isLinear = faker.datatype.boolean(0.5);
const isInterrupted = faker.datatype.boolean(0.05);
const messages: Message[] = [];
messages.push({
id: crypto.randomUUID(),
from: "system",
content: preprompt ?? "",
createdAt: faker.date.recent({ days: 30 }),
updatedAt: faker.date.recent({ days: 30 }),
});
let isUser = true;
let lastId = messages[0].id;
if (isLinear) {
const convLength = faker.number.int({ min: 1, max: 25 }) * 2; // must always be even
for (let i = 0; i < convLength; i++) {
const hasReasoning = Math.random() < 0.2;
lastId = addChildren(
{
messages,
rootMessageId: messages[0].id,
},
{
from: isUser ? "user" : "assistant",
content:
faker.lorem.sentence({
min: 10,
max: isUser ? 50 : 200,
}) +
(!isUser && Math.random() < 0.1
? "\n```\n" + faker.helpers.arrayElement(samples) + "\n```\n"
: ""),
createdAt: faker.date.recent({ days: 30 }),
updatedAt: faker.date.recent({ days: 30 }),
reasoning: hasReasoning ? faker.lorem.paragraphs(2) : undefined,
updates: hasReasoning
? [
{
type: MessageUpdateType.Reasoning,
subtype: MessageReasoningUpdateType.Status,
uuid: crypto.randomUUID(),
status: "thinking",
},
]
: [],
interrupted: !isUser && i === convLength - 1 && isInterrupted,
},
lastId
);
isUser = !isUser;
}
} else {
const convLength = faker.number.int({ min: 2, max: 200 });
for (let i = 0; i < convLength; i++) {
const hasReasoning = Math.random() < 0.2;
addChildren(
{
messages,
rootMessageId: messages[0].id,
},
{
from: isUser ? "user" : "assistant",
content:
faker.lorem.sentence({
min: 10,
max: isUser ? 50 : 200,
}) +
(!isUser && Math.random() < 0.1
? "\n```\n" + faker.helpers.arrayElement(samples) + "\n```\n"
: ""),
reasoning: hasReasoning ? faker.lorem.paragraphs(2) : undefined,
updates: hasReasoning
? [
{
type: MessageUpdateType.Reasoning,
subtype: MessageReasoningUpdateType.Status,
uuid: crypto.randomUUID(),
status: "thinking",
},
]
: [],
createdAt: faker.date.recent({ days: 30 }),
updatedAt: faker.date.recent({ days: 30 }),
interrupted: !isUser && i === convLength - 1 && isInterrupted,
},
faker.helpers.arrayElement([
messages[0].id,
...messages.filter((m) => m.from === (isUser ? "assistant" : "user")).map((m) => m.id),
])
);
isUser = !isUser;
}
}
return messages;
}
async function seed() {
console.log("Seeding...");
const modelIds = models.map((model) => model.id);
if (flags.includes("reset")) {
console.log("Starting reset of DB");
await collections.users.deleteMany({});
await collections.settings.deleteMany({});
await collections.assistants.deleteMany({});
await collections.conversations.deleteMany({});
await collections.tools.deleteMany({});
await collections.migrationResults.deleteMany({});
await collections.semaphores.deleteMany({});
console.log("Reset done");
}
if (flags.includes("users") || flags.includes("all")) {
console.log("Creating 100 new users");
const newUsers: User[] = Array.from({ length: 100 }, () => ({
_id: new ObjectId(),
createdAt: faker.date.recent({ days: 30 }),
updatedAt: faker.date.recent({ days: 30 }),
username: faker.internet.userName(),
name: faker.person.fullName(),
hfUserId: faker.string.alphanumeric(24),
avatarUrl: faker.image.avatar(),
}));
await collections.users.insertMany(newUsers);
console.log("Done creating users.");
}
const users = await collections.users.find().toArray();
if (flags.includes("settings") || flags.includes("all")) {
console.log("Updating settings for all users");
users.forEach(async (user) => {
const settings: Settings = {
userId: user._id,
shareConversationsWithModelAuthors: faker.datatype.boolean(0.25),
hideEmojiOnSidebar: faker.datatype.boolean(0.25),
ethicsModalAcceptedAt: faker.date.recent({ days: 30 }),
activeModel: faker.helpers.arrayElement(modelIds),
createdAt: faker.date.recent({ days: 30 }),
updatedAt: faker.date.recent({ days: 30 }),
disableStream: faker.datatype.boolean(0.25),
directPaste: faker.datatype.boolean(0.25),
customPrompts: {},
assistants: [],
};
await collections.settings.updateOne(
{ userId: user._id },
{ $set: { ...settings } },
{ upsert: true }
);
});
console.log("Done updating settings.");
}
if (flags.includes("assistants") || flags.includes("all")) {
console.log("Creating assistants for all users");
await Promise.all(
users.map(async (user) => {
const name = faker.animal.insect();
const assistants = faker.helpers.multiple<Assistant>(
() => ({
_id: new ObjectId(),
name,
createdById: user._id,
createdByName: user.username,
createdAt: faker.date.recent({ days: 30 }),
updatedAt: faker.date.recent({ days: 30 }),
userCount: faker.number.int({ min: 1, max: 100000 }),
review: faker.helpers.enumValue(ReviewStatus),
modelId: faker.helpers.arrayElement(modelIds),
description: faker.lorem.sentence(),
preprompt: faker.hacker.phrase(),
exampleInputs: faker.helpers.multiple(() => faker.lorem.sentence(), {
count: faker.number.int({ min: 0, max: 4 }),
}),
searchTokens: generateSearchTokens(name),
last24HoursCount: faker.number.int({ min: 0, max: 1000 }),
}),
{ count: faker.number.int({ min: 3, max: 10 }) }
);
await collections.assistants.insertMany(assistants);
await collections.settings.updateOne(
{ userId: user._id },
{ $set: { assistants: assistants.map((a) => a._id.toString()) } },
{ upsert: true }
);
})
);
console.log("Done creating assistants.");
}
if (flags.includes("conversations") || flags.includes("all")) {
console.log("Creating conversations for all users");
await Promise.all(
users.map(async (user) => {
const conversations = faker.helpers.multiple(
async () => {
const settings = await collections.settings.findOne<Settings>({ userId: user._id });
const assistantId =
settings?.assistants && settings.assistants.length > 0 && faker.datatype.boolean(0.1)
? faker.helpers.arrayElement<ObjectId>(settings.assistants)
: undefined;
const preprompt =
(assistantId
? await collections.assistants
.findOne({ _id: assistantId })
.then((assistant: Assistant) => assistant?.preprompt ?? "")
: faker.helpers.maybe(() => faker.hacker.phrase(), { probability: 0.5 })) ?? "";
const messages = await generateMessages(preprompt);
const conv = {
_id: new ObjectId(),
userId: user._id,
assistantId,
preprompt,
createdAt: faker.date.recent({ days: 145 }),
updatedAt: faker.date.recent({ days: 145 }),
model: faker.helpers.arrayElement(modelIds),
title: faker.internet.emoji() + " " + faker.hacker.phrase(),
embeddingModel: defaultEmbeddingModel.id,
messages,
rootMessageId: messages[0].id,
} satisfies Conversation;
return conv;
},
{ count: faker.number.int({ min: 10, max: 200 }) }
);
await collections.conversations.insertMany(await Promise.all(conversations));
})
);
console.log("Done creating conversations.");
}
// generate Community Tools
if (flags.includes("tools") || flags.includes("all")) {
const tools = await Promise.all(
faker.helpers.multiple(
() => {
const _id = new ObjectId();
const displayName = faker.company.catchPhrase();
const description = faker.company.catchPhrase();
const color = faker.helpers.arrayElement([
"purple",
"blue",
"green",
"yellow",
"red",
]) satisfies ToolLogoColor;
const icon = faker.helpers.arrayElement([
"wikis",
"tools",
"camera",
"code",
"email",
"cloud",
"terminal",
"game",
"chat",
"speaker",
"video",
]) satisfies ToolLogoIcon;
const baseUrl = faker.helpers.arrayElement([
"stabilityai/stable-diffusion-3-medium",
"multimodalart/cosxl",
"gokaygokay/SD3-Long-Captioner",
"xichenhku/MimicBrush",
]);
// keep empty for populate for now
const user: User = faker.helpers.arrayElement(users);
const createdById = user._id;
const createdByName = user.username ?? user.name;
return {
type: "community" as const,
_id,
createdById,
createdByName,
displayName,
name: displayName.toLowerCase().replace(" ", "_"),
endpoint: "/test",
description,
color,
icon,
baseUrl,
inputs: [],
outputPath: null,
outputType: "str" as const,
showOutput: false,
useCount: faker.number.int({ min: 0, max: 100000 }),
last24HoursUseCount: faker.number.int({ min: 0, max: 1000 }),
createdAt: faker.date.recent({ days: 30 }),
updatedAt: faker.date.recent({ days: 30 }),
searchTokens: generateSearchTokens(displayName),
review: faker.helpers.enumValue(ReviewStatus),
outputComponent: null,
outputComponentIdx: null,
};
},
{ count: faker.number.int({ min: 10, max: 200 }) }
)
);
await collections.tools.insertMany(tools satisfies CommunityToolDB[]);
}
}
// run seed
(async () => {
try {
rl.question(
"You're about to run a seeding script on the following MONGODB_URL: \x1b[31m" +
env.MONGODB_URL +
"\x1b[0m\n\n With the following flags: \x1b[31m" +
flags.join("\x1b[0m , \x1b[31m") +
"\x1b[0m\n \n\n Are you sure you want to continue? (yes/no): ",
async (confirm) => {
if (confirm !== "yes") {
console.log("Not 'yes', exiting.");
rl.close();
process.exit(0);
}
console.log("Starting seeding...");
await seed();
console.log("Seeding done.");
rl.close();
}
);
} catch (e) {
console.error(e);
process.exit(1);
}
})();
| chat-ui/scripts/populate.ts/0 | {
"file_path": "chat-ui/scripts/populate.ts",
"repo_id": "chat-ui",
"token_count": 5135
} | 71 |
<script lang="ts">
import { base } from "$app/paths";
import type { ToolLogoColor, ToolLogoIcon } from "$lib/types/Tool";
import { debounce } from "$lib/utils/debounce";
import { onMount } from "svelte";
import ToolLogo from "./ToolLogo.svelte";
import CarbonClose from "~icons/carbon/close";
interface ToolSuggestion {
_id: string;
displayName: string;
createdByName: string;
color: ToolLogoColor;
icon: ToolLogoIcon;
}
interface Props {
toolIds?: string[];
}
let { toolIds = $bindable([]) }: Props = $props();
let selectedValues: ToolSuggestion[] = $state([]);
onMount(async () => {
selectedValues = await Promise.all(
toolIds.map(async (id) => await fetch(`${base}/api/tools/${id}`).then((res) => res.json()))
);
await fetchSuggestions("");
});
let inputValue = $state("");
let maxValues = 3;
let suggestions: ToolSuggestion[] = $state([]);
async function fetchSuggestions(query: string) {
suggestions = (await fetch(`${base}/api/tools/search?q=${query}`).then((res) =>
res.json()
)) satisfies ToolSuggestion[];
}
const debouncedFetch = debounce((query: string) => fetchSuggestions(query), 300);
function addValue(value: ToolSuggestion) {
if (selectedValues.length < maxValues && !selectedValues.includes(value)) {
selectedValues = [...selectedValues, value];
toolIds = [...toolIds, value._id];
inputValue = "";
suggestions = [];
}
}
function removeValue(id: ToolSuggestion["_id"]) {
selectedValues = selectedValues.filter((v) => v._id !== id);
toolIds = selectedValues.map((value) => value._id);
}
</script>
{#if selectedValues.length > 0}
<div class="flex flex-wrap items-center justify-center gap-2">
{#each selectedValues as value}
<div
class="flex items-center justify-center space-x-2 rounded border border-gray-300 bg-gray-200 px-2 py-1"
>
{#key value.color + value.icon}
<ToolLogo color={value.color} icon={value.icon} size="sm" />
{/key}
<div class="flex flex-col items-center justify-center py-1">
<a
href={`${base}/tools/${value._id}`}
target="_blank"
class="line-clamp-1 truncate font-semibold text-blue-600 hover:underline"
>{value.displayName}</a
>
{#if value.createdByName}
<p class="text-center text-xs text-gray-500">
Created by
<a class="underline" href="{base}/tools?user={value.createdByName}" target="_blank"
>{value.createdByName}</a
>
</p>
{:else}
<p class="text-center text-xs text-gray-500">Official HuggingChat tool</p>
{/if}
</div>
<button
onclick={(e) => {
e.preventDefault();
e.stopPropagation();
removeValue(value._id);
}}
class="text-lg text-gray-600"
>
<CarbonClose />
</button>
</div>
{/each}
</div>
{/if}
{#if selectedValues.length < maxValues}
<div class="group relative block">
<input
type="text"
bind:value={inputValue}
oninput={(ev) => {
inputValue = ev.currentTarget.value;
debouncedFetch(inputValue);
}}
disabled={selectedValues.length >= maxValues}
class="w-full rounded border border-gray-200 bg-gray-100 px-3 py-2"
class:opacity-50={selectedValues.length >= maxValues}
class:bg-gray-100={selectedValues.length >= maxValues}
placeholder="Type to search tools..."
tabindex="0"
/>
{#if suggestions.length > 0}
<div
class="invisible absolute z-10 mt-1 w-full rounded border border-gray-300 bg-white shadow-lg group-focus-within:visible"
tabindex="-1"
>
{#if inputValue === ""}
<p class="px-3 py-2 text-left text-xs text-gray-500">
Start typing to search for tools...
</p>
{:else}
{#each suggestions as suggestion}
<button
onclick={(e) => {
e.preventDefault();
e.stopPropagation();
addValue(suggestion);
}}
class="w-full cursor-pointer px-3 py-2 text-left hover:bg-blue-500 hover:text-white"
tabindex="0"
>
{suggestion.displayName}
{#if suggestion.createdByName}
<span class="text-xs text-gray-500"> by {suggestion.createdByName}</span>
{/if}
</button>
{/each}
{/if}
</div>
{/if}
</div>
{/if}
| chat-ui/src/lib/components/AssistantToolPicker.svelte/0 | {
"file_path": "chat-ui/src/lib/components/AssistantToolPicker.svelte",
"repo_id": "chat-ui",
"token_count": 1794
} | 72 |
<script lang="ts">
import { page } from "$app/stores";
import { getHref } from "$lib/utils/getHref";
import PaginationArrow from "./PaginationArrow.svelte";
interface Props {
classNames?: string;
numItemsPerPage: number;
numTotalItems: number;
}
let { classNames = "", numItemsPerPage, numTotalItems }: Props = $props();
const ELLIPSIS_IDX = -1 as const;
function getPageIndexes(pageIdx: number, nTotalPages: number) {
let pageIdxs: number[] = [];
const NUM_EXTRA_BUTTONS = 2; // The number of page links to show on either side of the current page link.
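		// Worked example (illustrative): with pageIdx = 5 and nTotalPages = 12 this returns
		// [0, ELLIPSIS_IDX, 3, 4, 5, 6, 7, ELLIPSIS_IDX, 11], rendered as "1 … 4 5 6 7 8 … 12".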
const minIdx = 0;
const maxIdx = nTotalPages - 1;
pageIdxs = [pageIdx];
// forward
for (let i = 1; i < NUM_EXTRA_BUTTONS + 1; i++) {
const newPageIdx = pageIdx + i;
if (newPageIdx > maxIdx) {
continue;
}
pageIdxs.push(newPageIdx);
}
if (maxIdx - pageIdxs[pageIdxs.length - 1] > 1) {
pageIdxs.push(...[ELLIPSIS_IDX, maxIdx]);
} else if (maxIdx - pageIdxs[pageIdxs.length - 1] === 1) {
pageIdxs.push(maxIdx);
}
// backward
for (let i = 1; i < NUM_EXTRA_BUTTONS + 1; i++) {
const newPageIdx = pageIdx - i;
if (newPageIdx < minIdx) {
continue;
}
pageIdxs.unshift(newPageIdx);
}
if (pageIdxs[0] - minIdx > 1) {
pageIdxs.unshift(...[minIdx, ELLIPSIS_IDX]);
} else if (pageIdxs[0] - minIdx === 1) {
pageIdxs.unshift(minIdx);
}
return pageIdxs;
}
let numTotalPages = $derived(Math.ceil(numTotalItems / numItemsPerPage));
let pageIndex = $derived(parseInt($page.url.searchParams.get("p") ?? "0"));
let pageIndexes = $derived(getPageIndexes(pageIndex, numTotalPages));
</script>
{#if numTotalPages > 1}
<nav>
<ul
class="flex select-none items-center justify-between space-x-2 text-gray-700 dark:text-gray-300 sm:justify-center {classNames}"
>
<li>
<PaginationArrow
href={getHref($page.url, { newKeys: { p: (pageIndex - 1).toString() } })}
direction="previous"
isDisabled={pageIndex - 1 < 0}
/>
</li>
{#each pageIndexes as pageIdx}
<li class="hidden sm:block">
<a
class="
rounded-lg px-2.5 py-1
{pageIndex === pageIdx
? 'bg-gray-50 font-semibold ring-1 ring-inset ring-gray-200 dark:bg-gray-800 dark:text-yellow-500 dark:ring-gray-700'
: ''}
"
class:pointer-events-none={pageIdx === ELLIPSIS_IDX || pageIndex === pageIdx}
href={getHref($page.url, { newKeys: { p: pageIdx.toString() } })}
>
{pageIdx === ELLIPSIS_IDX ? "..." : pageIdx + 1}
</a>
</li>
{/each}
<li>
<PaginationArrow
href={getHref($page.url, { newKeys: { p: (pageIndex + 1).toString() } })}
direction="next"
isDisabled={pageIndex + 1 >= numTotalPages}
/>
</li>
</ul>
</nav>
{/if}
| chat-ui/src/lib/components/Pagination.svelte/0 | {
"file_path": "chat-ui/src/lib/components/Pagination.svelte",
"repo_id": "chat-ui",
"token_count": 1249
} | 73 |
<script lang="ts">
import { webSearchParameters } from "$lib/stores/webSearchParameters";
import CarbonInformation from "~icons/carbon/information";
import Switch from "./Switch.svelte";
const toggle = () => ($webSearchParameters.useSearch = !$webSearchParameters.useSearch);
</script>
<div
class="flex h-8 cursor-pointer select-none items-center gap-2 rounded-lg border bg-white p-1.5 shadow-sm hover:shadow-none dark:border-gray-800 dark:bg-gray-900"
onclick={toggle}
onkeydown={toggle}
aria-checked={$webSearchParameters.useSearch}
aria-label="Web Search Toggle"
role="switch"
tabindex="0"
>
<Switch name="useSearch" bind:checked={$webSearchParameters.useSearch} />
<label for="useSearch" class="whitespace-nowrap text-sm text-gray-800 dark:text-gray-200">
Search web
</label>
<div class="group relative w-max">
<CarbonInformation class="text-xs text-gray-500" />
<div
class="pointer-events-none absolute -top-20 left-1/2 w-max -translate-x-1/2 rounded-md bg-gray-100 p-2 opacity-0 transition-opacity group-hover:opacity-100 dark:bg-gray-800"
>
<p class="max-w-sm text-sm text-gray-800 dark:text-gray-200">
When enabled, the model will try to complement its answer with information queried from the
web.
</p>
</div>
</div>
</div>
| chat-ui/src/lib/components/WebSearchToggle.svelte/0 | {
"file_path": "chat-ui/src/lib/components/WebSearchToggle.svelte",
"repo_id": "chat-ui",
"token_count": 448
} | 74 |
<script lang="ts">
interface Props {
classNames?: string;
}
let { classNames = "" }: Props = $props();
</script>
<svg
width="1em"
height="1em"
viewBox="0 0 15 6"
class={classNames}
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M1.67236 1L7.67236 7L13.6724 1"
stroke="currentColor"
stroke-width="2"
stroke-linecap="round"
stroke-linejoin="round"
/>
</svg>
| chat-ui/src/lib/components/icons/IconChevron.svelte/0 | {
"file_path": "chat-ui/src/lib/components/icons/IconChevron.svelte",
"repo_id": "chat-ui",
"token_count": 181
} | 75 |
import type { ConversationStats } from "$lib/types/ConversationStats";
import { CONVERSATION_STATS_COLLECTION, collections } from "$lib/server/database";
import { logger } from "$lib/server/logger";
import type { ObjectId } from "mongodb";
import { acquireLock, refreshLock } from "$lib/migrations/lock";
import { Semaphores } from "$lib/types/Semaphore";
async function getLastComputationTime(): Promise<Date> {
const lastStats = await collections.conversationStats.findOne({}, { sort: { "date.at": -1 } });
return lastStats?.date?.at || new Date(0);
}
async function shouldComputeStats(): Promise<boolean> {
const lastComputationTime = await getLastComputationTime();
const oneDayAgo = new Date(Date.now() - 24 * 3_600_000);
return lastComputationTime < oneDayAgo;
}
export async function computeAllStats() {
for (const span of ["day", "week", "month"] as const) {
computeStats({ dateField: "updatedAt", type: "conversation", span }).catch((e) =>
logger.error(e)
);
computeStats({ dateField: "createdAt", type: "conversation", span }).catch((e) =>
logger.error(e)
);
computeStats({ dateField: "createdAt", type: "message", span }).catch((e) => logger.error(e));
}
}
async function computeStats(params: {
dateField: ConversationStats["date"]["field"];
span: ConversationStats["date"]["span"];
type: ConversationStats["type"];
}) {
const lastComputed = await collections.conversationStats.findOne(
{ "date.field": params.dateField, "date.span": params.span, type: params.type },
{ sort: { "date.at": -1 } }
);
// If the last computed week is at the beginning of the last computed month, we need to include some days from the previous month
// In those cases we need to compute the stats from before the last month as everything is one aggregation
const minDate = lastComputed ? lastComputed.date.at : new Date(0);
logger.debug(
{ minDate, dateField: params.dateField, span: params.span, type: params.type },
"Computing conversation stats"
);
const dateField = params.type === "message" ? "messages." + params.dateField : params.dateField;
const pipeline = [
{
$match: {
[dateField]: { $gte: minDate },
},
},
{
$project: {
[dateField]: 1,
sessionId: 1,
userId: 1,
},
},
...(params.type === "message"
? [
{
$unwind: "$messages",
},
{
$match: {
[dateField]: { $gte: minDate },
},
},
]
: []),
{
$sort: {
[dateField]: 1,
},
},
{
$facet: {
userId: [
{
$match: {
userId: { $exists: true },
},
},
{
$group: {
_id: {
at: { $dateTrunc: { date: `$${dateField}`, unit: params.span } },
userId: "$userId",
},
},
},
{
$group: {
_id: "$_id.at",
count: { $sum: 1 },
},
},
{
$project: {
_id: 0,
date: {
at: "$_id",
field: params.dateField,
span: params.span,
},
distinct: "userId",
count: 1,
},
},
],
sessionId: [
{
$match: {
sessionId: { $exists: true },
},
},
{
$group: {
_id: {
at: { $dateTrunc: { date: `$${dateField}`, unit: params.span } },
sessionId: "$sessionId",
},
},
},
{
$group: {
_id: "$_id.at",
count: { $sum: 1 },
},
},
{
$project: {
_id: 0,
date: {
at: "$_id",
field: params.dateField,
span: params.span,
},
distinct: "sessionId",
count: 1,
},
},
],
userOrSessionId: [
{
$group: {
_id: {
at: { $dateTrunc: { date: `$${dateField}`, unit: params.span } },
userOrSessionId: { $ifNull: ["$userId", "$sessionId"] },
},
},
},
{
$group: {
_id: "$_id.at",
count: { $sum: 1 },
},
},
{
$project: {
_id: 0,
date: {
at: "$_id",
field: params.dateField,
span: params.span,
},
distinct: "userOrSessionId",
count: 1,
},
},
],
_id: [
{
$group: {
_id: { $dateTrunc: { date: `$${dateField}`, unit: params.span } },
count: { $sum: 1 },
},
},
{
$project: {
_id: 0,
date: {
at: "$_id",
field: params.dateField,
span: params.span,
},
distinct: "_id",
count: 1,
},
},
],
},
},
{
$project: {
stats: {
$concatArrays: ["$userId", "$sessionId", "$userOrSessionId", "$_id"],
},
},
},
{
$unwind: "$stats",
},
{
$replaceRoot: {
newRoot: "$stats",
},
},
{
$set: {
type: params.type,
},
},
{
$merge: {
into: CONVERSATION_STATS_COLLECTION,
on: ["date.at", "type", "date.span", "date.field", "distinct"],
whenMatched: "replace",
whenNotMatched: "insert",
},
},
];
await collections.conversations.aggregate(pipeline, { allowDiskUse: true }).next();
logger.debug(
{ minDate, dateField: params.dateField, span: params.span, type: params.type },
"Computed conversation stats"
);
}
let hasLock = false;
let lockId: ObjectId | null = null;
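// Keeps this instance's claim on the stats semaphore alive: the lock is refreshed (or
// re-acquired if it was lost) every 10 seconds, so only one instance computes the stats.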
async function maintainLock() {
if (hasLock && lockId) {
hasLock = await refreshLock(Semaphores.CONVERSATION_STATS, lockId);
if (!hasLock) {
lockId = null;
}
} else if (!hasLock) {
lockId = (await acquireLock(Semaphores.CONVERSATION_STATS)) || null;
hasLock = !!lockId;
}
setTimeout(maintainLock, 10_000);
}
export function refreshConversationStats() {
const ONE_HOUR_MS = 3_600_000;
maintainLock().then(async () => {
if (await shouldComputeStats()) {
computeAllStats();
}
setInterval(async () => {
if (await shouldComputeStats()) {
computeAllStats();
}
}, 24 * ONE_HOUR_MS);
});
}
| chat-ui/src/lib/jobs/refresh-conversation-stats.ts/0 | {
"file_path": "chat-ui/src/lib/jobs/refresh-conversation-stats.ts",
"repo_id": "chat-ui",
"token_count": 2855
} | 76 |
// Shouldn't be needed if we dove into sveltekit internals, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
import { logger } from "$lib/server/logger";
import { collections } from "$lib/server/database";
import { onExit } from "./exitHandler";
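// Polls the abortedGenerations collection once per second and caches the abort timestamps
// in memory, keyed by conversation id, so callers can look them up synchronously.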
export class AbortedGenerations {
private static instance: AbortedGenerations;
private abortedGenerations: Record<string, Date> = {};
private constructor() {
const interval = setInterval(() => this.updateList(), 1000);
onExit(() => clearInterval(interval));
this.updateList();
}
public static getInstance(): AbortedGenerations {
if (!AbortedGenerations.instance) {
AbortedGenerations.instance = new AbortedGenerations();
}
return AbortedGenerations.instance;
}
public getAbortTime(conversationId: string): Date | undefined {
return this.abortedGenerations[conversationId];
}
private async updateList() {
try {
const aborts = await collections.abortedGenerations.find({}).sort({ createdAt: 1 }).toArray();
this.abortedGenerations = Object.fromEntries(
aborts.map((abort) => [abort.conversationId.toString(), abort.createdAt])
);
} catch (err) {
logger.error(err);
}
}
}
| chat-ui/src/lib/server/abortedGenerations.ts/0 | {
"file_path": "chat-ui/src/lib/server/abortedGenerations.ts",
"repo_id": "chat-ui",
"token_count": 397
} | 77 |
import { z } from "zod";
import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints";
import { chunk } from "$lib/utils/chunk";
import { config } from "$lib/server/config";
import { logger } from "$lib/server/logger";
export const embeddingEndpointTeiParametersSchema = z.object({
weight: z.number().int().positive().default(1),
model: z.any(),
type: z.literal("tei"),
url: z.string().url(),
authorization: z
.string()
.optional()
.transform((v) => (!v && config.HF_TOKEN ? "Bearer " + config.HF_TOKEN : v)), // if the header is not set but HF_TOKEN is, use it as the authorization header
});
const getModelInfoByUrl = async (url: string, authorization?: string) => {
const { origin } = new URL(url);
const response = await fetch(`${origin}/info`, {
headers: {
Accept: "application/json",
"Content-Type": "application/json",
...(authorization ? { Authorization: authorization } : {}),
},
});
try {
const json = await response.json();
return { max_client_batch_size: 32, max_batch_tokens: 16384, ...json };
} catch {
logger.debug("Could not get info from TEI embedding endpoint. Using defaults.");
return { max_client_batch_size: 32, max_batch_tokens: 16384 };
}
};
export async function embeddingEndpointTei(
input: z.input<typeof embeddingEndpointTeiParametersSchema>
): Promise<EmbeddingEndpoint> {
const { url, model, authorization } = embeddingEndpointTeiParametersSchema.parse(input);
const { max_client_batch_size, max_batch_tokens } = await getModelInfoByUrl(url);
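// Cap the batch size by the server's per-request limit and by a rough estimate of how many
// chunks of `model.chunkCharLength` characters fit within `max_batch_tokens`.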
const maxBatchSize = Math.min(
max_client_batch_size,
Math.floor(max_batch_tokens / model.chunkCharLength)
);
return async ({ inputs }) => {
const { origin } = new URL(url);
const batchesInputs = chunk(inputs, maxBatchSize);
const batchesResults = await Promise.all(
batchesInputs.map(async (batchInputs) => {
const response = await fetch(`${origin}/embed`, {
method: "POST",
headers: {
Accept: "application/json",
"Content-Type": "application/json",
...(authorization ? { Authorization: authorization } : {}),
},
body: JSON.stringify({ inputs: batchInputs, normalize: true, truncate: true }),
});
const embeddings: Embedding[] = await response.json();
return embeddings;
})
);
const flatAllEmbeddings = batchesResults.flat();
return flatAllEmbeddings;
};
}
| chat-ui/src/lib/server/embeddingEndpoints/tei/embeddingEndpoints.ts/0 | {
"file_path": "chat-ui/src/lib/server/embeddingEndpoints/tei/embeddingEndpoints.ts",
"repo_id": "chat-ui",
"token_count": 837
} | 78 |
import { buildPrompt } from "$lib/buildPrompt";
import { z } from "zod";
import type { Endpoint } from "../endpoints";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import { logger } from "$lib/server/logger";
export const endpointLangserveParametersSchema = z.object({
weight: z.number().int().positive().default(1),
model: z.any(),
type: z.literal("langserve"),
url: z.string().url(),
});
export function endpointLangserve(
input: z.input<typeof endpointLangserveParametersSchema>
): Endpoint {
const { url, model } = endpointLangserveParametersSchema.parse(input);
return async ({ messages, preprompt, continueMessage }) => {
const prompt = await buildPrompt({
messages,
continueMessage,
preprompt,
model,
});
const r = await fetch(`${url}/stream`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
input: { text: prompt },
}),
});
if (!r.ok) {
throw new Error(`Failed to generate text: ${await r.text()}`);
}
const encoder = new TextDecoderStream();
const reader = r.body?.pipeThrough(encoder).getReader();
return (async function* () {
let stop = false;
let generatedText = "";
let tokenId = 0;
let accumulatedData = ""; // Buffer to accumulate data chunks
while (!stop) {
// Read the stream and log the outputs to console
const out = (await reader?.read()) ?? { done: false, value: undefined };
// If it's done, we cancel
if (out.done) {
reader?.cancel();
return;
}
if (!out.value) {
return;
}
// Accumulate the data chunk
accumulatedData += out.value;
// Keep read data to check event type
const eventData = out.value;
// Process each complete JSON object in the accumulated data
while (accumulatedData.includes("\n")) {
// Assuming each JSON object ends with a newline
const endIndex = accumulatedData.indexOf("\n");
let jsonString = accumulatedData.substring(0, endIndex).trim();
// Remove the processed part from the buffer
accumulatedData = accumulatedData.substring(endIndex + 1);
// Stopping with end event
if (eventData.startsWith("event: end")) {
stop = true;
yield {
token: {
id: tokenId++,
text: "",
logprob: 0,
special: true,
},
generated_text: generatedText,
details: null,
} satisfies TextGenerationStreamOutput;
reader?.cancel();
continue;
}
if (eventData.startsWith("event: data") && jsonString.startsWith("data: ")) {
jsonString = jsonString.slice(6);
let data = null;
// Handle the parsed data
try {
data = JSON.parse(jsonString);
} catch (e) {
logger.error(e, "Failed to parse JSON");
logger.error(jsonString, "Problematic JSON string:");
continue; // Skip this iteration and try the next chunk
}
// Assuming content within data is a plain string
if (data) {
generatedText += data;
const output: TextGenerationStreamOutput = {
token: {
id: tokenId++,
text: data,
logprob: 0,
special: false,
},
generated_text: null,
details: null,
};
yield output;
}
}
}
}
})();
};
}
export default endpointLangserve;
| chat-ui/src/lib/server/endpoints/langserve/endpointLangserve.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/langserve/endpointLangserve.ts",
"repo_id": "chat-ui",
"token_count": 1394
} | 79 |
import { runWebSearch } from "$lib/server/websearch/runWebSearch";
import { preprocessMessages } from "../endpoints/preprocessMessages";
import { generateTitleForConversation } from "./title";
import {
assistantHasDynamicPrompt,
assistantHasWebSearch,
getAssistantById,
processPreprompt,
} from "./assistant";
import { getTools, runTools } from "./tools";
import type { WebSearch } from "$lib/types/WebSearch";
import {
type MessageUpdate,
MessageUpdateType,
MessageUpdateStatus,
} from "$lib/types/MessageUpdate";
import { generate } from "./generate";
import { mergeAsyncGenerators } from "$lib/utils/mergeAsyncGenerators";
import type { TextGenerationContext } from "./types";
import type { ToolResult } from "$lib/types/Tool";
import { toolHasName } from "../tools/utils";
import directlyAnswer from "../tools/directlyAnswer";
async function* keepAlive(done: AbortSignal): AsyncGenerator<MessageUpdate, undefined, undefined> {
while (!done.aborted) {
yield {
type: MessageUpdateType.Status,
status: MessageUpdateStatus.KeepAlive,
};
await new Promise((resolve) => setTimeout(resolve, 100));
}
}
export async function* textGeneration(ctx: TextGenerationContext) {
const done = new AbortController();
const titleGen = generateTitleForConversation(ctx.conv);
const textGen = textGenerationWithoutTitle(ctx, done);
const keepAliveGen = keepAlive(done.signal);
// keep alive until textGen is done
yield* mergeAsyncGenerators([titleGen, textGen, keepAliveGen]);
}
async function* textGenerationWithoutTitle(
ctx: TextGenerationContext,
done: AbortController
): AsyncGenerator<MessageUpdate, undefined, undefined> {
yield {
type: MessageUpdateType.Status,
status: MessageUpdateStatus.Started,
};
ctx.assistant ??= await getAssistantById(ctx.conv.assistantId);
const { model, conv, messages, assistant, isContinue, webSearch, toolsPreference } = ctx;
const convId = conv._id;
let webSearchResult: WebSearch | undefined;
// run websearch if:
// - it's not continuing a previous message
// - AND the model doesn't support tools and websearch is selected
// - OR the assistant has websearch enabled (no tools for assistants for now)
if (!isContinue && ((webSearch && !conv.assistantId) || assistantHasWebSearch(assistant))) {
webSearchResult = yield* runWebSearch(conv, messages, assistant?.rag);
}
let preprompt = conv.preprompt;
if (assistantHasDynamicPrompt(assistant) && preprompt) {
preprompt = await processPreprompt(preprompt, messages.at(-1)?.content);
if (messages[0].from === "system") messages[0].content = preprompt;
}
let toolResults: ToolResult[] = [];
let tools = model.tools ? await getTools(toolsPreference, ctx.assistant) : undefined;
if (tools) {
const toolCallsRequired = tools.some((tool) => !toolHasName(directlyAnswer.name, tool));
if (toolCallsRequired) {
toolResults = yield* runTools(ctx, tools, preprompt);
} else tools = undefined;
}
const processedMessages = await preprocessMessages(messages, webSearchResult, convId);
yield* generate({ ...ctx, messages: processedMessages }, toolResults, preprompt);
done.abort();
}
| chat-ui/src/lib/server/textGeneration/index.ts/0 | {
"file_path": "chat-ui/src/lib/server/textGeneration/index.ts",
"repo_id": "chat-ui",
"token_count": 960
} | 80 |
import type { MarkdownElement } from "../markdown/types";
export function flattenTree(elem: MarkdownElement): MarkdownElement[] {
if ("children" in elem) return [elem, ...elem.children.flatMap(flattenTree)];
return [elem];
}
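// Illustrative example: for a root element with two children, flattenTree(root) yields
// [root, child1, ...child1's descendants, child2, ...child2's descendants] in depth-first order.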
| chat-ui/src/lib/server/websearch/embed/tree.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/embed/tree.ts",
"repo_id": "chat-ui",
"token_count": 74
} | 81 |
import { config } from "$lib/server/config";
import { getJson, type GoogleParameters } from "serpapi";
import type { WebSearchSource } from "$lib/types/WebSearch";
import { isURL } from "$lib/utils/isUrl";
type SerpApiResponse = {
organic_results: {
link: string;
}[];
};
export default async function searchWebSerpApi(query: string): Promise<WebSearchSource[]> {
const params = {
q: query,
hl: "en",
gl: "us",
google_domain: "google.com",
api_key: config.SERPAPI_KEY,
} satisfies GoogleParameters;
// Show result as JSON
const response = (await getJson("google", params)) as unknown as SerpApiResponse;
return response.organic_results.filter(({ link }) => isURL(link));
}
| chat-ui/src/lib/server/websearch/search/endpoints/serpApi.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/search/endpoints/serpApi.ts",
"repo_id": "chat-ui",
"token_count": 235
} | 82 |
export function switchTheme() {
const { classList } = document.querySelector("html") as HTMLElement;
const metaTheme = document.querySelector('meta[name="theme-color"]') as HTMLMetaElement;
if (classList.contains("dark")) {
classList.remove("dark");
metaTheme.setAttribute("content", "rgb(249, 250, 251)");
localStorage.theme = "light";
} else {
classList.add("dark");
metaTheme.setAttribute("content", "rgb(26, 36, 50)");
localStorage.theme = "dark";
}
}
| chat-ui/src/lib/switchTheme.ts/0 | {
"file_path": "chat-ui/src/lib/switchTheme.ts",
"repo_id": "chat-ui",
"token_count": 164
} | 83 |
import type { ObjectId } from "bson";
import type { Timestamps } from "./Timestamps";
import type { User } from "./User";
export interface Session extends Timestamps {
_id: ObjectId;
sessionId: string;
userId: User["_id"];
userAgent?: string;
ip?: string;
expiresAt: Date;
admin?: boolean;
}
| chat-ui/src/lib/types/Session.ts/0 | {
"file_path": "chat-ui/src/lib/types/Session.ts",
"repo_id": "chat-ui",
"token_count": 103
} | 84 |
const file2base64 = (file: File): Promise<string> => {
return new Promise<string>((resolve, reject) => {
const reader = new FileReader();
reader.readAsDataURL(file);
reader.onload = () => {
const dataUrl = reader.result as string;
const base64 = dataUrl.split(",")[1];
resolve(base64);
};
reader.onerror = (error) => reject(error);
});
};
export default file2base64;
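// Usage sketch (illustrative): `const base64 = await file2base64(file)` resolves with the raw
// base64 payload only; the `data:<mime>;base64,` prefix of the data URL is stripped first.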
| chat-ui/src/lib/utils/file2base64.ts/0 | {
"file_path": "chat-ui/src/lib/utils/file2base64.ts",
"repo_id": "chat-ui",
"token_count": 142
} | 85 |
const PUNCTUATION_REGEX = /\p{P}/gu;
function removeDiacritics(s: string, form: "NFD" | "NFKD" = "NFD"): string {
return s.normalize(form).replace(/[\u0300-\u036f]/g, "");
}
export function generateSearchTokens(value: string): string[] {
const fullTitleToken = removeDiacritics(value)
.replace(PUNCTUATION_REGEX, "")
.replaceAll(/\s+/g, "")
.toLowerCase();
return [
...new Set([
...removeDiacritics(value)
.split(/\s+/)
.map((word) => word.replace(PUNCTUATION_REGEX, "").toLowerCase())
.filter((word) => word.length),
...(fullTitleToken.length ? [fullTitleToken] : []),
]),
];
}
function escapeForRegExp(s: string): string {
return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
}
export function generateQueryTokens(query: string): RegExp[] {
return removeDiacritics(query)
.split(/\s+/)
.map((word) => word.replace(PUNCTUATION_REGEX, "").toLowerCase())
.filter((word) => word.length)
.map((token) => new RegExp(`^${escapeForRegExp(token)}`));
}
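// Worked example (illustrative): generateSearchTokens("Café du Monde") returns
// ["cafe", "du", "monde", "cafedumonde"], while generateQueryTokens("café mon") returns
// [/^cafe/, /^mon/], so each query token prefix-matches one of the search tokens.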
| chat-ui/src/lib/utils/searchTokens.ts/0 | {
"file_path": "chat-ui/src/lib/utils/searchTokens.ts",
"repo_id": "chat-ui",
"token_count": 426
} | 86 |
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
import { describe, expect, it } from "vitest";
import { convertLegacyConversation } from "./convertLegacyConversation";
import { insertLegacyConversation } from "./treeHelpers.spec";
describe("convertLegacyConversation", () => {
it("should convert a legacy conversation", async () => {
const convId = await insertLegacyConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
const newConv = convertLegacyConversation(conv);
expect(newConv.rootMessageId).toBe(newConv.messages[0].id);
expect(newConv.messages[0].ancestors).toEqual([]);
expect(newConv.messages[1].ancestors).toEqual([newConv.messages[0].id]);
expect(newConv.messages[0].children).toEqual([newConv.messages[1].id]);
});
it("should work on empty conversations", async () => {
const conv = {
_id: new ObjectId(),
rootMessageId: undefined,
messages: [],
};
const newConv = convertLegacyConversation(conv);
expect(newConv.rootMessageId).toBe(undefined);
expect(newConv.messages).toEqual([]);
});
});
| chat-ui/src/lib/utils/tree/convertLegacyConversation.spec.ts/0 | {
"file_path": "chat-ui/src/lib/utils/tree/convertLegacyConversation.spec.ts",
"repo_id": "chat-ui",
"token_count": 425
} | 87 |
import { base } from "$app/paths";
import { collections } from "$lib/server/database";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { z } from "zod";
import { config } from "$lib/server/config";
import { sendSlack } from "$lib/server/sendSlack";
import type { Assistant } from "$lib/types/Assistant";
export async function POST({ params, request, locals, url }) {
// is there already a report from this user for this assistant?
const report = await collections.reports.findOne({
createdBy: locals.user?._id ?? locals.sessionId,
object: "assistant",
contentId: new ObjectId(params.id),
});
if (report) {
return error(400, "Already reported");
}
const { reason } = z.object({ reason: z.string().min(1).max(128) }).parse(await request.json());
if (!reason) {
return error(400, "Invalid report reason");
}
const { acknowledged } = await collections.reports.insertOne({
_id: new ObjectId(),
contentId: new ObjectId(params.id),
object: "assistant",
createdBy: locals.user?._id ?? locals.sessionId,
createdAt: new Date(),
updatedAt: new Date(),
reason,
});
if (!acknowledged) {
return error(500, "Failed to report assistant");
}
if (config.WEBHOOK_URL_REPORT_ASSISTANT) {
const prefixUrl = config.PUBLIC_SHARE_PREFIX || `${config.PUBLIC_ORIGIN || url.origin}${base}`;
const assistantUrl = `${prefixUrl}/assistant/${params.id}`;
const assistant = await collections.assistants.findOne<Pick<Assistant, "name">>(
{ _id: new ObjectId(params.id) },
{ projection: { name: 1 } }
);
const username = locals.user?.username;
await sendSlack(
`🔴 Assistant <${assistantUrl}|${assistant?.name}> reported by ${
username ? `<http://hf.co/${username}|${username}>` : "non-logged in user"
}.\n\n> ${reason}`
);
}
return new Response("Assistant reported", { status: 200 });
}
| chat-ui/src/routes/api/assistant/[id]/report/+server.ts/0 | {
"file_path": "chat-ui/src/routes/api/assistant/[id]/report/+server.ts",
"repo_id": "chat-ui",
"token_count": 655
} | 88 |
import { adminTokenManager } from "$lib/server/adminToken";
import { z } from "zod";
const validateTokenSchema = z.object({
token: z.string(),
});
export const POST = async ({ request, locals }) => {
const { success, data } = validateTokenSchema.safeParse(await request.json());
if (!success) {
return new Response(JSON.stringify({ error: "Invalid token" }), { status: 400 });
}
if (adminTokenManager.checkToken(data.token, locals.sessionId)) {
return new Response(JSON.stringify({ valid: true }));
}
return new Response(JSON.stringify({ valid: false }));
};
| chat-ui/src/routes/api/user/validate-token/+server.ts/0 | {
"file_path": "chat-ui/src/routes/api/user/validate-token/+server.ts",
"repo_id": "chat-ui",
"token_count": 180
} | 89 |
import { authCondition } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
/**
* Ideally, we'd be able to detect the client-side abort, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
*/
export async function POST({ params, locals }) {
const conversationId = new ObjectId(params.id);
const conversation = await collections.conversations.findOne({
_id: conversationId,
...authCondition(locals),
});
if (!conversation) {
error(404, "Conversation not found");
}
await collections.abortedGenerations.updateOne(
{ conversationId },
{ $set: { updatedAt: new Date() }, $setOnInsert: { createdAt: new Date() } },
{ upsert: true }
);
return new Response();
}
| chat-ui/src/routes/conversation/[id]/stop-generating/+server.ts/0 | {
"file_path": "chat-ui/src/routes/conversation/[id]/stop-generating/+server.ts",
"repo_id": "chat-ui",
"token_count": 260
} | 90 |
<script lang="ts">
import {
ToolOutputComponents,
type CommunityToolEditable,
type ToolInput,
} from "$lib/types/Tool";
import { createEventDispatcher, onMount } from "svelte";
import { browser } from "$app/environment";
import ToolLogo from "$lib/components/ToolLogo.svelte";
import { colors, icons } from "$lib/utils/tools";
import { goto } from "$app/navigation";
import { base } from "$app/paths";
import ToolInputComponent from "./ToolInputComponent.svelte";
import { error as errorStore } from "$lib/stores/errors";
import CarbonInformation from "~icons/carbon/information";
import { page } from "$app/state";
import { handleResponse, useAPIClient } from "$lib/APIClient";
interface Props {
tool?: CommunityToolEditable | undefined;
readonly?: boolean;
}
let errors = $state<{ field: string; message: string }[]>([]);
let { tool = undefined, readonly = false }: Props = $props();
function getError(field: string) {
return errors.find((error) => error.field === field)?.message ?? "";
}
let APIloading = $state(false);
let formLoading = $state(false);
const client = useAPIClient();
const dispatch = createEventDispatcher<{ close: void }>();
onMount(async () => {
await updateConfig();
});
let spaceUrl = $state(tool?.baseUrl ?? "");
let editableTool: CommunityToolEditable = $state(
tool ?? {
displayName: "",
description: "",
// random color & icon for new tools
color: colors[Math.floor(Math.random() * colors.length)],
icon: icons[Math.floor(Math.random() * icons.length)],
baseUrl: "",
endpoint: "",
name: "",
inputs: [],
outputComponent: null,
outputComponentIdx: 0,
showOutput: true,
}
);
$effect(() => {
editableTool.baseUrl && (spaceUrl = editableTool.baseUrl);
});
async function updateConfig() {
if (!browser || !editableTool.baseUrl || !editableTool.endpoint) {
return;
}
APIloading = true;
const api = await client["spaces-config"]
.get({
query: {
space: editableTool.baseUrl,
},
})
.then(handleResponse);
const newInputs = api.named_endpoints[editableTool.endpoint].parameters.map((param, idx) => {
if (tool?.inputs[idx]?.name === param.parameter_name) {
// if the tool has the same name, we use the tool's type
return {
...tool?.inputs[idx],
} satisfies ToolInput;
}
const type = parseValidInputType(param.python_type.type);
if (param.parameter_has_default && param.python_type.type !== "filepath") {
// optional if it has a default
return {
name: param.parameter_name,
description: param.description,
paramType: "optional" as const,
default: param.parameter_default,
...(type === "file" ? { mimeTypes: "*/*", type } : { type }),
};
} else {
// required if it doesn't have a default
return {
name: param.parameter_name,
description: param.description,
paramType: "required" as const,
...(type === "file" ? { mimeTypes: "*/*", type } : { type }),
};
}
});
editableTool.inputs = newInputs;
// output components
const parsedOutputComponent = ToolOutputComponents.safeParse(
api.named_endpoints[editableTool.endpoint].returns?.[0]?.component ?? null
);
if (parsedOutputComponent.success) {
editableTool.outputComponent = "0;" + parsedOutputComponent.data;
} else {
errors = [
{
field: "outputComponent",
message: `Invalid output component. Type ${
api.named_endpoints[editableTool.endpoint].returns?.[0]?.component
} is not yet supported. Feel free to report this issue so we can add support for it.`,
},
];
editableTool.outputComponent = null;
}
APIloading = false;
}
async function onEndpointChange(e: Event) {
const target = e.target as HTMLInputElement;
editableTool.endpoint = target.value;
editableTool.name = target.value.replace(/\//g, "");
await updateConfig();
}
function parseValidInputType(type: string) {
switch (type) {
case "str":
case "int":
case "float":
case "bool":
return type;
case "filepath":
return "file" as const;
default:
return "str";
}
}
let formSubmittable = $derived(
editableTool.name && editableTool.baseUrl && editableTool.outputComponent
);
</script>
<form
class="relative flex h-full flex-col overflow-y-auto p-4 md:p-8"
onsubmit={async (e) => {
e.preventDefault();
formLoading = true;
errors = [];
try {
const body = JSON.stringify(editableTool);
let response: Response;
if (page.params.toolId) {
response = await fetch(`${base}/api/tools/${page.params.toolId}`, {
method: "PATCH",
headers: { "Content-Type": "application/json" },
body,
});
} else {
response = await fetch(`${base}/api/tools`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body,
});
}
if (response.ok) {
const { toolId } = await response.json();
goto(`${base}/tools/${toolId}`, { invalidateAll: true });
} else if (response.status === 400) {
const data = await response.json();
errors = data.errors;
} else {
$errorStore = response.statusText;
}
} catch (e) {
$errorStore = (e as Error).message;
} finally {
formLoading = false;
}
}}
>
{#if tool}
<h2 class="text-xl font-semibold">
{readonly ? "View" : "Edit"} Tool: {tool.displayName}
</h2>
{#if !readonly}
<p class="mb-6 text-sm text-gray-500">
Modifying an existing tool will propagate the changes to all users.
</p>
{/if}
{:else}
<h2 class="text-xl font-semibold">Create new tool</h2>
<p class="mb-6 text-sm text-gray-500">
Create and share your own tools. All tools are <span
class="rounded-full border px-2 py-0.5 leading-none">public</span
>
</p>
{/if}
<div class="grid h-full w-full flex-1 grid-cols-2 gap-6 text-sm max-sm:grid-cols-1">
<div class="col-span-1 flex flex-col gap-4">
<div class="flex flex-col gap-4">
<label>
<div class="mb-1 font-semibold">Tool Display Name</div>
<input
type="text"
name="displayName"
disabled={readonly}
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
placeholder="Image generator"
bind:value={editableTool.displayName}
/>
<p class="text-xs text-red-500">{getError("displayName")}</p>
</label>
<div class="flex flex-row gap-4">
<div>
{#key editableTool.color + editableTool.icon}
<ToolLogo color={editableTool.color} icon={editableTool.icon} />
{/key}
</div>
<label class="flex-grow">
<div class="mb-1 font-semibold">Icon</div>
<select
name="icon"
disabled={readonly}
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
bind:value={editableTool.icon}
>
{#each icons as icon}
<option value={icon}>{icon}</option>
{/each}
</select>
<p class="text-xs text-red-500">{getError("icon")}</p>
</label>
<label class="flex-grow">
<div class="mb-1 font-semibold">Color scheme</div>
<select
name="color"
disabled={readonly}
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
bind:value={editableTool.color}
>
{#each colors as color}
<option value={color}>{color}</option>
{/each}
</select>
<p class="text-xs text-red-500">{getError("color")}</p>
</label>
</div>
<label>
<div class=" font-semibold">Tool Description</div>
<p class="mb-1 text-sm text-gray-500">
This description will be passed to the model when picking tools. Describe what your tool
does and when it is appropriate to use.
</p>
<textarea
name="description"
disabled={readonly}
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
placeholder="This tool lets you generate images using SDXL."
bind:value={editableTool.description}
></textarea>
<p class="text-xs text-red-500">{getError("description")}</p>
</label>
<label>
<div class="mb-1 font-semibold">Hugging Face Space URL</div>
<p class="mb-1 text-sm text-gray-500">
Specify the Hugging Face Space where your tool is hosted. <a
href="https://huggingface.co/spaces"
target="_blank"
class="underline">See trending spaces here</a
>.
</p>
<input
type="text"
name="spaceUrl"
disabled={readonly}
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
placeholder="ByteDance/Hyper-SDXL-1Step-T2I"
bind:value={editableTool.baseUrl}
/>
<p class="text-xs text-red-500">{getError("spaceUrl")}</p>
</label>
<p class="text-justify text-gray-800">
Tools allow models that support them to use external applications directly via function
calling. Tools must use Hugging Face Gradio Spaces as we detect the input and output types
automatically from the <a
class="underline"
href="https://www.gradio.app/guides/sharing-your-app#api-page">Gradio API</a
>. For GPU-intensive tools, consider using a ZeroGPU Space.
</p>
</div>
</div>
<div class="col-span-1 flex flex-col gap-4">
<div class="flex flex-col gap-2">
<h3 class="mb-1 font-semibold">Functions</h3>
{#if editableTool.baseUrl}
<p class="text-sm text-gray-500">Choose functions that can be called in your tool.</p>
{:else}
<p class="text-sm text-gray-500">Start by specifying a Hugging Face Space URL.</p>
{/if}
{#if editableTool.baseUrl}
{#await client["spaces-config"].get({ query: { space: spaceUrl } }).then(handleResponse)}
<p class="text-sm text-gray-500">Loading...</p>
{:then api}
<div class="flex flex-row flex-wrap gap-4">
{#each Object.keys(api["named_endpoints"] ?? {}) as name}
<label class="rounded-lg bg-gray-200 p-2">
<input
type="radio"
disabled={readonly}
oninput={onEndpointChange}
bind:group={editableTool.endpoint}
value={name}
name="endpoint"
/>
<span
class="font-mono text-gray-800"
class:font-semibold={editableTool.endpoint === name}>{name}</span
>
</label>
{/each}
</div>
{#if editableTool.endpoint && api["named_endpoints"][editableTool.endpoint] && !APIloading}
{@const endpoint = api["named_endpoints"][editableTool.endpoint]}
<div class="flex flex-col gap-2">
<div class="flex flex-col gap-2 rounded-lg border border-gray-200 p-2">
<div class="flex items-center gap-1 border-b border-gray-200 p-1 pb-2">
<span class="flex-grow font-mono text-smd font-semibold"
>{editableTool.endpoint}</span
>
<label class="ml-auto">
<span
class="group relative flex w-max items-center justify-center text-sm font-semibold text-gray-700"
>
AI Function Name
<CarbonInformation class="m-1 align-middle text-xs text-purple-500" />
<div
class="pointer-events-none absolute -top-16 right-0 w-max rounded-md bg-gray-100 p-2 opacity-0 transition-opacity group-hover:opacity-100 dark:bg-gray-800"
>
<p class="max-w-sm text-sm font-normal text-gray-800 dark:text-gray-200">
This is the function name that will be used when prompting the model.
Make sure it describes your tool well, is short and unique.
</p>
</div>
</span>
<input
class="h-fit rounded-lg border-2 border-gray-200 bg-gray-100 p-1"
type="text"
name="name"
disabled={readonly}
bind:value={editableTool.name}
/>
</label>
</div>
<div>
<h3 class="text-lg font-semibold">Arguments</h3>
<p class="mb-2 text-sm text-gray-500">
Choose parameters that can be passed to your tool.
</p>
</div>
<p class="text-xs text-red-500">
{getError("inputs")}
</p>
{#each editableTool.inputs as input, inputIdx}
{@const parameter = endpoint.parameters.find(
(parameter) => parameter.parameter_name === input.name
)}
<div class="flex items-center gap-1">
<div class="inline w-full">
<span class="font-mono text-sm">{input.name}</span>
<span
class="inline-block max-w-lg truncate rounded-lg bg-orange-50 p-1 text-sm text-orange-800"
>{parameter?.python_type.type}</span
>
{#if parameter?.description}
<p class="text-sm text-gray-500">
{parameter.description}
</p>
{/if}
<div class="flex w-fit justify-start gap-2">
<label class="ml-auto">
<input
type="radio"
name="{input.name}-parameter-type"
value="required"
disabled={readonly}
bind:group={editableTool.inputs[inputIdx].paramType}
/>
<span class="text-sm text-gray-500">Required</span>
</label>
<label class="ml-auto">
<input
type="radio"
name="{input.name}-parameter-type"
value="optional"
disabled={readonly || parameter?.python_type.type === "filepath"}
bind:group={editableTool.inputs[inputIdx].paramType}
/>
<span class="text-sm text-gray-500">Optional</span>
</label>
<label class="ml-auto">
<input
type="radio"
name="{input.name}-parameter-type"
value="fixed"
disabled={readonly || parameter?.python_type.type === "filepath"}
bind:group={editableTool.inputs[inputIdx].paramType}
/>
<span class="text-sm text-gray-500">Fixed</span>
</label>
</div>
</div>
</div>
<!-- for required we need a description, for optional we need a default value and for fixed we need a value -->
{#if input.paramType === "required" || input.paramType === "optional"}
<label class="flex flex-row gap-2">
<div class="mb-1 font-semibold">
Description
<p class="text-xs font-normal text-gray-500">
Will be passed in the model prompt; make it as clear and concise as
possible.
</p>
</div>
<textarea
name="{input.name}-description"
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
placeholder="This is the description of the input."
bind:value={input.description}
disabled={readonly}
></textarea>
</label>
{/if}
{#if input.paramType === "optional" || input.paramType === "fixed"}
{@const isOptional = input.paramType === "optional"}
<div class="flex flex-row gap-2">
<div class="mb-1 flex-grow font-semibold">
{isOptional ? "Default value" : "Value"}
<p class="text-xs font-normal text-gray-500">
{#if isOptional}
The tool will use this value by default but the model can specify a
different one.
{:else}
The tool will use this value and it cannot be changed.
{/if}
</p>
</div>
{#if input.paramType === "optional"}
<ToolInputComponent
type={parameter?.python_type.type ?? "str"}
disabled={readonly}
bind:value={input.default}
/>
{:else}
<ToolInputComponent
type={parameter?.python_type.type ?? "str"}
disabled={readonly}
bind:value={input.value}
/>
{/if}
</div>
{/if}
{#if input.type === "file"}
<label class="flex flex-row gap-2">
<div class="mb-1 font-semibold">
MIME types
<p class="text-xs font-normal text-gray-500">
This input is a file. Specify the MIME types that are allowed to be
passed to the tool.
</p>
</div>
<select
name="{input.name}-mimeTypes"
class="h-fit w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
bind:value={input.mimeTypes}
disabled={readonly}
>
<option value="image/*">image/*</option>
<option value="audio/*">audio/*</option>
<option value="video/*">video/*</option>
<option value="application/pdf">application/pdf</option>
<option value="text/csv">text/csv</option>
<option value="*/*">*/*</option>
</select></label
>
{/if}
<!-- divider -->
<div
class="flex w-full flex-row flex-nowrap gap-2 border-b border-gray-200 pt-2"
></div>
{/each}
<div class="flex flex-col gap-4">
<div>
<h3 class="text-lg font-semibold">Output options</h3>
<p class="mb-2 text-sm text-gray-500">
Choose what value your tool will return and how
</p>
</div>
<label class="flex flex-col gap-2" for="showOutput">
<div class="mb-1 font-semibold">
Output component
<p class="text-xs font-normal text-gray-500">
Pick the Gradio output component whose output will be used in the tool.
</p>
</div>
{#if editableTool.outputComponent}
{#if api.named_endpoints[editableTool.endpoint].returns.length > 1}
<div class="flex flex-row gap-4">
{#each api.named_endpoints[editableTool.endpoint].returns as { component }, idx}
<label class="text-gray-800">
<input
type="radio"
disabled={readonly ||
!ToolOutputComponents.safeParse(component).success}
bind:group={editableTool.outputComponent}
value={idx + ";" + component.toLowerCase()}
name="outputComponent"
/>
<span
class="font-mono"
class:text-gray-400={!ToolOutputComponents.safeParse(component)
.success}
class:font-semibold={editableTool?.outputComponent?.split(
";"
)[1] === component}>{component.toLowerCase()}-{idx}</span
>
</label>
{/each}
</div>
{:else}
<div>
<input disabled checked type="radio" />
<span class="font-mono text-gray-800"
>{editableTool.outputComponent.split(";")[1]}</span
>
</div>
{/if}
{/if}
<p class="text-xs text-red-500">
{getError("outputComponent")}
</p>
</label>
<label class="flex flex-row gap-2" for="showOutput">
<div class="mb-1 font-semibold">
Show output to user directly
<p class="text-xs font-normal text-gray-500">
Some tools return long context that should not be shown to the user
directly.
</p>
</div>
<input
type="checkbox"
name="showOutput"
bind:checked={editableTool.showOutput}
class="peer rounded-lg border-2 border-gray-200 bg-gray-100 p-1"
/>
<p class="text-xs text-red-500">
{getError("showOutput")}
</p>
</label>
</div>
</div>
</div>
{:else if APIloading}
<p class="text-sm text-gray-500">Loading API...</p>
{:else if !api["named_endpoints"]}
<p class="font-medium text-red-800">
No endpoints found in this space. Try another one.
</p>
{/if}
{:catch rejected}
<p class="text-sm text-gray-500">{JSON.parse(rejected.message).value}</p>
{/await}
{/if}
</div>
<div class="relative bottom-0 mb-4 mt-auto flex w-full flex-row justify-end gap-2">
<button
type="button"
class="mt-4 w-fit rounded-full bg-gray-200 px-4 py-2 font-semibold text-gray-700"
onclick={() => dispatch("close")}
>
Cancel
</button>
{#if !readonly}
<button
type="submit"
disabled={formLoading || !formSubmittable}
class="mt-4 w-fit rounded-full bg-black px-4 py-2 font-semibold"
class:text-white={!formLoading && formSubmittable}
class:text-gray-300={formLoading || !formSubmittable}
class:bg-gray-400={formLoading || !formSubmittable}
>
{formLoading ? "Saving..." : "Save"}
</button>
{/if}
</div>
</div>
</div>
</form>
| chat-ui/src/routes/tools/ToolEdit.svelte/0 | {
"file_path": "chat-ui/src/routes/tools/ToolEdit.svelte",
"repo_id": "chat-ui",
"token_count": 10344
} | 91 |
{
"background_color": "#ffffff",
"name": "Chat UI",
"short_name": "Chat UI",
"display": "standalone",
"start_url": "/",
"icons": [
{
"src": "/chatui/icon-128x128.png",
"sizes": "128x128",
"type": "image/png"
},
{
"src": "/chatui/icon-256x256.png",
"sizes": "256x256",
"type": "image/png"
},
{
"src": "/chatui/icon-512x512.png",
"sizes": "512x512",
"type": "image/png"
}
]
}
| chat-ui/static/chatui/manifest.json/0 | {
"file_path": "chat-ui/static/chatui/manifest.json",
"repo_id": "chat-ui",
"token_count": 218
} | 92 |
{
"background_color": "#ffffff",
"name": "HuggingChat",
"short_name": "HuggingChat",
"display": "standalone",
"start_url": "/chat",
"icons": [
{
"src": "/chat/huggingchat/icon-36x36.png",
"sizes": "36x36",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-48x48.png",
"sizes": "48x48",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-72x72.png",
"sizes": "72x72",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-96x96.png",
"sizes": "96x96",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-128x128.png",
"sizes": "128x128",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-144x144.png",
"sizes": "144x144",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-192x192.png",
"sizes": "192x192",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-256x256.png",
"sizes": "256x256",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-512x512.png",
"sizes": "512x512",
"type": "image/png"
}
]
}
| chat-ui/static/huggingchat/manifest.json/0 | {
"file_path": "chat-ui/static/huggingchat/manifest.json",
"repo_id": "chat-ui",
"token_count": 569
} | 93 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
def wrapper(*args, **kwargs):
starttime = timeit.default_timer()
_ = func(*args, **kwargs)
delta = timeit.default_timer() - starttime
return delta
wrapper.__name__ = func.__name__
return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
dummy_data = []
seq_shapes = seq_shapes or {}
for i in range(num_examples):
example = {}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(v, _ArrayXD):
data = np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(v, datasets.Value):
if v.dtype == "string":
data = "The small grey turtle was surprisingly fast when challenged."
else:
data = np.random.randint(10, size=1).astype(v.dtype).item()
elif isinstance(v, datasets.Sequence):
while isinstance(v, datasets.Sequence):
v = v.feature
shape = seq_shapes[k]
data = np.random.rand(*shape).astype(v.dtype)
example[k] = data
dummy_data.append((i, example))
return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
with ArrowWriter(features=features, path=dataset_path) as writer:
for key, record in dummy_data:
example = features.encode_example(record)
writer.write(example)
num_final_examples, num_bytes = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
)
dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
return dataset
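# Usage sketch (illustrative):
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
# writes 10 dummy examples to an Arrow file and returns the resulting `datasets.Dataset`.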
| datasets/benchmarks/utils.py/0 | {
"file_path": "datasets/benchmarks/utils.py",
"repo_id": "datasets",
"token_count": 927
} | 94 |
# Command Line Interface (CLI)
🤗 Datasets provides a command line interface (CLI) with useful shell commands to interact with your dataset.
You can check the available commands:
```bash
>>> datasets-cli --help
usage: datasets-cli <command> [<args>]
positional arguments:
{env,test,delete_from_hub}
datasets-cli command helpers
env Print relevant system environment info.
test Test dataset loading.
delete_from_hub Delete dataset config from the Hub
optional arguments:
-h, --help show this help message and exit
```
## Delete from Hub
Delete a dataset configuration from a [supported dataset](repository_structure) on the Hub.
```bash
>>> datasets-cli delete_from_hub --help
usage: datasets-cli <command> [<args>] delete_from_hub [-h] [--token TOKEN] [--revision REVISION] dataset_id config_name
positional arguments:
dataset_id source dataset ID, e.g. USERNAME/DATASET_NAME or ORGANIZATION/DATASET_NAME
config_name config name to delete
optional arguments:
-h, --help show this help message and exit
--token TOKEN access token to the Hugging Face Hub
--revision REVISION source revision
```
For example:
```bash
>>> datasets-cli delete_from_hub USERNAME/DATASET_NAME CONFIG_NAME
```
<Tip>
Do not forget that you need to log in first to your Hugging Face account:
```bash
>>> hf auth login
```
</Tip>
| datasets/docs/source/cli.mdx/0 | {
"file_path": "datasets/docs/source/cli.mdx",
"repo_id": "datasets",
"token_count": 515
} | 95 |
# Installation
Before you start, you'll need to set up your environment and install the appropriate packages. 🤗 Datasets is tested on **Python 3.9+**.
<Tip>
If you want to use 🤗 Datasets with TensorFlow or PyTorch, you'll need to install them separately. Refer to the [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2-packages-are-available) or the [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) for the specific install command for your framework.
</Tip>
## Virtual environment
You should install 🤗 Datasets in a [virtual environment](https://docs.python.org/3/library/venv.html) to keep things tidy and avoid dependency conflicts.
1. Create and navigate to your project directory:
```bash
mkdir ~/my-project
cd ~/my-project
```
2. Start a virtual environment inside your directory:
```bash
python -m venv .env
```
3. Activate and deactivate the virtual environment with the following commands:
```bash
# Activate the virtual environment
source .env/bin/activate
# Deactivate the virtual environment
source .env/bin/deactivate
```
Once you've created your virtual environment, you can install 🤗 Datasets in it.
## pip
The most straightforward way to install 🤗 Datasets is with pip:
```bash
pip install datasets
```
Run the following command to check if 🤗 Datasets has been properly installed:
```bash
python -c "from datasets import load_dataset; print(load_dataset('rajpurkar/squad', split='train')[0])"
```
This command downloads version 1 of the [Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer/), loads the training split, and prints the first training example. You should see:
```python
{'answers': {'answer_start': [515], 'text': ['Saint Bernadette Soubirous']}, 'context': 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.', 'id': '5733be284776f41900661182', 'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?', 'title': 'University_of_Notre_Dame'}
```
## Audio
To work with audio datasets, you need to install the [`Audio`] feature as an extra dependency:
```bash
pip install datasets[audio]
```
## Vision
To work with image datasets, you need to install the [`Image`] feature as an extra dependency:
```bash
pip install datasets[vision]
```
## source
Building 🤗 Datasets from source lets you make changes to the code base. To install from the source, clone the repository and install with the following commands:
```bash
git clone https://github.com/huggingface/datasets.git
cd datasets
pip install -e .
```
Again, you can check if 🤗 Datasets was properly installed with the following command:
```bash
python -c "from datasets import load_dataset; print(load_dataset('rajpurkar/squad', split='train')[0])"
```
## conda
🤗 Datasets can also be installed from conda, a package management system:
```bash
conda install -c huggingface -c conda-forge datasets
```
| datasets/docs/source/installation.md/0 | {
"file_path": "datasets/docs/source/installation.md",
"repo_id": "datasets",
"token_count": 1063
} | 96 |
# Stream
Dataset streaming lets you work with a dataset without downloading it.
The data is streamed as you iterate over the dataset.
This is especially helpful when:
- You don't want to wait for an extremely large dataset to download.
- The dataset size exceeds the amount of available disk space on your computer.
- You want to quickly explore just a few samples of a dataset.
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/streaming.gif"/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/streaming-dark.gif"/>
</div>
For example, the English split of the [HuggingFaceFW/fineweb](https://huggingface.co/datasets/HuggingFaceFW/fineweb) dataset is 45 terabytes, but you can use it instantly with streaming. Stream a dataset by setting `streaming=True` in [`load_dataset`] as shown below:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('HuggingFaceFW/fineweb', split='train', streaming=True)
>>> print(next(iter(dataset)))
{'text': "How AP reported in all formats from tornado-stricken regionsMarch 8, 2012\nWhen the first serious bout of tornadoes of 2012 blew through middle America in the middle of the night, they touched down in places hours from any AP bureau...
```
Dataset streaming also lets you work with a dataset made of local files without doing any conversion.
In this case, the data is streamed from the local files as you iterate over the dataset.
This is especially helpful when:
- You don't want to wait for an extremely large local dataset to be converted to Arrow.
- The converted files size would exceed the amount of available disk space on your computer.
- You want to quickly explore just a few samples of a dataset.
For example, you can stream a local dataset of hundreds of compressed JSONL files like [oscar-corpus/OSCAR-2201](https://huggingface.co/datasets/oscar-corpus/OSCAR-2201) to use it instantly:
```py
>>> from datasets import load_dataset
>>> data_files = {'train': 'path/to/OSCAR-2201/compressed/en_meta/*.jsonl.gz'}
>>> dataset = load_dataset('json', data_files=data_files, split='train', streaming=True)
>>> print(next(iter(dataset)))
{'id': 0, 'text': 'Founded in 2015, Golden Bees is a leading programmatic recruitment platform dedicated to employers, HR agencies and job boards. The company has developed unique HR-custom technologies and predictive algorithms to identify and attract the best candidates for a job opportunity.', ...
```
Loading a dataset in streaming mode creates a new dataset type instance (instead of the classic [`Dataset`] object), known as an [`IterableDataset`].
This special type of dataset has its own set of processing methods shown below.
<Tip>
An [`IterableDataset`] is useful for iterative jobs like training a model.
You shouldn't use an [`IterableDataset`] for jobs that require random access to examples, because you would have to iterate over all of it using a for loop. Getting the last example in an iterable dataset would require you to iterate over all the previous examples.
You can find more details in the [Dataset vs. IterableDataset guide](./about_mapstyle_vs_iterable).
</Tip>
## Column indexing
Sometimes it is convenient to iterate over values of a specific column. Fortunately, an [`IterableDataset`] supports column indexing:
```python
>>> from datasets import load_dataset
>>> dataset = load_dataset("allenai/c4", "en", streaming=True, split="train")
>>> print(next(iter(dataset["text"])))
Beginners BBQ Class Taking Place in Missoula!...
```
## Convert from a Dataset
If you have an existing [`Dataset`] object, you can convert it to an [`IterableDataset`] with the [`~Dataset.to_iterable_dataset`] function. This is actually faster than setting the `streaming=True` argument in [`load_dataset`] because the data is streamed from local files.
```py
>>> from datasets import load_dataset
# faster 🐇
>>> dataset = load_dataset("ethz/food101")
>>> iterable_dataset = dataset.to_iterable_dataset()
# slower 🐢
>>> iterable_dataset = load_dataset("ethz/food101", streaming=True)
```
The [`~Dataset.to_iterable_dataset`] function supports sharding when the [`IterableDataset`] is instantiated. This is useful when working with big datasets, and you'd like to shuffle the dataset or to enable fast parallel loading with a PyTorch DataLoader.
```py
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("ethz/food101")
>>> iterable_dataset = dataset.to_iterable_dataset(num_shards=64) # shard the dataset
>>> iterable_dataset = iterable_dataset.shuffle(buffer_size=10_000) # shuffles the shard order and uses a shuffle buffer when you start iterating
>>> dataloader = torch.utils.data.DataLoader(iterable_dataset, num_workers=4) # assigns 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
```
## Shuffle
Like a regular [`Dataset`] object, you can also shuffle an [`IterableDataset`] with [`IterableDataset.shuffle`].
The `buffer_size` argument controls the size of the buffer to randomly sample examples from. Let's say your dataset has one million examples, and you set the `buffer_size` to ten thousand. [`IterableDataset.shuffle`] will randomly select examples from the first ten thousand examples in the buffer. Selected examples in the buffer are replaced with new examples. By default, the buffer size is 1,000.
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('HuggingFaceFW/fineweb', split='train', streaming=True)
>>> shuffled_dataset = dataset.shuffle(seed=42, buffer_size=10_000)
```
<Tip>
[`IterableDataset.shuffle`] will also shuffle the order of the shards if the dataset is sharded into multiple files.
</Tip>
## Reshuffle
Sometimes you may want to reshuffle the dataset after each epoch. This will require you to set a different seed for each epoch. Use [`IterableDataset.set_epoch`] in between epochs to tell the dataset what epoch you're on.
Your seed effectively becomes: `initial seed + current epoch`.
```py
>>> for epoch in range(epochs):
... shuffled_dataset.set_epoch(epoch)
... for example in shuffled_dataset:
... ...
```
## Split dataset
You can split your dataset one of two ways:
- [`IterableDataset.take`] returns the first `n` examples in a dataset:
```py
>>> dataset = load_dataset('HuggingFaceFW/fineweb', split='train', streaming=True)
>>> dataset_head = dataset.take(2)
>>> list(dataset_head)
[{'text': "How AP reported in all formats from tor...},
{'text': 'Did you know you have two little yellow...}]
```
- [`IterableDataset.skip`] omits the first `n` examples in a dataset and returns the remaining examples:
```py
>>> train_dataset = shuffled_dataset.skip(1000)
```
<Tip warning={true}>
`take` and `skip` prevent future calls to `shuffle` because they lock in the order of the shards. You should `shuffle` your dataset before splitting it.
</Tip>
<a id='interleave_datasets'></a>
### Shard
🤗 Datasets supports sharding to divide a very large dataset into a predefined number of chunks. Specify the `num_shards` parameter in [`~IterableDataset.shard`] to determine the number of shards to split the dataset into. You'll also need to provide the shard you want to return with the `index` parameter.
For example, the [amazon_polarity](https://huggingface.co/datasets/amazon_polarity) dataset has 4 shards (in this case they are 4 Parquet files):
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("amazon_polarity", split="train", streaming=True)
>>> print(dataset)
IterableDataset({
features: ['label', 'title', 'content'],
num_shards: 4
})
```
After sharding the dataset into two chunks, the first one will only have 2 shards:
```py
>>> dataset.shard(num_shards=2, index=0)
IterableDataset({
features: ['label', 'title', 'content'],
num_shards: 2
})
```
If your dataset has `dataset.num_shards==1`, you should chunk it using [`IterableDataset.skip`] and [`IterableDataset.take`] instead.
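For example, a minimal sketch of that manual chunking (the chunk size of 100,000 examples is an arbitrary assumption here) could look like:
```py
>>> first_chunk = dataset.take(100_000) # first 100,000 examples
>>> remaining = dataset.skip(100_000) # everything after the first 100,000 examples
```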
## Interleave
[`interleave_datasets`] can combine an [`IterableDataset`] with other datasets. The combined dataset returns alternating examples from each of the original datasets.
```py
>>> from datasets import interleave_datasets
>>> es_dataset = load_dataset('allenai/c4', 'es', split='train', streaming=True)
>>> fr_dataset = load_dataset('allenai/c4', 'fr', split='train', streaming=True)
>>> multilingual_dataset = interleave_datasets([es_dataset, fr_dataset])
>>> list(multilingual_dataset.take(2))
[{'text': 'Comprar Zapatillas para niña en chancla con goma por...'},
{'text': 'Le sacre de philippe ier, 23 mai 1059 - Compte Rendu...'}]
```
Define sampling probabilities from each of the original datasets for more control over how each of them are sampled and combined. Set the `probabilities` argument with your desired sampling probabilities:
```py
>>> multilingual_dataset_with_oversampling = interleave_datasets([es_dataset, fr_dataset], probabilities=[0.8, 0.2], seed=42)
>>> list(multilingual_dataset_with_oversampling.take(2))
[{'text': 'Comprar Zapatillas para niña en chancla con goma por...'},
{'text': 'Chevrolet Cavalier Usados en Bogota - Carros en Vent...'}]
```
Around 80% of the final dataset is made of the `es_dataset`, and 20% of the `fr_dataset`.
You can also specify the `stopping_strategy`. The default strategy, `first_exhausted`, is a subsampling strategy, i.e. the dataset construction is stopped as soon as one of the datasets runs out of samples.
You can specify `stopping_strategy=all_exhausted` to execute an oversampling strategy. In this case, the dataset construction is stopped as soon as every sample in every dataset has been added at least once. In practice, it means that if a dataset is exhausted, it will return to the beginning of this dataset until the stop criterion has been reached.
Note that if no sampling probabilities are specified, the new dataset will have `max_length_datasets*nb_dataset` samples.
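For instance, a minimal sketch of the oversampling strategy, reusing the `es_dataset` and `fr_dataset` from above, could look like:
```py
>>> balanced_dataset = interleave_datasets([es_dataset, fr_dataset], stopping_strategy="all_exhausted")
>>> # exhausted datasets restart from their beginning until every example from every dataset has appeared at least once
```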
## Rename, remove, and cast
The following methods allow you to modify the columns of a dataset. These methods are useful for renaming or removing columns and changing columns to a new set of features.
### Rename
Use [`IterableDataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place.
Provide [`IterableDataset.rename_column`] with the name of the original column, and the new column name:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('allenai/c4', 'en', streaming=True, split='train')
>>> dataset = dataset.rename_column("text", "content")
```
### Remove
When you need to remove one or more columns, give [`IterableDataset.remove_columns`] the name of the column to remove. Remove more than one column by providing a list of column names:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('allenai/c4', 'en', streaming=True, split='train')
>>> dataset = dataset.remove_columns('timestamp')
```
### Cast
[`IterableDataset.cast`] changes the feature type of one or more columns. This method takes your new `Features` as its argument. The following sample code shows how to change the feature types of `ClassLabel` and `Value`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('nyu-mll/glue', 'mrpc', split='train', streaming=True)
>>> dataset.features
{'sentence1': Value('string'),
'sentence2': Value('string'),
'label': ClassLabel(names=['not_equivalent', 'equivalent']),
'idx': Value('int32')}
>>> from datasets import ClassLabel, Value
>>> new_features = dataset.features.copy()
>>> new_features["label"] = ClassLabel(names=['negative', 'positive'])
>>> new_features["idx"] = Value('int64')
>>> dataset = dataset.cast(new_features)
>>> dataset.features
{'sentence1': Value('string'),
'sentence2': Value('string'),
'label': ClassLabel(names=['negative', 'positive']),
'idx': Value('int64')}
```
<Tip>
Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value('int32')` to `Value('bool')` if the original column only contains ones and zeros.
</Tip>
Use [`IterableDataset.cast_column`] to change the feature type of just one column. Pass the column name and its new feature type as arguments:
```py
>>> dataset.features
{'audio': Audio(sampling_rate=44100, mono=True)}
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
>>> dataset.features
{'audio': Audio(sampling_rate=16000, mono=True)}
```
## Map
Similar to the [`Dataset.map`] function for a regular [`Dataset`], 🤗 Datasets features [`IterableDataset.map`] for processing an [`IterableDataset`].
[`IterableDataset.map`] applies processing on-the-fly when examples are streamed.
It allows you to apply a processing function to each example in a dataset, independently or in batches. This function can even create new rows and columns.
The following example demonstrates how to add a prefix to each `text` field of an [`IterableDataset`]. The function needs to accept and output a `dict`:
```py
>>> def add_prefix(example):
... example['text'] = 'My text: ' + example['text']
... return example
```
Next, apply this function to the dataset with [`IterableDataset.map`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('allenai/c4', 'en', streaming=True, split='train')
>>> updated_dataset = dataset.map(add_prefix)
>>> list(updated_dataset.take(3))
[{'text': 'My text: Beginners BBQ Class Taking Place in Missoula!\nDo you want to get better at making...',
'timestamp': '2019-04-25 12:57:54',
'url': 'https://klyq.com/beginners-bbq-class-taking-place-in-missoula/'},
{'text': 'My text: Discussion in \'Mac OS X Lion (10.7)\' started by axboi87, Jan 20, 2012.\nI\'ve go...',
'timestamp': '2019-04-21 10:07:13',
'url': 'https://forums.macrumors.com/threads/restore-from-larger-disk-to-smaller-disk.1311329/'},
{'text': 'My text: Foil plaid lycra and spandex shortall with metallic slinky insets. Attached metall...',
'timestamp': '2019-04-25 10:40:23',
'url': 'https://awishcometrue.com/Catalogs/Clearance/Tweens/V1960-Find-A-Way'}]
```
Let's take a look at another example, except this time, you will remove columns with [`IterableDataset.map`]. When you remove a column, it is only removed after the example has been provided to the mapped function. This allows the mapped function to use the content of the columns before they are removed.
Specify the column to remove with the `remove_columns` argument in [`IterableDataset.map`]:
```py
>>> updated_dataset = dataset.map(add_prefix, remove_columns=["timestamp", "url"])
>>> list(updated_dataset.take(3))
[{'text': 'My text: Beginners BBQ Class Taking Place in Missoula!\nDo you want to get better at making...'},
{'text': 'My text: Discussion in \'Mac OS X Lion (10.7)\' started by axboi87, Jan 20, 2012.\nI\'ve go...'},
{'text': 'My text: Foil plaid lycra and spandex shortall with metallic slinky insets. Attached metall...'}]
```
### Batch processing
[`IterableDataset.map`] also supports working with batches of examples. Operate on batches by setting `batched=True`. The default batch size is 1000, but you can adjust it with the `batch_size` argument. This opens the door to many interesting applications such as tokenization, splitting long sentences into shorter chunks, and data augmentation.
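As a sketch of the chunking use case mentioned above (the chunk length of 256 characters is an arbitrary choice for illustration), a batched function may return a different number of rows than it received, as long as the input columns that are not returned are removed:
```py
>>> def chunk_text(batch):
...     # split each document into fixed-size character chunks;
...     # the output batch can contain more rows than the input batch
...     chunks = [text[i : i + 256] for text in batch["text"] for i in range(0, len(text), 256)]
...     return {"text": chunks}
>>> chunked_dataset = dataset.map(chunk_text, batched=True, remove_columns=["timestamp", "url"])
```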
#### Tokenization
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> dataset = load_dataset("allenai/c4", "en", streaming=True, split="train")
>>> tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
>>> def encode(examples):
... return tokenizer(examples['text'], truncation=True, padding='max_length')
>>> dataset = dataset.map(encode, batched=True, remove_columns=["text", "timestamp", "url"])
>>> next(iter(dataset))
{'input_ids': [101, 4088, 16912, 22861, 4160, 2465, 2635, 2173, 1999, 3335, ..., 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ..., 0, 0]}
```
<Tip>
See other examples of batch processing in the [batched map processing](./process#batch-processing) documentation. They work the same for iterable datasets.
</Tip>
### Filter
You can filter rows in the dataset based on a predicate function using [`IterableDataset.filter`]. It returns rows that match a specified condition:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('HuggingFaceFW/fineweb', streaming=True, split='train')
>>> start_with_sf = dataset.filter(lambda example: example['text'].startswith('San Francisco'))
>>> next(iter(start_with_sf))
{'text': 'San Francisco 49ers cornerback Shawntae Spencer will miss the rest of the sea...}
```
[`IterableDataset.filter`] can also filter by indices if you set `with_indices=True`:
```py
>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
>>> list(even_dataset.take(3))
[{'text': 'How AP reported in all formats from tornado-stricken regionsMarch 8, 2012 Whe...},
{'text': 'Car Wash For Clara! Now is your chance to help! 2 year old Clara Woodward has...},
{'text': 'Log In Please enter your ECode to log in. Forgotten your eCode? If you create...}]
```
## Batch
The `batch` method transforms your `IterableDataset` into an iterable of batches. This is particularly useful when you want to work with batches in your training loop or when using frameworks that expect batched inputs.
<Tip>
There is also a "Batch Processing" option when using the `map` function to apply a function to batches of data, which is discussed in the [Map section](#map) above. The `batch` method described here is different and provides a more direct way to create batches from your dataset.
</Tip>
You can use the `batch` method like this:
```python
from datasets import load_dataset
# Load a dataset in streaming mode
dataset = load_dataset("some_dataset", split="train", streaming=True)
# Create batches of 32 samples
batched_dataset = dataset.batch(batch_size=32)
# Iterate over the batched dataset
for batch in batched_dataset:
print(batch)
break
```
In this example, `batched_dataset` is still an [`IterableDataset`], but each item yielded is now a batch of 32 samples instead of a single sample.
This batching is done on-the-fly as you iterate over the dataset, preserving the memory-efficient nature of [`IterableDataset`].
The `batch` method also provides a `drop_last_batch` parameter.
When set to `True`, it discards the last batch if it is smaller than the specified `batch_size`.
This can be useful in scenarios where your downstream processing requires all batches to be of the same size:
```python
batched_dataset = dataset.batch(batch_size=32, drop_last_batch=True)
```
## Stream in a training loop
[`IterableDataset`] can be integrated into a training loop. First, shuffle the dataset:
<frameworkcontent>
<pt>
```py
>>> seed, buffer_size = 42, 10_000
>>> dataset = dataset.shuffle(seed, buffer_size=buffer_size)
```
Lastly, create a simple training loop and start training:
```py
>>> import torch
>>> from torch.utils.data import DataLoader
>>> from transformers import AutoModelForMaskedLM, DataCollatorForLanguageModeling
>>> from tqdm import tqdm
>>> dataset = dataset.with_format("torch")
>>> dataloader = DataLoader(dataset, collate_fn=DataCollatorForLanguageModeling(tokenizer))
>>> device = 'cuda' if torch.cuda.is_available() else 'cpu'
>>> model = AutoModelForMaskedLM.from_pretrained("distilbert-base-uncased")
>>> model.train().to(device)
>>> optimizer = torch.optim.AdamW(params=model.parameters(), lr=1e-5)
>>> for epoch in range(3):
... dataset.set_epoch(epoch)
... for i, batch in enumerate(tqdm(dataloader, total=5)):
... if i == 5:
... break
... batch = {k: v.to(device) for k, v in batch.items()}
... outputs = model(**batch)
... loss = outputs[0]
... loss.backward()
... optimizer.step()
... optimizer.zero_grad()
... if i % 10 == 0:
... print(f"loss: {loss}")
```
</pt>
</frameworkcontent>
<!-- TODO: Write the TF content! -->
### Save a dataset checkpoint and resume iteration
If your training loop stops, you may want to restart the training from where it was. To do so you can save a checkpoint of your model and optimizers, as well as your data loader.
Iterable datasets don't provide random access to a specific example index to resume from, but you can use [`IterableDataset.state_dict`] and [`IterableDataset.load_state_dict`] to resume from a checkpoint instead, similarly to what you can do for models and optimizers:
```python
>>> iterable_dataset = Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3)
>>> for idx, example in enumerate(iterable_dataset):
... print(example)
... if idx == 2:
... state_dict = iterable_dataset.state_dict()
... print("checkpoint")
... break
>>> iterable_dataset.load_state_dict(state_dict)
>>> print(f"restart from checkpoint")
>>> for example in iterable_dataset:
... print(example)
```
Returns:
```
{'a': 0}
{'a': 1}
{'a': 2}
checkpoint
restart from checkpoint
{'a': 3}
{'a': 4}
{'a': 5}
```
Under the hood, the iterable dataset keeps track of the shard currently being read and of the example index within that shard, and it stores this information in the `state_dict`.
To resume from a checkpoint, the dataset skips all the shards that were previously read and restarts from the current shard.
It then re-reads that shard from the beginning and skips examples until it reaches the exact example from the checkpoint.
Restarting a dataset is therefore quite fast, since it does not re-read the shards that have already been iterated on, but it is generally not instantaneous because the current shard has to be read again up to the checkpoint location.
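For instance, here is a minimal sketch of persisting this state alongside the rest of a training checkpoint (the file path is made up, and `torch.save` is just one convenient way to serialize the plain `dict` returned by `state_dict`):
```py
>>> import torch
>>> # save the dataset state together with your model/optimizer checkpoint (hypothetical path)
>>> torch.save(iterable_dataset.state_dict(), "checkpoint/dataset_state.pt")
>>> # later, restore it before resuming iteration
>>> iterable_dataset.load_state_dict(torch.load("checkpoint/dataset_state.pt"))
```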
This can be used with the `StatefulDataLoader` from `torchdata`:
```python
>>> from torchdata.stateful_dataloader import StatefulDataLoader
>>> iterable_dataset = load_dataset("deepmind/code_contests", streaming=True, split="train")
>>> dataloader = StatefulDataLoader(iterable_dataset, batch_size=32, num_workers=4)
>>> # checkpoint
>>> state_dict = dataloader.state_dict() # uses iterable_dataset.state_dict() under the hood
>>> # resume from checkpoint
>>> dataloader.load_state_dict(state_dict) # uses iterable_dataset.load_state_dict() under the hood
```
<Tip>
Resuming restores the iteration exactly where the checkpoint was saved, except if `.shuffle()` is used: examples from shuffle buffers are lost when resuming, and the buffers are refilled with new data.
</Tip>
## Save
Once your iterable dataset is ready, you can save it as a Hugging Face Dataset in Parquet format and reuse it later with [`load_dataset`].
Save your dataset with [`~Dataset.push_to_hub`] by providing the name of the Hugging Face dataset repository you wish to save it to. This iterates over the dataset and progressively uploads the data to Hugging Face:
```python
dataset.push_to_hub("username/my_dataset")
```
If the dataset consists of multiple shards (`dataset.num_shards > 1`), you can use multiple processes to upload it in parallel. This is especially useful if you applied `map()` or `filter()` steps since they will run faster in parallel:
```python
dataset.push_to_hub("username/my_dataset", num_proc=8)
```
Use the [`load_dataset`] function to reload the dataset:
```python
from datasets import load_dataset
reloaded_dataset = load_dataset("username/my_dataset")
```
## Export
🤗 Datasets supports exporting as well so you can work with your dataset in other applications. The following table shows currently supported file formats you can export to:
| File type | Export method |
|-------------------------|----------------------------------------------------------------|
| CSV | [`IterableDataset.to_csv`] |
| JSON | [`IterableDataset.to_json`] |
| Parquet | [`IterableDataset.to_parquet`] |
| SQL | [`IterableDataset.to_sql`] |
| In-memory Python object | [`IterableDataset.to_pandas`], [`IterableDataset.to_polars`] or [`IterableDataset.to_dict`] |
For example, export your dataset to a CSV file like this:
```py
>>> dataset.to_csv("path/of/my/dataset.csv")
```
If you have a large dataset, you can save one file per shard, e.g.
```py
>>> num_shards = dataset.num_shards
>>> for index in range(num_shards):
... shard = dataset.shard(index, num_shards)
... shard.to_parquet(f"path/of/my/dataset/data-{index:05d}.parquet")
```
| datasets/docs/source/stream.mdx/0 | {
"file_path": "datasets/docs/source/stream.mdx",
"repo_id": "datasets",
"token_count": 7937
} | 97 |
<!---
Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# 🤗 Datasets Notebooks
You can find here a list of the official notebooks provided by Hugging Face.
Also, we would like to list here interesting content created by the community.
If you wrote some notebook(s) leveraging 🤗 Datasets and would like it to be listed here, please open a
Pull Request so it can be included under the Community notebooks.
## Hugging Face's notebooks 🤗
### Documentation notebooks
You can open any page of the documentation as a notebook in Colab (there is a button directly on said pages) but they are also listed here if you need them:
| Notebook | Description | | |
|:----------|:-------------|:-------------|------:|
| [Quickstart](https://github.com/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb) | A quick presentation on integrating Datasets into a model training workflow |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)|
| datasets/notebooks/README.md/0 | {
"file_path": "datasets/notebooks/README.md",
"repo_id": "datasets",
"token_count": 534
} | 98 |
import contextlib
import copy
import fnmatch
import itertools
import json
import math
import posixpath
import random
import re
import time
from collections.abc import Sequence
from functools import partial
from pathlib import Path
from typing import Callable, Optional, Union
import fsspec
import numpy as np
from fsspec.core import url_to_fs
from huggingface_hub import (
CommitInfo,
CommitOperationAdd,
CommitOperationDelete,
DatasetCard,
DatasetCardData,
HfApi,
)
from huggingface_hub.hf_api import RepoFile
from huggingface_hub.utils import HfHubHTTPError, RepositoryNotFoundError
from requests import HTTPError
from . import config
from .arrow_dataset import (
PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED,
Dataset,
)
from .features import Features
from .features.features import FeatureType
from .info import DatasetInfo, DatasetInfosDict
from .iterable_dataset import IterableDataset
from .naming import _split_re
from .splits import NamedSplit, Split, SplitDict, SplitInfo
from .table import Table
from .utils import logging
from .utils.doc_utils import is_documented_by
from .utils.metadata import MetadataConfigs
from .utils.py_utils import asdict, glob_pattern_to_regex, string_to_dict
from .utils.typing import PathLike
logger = logging.get_logger(__name__)
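# Note: unlike a plain functools.partial, this helper passes the stored positional
# arguments *after* the call-time arguments, which is how DatasetDict.map(..., with_split=True)
# appends the split name as the last positional argument of the user function.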
class bind(partial):
def __call__(self, *fn_args, **fn_kwargs):
return self.func(*fn_args, *self.args, **fn_kwargs)
class DatasetDict(dict[Union[str, NamedSplit], "Dataset"]):
"""A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)"""
def _check_values_type(self):
for dataset in self.values():
if not isinstance(dataset, Dataset):
raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'")
def _check_values_features(self):
items = list(self.items())
for item_a, item_b in zip(items[:-1], items[1:]):
if item_a[1].features != item_b[1].features:
raise ValueError(
f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
for dataset in self.values():
if hasattr(dataset, "_data"):
del dataset._data
if hasattr(dataset, "_indices"):
del dataset._indices
def __getitem__(self, k) -> Dataset:
if isinstance(k, (str, NamedSplit)) or len(self) == 0:
return super().__getitem__(k)
else:
available_suggested_splits = [
split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self
]
suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0]
raise KeyError(
f"Invalid key: {k}. Please first select a split. For example: "
f"`my_dataset_dictionary['{suggested_split}'][{k}]`. "
f"Available splits: {sorted(self)}"
)
@property
def data(self) -> dict[str, Table]:
"""The Apache Arrow tables backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.data
```
"""
self._check_values_type()
return {k: dataset.data for k, dataset in self.items()}
@property
def cache_files(self) -> dict[str, dict]:
"""The cache files containing the Apache Arrow table backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.cache_files
{'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}],
'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}],
'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]}
```
"""
self._check_values_type()
return {k: dataset.cache_files for k, dataset in self.items()}
@property
def num_columns(self) -> dict[str, int]:
"""Number of columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.num_columns
{'test': 2, 'train': 2, 'validation': 2}
```
"""
self._check_values_type()
return {k: dataset.num_columns for k, dataset in self.items()}
@property
def num_rows(self) -> dict[str, int]:
"""Number of rows in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.num_rows
{'test': 1066, 'train': 8530, 'validation': 1066}
```
"""
self._check_values_type()
return {k: dataset.num_rows for k, dataset in self.items()}
@property
def column_names(self) -> dict[str, list[str]]:
"""Names of the columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.column_names
{'test': ['text', 'label'],
'train': ['text', 'label'],
'validation': ['text', 'label']}
```
"""
self._check_values_type()
return {k: dataset.column_names for k, dataset in self.items()}
@property
def shape(self) -> dict[str, tuple[int]]:
"""Shape of each split of the dataset (number of rows, number of columns).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.shape
{'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
```
"""
self._check_values_type()
return {k: dataset.shape for k, dataset in self.items()}
def flatten(self, max_depth=16) -> "DatasetDict":
"""Flatten the Apache Arrow Table of each split (nested features are flatten).
Each column with a struct type is flattened into one column per struct field.
Other columns are left unchanged.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rajpurkar/squad")
>>> ds["train"].features
{'id': Value('string'),
'title': Value('string'),
'context': Value('string'),
'question': Value('string'),
'answers.text': List(Value('string')),
'answers.answer_start': List(Value('int32'))}
>>> ds.flatten()
DatasetDict({
train: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 87599
})
validation: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 10570
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
def unique(self, column: str) -> dict[str, list]:
"""Return a list of the unique elements in a column for each split.
This is implemented in the low-level backend and as such, very fast.
Args:
column (`str`):
column name (list all the column names with [`~datasets.DatasetDict.column_names`])
Returns:
Dict[`str`, `list`]: Dictionary of unique elements in the given column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.unique("label")
{'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
```
"""
self._check_values_type()
return {k: dataset.unique(column) for k, dataset in self.items()}
def cleanup_cache_files(self) -> dict[str, int]:
"""Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.
Be careful when running this command that no other process is currently using other cache files.
Return:
`Dict` with the number of removed files for each split
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.cleanup_cache_files()
{'test': 0, 'train': 0, 'validation': 0}
```
"""
self._check_values_type()
return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
def __repr__(self):
repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
repr = re.sub(r"^", " " * 4, repr, count=0, flags=re.M)
return f"DatasetDict({{\n{repr}\n}})"
def cast(self, features: Features) -> "DatasetDict":
"""
Cast the dataset to a new set of features.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
features ([`Features`]):
New features to cast the dataset to.
The name and order of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~DatasetDict.map`] to update the dataset.
Example:
```py
>>> from datasets import load_dataset, ClassLabel, Value
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos']),
'text': Value('string')}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good']),
'text': Value('large_string')}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
def cast_column(self, column: str, feature) -> "DatasetDict":
"""Cast column to feature for decoding.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import load_dataset, ClassLabel
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos']),
'text': Value('string')}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good']),
'text': Value('string')}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
def remove_columns(self, column_names: Union[str, list[str]]) -> "DatasetDict":
"""
Remove one or several column(s) from each split in the dataset
and the features associated to the column(s).
The transformation is applied to all the splits of the dataset dictionary.
You can also remove a column using [`~DatasetDict.map`] with `remove_columns` but the present method
doesn't copy the data of the remaining columns and is thus faster.
Args:
column_names (`Union[str, list[str]]`):
Name of the column(s) to remove.
Returns:
[`DatasetDict`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds = ds.remove_columns("label")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()})
def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict":
"""
Rename a column in the dataset and move the features associated to the original column under the new column name.
The transformation is applied to all the datasets of the dataset dictionary.
You can also rename a column using [`~DatasetDict.map`] with `remove_columns` but the present method:
- takes care of moving the original features under the new column name.
- doesn't copy the data to a new dataset and is thus much faster.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds = ds.rename_column("label", "label_new")
DatasetDict({
train: Dataset({
features: ['text', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict(
{
k: dataset.rename_column(
original_column_name=original_column_name,
new_column_name=new_column_name,
)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: dict[str, str]) -> "DatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`DatasetDict`]: A copy of the dataset with renamed columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
DatasetDict({
train: Dataset({
features: ['text_new', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, list[str]]) -> "DatasetDict":
"""Select one or several column(s) from each split in the dataset and
the features associated to the column(s).
The transformation is applied to all the splits of the dataset
dictionary.
Args:
column_names (`Union[str, list[str]]`):
Name of the column(s) to keep.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.select_columns("text")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()})
def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict":
"""Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables.
Args:
column (`str`):
The name of the column to cast.
include_nulls (`bool`, defaults to `False`):
Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
<Added version="1.14.2"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("boolq")
>>> ds["train"].features
{'answer': Value('bool'),
'passage': Value('string'),
'question': Value('string')}
>>> ds = ds.class_encode_column("answer")
>>> ds["train"].features
{'answer': ClassLabel(num_classes=2, names=['False', 'True']),
'passage': Value('string'),
'question': Value('string')}
```
"""
self._check_values_type()
return DatasetDict(
{k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()}
)
@contextlib.contextmanager
def formatted_as(
self,
type: Optional[str] = None,
columns: Optional[list] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""To be used in a `with` statement. Set `__getitem__` return format (type and columns).
The transformation is applied to all the datasets of the dataset dictionary.
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'jax', 'arrow', 'pandas', 'polars']`.
`None` means `__getitem__` returns python objects (default).
columns (`list[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
"""
self._check_values_type()
old_format_type = {k: dataset._format_type for k, dataset in self.items()}
old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()}
old_format_columns = {k: dataset._format_columns for k, dataset in self.items()}
old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()}
try:
self.set_format(type, columns, output_all_columns, **format_kwargs)
yield
finally:
for k, dataset in self.items():
dataset.set_format(
old_format_type[k],
old_format_columns[k],
old_output_all_columns[k],
**old_format_kwargs[k],
)
def set_format(
self,
type: Optional[str] = None,
columns: Optional[list] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""Set `__getitem__` return format (type and columns).
The format is set for every dataset in the dataset dictionary.
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'jax', 'arrow', 'pandas', 'polars']`.
`None` means `__getitem__` returns python objects (default).
columns (`list[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects),
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
It is possible to call `map` after calling `set_format`. Since `map` may add new columns, then the list of formatted columns
gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted:
`new formatted columns = (all columns - previously unformatted columns)`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format(
type=type,
columns=columns,
output_all_columns=output_all_columns,
**format_kwargs,
)
def reset_format(self):
"""Reset `__getitem__` return format to python objects and all columns.
The transformation is applied to all the datasets of the dataset dictionary.
Same as `self.set_format()`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
>>> ds.reset_format()
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format()
def set_transform(
self,
transform: Optional[Callable],
columns: Optional[list] = None,
output_all_columns: bool = False,
):
"""Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
The transform is set for every dataset in the dataset dictionary
As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`
Args:
transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in ``__getitem__``.
columns (`list[str]`, optional): columns to format in the output
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects)
If set to True, then the other un-formatted columns are kept with the output of the transform.
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format(
"custom",
columns=columns,
output_all_columns=output_all_columns,
transform=transform,
)
def with_format(
self,
type: Optional[str] = None,
columns: Optional[list] = None,
output_all_columns: bool = False,
**format_kwargs,
) -> "DatasetDict":
"""Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
The format is set for every dataset in the dataset dictionary.
It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'jax', 'arrow', 'pandas', 'polars']`.
`None` means `__getitem__` returns python objects (default).
columns (`list[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
>>> ds = ds.with_format("torch")
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'torch'}
>>> ds["train"][0]
{'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'label': tensor(1),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_format(
type=type,
columns=columns,
output_all_columns=output_all_columns,
**format_kwargs,
)
return dataset
def with_transform(
self,
transform: Optional[Callable],
columns: Optional[list] = None,
output_all_columns: bool = False,
) -> "DatasetDict":
"""Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
The transform is set for every dataset in the dataset dictionary
As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
transform (`Callable`, *optional*):
User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in `__getitem__`.
columns (`list[str]`, *optional*):
Columns to format in the output.
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
If set to `True`, then the other un-formatted columns are kept with the output of the transform.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> def encode(example):
... return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
>>> ds = ds.with_transform(encode)
>>> ds["train"][0]
{'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1]),
'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432,
112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119,
112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190,
170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117,
179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137,
188, 1566, 7912, 14516, 6997, 119, 102]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
return dataset
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
with_split: bool = False,
input_columns: Optional[Union[str, list[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, list[str]]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
try_original_type: Optional[bool] = True,
) -> "DatasetDict":
"""
Apply a function to all the examples in the table (individually or in batches) and update the table.
If your function returns a column that already exists, then it overwrites it.
The transformation is applied to all the datasets of the dataset dictionary.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
- If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
Note that the last batch may have less than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
If the function is asynchronous, then `map` will run your function in parallel, with up to one thousand simultaneous calls.
It is recommended to use an `asyncio.Semaphore` in your function if you want to set a maximum number of operations that can run at the same time.
Args:
function (`callable`): with one of the following signature:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, list]) -> Dict[str, list]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, list], indices: list[int]) -> Dict[str, list]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
If the function is asynchronous, then `map` will run your function in parallel.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, defaults to the identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
with_split (`bool`, defaults to `False`):
Provide process split to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], split): ...`.
input_columns (`[Union[str, list[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`,
if `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`[Union[str, list[str]]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, default `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
Higher value makes the processing do fewer lookups, lower value consumes less temporary memory while running `map`.
features (`[datasets.Features]`, *optional*, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
The number of processes to use for multiprocessing.
- If `None` or `0`, no multiprocessing is used and the operation runs in the main process.
- If greater than `1`, one or multiple worker processes are used to process data in parallel.
Note: The function passed to `map()` must be picklable for multiprocessing to work correctly
(i.e., prefer functions defined at the top level of a module, not inside another function or class).
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while mapping examples.
try_original_type (`Optional[bool]`, defaults to `True`):
Try to keep the types of the original columns (e.g. int32 -> int32).
Set to False if you want to always infer new types.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> ds["train"][0:3]["text"]
['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'Review: effective but too-tepid biopic']
# process a batch of examples
>>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
# set number of processors
>>> ds = ds.map(add_prefix, num_proc=4)
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = dict.fromkeys(self)
dataset_dict = {}
for split, dataset in self.items():
if with_split:
function = bind(function, split)
dataset_dict[split] = dataset.map(
function=function,
with_indices=with_indices,
with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[split],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
try_original_type=try_original_type,
)
if with_split:
function = function.func
return DatasetDict(dataset_dict)
def filter(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, list[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
) -> "DatasetDict":
"""Apply a filter function to all the elements in the table in batches
and update the table so that the dataset only includes examples according to the filter function.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
function (`Callable`): Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, list]) -> list[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, list], *extra_args) -> list[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
If no function is provided, defaults to an always `True` function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the
signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`[Union[str, list[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
Higher value makes the processing do fewer lookups, lower value consumes less temporary memory while running `map`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
The number of processes to use for multiprocessing.
- If `None` or `0`, no multiprocessing is used and the operation runs in the main process.
- If greater than `1`, one or multiple worker processes are used to process data in parallel.
Note: The function passed to `filter()` must be picklable for multiprocessing to work correctly
(i.e., prefer functions defined at the top level of a module, not inside another function or class).
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while filtering examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds.filter(lambda x: x["label"] == 1)
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 4265
})
validation: Dataset({
features: ['text', 'label'],
num_rows: 533
})
test: Dataset({
features: ['text', 'label'],
num_rows: 533
})
})
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = dict.fromkeys(self)
return DatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
)
for k, dataset in self.items()
}
)
def flatten_indices(
self,
keep_in_memory: bool = False,
cache_file_names: Optional[dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "DatasetDict":
"""Create and cache a new Dataset by flattening the indices mapping.
Args:
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
cache_file_names (`Dict[str, str]`, *optional*, default `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
Higher value makes the processing do fewer lookups, lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
num_proc (`int`, optional, default `None`):
Max number of processes when generating cache. Already cached shards are loaded sequentially
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = dict.fromkeys(self)
return DatasetDict(
{
k: dataset.flatten_indices(
keep_in_memory=keep_in_memory,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
num_proc=num_proc,
new_fingerprint=new_fingerprint,
)
for k, dataset in self.items()
}
)
def sort(
self,
column_names: Union[str, Sequence[str]],
reverse: Union[bool, Sequence[bool]] = False,
null_placement: str = "at_end",
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new dataset sorted according to a single or multiple columns.
Args:
column_names (`Union[str, Sequence[str]]`):
Column name(s) to sort by.
reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
If `True`, sort by descending order rather than ascending. If a single bool is provided,
the value is applied to the sorting of all column names. Otherwise a list of bools with the
same length and order as column_names must be provided.
null_placement (`str`, defaults to `at_end`):
Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last`
keep_in_memory (`bool`, defaults to `False`):
Keep the sorted indices in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the sorted indices
can be identified, use it instead of recomputing.
indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
Higher value gives smaller cache files, lower value consumes less temporary memory.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('cornell-movie-review-data/rotten_tomatoes')
>>> ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> sorted_ds = ds.sort('label')
>>> sorted_ds['train']['label'][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
>>> another_sorted_ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
"""
self._check_values_type()
if indices_cache_file_names is None:
indices_cache_file_names = dict.fromkeys(self)
return DatasetDict(
{
k: dataset.sort(
column_names=column_names,
reverse=reverse,
null_placement=null_placement,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def shuffle(
self,
seeds: Optional[Union[int, dict[str, Optional[int]]]] = None,
seed: Optional[int] = None,
generators: Optional[dict[str, np.random.Generator]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new Dataset where the rows are shuffled.
The transformation is applied to all the datasets of the dataset dictionary.
Currently shuffling uses numpy random generators.
You can either supply a NumPy BitGenerator to use, or a seed to initialize NumPy's default random generator (PCG64).
Args:
seeds (`Dict[str, int]` or `int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
You can provide one `seed` per dataset in the dataset dictionary.
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided).
generators (`Dict[str, np.random.Generator]`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
You have to provide one `generator` per dataset in the dataset dictionary.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the shuffled indices
can be identified, use it instead of recomputing.
indices_cache_file_names (`Dict[str, str]`, *optional*):
Provide the name of a path for the cache file. It is used to store the
indices mappings instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during processing and processing speed.
A higher value makes the processing do fewer lookups, while a lower value consumes less temporary memory while running `map`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes")
>>> ds["train"]["label"][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# set a seed
>>> shuffled_ds = ds.shuffle(seed=42)
>>> shuffled_ds["train"]["label"][:10]
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
```
"""
self._check_values_type()
if seed is not None and seeds is not None:
raise ValueError("Please specify seed or seeds, but not both")
seeds = seed if seed is not None else seeds
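# normalize seeds into one entry per split (a single int is broadcast to every split)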
if seeds is None:
seeds = dict.fromkeys(self)
elif not isinstance(seeds, dict):
seeds = dict.fromkeys(self, seeds)
if generators is None:
generators = dict.fromkeys(self)
if indices_cache_file_names is None:
indices_cache_file_names = dict.fromkeys(self)
return DatasetDict(
{
k: dataset.shuffle(
seed=seeds[k],
generator=generators[k],
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def save_to_disk(
self,
dataset_dict_path: PathLike,
max_shard_size: Optional[Union[str, int]] = None,
num_shards: Optional[dict[str, int]] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
):
"""
Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`.
For [`Image`], [`Audio`] and [`Video`] data:
All the Image(), Audio() and Video() data are stored in the Arrow files.
If you want to store paths or URLs, please use the Value("string") type.
Args:
dataset_dict_path (`path-like`):
Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
of the dataset dict directory where the dataset dict will be saved to.
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be saved to the filesystem. If expressed as a string, needs to be digits followed by a unit
(like `"50MB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
You need to provide the number of shards for each dataset in the dataset dictionary.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
num_proc (`int`, *optional*, defaults to `None`):
Number of processes to use when saving the dataset shards.
Multiprocessing is disabled by default.
<Added version="2.8.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Example:
```python
>>> dataset_dict.save_to_disk("path/to/dataset/directory")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
```
"""
fs: fsspec.AbstractFileSystem
fs, _ = url_to_fs(dataset_dict_path, **(storage_options or {}))
if num_shards is None:
num_shards = dict.fromkeys(self)
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {'train': 128, 'test': 4}"
)
fs.makedirs(dataset_dict_path, exist_ok=True)
with fs.open(
posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME),
"w",
encoding="utf-8",
) as f:
json.dump({"splits": list(self)}, f)
for k, dataset in self.items():
dataset.save_to_disk(
posixpath.join(dataset_dict_path, k),
num_shards=num_shards.get(k),
max_shard_size=max_shard_size,
num_proc=num_proc,
storage_options=storage_options,
)
@staticmethod
def load_from_disk(
dataset_dict_path: PathLike,
keep_in_memory: Optional[bool] = None,
storage_options: Optional[dict] = None,
) -> "DatasetDict":
"""
Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`.
Args:
dataset_dict_path (`path-like`):
Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
of the dataset dict directory where the dataset dict will be loaded from.
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the
dataset will not be copied in-memory unless explicitly enabled by setting
`datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
[improve performance](../cache#improve-performance) section.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Returns:
[`DatasetDict`]
Example:
```py
>>> ds = load_from_disk('path/to/dataset/directory')
```
"""
fs: fsspec.AbstractFileSystem
fs, dataset_dict_path = url_to_fs(dataset_dict_path, **(storage_options or {}))
dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME)
dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME)
dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME)
if not fs.isfile(dataset_dict_json_path):
if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path):
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`."
)
with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f:
splits = json.load(f)["splits"]
dataset_dict = DatasetDict()
for k in splits:
dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k)
dataset_dict[k] = Dataset.load_from_disk(
dataset_dict_split_path,
keep_in_memory=keep_in_memory,
storage_options=storage_options,
)
return dataset_dict
@staticmethod
def from_csv(
path_or_paths: dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from CSV file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the CSV file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`pandas.read_csv`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'})
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetReader
return CsvDatasetReader(
path_or_paths,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
**kwargs,
).read()
@staticmethod
def from_json(
path_or_paths: dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from JSON Lines file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the JSON Lines file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`JsonConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetReader
return JsonDatasetReader(
path_or_paths,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
**kwargs,
).read()
@staticmethod
def from_parquet(
path_or_paths: dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
columns: Optional[list[str]] = None,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from Parquet file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the Parquet file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
columns (`list[str]`, *optional*):
If not `None`, only these columns will be read from the file.
A column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`ParquetConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'})
```
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetReader
return ParquetDatasetReader(
path_or_paths,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
columns=columns,
**kwargs,
).read()
@staticmethod
def from_text(
path_or_paths: dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from text file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the text file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`TextConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
```
"""
# Dynamic import to avoid circular dependency
from .io.text import TextDatasetReader
return TextDatasetReader(
path_or_paths,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
**kwargs,
).read()
@is_documented_by(Dataset.align_labels_with_mapping)
def align_labels_with_mapping(self, label2id: dict, label_column: str) -> "DatasetDict":
self._check_values_type()
return DatasetDict(
{
k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column)
for k, dataset in self.items()
}
)
def push_to_hub(
self,
repo_id,
config_name: str = "default",
set_default: Optional[bool] = None,
data_dir: Optional[str] = None,
commit_message: Optional[str] = None,
commit_description: Optional[str] = None,
private: Optional[bool] = None,
token: Optional[str] = None,
revision: Optional[str] = None,
create_pr: Optional[bool] = False,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[dict[str, int]] = None,
embed_external_files: bool = True,
num_proc: Optional[int] = None,
) -> CommitInfo:
"""Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
data, the Parquet files will store the bytes of your images or audio files.
You can disable this by setting `embed_external_files` to False.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
config_name (`str`):
Configuration name of a dataset. Defaults to "default".
set_default (`bool`, *optional*):
Whether to set this configuration as the default one. Otherwise, the default configuration is the one
named "default".
data_dir (`str`, *optional*):
Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
from "default", else "data".
<Added version="2.17.0"/>
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"Upload dataset"`.
commit_description (`str`, *optional*):
Description of the commit that will be created.
Additionally, description of the PR if a PR is created (`create_pr` is True).
<Added version="2.16.0"/>
private (`bool`, *optional*):
Whether to make the repo private. If `None` (default), the repo will be public unless the
organization's default is private. This value is ignored if the repo already exists.
token (`str`, *optional*):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
if no token is passed and the user is not logged-in.
revision (`str`, *optional*):
Branch to push the uploaded files to. Defaults to the `"main"` branch.
<Added version="2.15.0"/>
create_pr (`bool`, *optional*, defaults to `False`):
Whether to create a PR with the uploaded files or directly commit.
<Added version="2.15.0"/>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
(like `"500MB"` or `"1GB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default, the number of shards depends on `max_shard_size`.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
embed_external_files (`bool`, defaults to `True`):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
- [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when preparing and uploading the dataset.
This is helpful if the dataset is made of many samples or media files to embed.
Multiprocessing is disabled by default.
<Added version="4.0.0"/>
Return:
huggingface_hub.CommitInfo
Example:
```python
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
```
If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
```python
>>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
>>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
>>> # later
>>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
>>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
```
"""
if num_shards is None:
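# no per-split value provided: let `max_shard_size` determine the shard count for each split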
num_shards = dict.fromkeys(self)
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
)
self._check_values_type()
self._check_values_features()
total_uploaded_size = 0
total_dataset_nbytes = 0
info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
info_to_dump.config_name = config_name
info_to_dump.splits = SplitDict()
for split in self.keys():
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
try:
repo_id = api.repo_info(repo_id, repo_type="dataset").id
except RepositoryNotFoundError:
repo_url = api.create_repo(
repo_id,
repo_type="dataset",
private=private,
exist_ok=True,
)
repo_id = repo_url.repo_id
if revision is not None and not revision.startswith("refs/pr/"):
# We do not call create_branch for a PR reference: 400 Bad Request
api.create_branch(
repo_id,
branch=revision,
token=token,
repo_type="dataset",
exist_ok=True,
)
if not data_dir:
data_dir = config_name if config_name != "default" else "data" # for backward compatibility
additions = []
for split in self.keys():
logger.info(f"Pushing split {split} to the Hub.")
# The split=key needs to be removed before merging
split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub(
repo_id,
data_dir=data_dir,
split=split,
token=token,
revision=revision,
create_pr=create_pr,
max_shard_size=max_shard_size,
num_shards=num_shards.get(split),
embed_external_files=embed_external_files,
num_proc=num_proc,
)
additions += split_additions
total_uploaded_size += uploaded_size
total_dataset_nbytes += dataset_nbytes
info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
info_to_dump.download_checksums = None
info_to_dump.download_size = total_uploaded_size
info_to_dump.dataset_size = total_dataset_nbytes
info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
def get_deletions_and_dataset_card() -> tuple[str, list[CommitOperationDelete], str, Optional[str]]:
parent_commit = api.repo_info(repo_id, repo_type="dataset", revision=revision).sha
# Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
# and delete old split shards (if they exist)
repo_with_dataset_card, repo_with_dataset_infos = False, False
repo_splits: list[str] = [] # use a list to keep the order of the splits
deletions: list[CommitOperationDelete] = []
repo_files_to_add = [addition.path_in_repo for addition in additions]
for repo_file in api.list_repo_tree(
repo_id=repo_id,
revision=parent_commit,
repo_type="dataset",
token=token,
recursive=True,
):
if not isinstance(repo_file, RepoFile):
continue
if repo_file.rfilename == config.REPOCARD_FILENAME:
repo_with_dataset_card = True
elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
repo_with_dataset_infos = True
elif (
repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
and repo_file.rfilename not in repo_files_to_add
):
deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
elif fnmatch.fnmatch(
repo_file.rfilename,
PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*"),
):
pattern = glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED)
split_pattern_fields = string_to_dict(repo_file.rfilename, pattern)
assert split_pattern_fields is not None
repo_split = split_pattern_fields["split"]
if repo_split not in repo_splits:
repo_splits.append(repo_split)
# get the info from the README to update them
if repo_with_dataset_card:
dataset_card_path = api.hf_hub_download(
repo_id,
config.REPOCARD_FILENAME,
repo_type="dataset",
revision=parent_commit,
)
dataset_card = DatasetCard.load(Path(dataset_card_path))
dataset_card_data = dataset_card.data
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
# get the deprecated dataset_infos.json to update them
elif repo_with_dataset_infos:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
else:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
# create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
if not metadata_configs and repo_splits:
default_metadata_configs_to_dump = {
"data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
metadata_config_to_dump = {
"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
}
configs_to_dump = {config_name: metadata_config_to_dump}
if set_default and config_name != "default":
if metadata_configs:
current_default_config_name = metadata_configs.get_default_config_name()
if current_default_config_name == "default":
raise ValueError(
"There exists a configuration named 'default'. To set a different configuration as default, "
"rename the 'default' one first."
)
if current_default_config_name:
_ = metadata_configs[current_default_config_name].pop("default")
configs_to_dump[current_default_config_name] = metadata_configs[current_default_config_name]
metadata_config_to_dump["default"] = True
# push to the deprecated dataset_infos.json
if repo_with_dataset_infos:
dataset_infos_path = api.hf_hub_download(
repo_id,
config.DATASETDICT_INFOS_FILENAME,
repo_type="dataset",
revision=parent_commit,
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_infos[config_name] = asdict(info_to_dump)
new_dataset_infos = json.dumps(dataset_infos, indent=4)
else:
new_dataset_infos = None
# push to README
DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
MetadataConfigs(configs_to_dump).to_dataset_card_data(dataset_card_data)
new_dataset_card = (
DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
)
return parent_commit, deletions, new_dataset_card, new_dataset_infos
commit_message = commit_message if commit_message is not None else "Upload dataset"
if len(additions) > config.UPLOADS_MAX_NUMBER_PER_COMMIT:
logger.info(
f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
)
num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
for i in range(0, num_commits):
operations = additions[
i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
]
for retry, sleep_time in enumerate(itertools.chain(range(10), itertools.repeat(30)), start=1):
# We need to retry if another commit happens at the same time
sleep_time *= 1 + random.random()
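# backoff: roughly 0-18s for the first ten attempts, then 30-60s, due to the jitter factor in [1, 2)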
try:
commit_info = api.create_commit(
repo_id,
operations=operations,
commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
commit_description=commit_description,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
except HfHubHTTPError as err:
if (
err.__context__
and isinstance(err.__context__, HTTPError)
and err.__context__.response.status_code == 409
):
# 409 is Conflict (another commit is in progress)
time.sleep(sleep_time)
logger.info(
f"Retrying intermediate commit for {repo_id}, {config_name} ({retry}/n with status_code {err.__context__.response.status_code})"
)
continue
else:
raise
break
logger.info(
f"Commit #{i + 1} completed"
+ (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ "."
)
last_commit_additions = []
else:
last_commit_additions = additions
for retry, sleep_time in enumerate(itertools.chain(range(10), itertools.repeat(30)), start=1):
# We need to retry if there was a commit in between in case it touched the dataset card data
sleep_time *= 1 + random.random()
parent_commit, deletions, dataset_card, dataset_infos = get_deletions_and_dataset_card()
dataset_card_additions = []
if dataset_infos:
dataset_card_additions.append(
CommitOperationAdd(
path_in_repo=config.DATASETDICT_INFOS_FILENAME,
path_or_fileobj=dataset_infos.encode("utf-8"),
)
)
dataset_card_additions.append(
CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
)
try:
commit_info = api.create_commit(
repo_id,
operations=last_commit_additions + dataset_card_additions + deletions,
commit_message=commit_message,
commit_description=commit_description,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
parent_commit=parent_commit,
)
except HfHubHTTPError as err:
if (
err.__context__
and isinstance(err.__context__, HTTPError)
and err.__context__.response.status_code in (412, 409)
):
# 412 is Precondition failed (parent_commit isn't satisfied)
# 409 is Conflict (another commit is in progress)
time.sleep(sleep_time)
logger.info(
f"Retrying commit for {repo_id}, {config_name} ({retry}/n with status_code {err.__context__.response.status_code})"
)
continue
else:
raise
break
return commit_info
class IterableDatasetDict(dict[Union[str, NamedSplit], IterableDataset]):
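"""Dictionary mapping split names to [`IterableDataset`] objects. The transform methods defined below (`map`, `filter`, `shuffle`, `cast`, ...) are applied on-the-fly to every split of the dictionary."""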
def _check_values_type(self):
for dataset in self.values():
if not isinstance(dataset, IterableDataset):
raise TypeError(f"Values in `IterableDatasetDict` should be of type `IterableDataset` but got type '{type(dataset)}'")
def _check_values_features(self):
items = [(key, dataset._resolve_features()) for key, dataset in self.items()]
for item_a, item_b in zip(items[:-1], items[1:]):
if item_a[1].features != item_b[1].features:
raise ValueError(
f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
)
def __repr__(self):
repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
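# indent every line of the repr by four spaces so the splits are nested under the class name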
repr = re.sub(r"^", " " * 4, repr, count=0, flags=re.M)
return f"IterableDatasetDict({{\n{repr}\n}})"
@property
def num_columns(self) -> dict[str, Optional[int]]:
"""Number of columns in each split of the dataset.
This can contain `None` values if some splits have unknown features (e.g. after a `map()` operation).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> ds.num_columns
{'test': 2, 'train': 2, 'validation': 2}
```
"""
self._check_values_type()
return {k: dataset.num_columns for k, dataset in self.items()}
@property
def column_names(self) -> dict[str, Optional[list[str]]]:
"""Names of the columns in each split of the dataset.
This can contain `None` values if some splits have unknown features (e.g. after a `map()` operation).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> ds.column_names
{'test': ['text', 'label'],
'train': ['text', 'label'],
'validation': ['text', 'label']}
```
"""
self._check_values_type()
return {k: dataset.column_names for k, dataset in self.items()}
def with_format(
self,
type: Optional[str] = None,
) -> "IterableDatasetDict":
"""
Return a dataset with the specified format.
Args:
type (`str`, *optional*):
Output type selected from `[None, 'numpy', 'torch', 'tensorflow', 'jax', 'arrow', 'pandas', 'polars']`.
`None` means it returns python objects (default).
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="validation", streaming=True)
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds = ds.with_format("torch")
>>> next(iter(ds))
{'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'label': tensor(1),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_split: bool = False,
input_columns: Optional[Union[str, list[str]]] = None,
batched: bool = False,
batch_size: int = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, list[str]]] = None,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""
Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
If your function returns a column that already exists, then it overwrites it.
The function is applied on-the-fly on the examples when iterating over the dataset.
The transformation is applied to all the datasets of the dataset dictionary.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
- If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
Note that the last batch may have less than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
If the function is asynchronous, then `map` will run your function in parallel, with up to one thousand simultaneous calls.
It is recommended to use an `asyncio.Semaphore` in your function if you want to set a maximum number of operations that can run at the same time.
Args:
function (`Callable`, *optional*, defaults to `None`):
Function applied on-the-fly on the examples when you iterate on the dataset.
It must have one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, list]) -> Dict[str, list]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, list], indices: list[int]) -> Dict[str, list]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
If the function is asynchronous, then `map` will run your function in parallel.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
with_split (`bool`, defaults to `False`):
Provide the current split name to `function`: the split name is bound to `function` before the function is applied to that split's examples.
input_columns (`[Union[str, list[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function`
as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the `batch_size` should be
dropped instead of being processed by the function.
remove_columns (`[list[str]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> next(iter(ds["train"]))
{'label': 1,
'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
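A batched sketch under the same setup (the function name and batch size below are illustrative); with `batched=True` the function receives and returns dicts of lists:
```py
>>> def add_prefix_batched(batch):
...     batch["text"] = ["Review: " + text for text in batch["text"]]
...     return batch
>>> ds = ds.map(add_prefix_batched, batched=True, batch_size=32)
```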
"""
dataset_dict = {}
for split, dataset in self.items():
if with_split:
function = bind(function, split)
dataset_dict[split] = dataset.map(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
fn_kwargs=fn_kwargs,
)
if with_split:
function = function.func
return IterableDatasetDict(dataset_dict)
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, list[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
The filtering is done on-the-fly when iterating over the dataset.
The filtering is applied to all the datasets of the dataset dictionary.
Args:
function (`Callable`):
Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- `function(example: Dict[str, list]) -> list[bool]` if `with_indices=False, batched=True`
- `function(example: Dict[str, list], indices: list[int]) -> list[bool]` if `with_indices=True, batched=True`
If no function is provided, defaults to an always True function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`str` or `list[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> ds = ds.filter(lambda x: x["label"] == 0)
>>> list(ds["train"].take(3))
[{'label': 0, 'text': 'Review: simplistic , silly and tedious .'},
{'label': 0,
'text': "Review: it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
{'label': 0,
'text': 'Review: exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
```
"""
return IterableDatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
)
for k, dataset in self.items()
}
)
def shuffle(
self,
seed=None,
generator: Optional[np.random.Generator] = None,
buffer_size: int = 1000,
) -> "IterableDatasetDict":
"""
Randomly shuffles the elements of this dataset.
The shuffling is applied to all the datasets of the dataset dictionary.
This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer,
replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
equal to the full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
initially select a random element from only the first 1000 elements in the buffer. Once an element is
selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1000 element buffer.
If the dataset is made of several shards, it also shuffles the order of the shards.
However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
then the order of the shards is kept unchanged.
Args:
seed (`int`, *optional*, defaults to `None`):
Random seed that will be used to shuffle the dataset.
It is used to sample from the shuffle buffer and also to shuffle the data shards.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
buffer_size (`int`, defaults to `1000`):
Size of the buffer.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.shuffle(seed=42)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
{'label': 1,
'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
{'label': 1,
'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
```
"""
return IterableDatasetDict(
{
k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size)
for k, dataset in self.items()
}
)
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict":
"""
Rename a column in the dataset, and move the features associated to the original column under the new column
name.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> ds = ds.rename_column("text", "movie_review")
>>> next(iter(ds["train"]))
{'label': 1,
'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict(
{
k: dataset.rename_column(
original_column_name=original_column_name,
new_column_name=new_column_name,
)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: dict[str, str]) -> "IterableDatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with renamed columns
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
>>> next(iter(ds["train"]))
{'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'rating': 1}
```
"""
return IterableDatasetDict(
{k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
)
def remove_columns(self, column_names: Union[str, list[str]]) -> "IterableDatasetDict":
"""
Remove one or several column(s) in the dataset and the features associated to them.
The removal is done on-the-fly on the examples when iterating over the dataset.
The removal is applied to all the datasets of the dataset dictionary.
Args:
column_names (`Union[str, list[str]]`):
Name of the column(s) to remove.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> ds = ds.remove_columns("label")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, list[str]]) -> "IterableDatasetDict":
"""Select one or several column(s) in the dataset and the features
associated to them. The selection is done on-the-fly on the examples
when iterating over the dataset. The selection is applied to all the
datasets of the dataset dictionary.
Args:
column_names (`Union[str, list[str]]`):
Name of the column(s) to keep.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> ds = ds.select_columns("text")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
"""Cast column to feature for decoding.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`IterableDatasetDict`]
Example:
```py
>>> from datasets import load_dataset, ClassLabel
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos']),
'text': Value('string')}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good']),
'text': Value('string')}
```
"""
return IterableDatasetDict(
{k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
)
def cast(
self,
features: Features,
) -> "IterableDatasetDict":
"""
Cast the dataset to a new set of features.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
features (`Features`):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with casted features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos']),
'text': Value('string')}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good']),
'text': Value('large_string')}
```
"""
return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
def push_to_hub(
self,
repo_id,
config_name: str = "default",
set_default: Optional[bool] = None,
data_dir: Optional[str] = None,
commit_message: Optional[str] = None,
commit_description: Optional[str] = None,
private: Optional[bool] = None,
token: Optional[str] = None,
revision: Optional[str] = None,
create_pr: Optional[bool] = False,
# max_shard_size: Optional[Union[int, str]] = None, # TODO(QL): add arg
num_shards: Optional[dict[str, int]] = None,
embed_external_files: bool = True,
num_proc: Optional[int] = None,
) -> CommitInfo:
"""Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
data, the Parquet files will store the bytes of your images or audio files.
You can disable this by setting `embed_external_files` to False.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
config_name (`str`):
Configuration name of a dataset. Defaults to "default".
set_default (`bool`, *optional*):
Whether to set this configuration as the default one. Otherwise, the default configuration is the one
named "default".
data_dir (`str`, *optional*):
Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
from "default", else "data".
<Added version="2.17.0"/>
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"Upload dataset"`.
commit_description (`str`, *optional*):
Description of the commit that will be created.
Additionally, description of the PR if a PR is created (`create_pr` is True).
<Added version="2.16.0"/>
private (`bool`, *optional*):
Whether to make the repo private. If `None` (default), the repo will be public unless the
organization's default is private. This value is ignored if the repo already exists.
token (`str`, *optional*):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
if no token is passed and the user is not logged-in.
revision (`str`, *optional*):
Branch to push the uploaded files to. Defaults to the `"main"` branch.
create_pr (`bool`, *optional*, defaults to `False`):
Whether to create a PR with the uploaded files or directly commit.
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. Defaults to this dataset's `.num_shards`.
Use a dictionary to define a different num_shards for each split.
embed_external_files (`bool`, defaults to `True`):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
- [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when preparing and uploading the dataset.
This is helpful if the dataset is made of many samples or media files to embed.
Multiprocessing is disabled by default.
<Added version="4.0.0"/>
Return:
huggingface_hub.CommitInfo
Example:
```python
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
```
If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
```python
>>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
>>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
>>> # later
>>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
>>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
```
"""
if num_shards is None:
num_shards = dict.fromkeys(self)
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {'train': 128, 'test': 4}"
)
self._check_values_type()
self._check_values_features()
total_uploaded_size = 0
total_dataset_nbytes = 0
info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
info_to_dump.config_name = config_name
info_to_dump.splits = SplitDict()
for split in self.keys():
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
try:
repo_id = api.repo_info(repo_id, repo_type="dataset").id
except RepositoryNotFoundError:
repo_url = api.create_repo(
repo_id,
repo_type="dataset",
private=private,
exist_ok=True,
)
repo_id = repo_url.repo_id
if revision is not None and not revision.startswith("refs/pr/"):
# We do not call create_branch for a PR reference: 400 Bad Request
api.create_branch(
repo_id,
branch=revision,
token=token,
repo_type="dataset",
exist_ok=True,
)
if not data_dir:
data_dir = config_name if config_name != "default" else "data" # for backward compatibility
additions = []
for split in self.keys():
logger.info(f"Pushing split {split} to the Hub.")
# The split=key needs to be removed before merging
split_additions, uploaded_size, dataset_nbytes, num_examples = self[split]._push_parquet_shards_to_hub(
repo_id,
data_dir=data_dir,
split=split,
token=token,
revision=revision,
create_pr=create_pr,
# max_shard_size=max_shard_size, # TODO(QL): add arg
num_shards=num_shards.get(split),
embed_external_files=embed_external_files,
num_proc=num_proc,
)
additions += split_additions
total_uploaded_size += uploaded_size
total_dataset_nbytes += dataset_nbytes
info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=num_examples)
info_to_dump.download_checksums = None
info_to_dump.download_size = total_uploaded_size
info_to_dump.dataset_size = total_dataset_nbytes
info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
def get_deletions_and_dataset_card() -> tuple[str, list[CommitOperationDelete], str, Optional[str]]:
parent_commit = api.repo_info(repo_id, repo_type="dataset", revision=revision).sha
# Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
# and delete old split shards (if they exist)
repo_with_dataset_card, repo_with_dataset_infos = False, False
repo_splits: list[str] = [] # use a list to keep the order of the splits
deletions: list[CommitOperationDelete] = []
repo_files_to_add = [addition.path_in_repo for addition in additions]
for repo_file in api.list_repo_tree(
repo_id=repo_id,
revision=parent_commit,
repo_type="dataset",
token=token,
recursive=True,
):
if not isinstance(repo_file, RepoFile):
continue
if repo_file.rfilename == config.REPOCARD_FILENAME:
repo_with_dataset_card = True
elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
repo_with_dataset_infos = True
elif (
repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
and repo_file.rfilename not in repo_files_to_add
):
deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
elif fnmatch.fnmatch(
repo_file.rfilename,
PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*"),
):
pattern = glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED)
split_pattern_fields = string_to_dict(repo_file.rfilename, pattern)
assert split_pattern_fields is not None
repo_split = split_pattern_fields["split"]
if repo_split not in repo_splits:
repo_splits.append(repo_split)
# get the info from the README to update them
if repo_with_dataset_card:
dataset_card_path = api.hf_hub_download(
repo_id,
config.REPOCARD_FILENAME,
repo_type="dataset",
revision=parent_commit,
)
dataset_card = DatasetCard.load(Path(dataset_card_path))
dataset_card_data = dataset_card.data
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
# get the deprecated dataset_infos.json to update them
elif repo_with_dataset_infos:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
else:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
# create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
if not metadata_configs and repo_splits:
default_metadata_configs_to_dump = {
"data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
metadata_config_to_dump = {
"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
}
configs_to_dump = {config_name: metadata_config_to_dump}
if set_default and config_name != "default":
if metadata_configs:
current_default_config_name = metadata_configs.get_default_config_name()
if current_default_config_name == "default":
raise ValueError(
"There exists a configuration named 'default'. To set a different configuration as default, "
"rename the 'default' one first."
)
if current_default_config_name:
_ = metadata_configs[current_default_config_name].pop("default")
configs_to_dump[current_default_config_name] = metadata_configs[current_default_config_name]
metadata_config_to_dump["default"] = True
# push to the deprecated dataset_infos.json
if repo_with_dataset_infos:
dataset_infos_path = api.hf_hub_download(
repo_id,
config.DATASETDICT_INFOS_FILENAME,
repo_type="dataset",
revision=parent_commit,
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_infos[config_name] = asdict(info_to_dump)
new_dataset_infos = json.dumps(dataset_infos, indent=4)
else:
new_dataset_infos = None
# push to README
DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
MetadataConfigs(configs_to_dump).to_dataset_card_data(dataset_card_data)
new_dataset_card = (
DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
)
return parent_commit, deletions, new_dataset_card, new_dataset_infos
commit_message = commit_message if commit_message is not None else "Upload dataset"
if len(additions) > config.UPLOADS_MAX_NUMBER_PER_COMMIT:
logger.info(
f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
)
num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
for i in range(0, num_commits):
operations = additions[
i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
]
for retry, sleep_time in enumerate(itertools.chain(range(10), itertools.repeat(30)), start=1):
# We need to retry if another commit happens at the same time
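# sleep_time ramps from 0-9 seconds over the first ten attempts and then stays at 30 seconds; the random factor below spreads out concurrent retries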
sleep_time *= 1 + random.random()
try:
commit_info = api.create_commit(
repo_id,
operations=operations,
commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
commit_description=commit_description,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
except HfHubHTTPError as err:
if (
err.__context__
and isinstance(err.__context__, HTTPError)
and err.__context__.response.status_code == 409
):
# 409 is Conflict (another commit is in progress)
time.sleep(sleep_time)
logger.info(
f"Retrying intermediate commit for {repo_id}, {config_name} ({retry}/n with status_code {err.__context__.response.status_code})"
)
continue
else:
raise
break
logger.info(
f"Commit #{i + 1} completed"
+ (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ "."
)
last_commit_additions = []
else:
last_commit_additions = additions
for retry, sleep_time in enumerate(itertools.chain(range(10), itertools.repeat(30)), start=1):
# We need to retry if there was a commit in between in case it touched the dataset card data
sleep_time *= 1 + random.random()
parent_commit, deletions, dataset_card, dataset_infos = get_deletions_and_dataset_card()
dataset_card_additions = []
if dataset_infos:
dataset_card_additions.append(
CommitOperationAdd(
path_in_repo=config.DATASETDICT_INFOS_FILENAME,
path_or_fileobj=dataset_infos.encode("utf-8"),
)
)
dataset_card_additions.append(
CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
)
try:
commit_info = api.create_commit(
repo_id,
operations=last_commit_additions + dataset_card_additions + deletions,
commit_message=commit_message,
commit_description=commit_description,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
parent_commit=parent_commit,
)
except HfHubHTTPError as err:
if (
err.__context__
and isinstance(err.__context__, HTTPError)
and err.__context__.response.status_code in (412, 409)
):
# 412 is Precondition failed (parent_commit isn't satisfied)
# 409 is Conflict (another commit is in progress)
time.sleep(sleep_time)
logger.info(
f"Retrying commit for {repo_id}, {config_name} ({retry}/n with status_code {err.__context__.response.status_code})"
)
continue
else:
raise
break
return commit_info
| datasets/src/datasets/dataset_dict.py/0 | {
"file_path": "datasets/src/datasets/dataset_dict.py",
"repo_id": "datasets",
"token_count": 62611
} | 99 |
import os
from functools import partial
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
"""Read contents of compressed file as a filesystem with one file inside."""
root_marker = ""
protocol: str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
compression: str = None # compression type in fsspec. ex: "gzip"
extensions: list[str] = None # extensions of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__(
self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
):
"""
The compressed file system can be instantiated from any compressed file.
It reads the contents of the compressed file as a filesystem with one file inside, as if it were an archive.
The single file inside the filesystem is named after the compressed file,
without the compression extension at the end of the filename.
Args:
fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
mode (:obj:``str``): Currently, only 'rb' accepted
target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
"""
super().__init__(self, **kwargs)
self.fo = fo.__fspath__() if hasattr(fo, "__fspath__") else fo
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
self._open_with_fsspec = partial(
fsspec.open,
self.fo,
mode="rb",
protocol=target_protocol,
compression=self.compression,
client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
},
**(target_options or {}),
)
self.compressed_name = os.path.basename(self.fo.split("::")[0])
self.uncompressed_name = (
self.compressed_name[: self.compressed_name.rindex(".")]
if "." in self.compressed_name
else self.compressed_name
)
self.dir_cache = None
@classmethod
def _strip_protocol(cls, path):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(path).lstrip("/")
def _get_dirs(self):
if self.dir_cache is None:
f = {**self._open_with_fsspec().fs.info(self.fo), "name": self.uncompressed_name}
self.dir_cache = {f["name"]: f}
def cat(self, path: str):
with self._open_with_fsspec().open() as f:
return f.read()
def _open(
self,
path: str,
mode: str = "rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.fo} opened with mode 'rb'")
return self._open_with_fsspec().open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
"""Read contents of BZ2 file as a filesystem with one file inside."""
protocol = "bz2"
compression = "bz2"
extensions = [".bz2"]
class GzipFileSystem(BaseCompressedFileFileSystem):
"""Read contents of GZIP file as a filesystem with one file inside."""
protocol = "gzip"
compression = "gzip"
extensions = [".gz", ".gzip"]
class Lz4FileSystem(BaseCompressedFileFileSystem):
"""Read contents of LZ4 file as a filesystem with one file inside."""
protocol = "lz4"
compression = "lz4"
extensions = [".lz4"]
class XzFileSystem(BaseCompressedFileFileSystem):
"""Read contents of .xz (LZMA) file as a filesystem with one file inside."""
protocol = "xz"
compression = "xz"
extensions = [".xz"]
class ZstdFileSystem(BaseCompressedFileFileSystem):
"""
Read contents of .zstd file as a filesystem with one file inside.
"""
protocol = "zstd"
compression = "zstd"
extensions = [".zst", ".zstd"]
| datasets/src/datasets/filesystems/compression.py/0 | {
"file_path": "datasets/src/datasets/filesystems/compression.py",
"repo_id": "datasets",
"token_count": 1827
} | 100 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import tqdm as hf_tqdm
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
field: Optional[str] = None,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.field = field
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Json(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
field=field,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class JsonDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**to_json_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.encoding = "utf-8"
self.storage_options = storage_options or {}
self.to_json_kwargs = to_json_kwargs
def write(self) -> int:
_ = self.to_json_kwargs.pop("path_or_buf", None)
orient = self.to_json_kwargs.pop("orient", "records")
lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
if "index" not in self.to_json_kwargs and orient in ["split", "table"]:
self.to_json_kwargs["index"] = False
# Determine the default compression value based on self.path_or_buf type
default_compression = "infer" if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None
compression = self.to_json_kwargs.pop("compression", default_compression)
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
if not lines and self.batch_size < self.dataset.num_rows:
raise NotImplementedError(
"Output JSON will not be formatted correctly when lines = False and batch_size < number of rows in the dataset. Use pandas.DataFrame.to_json() instead."
)
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with fsspec.open(
self.path_or_buf, "wb", compression=compression, **(self.storage_options or {})
) as buffer:
written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
" was passed. Please provide a local path instead."
)
written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs)
return written
def _batch_json(self, args):
offset, orient, lines, to_json_kwargs = args
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs)
if not json_str.endswith("\n"):
json_str += "\n"
return json_str.encode(self.encoding)
def _write(
self,
file_obj: BinaryIO,
orient,
lines,
**to_json_kwargs,
) -> int:
"""Writes the pyarrow table as JSON lines to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
desc="Creating json from Arrow format",
):
json_str = self._batch_json((offset, orient, lines, to_json_kwargs))
written += file_obj.write(json_str)
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for json_str in hf_tqdm(
pool.imap(
self._batch_json,
[(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
desc="Creating json from Arrow format",
):
written += file_obj.write(json_str)
return written
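# --- usage sketch (illustrative addition, not part of the original module) ---
# Hypothetical example of driving the writer directly; in practice `Dataset.to_json()`
# is the public entry point that wraps it. The output path is an assumption.
def _example_write_jsonl() -> int:  # pragma: no cover - illustrative only
    ds = Dataset.from_dict({"col_1": [-1, 1, 10], "col_2": [None, 2, 20]})
    return JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()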
| datasets/src/datasets/io/json.py/0 | {
"file_path": "datasets/src/datasets/io/json.py",
"repo_id": "datasets",
"token_count": 3162
} | 101 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func):
"""
**Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either
multiprocessing.Pool or joblib for parallelization.
Args:
function (`Callable[[Any], Any]`): Function to be applied to `iterable`.
iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to.
num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib).
types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements.
disable_tqdm (`bool`): Whether to disable the tqdm progressbar.
desc (`str`): Prefix for the tqdm progressbar.
single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`.
Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an
element of `iterable`, and `rank` is used for progress bar.
"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
)
return _map_with_joblib(
function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
)
def _map_with_multiprocessing_pool(
function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
):
num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
split_kwds = [] # We organize the splits ourselves (contiguous splits)
for index in range(num_proc):
div = len(iterable) // num_proc
mod = len(iterable) % num_proc
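# the first `mod` slices receive one extra element so every item is assigned exactly once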
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], batched, batch_size, types, index, disable_tqdm, desc))
if len(iterable) != sum(len(i[1]) for i in split_kwds):
raise ValueError(
f"Error dividing inputs iterable among processes. "
f"Total number of objects {len(iterable)}, "
f"length: {sum(len(i[1]) for i in split_kwds)}"
)
logger.info(
f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
)
initargs, initializer = None, None
if not disable_tqdm:
initargs, initializer = (RLock(),), tqdm.set_lock
with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
mapped = pool.map(single_map_nested_func, split_kwds)
logger.info(f"Finished {num_proc} processes")
mapped = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"Unpacked {len(mapped)} objects")
return mapped
def _map_with_joblib(
function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
):
# progress bar is not yet supported for _map_with_joblib: applying tqdm accurately to joblib would require
# monkey-patching joblib internal classes, which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
return joblib.Parallel()(
joblib.delayed(single_map_nested_func)((function, obj, batched, batch_size, types, None, True, None))
for obj in iterable
)
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
"""
**Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization
implemented by joblib.
Args:
backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib.
Example usage:
```py
with parallel_backend('spark'):
dataset = load_dataset(..., num_proc=2)
```
"""
ParallelBackendConfig.backend_name = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
ParallelBackendConfig.backend_name = None
| datasets/src/datasets/parallel/parallel.py/0 | {
"file_path": "datasets/src/datasets/parallel/parallel.py",
"repo_id": "datasets",
"token_count": 1783
} | 102 |
import enum
import os
from typing import Optional
from huggingface_hub.utils import insecure_hashlib
from .. import config
from ..exceptions import (
ExpectedMoreDownloadedFilesError,
ExpectedMoreSplitsError,
NonMatchingChecksumError,
NonMatchingSplitsSizesError,
UnexpectedDownloadedFileError,
UnexpectedSplitsError,
)
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
"""`Enum` that specifies which verification checks to run.
The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns
when generating/downloading a dataset for the first time.
The verification modes:
| | Verification checks |
|---------------------------|------------------------------------------------------------------------------ |
| `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder |
| | and the validity (number of files, checksums, etc.) of downloaded files |
| `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files |
| `NO_CHECKS` | None |
"""
ALL_CHECKS = "all_checks"
BASIC_CHECKS = "basic_checks"
NO_CHECKS = "no_checks"
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
if expected_checksums is None:
logger.info("Unable to verify checksums.")
return
if len(set(expected_checksums) - set(recorded_checksums)) > 0:
raise ExpectedMoreDownloadedFilesError(str(set(expected_checksums) - set(recorded_checksums)))
if len(set(recorded_checksums) - set(expected_checksums)) > 0:
raise UnexpectedDownloadedFileError(str(set(recorded_checksums) - set(expected_checksums)))
bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
for_verification_name = " for " + verification_name if verification_name is not None else ""
if len(bad_urls) > 0:
raise NonMatchingChecksumError(
f"Checksums didn't match{for_verification_name}:\n"
f"{bad_urls}\n"
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
)
logger.info("All the checksums matched successfully" + for_verification_name)
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
if expected_splits is None:
logger.info("Unable to verify splits sizes.")
return
if len(set(expected_splits) - set(recorded_splits)) > 0:
raise ExpectedMoreSplitsError(str(set(expected_splits) - set(recorded_splits)))
if len(set(recorded_splits) - set(expected_splits)) > 0:
raise UnexpectedSplitsError(str(set(recorded_splits) - set(expected_splits)))
bad_splits = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(bad_splits) > 0:
raise NonMatchingSplitsSizesError(str(bad_splits))
logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
"""Compute the file size and the sha256 checksum of a file"""
if record_checksum:
m = insecure_hashlib.sha256()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(1 << 20), b""):
m.update(chunk)
checksum = m.hexdigest()
else:
checksum = None
return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
"""Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
Args:
dataset_size (int): Dataset size in bytes.
Returns:
bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
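# --- usage sketch (illustrative addition, not part of the original module) ---
# Hypothetical example of combining the helpers above: record the size/checksum of
# downloaded files, then compare them against the expected values.
def _example_verify(downloaded_paths: dict, expected_checksums: Optional[dict]) -> None:
    recorded = {url: get_size_checksum_dict(path) for url, path in downloaded_paths.items()}
    verify_checksums(expected_checksums, recorded, verification_name="dataset source files")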
| datasets/src/datasets/utils/info_utils.py/0 | {
"file_path": "datasets/src/datasets/utils/info_utils.py",
"repo_id": "datasets",
"token_count": 1731
} | 103 |
import os
from typing import TypeVar, Union
T = TypeVar("T")
ListLike = Union[list[T], tuple[T, ...]]
NestedDataStructureLike = Union[T, list[T], dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| datasets/src/datasets/utils/typing.py/0 | {
"file_path": "datasets/src/datasets/utils/typing.py",
"repo_id": "datasets",
"token_count": 74
} | 104 |
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Value
from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesList
from datasets.packaged_modules.json.json import Json, JsonConfig
@pytest.fixture
def jsonl_file(tmp_path):
filename = tmp_path / "file.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
# ndjson format is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
@pytest.fixture
def ndjson_file(tmp_path):
filename = tmp_path / "file.ndjson"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def jsonl_file_utf16_encoded(tmp_path):
filename = tmp_path / "file_utf16_encoded.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w", encoding="utf-16") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts(tmp_path):
filename = tmp_path / "file_with_list_of_dicts.json"
data = textwrap.dedent(
"""\
[
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_strings(tmp_path):
filename = tmp_path / "file_with_list_of_strings.json"
data = textwrap.dedent(
"""\
[
"First text.",
"Second text.",
"Third text."
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_strings_field(tmp_path):
path = tmp_path / "file.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
"First text.",
"Second text.",
"Third text."
]
}
"""
)
with open(path, "w") as f:
f.write(data)
return str(path)
@pytest.fixture
def json_file_with_dict_of_lists_field(tmp_path):
path = tmp_path / "file.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": {
"col_1": [-1, 1, 10],
"col_2": [null, 2, 20]
}
}
"""
)
with open(path, "w") as f:
f.write(data)
return str(path)
@pytest.fixture
def json_file_with_list_of_dicts_with_sorted_columns(tmp_path):
path = tmp_path / "file.json"
data = textwrap.dedent(
"""\
[
{"ID": 0, "Language": "Language-0", "Topic": "Topic-0"},
{"ID": 1, "Language": "Language-1", "Topic": "Topic-1"},
{"ID": 2, "Language": "Language-2", "Topic": "Topic-2"}
]
"""
)
with open(path, "w") as f:
f.write(data)
return str(path)
@pytest.fixture
def json_file_with_list_of_dicts_with_sorted_columns_field(tmp_path):
path = tmp_path / "file.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"ID": 0, "Language": "Language-0", "Topic": "Topic-0"},
{"ID": 1, "Language": "Language-1", "Topic": "Topic-1"},
{"ID": 2, "Language": "Language-2", "Topic": "Topic-2"}
]
}
"""
)
with open(path, "w") as f:
f.write(data)
return str(path)
def test_config_raises_when_invalid_name() -> None:
with pytest.raises(InvalidConfigName, match="Bad characters"):
_ = JsonConfig(name="name-with-*-invalid-character")
@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
with pytest.raises(ValueError, match="Expected a DataFilesDict"):
_ = JsonConfig(name="name", data_files=data_files)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("jsonl_file", {}),
("ndjson_file", {}),
("jsonl_file_utf16_encoded", {"encoding": "utf-16"}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
("json_file_with_list_of_strings", {}),
("json_file_with_list_of_strings_field", {"field": "field3"}),
("json_file_with_dict_of_lists_field", {"field": "field3"}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
if "list_of_strings" in file_fixture:
expected = {"text": ["First text.", "Second text.", "Third text."]}
else:
expected = {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]}
assert pa_table.to_pydict() == expected
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
(
"jsonl_file",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts_field",
{
"field": "field3",
"features": Features(
{"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")}
),
},
),
],
)
def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20], "missing_col": [None, None, None]}
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("json_file_with_list_of_dicts_with_sorted_columns", {}),
("json_file_with_list_of_dicts_with_sorted_columns_field", {"field": "field3"}),
],
)
def test_json_generate_tables_with_sorted_columns(file_fixture, config_kwargs, request):
builder = Json(**config_kwargs)
generator = builder._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.column_names == ["ID", "Language", "Topic"]
| datasets/tests/packaged_modules/test_json.py/0 | {
"file_path": "datasets/tests/packaged_modules/test_json.py",
"repo_id": "datasets",
"token_count": 3820
} | 105 |
import warnings
import pytest
import datasets.utils.deprecation_utils
from datasets.exceptions import (
ChecksumVerificationError,
ExpectedMoreDownloadedFilesError,
ExpectedMoreSplitsError,
NonMatchingChecksumError,
NonMatchingSplitsSizesError,
SplitsVerificationError,
UnexpectedDownloadedFileError,
UnexpectedSplitsError,
)
@pytest.mark.parametrize(
"error",
[
ChecksumVerificationError,
UnexpectedDownloadedFileError,
ExpectedMoreDownloadedFilesError,
NonMatchingChecksumError,
SplitsVerificationError,
UnexpectedSplitsError,
ExpectedMoreSplitsError,
NonMatchingSplitsSizesError,
],
)
def test_error_not_deprecated(error, monkeypatch):
monkeypatch.setattr(datasets.utils.deprecation_utils, "_emitted_deprecation_warnings", set())
with warnings.catch_warnings():
warnings.simplefilter("error")
error()
| datasets/tests/test_exceptions.py/0 | {
"file_path": "datasets/tests/test_exceptions.py",
"repo_id": "datasets",
"token_count": 360
} | 106 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
with parallel_backend("spark"):
assert ParallelBackendConfig.backend_name == "spark"
lst = [1, 2, 3]
with pytest.raises(ValueError):
with parallel_backend("unsupported backend"):
map_nested(add_one, lst, num_proc=2)
with pytest.raises(ValueError):
with parallel_backend("unsupported backend"):
map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
s1 = [1, 2]
s2 = {"a": 1, "b": 2}
s3 = {"a": [1, 2], "b": [3, 4]}
s4 = {"a": {"1": 1}, "b": 2}
s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
expected_map_nested_s1 = [2, 3]
expected_map_nested_s2 = {"a": 2, "b": 3}
expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark"):
assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| datasets/tests/test_parallel.py/0 | {
"file_path": "datasets/tests/test_parallel.py",
"repo_id": "datasets",
"token_count": 825
} | 107 |
import os
import pandas as pd
from huggingface_hub import hf_hub_download, upload_file
from huggingface_hub.utils import EntryNotFoundError
REPO_ID = "diffusers/benchmarks"
def has_previous_benchmark() -> str:
from run_all import FINAL_CSV_FILENAME
csv_path = None
try:
csv_path = hf_hub_download(repo_id=REPO_ID, repo_type="dataset", filename=FINAL_CSV_FILENAME)
except EntryNotFoundError:
csv_path = None
return csv_path
def filter_float(value):
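# previous benchmark CSVs may store cells as strings like "12.3 (11.9)"; keep only the leading number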
if isinstance(value, str):
return float(value.split()[0])
return value
def push_to_hf_dataset():
from run_all import FINAL_CSV_FILENAME, GITHUB_SHA
csv_path = has_previous_benchmark()
if csv_path is not None:
current_results = pd.read_csv(FINAL_CSV_FILENAME)
previous_results = pd.read_csv(csv_path)
numeric_columns = current_results.select_dtypes(include=["float64", "int64"]).columns
for column in numeric_columns:
# get previous values as floats, aligned to current index
prev_vals = previous_results[column].map(filter_float).reindex(current_results.index)
# get current values as floats
curr_vals = current_results[column].astype(float)
# stringify the current values
curr_str = curr_vals.map(str)
# build an appendage only when prev exists and differs
append_str = prev_vals.where(prev_vals.notnull() & (prev_vals != curr_vals), other=pd.NA).map(
lambda x: f" ({x})" if pd.notnull(x) else ""
)
# combine
current_results[column] = curr_str + append_str
os.remove(FINAL_CSV_FILENAME)
current_results.to_csv(FINAL_CSV_FILENAME, index=False)
commit_message = f"upload from sha: {GITHUB_SHA}" if GITHUB_SHA is not None else "upload benchmark results"
upload_file(
repo_id=REPO_ID,
path_in_repo=FINAL_CSV_FILENAME,
path_or_fileobj=FINAL_CSV_FILENAME,
repo_type="dataset",
commit_message=commit_message,
)
upload_file(
repo_id="diffusers/benchmark-analyzer",
path_in_repo=FINAL_CSV_FILENAME,
path_or_fileobj=FINAL_CSV_FILENAME,
repo_type="space",
commit_message=commit_message,
)
if __name__ == "__main__":
push_to_hf_dataset()
| diffusers/benchmarks/push_results.py/0 | {
"file_path": "diffusers/benchmarks/push_results.py",
"repo_id": "diffusers",
"token_count": 1053
} | 108 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Outpainting
Outpainting extends an image beyond its original boundaries, allowing you to add, replace, or modify visual elements in an image while preserving the original image. Like [inpainting](../using-diffusers/inpaint), you want to fill the white area (in this case, the area outside of the original image) with new visual elements while keeping the original image (represented by a mask of black pixels). There are a couple of ways to outpaint, such as with a [ControlNet](https://hf.co/blog/OzzyGT/outpainting-controlnet) or with [Differential Diffusion](https://hf.co/blog/OzzyGT/outpainting-differential-diffusion).
This guide will show you how to outpaint with an inpainting model, ControlNet, and a ZoeDepth estimator.
Before you begin, make sure you have the [controlnet_aux](https://github.com/huggingface/controlnet_aux) library installed so you can use the ZoeDepth estimator.
```py
!pip install -q controlnet_aux
```
## Image preparation
Start by picking an image to outpaint with and remove the background with a Space like [BRIA-RMBG-1.4](https://hf.co/spaces/briaai/BRIA-RMBG-1.4).
<iframe
src="https://briaai-bria-rmbg-1-4.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
For example, remove the background from this image of a pair of shoes.
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/original-jordan.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/no-background-jordan.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">background removed</figcaption>
</div>
</div>
[Stable Diffusion XL (SDXL)](../using-diffusers/sdxl) models work best with 1024x1024 images, but you can resize the image to any size as long as your hardware has enough memory to support it. The transparent background in the image should also be replaced with a white background. Create a function (like the one below) that scales and pastes the image onto a white background.
```py
import random
import requests
import torch
from controlnet_aux import ZoeDetector
from PIL import Image, ImageOps
from diffusers import (
AutoencoderKL,
ControlNetModel,
StableDiffusionXLControlNetPipeline,
StableDiffusionXLInpaintPipeline,
)
def scale_and_paste(original_image):
aspect_ratio = original_image.width / original_image.height
if original_image.width > original_image.height:
new_width = 1024
new_height = round(new_width / aspect_ratio)
else:
new_height = 1024
new_width = round(new_height * aspect_ratio)
resized_original = original_image.resize((new_width, new_height), Image.LANCZOS)
white_background = Image.new("RGBA", (1024, 1024), "white")
x = (1024 - new_width) // 2
y = (1024 - new_height) // 2
white_background.paste(resized_original, (x, y), resized_original)
return resized_original, white_background
original_image = Image.open(
requests.get(
"https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/no-background-jordan.png",
stream=True,
).raw
).convert("RGBA")
resized_img, white_bg_image = scale_and_paste(original_image)
```
To avoid adding unwanted extra details, use the ZoeDepth estimator to provide additional guidance during generation and to ensure the shoes remain consistent with the original image.
```py
zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
image_zoe = zoe(white_bg_image, detect_resolution=512, image_resolution=1024)
image_zoe
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/zoedepth-jordan.png"/>
</div>
## Outpaint
Once your image is ready, you can generate content in the white area around the shoes with [controlnet-inpaint-dreamer-sdxl](https://hf.co/destitech/controlnet-inpaint-dreamer-sdxl), a SDXL ControlNet trained for inpainting.
Load the inpainting ControlNet, ZoeDepth model, VAE and pass them to the [`StableDiffusionXLControlNetPipeline`]. Then you can create an optional `generate_image` function (for convenience) to outpaint an initial image.
```py
controlnets = [
ControlNetModel.from_pretrained(
"destitech/controlnet-inpaint-dreamer-sdxl", torch_dtype=torch.float16, variant="fp16"
),
ControlNetModel.from_pretrained(
"diffusers/controlnet-zoe-depth-sdxl-1.0", torch_dtype=torch.float16
),
]
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to("cuda")
pipeline = StableDiffusionXLControlNetPipeline.from_pretrained(
"SG161222/RealVisXL_V4.0", torch_dtype=torch.float16, variant="fp16", controlnet=controlnets, vae=vae
).to("cuda")
def generate_image(prompt, negative_prompt, inpaint_image, zoe_image, seed: int = None):
if seed is None:
seed = random.randint(0, 2**32 - 1)
generator = torch.Generator(device="cpu").manual_seed(seed)
image = pipeline(
prompt,
negative_prompt=negative_prompt,
image=[inpaint_image, zoe_image],
guidance_scale=6.5,
num_inference_steps=25,
generator=generator,
controlnet_conditioning_scale=[0.5, 0.8],
control_guidance_end=[0.9, 0.6],
).images[0]
return image
prompt = "nike air jordans on a basketball court"
negative_prompt = ""
temp_image = generate_image(prompt, negative_prompt, white_bg_image, image_zoe, 908097)
```
Paste the original image over the initial outpainted image. You'll improve the outpainted background in a later step.
```py
x = (1024 - resized_img.width) // 2
y = (1024 - resized_img.height) // 2
temp_image.paste(resized_img, (x, y), resized_img)
temp_image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/initial-outpaint.png"/>
</div>
> [!TIP]
> Now is a good time to free up some memory if you're running low!
>
> ```py
> pipeline=None
> torch.cuda.empty_cache()
> ```
Now that you have an initial outpainted image, load the [`StableDiffusionXLInpaintPipeline`] with the [RealVisXL](https://hf.co/SG161222/RealVisXL_V4.0) model to generate the final outpainted image with better quality.
```py
pipeline = StableDiffusionXLInpaintPipeline.from_pretrained(
"OzzyGT/RealVisXL_V4.0_inpainting",
torch_dtype=torch.float16,
variant="fp16",
vae=vae,
).to("cuda")
```
Prepare a mask for the final outpainted image. To create a more natural transition between the original image and the outpainted background, blur the mask to help it blend better.
```py
mask = Image.new("L", temp_image.size)
mask.paste(resized_img.split()[3], (x, y))
mask = ImageOps.invert(mask)
final_mask = mask.point(lambda p: p > 128 and 255)
mask_blurred = pipeline.mask_processor.blur(final_mask, blur_factor=20)
mask_blurred
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/blurred-mask.png"/>
</div>
Create a better prompt and pass it to the `generate_outpaint` function to generate the final outpainted image. Again, paste the original image over the final outpainted background.
```py
def generate_outpaint(prompt, negative_prompt, image, mask, seed: int = None):
if seed is None:
seed = random.randint(0, 2**32 - 1)
generator = torch.Generator(device="cpu").manual_seed(seed)
image = pipeline(
prompt,
negative_prompt=negative_prompt,
image=image,
mask_image=mask,
guidance_scale=10.0,
strength=0.8,
num_inference_steps=30,
generator=generator,
).images[0]
return image
prompt = "high quality photo of nike air jordans on a basketball court, highly detailed"
negative_prompt = ""
final_image = generate_outpaint(prompt, negative_prompt, temp_image, mask_blurred, 7688778)
x = (1024 - resized_img.width) // 2
y = (1024 - resized_img.height) // 2
final_image.paste(resized_img, (x, y), resized_img)
final_image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/final-outpaint.png"/>
</div>
| diffusers/docs/source/en/advanced_inference/outpaint.md/0 | {
"file_path": "diffusers/docs/source/en/advanced_inference/outpaint.md",
"repo_id": "diffusers",
"token_count": 3145
} | 109 |
# Pipeline blocks
## ModularPipelineBlocks
[[autodoc]] diffusers.modular_pipelines.modular_pipeline.ModularPipelineBlocks
## SequentialPipelineBlocks
[[autodoc]] diffusers.modular_pipelines.modular_pipeline.SequentialPipelineBlocks
## LoopSequentialPipelineBlocks
[[autodoc]] diffusers.modular_pipelines.modular_pipeline.LoopSequentialPipelineBlocks
## AutoPipelineBlocks
[[autodoc]] diffusers.modular_pipelines.modular_pipeline.AutoPipelineBlocks | diffusers/docs/source/en/api/modular_diffusers/pipeline_blocks.md/0 | {
"file_path": "diffusers/docs/source/en/api/modular_diffusers/pipeline_blocks.md",
"repo_id": "diffusers",
"token_count": 159
} | 110 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/docs/diffusers/main/en/tutorials/using_peft_for_inference" target="_blank" rel="noopener">
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</a>
</div>
</div>
# CogVideoX
[CogVideoX](https://huggingface.co/papers/2408.06072) is a large diffusion transformer model - available in 2B and 5B parameters - designed to generate longer and more consistent videos from text. This model uses a 3D causal variational autoencoder to more efficiently process video data by reducing sequence length (and associated training compute) and preventing flickering in generated videos. An "expert" transformer with adaptive LayerNorm improves alignment between text and video, and 3D full attention helps accurately capture motion and time in generated videos.
You can find all the original CogVideoX checkpoints under the [CogVideoX](https://huggingface.co/collections/THUDM/cogvideo-66c08e62f1685a3ade464cce) collection.
> [!TIP]
> Click on the CogVideoX models in the right sidebar for more examples of other video generation tasks.
The example below demonstrates how to generate a video optimized for memory or inference speed.
<hfoptions id="usage">
<hfoption id="memory">
Refer to the [Reduce memory usage](../../optimization/memory) guide for more details about the various memory saving techniques.
The quantized CogVideoX 5B model below requires ~16GB of VRAM.
```py
import torch
from diffusers import CogVideoXPipeline, AutoModel
from diffusers.quantizers import PipelineQuantizationConfig
from diffusers.hooks import apply_group_offloading
from diffusers.utils import export_to_video
# quantize weights to int8 with torchao
pipeline_quant_config = PipelineQuantizationConfig(
quant_backend="torchao",
quant_kwargs={"quant_type": "int8wo"},
components_to_quantize=["transformer"]
)
# fp8 layerwise weight-casting
transformer = AutoModel.from_pretrained(
"THUDM/CogVideoX-5b",
subfolder="transformer",
torch_dtype=torch.bfloat16
)
transformer.enable_layerwise_casting(
storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16
)
pipeline = CogVideoXPipeline.from_pretrained(
"THUDM/CogVideoX-5b",
transformer=transformer,
quantization_config=pipeline_quant_config,
torch_dtype=torch.bfloat16
)
pipeline.to("cuda")
# model-offloading
pipeline.enable_model_cpu_offload()
prompt = """
A detailed wooden toy ship with intricately carved masts and sails is seen gliding smoothly over a plush, blue carpet that mimics the waves of the sea.
The ship's hull is painted a rich brown, with tiny windows. The carpet, soft and textured, provides a perfect backdrop, resembling an oceanic expanse.
Surrounding the ship are various other toys and children's items, hinting at a playful environment. The scene captures the innocence and imagination of childhood,
with the toy ship's journey symbolizing endless adventures in a whimsical, indoor setting.
"""
video = pipeline(
prompt=prompt,
guidance_scale=6,
num_inference_steps=50
).frames[0]
export_to_video(video, "output.mp4", fps=8)
```
</hfoption>
<hfoption id="inference speed">
[Compilation](../../optimization/fp16#torchcompile) is slow the first time but subsequent calls to the pipeline are faster.
The average inference time with torch.compile on an 80GB A100 is 76.27 seconds, compared to 96.89 seconds for an uncompiled model.
```py
import torch
from diffusers import CogVideoXPipeline
from diffusers.utils import export_to_video
pipeline = CogVideoXPipeline.from_pretrained(
"THUDM/CogVideoX-2b",
torch_dtype=torch.float16
).to("cuda")
# torch.compile
pipeline.transformer.to(memory_format=torch.channels_last)
pipeline.transformer = torch.compile(
pipeline.transformer, mode="max-autotune", fullgraph=True
)
prompt = """
A detailed wooden toy ship with intricately carved masts and sails is seen gliding smoothly over a plush, blue carpet that mimics the waves of the sea.
The ship's hull is painted a rich brown, with tiny windows. The carpet, soft and textured, provides a perfect backdrop, resembling an oceanic expanse.
Surrounding the ship are various other toys and children's items, hinting at a playful environment. The scene captures the innocence and imagination of childhood,
with the toy ship's journey symbolizing endless adventures in a whimsical, indoor setting.
"""
video = pipeline(
prompt=prompt,
guidance_scale=6,
num_inference_steps=50
).frames[0]
export_to_video(video, "output.mp4", fps=8)
```
</hfoption>
</hfoptions>
## Notes
- CogVideoX supports LoRAs with [`~loaders.CogVideoXLoraLoaderMixin.load_lora_weights`].
<details>
<summary>Show example code</summary>
```py
import torch
from diffusers import CogVideoXPipeline
from diffusers.hooks import apply_group_offloading
from diffusers.utils import export_to_video
pipeline = CogVideoXPipeline.from_pretrained(
"THUDM/CogVideoX-5b",
torch_dtype=torch.bfloat16
)
pipeline.to("cuda")
# load LoRA weights
pipeline.load_lora_weights("finetrainers/CogVideoX-1.5-crush-smol-v0", adapter_name="crush-lora")
pipeline.set_adapters("crush-lora", 0.9)
# model-offloading
pipeline.enable_model_cpu_offload()
prompt = """
PIKA_CRUSH A large metal cylinder is seen pressing down on a pile of Oreo cookies, flattening them as if they were under a hydraulic press.
"""
negative_prompt = "inconsistent motion, blurry motion, worse quality, degenerate outputs, deformed outputs"
video = pipeline(
prompt=prompt,
negative_prompt=negative_prompt,
num_frames=81,
height=480,
width=768,
num_inference_steps=50
).frames[0]
export_to_video(video, "output.mp4", fps=16)
```
</details>
- The text-to-video (T2V) checkpoints work best with a resolution of 1360x768 because that was the resolution they were pretrained on.
- The image-to-video (I2V) checkpoints work with multiple resolutions. The width can vary from 768 to 1360, but the height must be 768. Both height and width must be divisible by 16.
- Both T2V and I2V checkpoints work best with 81 and 161 frames. It is recommended to export the generated video at 16fps.
- Refer to the table below to view memory usage when various memory-saving techniques are enabled, and see the sketch after the table for how to turn them on.
| method | memory usage (enabled) | memory usage (disabled) |
|---|---|---|
| enable_model_cpu_offload | 19GB | 33GB |
| enable_sequential_cpu_offload | <4GB | ~33GB (very slow inference speed) |
| enable_tiling | 11GB (with enable_model_cpu_offload) | --- |
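The methods in the table are toggled directly on a loaded pipeline. A minimal sketch, assuming the `THUDM/CogVideoX-5b` pipeline loaded as in the examples above (exact savings depend on hardware and resolution):

```py
# enable CPU offloading and VAE tiling to reduce peak VRAM usage
pipeline.enable_model_cpu_offload()  # move submodules to the GPU only when they are needed
pipeline.vae.enable_tiling()         # decode the video latents in tiles instead of all at once
```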
## CogVideoXPipeline
[[autodoc]] CogVideoXPipeline
- all
- __call__
## CogVideoXImageToVideoPipeline
[[autodoc]] CogVideoXImageToVideoPipeline
- all
- __call__
## CogVideoXVideoToVideoPipeline
[[autodoc]] CogVideoXVideoToVideoPipeline
- all
- __call__
## CogVideoXFunControlPipeline
[[autodoc]] CogVideoXFunControlPipeline
- all
- __call__
## CogVideoXPipelineOutput
[[autodoc]] pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput
| diffusers/docs/source/en/api/pipelines/cogvideox.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/cogvideox.md",
"repo_id": "diffusers",
"token_count": 2455
} | 111 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
> [!WARNING]
> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model.
# Dance Diffusion
[Dance Diffusion](https://github.com/Harmonai-org/sample-generator) is by Zach Evans.
Dance Diffusion is the first in a suite of generative audio tools for producers and musicians released by [Harmonai](https://github.com/Harmonai-org).
<Tip>
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
</Tip>
## DanceDiffusionPipeline
[[autodoc]] DanceDiffusionPipeline
- all
- __call__
## AudioPipelineOutput
[[autodoc]] pipelines.AudioPipelineOutput
| diffusers/docs/source/en/api/pipelines/dance_diffusion.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/dance_diffusion.md",
"repo_id": "diffusers",
"token_count": 422
} | 112 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
> [!WARNING]
> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model.
# Text-to-video
<div class="flex flex-wrap space-x-1">
<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>
[ModelScope Text-to-Video Technical Report](https://huggingface.co/papers/2308.06571) is by Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, Shiwei Zhang.
The abstract from the paper is:
*This paper introduces ModelScopeT2V, a text-to-video synthesis model that evolves from a text-to-image synthesis model (i.e., Stable Diffusion). ModelScopeT2V incorporates spatio-temporal blocks to ensure consistent frame generation and smooth movement transitions. The model could adapt to varying frame numbers during training and inference, rendering it suitable for both image-text and video-text datasets. ModelScopeT2V brings together three components (i.e., VQGAN, a text encoder, and a denoising UNet), totally comprising 1.7 billion parameters, in which 0.5 billion parameters are dedicated to temporal capabilities. The model demonstrates superior performance over state-of-the-art methods across three evaluation metrics. The code and an online demo are available at https://modelscope.cn/models/damo/text-to-video-synthesis/summary.*
You can find additional information about Text-to-Video on the [project page](https://modelscope.cn/models/damo/text-to-video-synthesis/summary), [original codebase](https://github.com/modelscope/modelscope/), and try it out in a [demo](https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis). Official checkpoints can be found at [damo-vilab](https://huggingface.co/damo-vilab) and [cerspense](https://huggingface.co/cerspense).
## Usage example
### `text-to-video-ms-1.7b`
Let's start by generating a short video with the default length of 16 frames (2s at 8 fps):
```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
pipe = pipe.to("cuda")
prompt = "Spiderman is surfing"
video_frames = pipe(prompt).frames[0]
video_path = export_to_video(video_frames)
video_path
```
Diffusers supports different optimization techniques to improve the latency
and memory footprint of a pipeline. Since videos are often more memory-heavy than images,
we can enable CPU offloading and VAE slicing to keep the memory footprint at bay.
Let's generate a video of 8 seconds (64 frames) on the same GPU using CPU offloading and VAE slicing:
```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
pipe.enable_model_cpu_offload()
# memory optimization
pipe.enable_vae_slicing()
prompt = "Darth Vader surfing a wave"
video_frames = pipe(prompt, num_frames=64).frames[0]
video_path = export_to_video(video_frames)
video_path
```
It takes just **7 GB of GPU memory** to generate the 64 video frames using PyTorch 2.0, "fp16" precision, and the techniques mentioned above.
We can also use a different scheduler easily, using the same method we'd use for Stable Diffusion:
```python
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
prompt = "Spiderman is surfing"
video_frames = pipe(prompt, num_inference_steps=25).frames[0]
video_path = export_to_video(video_frames)
video_path
```
Here are some sample outputs:
<table>
<tr>
<td><center>
An astronaut riding a horse.
<br>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astr.gif"
alt="An astronaut riding a horse."
style="width: 300px;" />
</center></td>
<td ><center>
Darth vader surfing in waves.
<br>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/vader.gif"
alt="Darth vader surfing in waves."
style="width: 300px;" />
</center></td>
</tr>
</table>
### `cerspense/zeroscope_v2_576w` & `cerspense/zeroscope_v2_XL`
The Zeroscope models are watermark-free and have been trained on specific resolutions such as `576x320` and `1024x576`.
One should first generate a video using the lower resolution checkpoint [`cerspense/zeroscope_v2_576w`](https://huggingface.co/cerspense/zeroscope_v2_576w) with [`TextToVideoSDPipeline`],
which can then be upscaled using [`VideoToVideoSDPipeline`] and [`cerspense/zeroscope_v2_XL`](https://huggingface.co/cerspense/zeroscope_v2_XL).
```py
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
from PIL import Image
pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
# memory optimization
pipe.unet.enable_forward_chunking(chunk_size=1, dim=1)
pipe.enable_vae_slicing()
prompt = "Darth Vader surfing a wave"
video_frames = pipe(prompt, num_frames=24).frames[0]
video_path = export_to_video(video_frames)
video_path
```
Now the video can be upscaled:
```py
pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
# memory optimization
pipe.unet.enable_forward_chunking(chunk_size=1, dim=1)
pipe.enable_vae_slicing()
video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames]
video_frames = pipe(prompt, video=video, strength=0.6).frames[0]
video_path = export_to_video(video_frames)
video_path
```
Here are some sample outputs:
<table>
<tr>
<td ><center>
Darth vader surfing in waves.
<br>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/darthvader_cerpense.gif"
alt="Darth vader surfing in waves."
style="width: 576px;" />
</center></td>
</tr>
</table>
## Tips
Video generation is memory-intensive, and one way to reduce memory usage is to call `enable_forward_chunking` on the pipeline's UNet so the entire feedforward layer doesn't run at once. Running it in smaller chunks inside a loop lowers peak memory usage.
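For example, assuming a text-to-video pipeline loaded as `pipe` (as in the snippets above), feed-forward chunking can be enabled with:
```py
# Chunk the feedforward computation over the temporal dimension to reduce peak memory.
pipe.unet.enable_forward_chunking(chunk_size=1, dim=1)
```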
Check out the [Text or image-to-video](text-img2vid) guide for more details about how certain parameters can affect video generation and how to optimize inference by reducing memory usage.
<Tip>
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
</Tip>
## TextToVideoSDPipeline
[[autodoc]] TextToVideoSDPipeline
- all
- __call__
## VideoToVideoSDPipeline
[[autodoc]] VideoToVideoSDPipeline
- all
- __call__
## TextToVideoSDPipelineOutput
[[autodoc]] pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput
| diffusers/docs/source/en/api/pipelines/text_to_video.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/text_to_video.md",
"repo_id": "diffusers",
"token_count": 2721
} | 113 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# DPMSolverMultistepInverse
`DPMSolverMultistepInverse` is the inverted scheduler from [DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps](https://huggingface.co/papers/2206.00927) and [DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models](https://huggingface.co/papers/2211.01095) by Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu.
The implementation is mostly based on the DDIM inversion definition of [Null-text Inversion for Editing Real Images using Guided Diffusion Models](https://huggingface.co/papers/2211.09794) and notebook implementation of the [`DiffEdit`] latent inversion from [Xiang-cd/DiffEdit-stable-diffusion](https://github.com/Xiang-cd/DiffEdit-stable-diffusion/blob/main/diffedit.ipynb).
## Tips
Dynamic thresholding from [Imagen](https://huggingface.co/papers/2205.11487) is supported, and for pixel-space
diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic
thresholding. This thresholding method is unsuitable for latent-space diffusion models such as
Stable Diffusion.
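As a minimal sketch (assuming a pixel-space model), the scheduler can be configured this way:
```py
from diffusers import DPMSolverMultistepInverseScheduler

# Dynamic thresholding is only meant for pixel-space diffusion models.
inverse_scheduler = DPMSolverMultistepInverseScheduler(
    algorithm_type="dpmsolver++",
    thresholding=True,
)
```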
## DPMSolverMultistepInverseScheduler
[[autodoc]] DPMSolverMultistepInverseScheduler
## SchedulerOutput
[[autodoc]] schedulers.scheduling_utils.SchedulerOutput
| diffusers/docs/source/en/api/schedulers/multistep_dpm_solver_inverse.md/0 | {
"file_path": "diffusers/docs/source/en/api/schedulers/multistep_dpm_solver_inverse.md",
"repo_id": "diffusers",
"token_count": 547
} | 114 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Evaluating Diffusion Models
<a target="_blank" href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/evaluation.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
> [!TIP]
> This document has now grown outdated given the emergence of dedicated evaluation frameworks for image-generation diffusion models. Please check
> out works like [HEIM](https://crfm.stanford.edu/helm/heim/latest/), [T2I-Compbench](https://huggingface.co/papers/2307.06350),
> [GenEval](https://huggingface.co/papers/2310.11513).
Evaluation of generative models like [Stable Diffusion](https://huggingface.co/docs/diffusers/stable_diffusion) is subjective in nature. But as practitioners and researchers, we often have to make careful choices amongst many different possibilities. So, when working with different generative models (like GANs, Diffusion, etc.), how do we choose one over the other?
Qualitative evaluation of such models can be error-prone and might incorrectly influence a decision.
However, quantitative metrics don't necessarily correspond to image quality. So, usually, a combination
of both qualitative and quantitative evaluations provides a stronger signal when choosing one model
over the other.
In this document, we provide a non-exhaustive overview of qualitative and quantitative methods to evaluate Diffusion models. For quantitative methods, we specifically focus on how to implement them alongside `diffusers`.
The methods shown in this document can also be used to evaluate different [noise schedulers](https://huggingface.co/docs/diffusers/main/en/api/schedulers/overview) keeping the underlying generation model fixed.
## Scenarios
We cover Diffusion models with the following pipelines:
- Text-guided image generation (such as the [`StableDiffusionPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/text2img)).
- Text-guided image generation, additionally conditioned on an input image (such as the [`StableDiffusionImg2ImgPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/img2img) and [`StableDiffusionInstructPix2PixPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/pix2pix)).
- Class-conditioned image generation models (such as the [`DiTPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/dit)).
## Qualitative Evaluation
Qualitative evaluation typically involves human assessment of generated images. Quality is measured across aspects such as compositionality, image-text alignment, and spatial relations. Common prompts provide a degree of uniformity for subjective metrics.
DrawBench and PartiPrompts are prompt datasets used for qualitative benchmarking. DrawBench and PartiPrompts were introduced by [Imagen](https://imagen.research.google/) and [Parti](https://parti.research.google/) respectively.
From the [official Parti website](https://parti.research.google/):
> PartiPrompts (P2) is a rich set of over 1600 prompts in English that we release as part of this work. P2 can be used to measure model capabilities across various categories and challenge aspects.

PartiPrompts has the following columns:
- Prompt
- Category of the prompt (such as "Abstract", "World Knowledge", etc.)
- Challenge reflecting the difficulty (such as "Basic", "Complex", "Writing & Symbols", etc.)
These benchmarks allow for side-by-side human evaluation of different image generation models.
For this, the 🧨 Diffusers team has built **Open Parti Prompts**, which is a community-driven qualitative benchmark based on Parti Prompts to compare state-of-the-art open-source diffusion models:
- [Open Parti Prompts Game](https://huggingface.co/spaces/OpenGenAI/open-parti-prompts): For 10 parti prompts, 4 generated images are shown and the user selects the image that suits the prompt best.
- [Open Parti Prompts Leaderboard](https://huggingface.co/spaces/OpenGenAI/parti-prompts-leaderboard): The leaderboard comparing the currently best open-sourced diffusion models to each other.
To manually compare images, let's see how we can use `diffusers` on a couple of PartiPrompts.
Below we show some prompts sampled across different challenges: Basic, Complex, Linguistic Structures, Imagination, and Writing & Symbols. Here we are using PartiPrompts as a [dataset](https://huggingface.co/datasets/nateraw/parti-prompts).
```python
from datasets import load_dataset
# prompts = load_dataset("nateraw/parti-prompts", split="train")
# prompts = prompts.shuffle()
# sample_prompts = [prompts[i]["Prompt"] for i in range(5)]
# Fixing these sample prompts in the interest of reproducibility.
sample_prompts = [
"a corgi",
"a hot air balloon with a yin-yang symbol, with the moon visible in the daytime sky",
"a car with no windows",
"a cube made of porcupine",
'The saying "BE EXCELLENT TO EACH OTHER" written on a red brick wall with a graffiti image of a green alien wearing a tuxedo. A yellow fire hydrant is on a sidewalk in the foreground.',
]
```
Now we can use these prompts to generate some images using Stable Diffusion ([v1-4 checkpoint](https://huggingface.co/CompVis/stable-diffusion-v1-4)):
```python
import torch
from diffusers import StableDiffusionPipeline

sd_pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")

seed = 0
generator = torch.manual_seed(seed)
images = sd_pipeline(sample_prompts, num_images_per_prompt=1, generator=generator).images
```

We can also set `num_images_per_prompt` accordingly to compare different images for the same prompt. Running the same pipeline but with a different checkpoint ([v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5)), yields:

Once several images are generated from all the prompts using multiple models (under evaluation), these results are presented to human evaluators for scoring. For
more details on the DrawBench and PartiPrompts benchmarks, refer to their respective papers.
<Tip>
It is useful to look at some inference samples while a model is training to measure the
training progress. In our [training scripts](https://github.com/huggingface/diffusers/tree/main/examples/), we support this utility with additional support for
logging to TensorBoard and Weights & Biases.
</Tip>
## Quantitative Evaluation
In this section, we will walk you through how to evaluate three different diffusion pipelines using:
- CLIP score
- CLIP directional similarity
- FID
### Text-guided image generation
[CLIP score](https://huggingface.co/papers/2104.08718) measures the compatibility of image-caption pairs. Higher CLIP scores imply higher compatibility. The CLIP score is a quantitative measurement of the qualitative concept "compatibility". Image-caption pair compatibility can also be thought of as the semantic similarity between the image and the caption. CLIP score was found to have high correlation with human judgement.
Let's first load a [`StableDiffusionPipeline`]:
```python
from diffusers import StableDiffusionPipeline
import torch
model_ckpt = "CompVis/stable-diffusion-v1-4"
sd_pipeline = StableDiffusionPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16).to("cuda")
```
Generate some images with multiple prompts:
```python
prompts = [
"a photo of an astronaut riding a horse on mars",
"A high tech solarpunk utopia in the Amazon rainforest",
"A pikachu fine dining with a view to the Eiffel Tower",
"A mecha robot in a favela in expressionist style",
"an insect robot preparing a delicious meal",
"A small cabin on top of a snowy mountain in the style of Disney, artstation",
]
images = sd_pipeline(prompts, num_images_per_prompt=1, output_type="np").images
print(images.shape)
# (6, 512, 512, 3)
```
And then, we calculate the CLIP score.
```python
from torchmetrics.functional.multimodal import clip_score
from functools import partial
clip_score_fn = partial(clip_score, model_name_or_path="openai/clip-vit-base-patch16")
def calculate_clip_score(images, prompts):
images_int = (images * 255).astype("uint8")
clip_score = clip_score_fn(torch.from_numpy(images_int).permute(0, 3, 1, 2), prompts).detach()
return round(float(clip_score), 4)
sd_clip_score = calculate_clip_score(images, prompts)
print(f"CLIP score: {sd_clip_score}")
# CLIP score: 35.7038
```
In the above example, we generated one image per prompt. If we generated multiple images per prompt, we would have to take the average score from the generated images per prompt.
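For instance, a hypothetical sketch of such per-prompt averaging (assuming `n` images per prompt, ordered prompt by prompt in `images`, and reusing the `calculate_clip_score` helper defined above):
```python
import numpy as np

def calculate_mean_clip_score(images, prompts, n):
    # Score each prompt's group of images separately, then average the per-prompt scores.
    per_prompt_scores = [
        calculate_clip_score(images[i * n : (i + 1) * n], [prompts[i]] * n)
        for i in range(len(prompts))
    ]
    return round(float(np.mean(per_prompt_scores)), 4)
```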
Now, if we wanted to compare two checkpoints compatible with the [`StableDiffusionPipeline`] we should pass a generator while calling the pipeline. First, we generate images with a
fixed seed with the [v1-4 Stable Diffusion checkpoint](https://huggingface.co/CompVis/stable-diffusion-v1-4):
```python
seed = 0
generator = torch.manual_seed(seed)
images = sd_pipeline(prompts, num_images_per_prompt=1, generator=generator, output_type="np").images
```
Then we load the [v1-5 checkpoint](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) to generate images:
```python
model_ckpt_1_5 = "stable-diffusion-v1-5/stable-diffusion-v1-5"
sd_pipeline_1_5 = StableDiffusionPipeline.from_pretrained(model_ckpt_1_5, torch_dtype=torch.float16).to("cuda")
images_1_5 = sd_pipeline_1_5(prompts, num_images_per_prompt=1, generator=generator, output_type="np").images
```
And finally, we compare their CLIP scores:
```python
sd_clip_score_1_4 = calculate_clip_score(images, prompts)
print(f"CLIP Score with v-1-4: {sd_clip_score_1_4}")
# CLIP Score with v-1-4: 34.9102
sd_clip_score_1_5 = calculate_clip_score(images_1_5, prompts)
print(f"CLIP Score with v-1-5: {sd_clip_score_1_5}")
# CLIP Score with v-1-5: 36.2137
```
It seems like the [v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) checkpoint performs better than its predecessor. Note, however, that the number of prompts we used to compute the CLIP scores is quite low. For a more practical evaluation, this number should be considerably higher, and the prompts should be diverse.
<Tip warning={true}>
By construction, there are some limitations in this score. The captions in the training dataset
were crawled from the web and extracted from `alt` and similar tags associated with an image on the internet. They are not necessarily representative of what a human being would use to describe an image. Hence we
They are not necessarily representative of what a human being would use to describe an image. Hence we
had to "engineer" some prompts here.
</Tip>
### Image-conditioned text-to-image generation
In this case, we condition the generation pipeline with an input image as well as a text prompt. Let's take the [`StableDiffusionInstructPix2PixPipeline`], as an example. It takes an edit instruction as an input prompt and an input image to be edited.
Here is one example:

One strategy to evaluate such a model is to measure the consistency of the change between the two images (in [CLIP](https://huggingface.co/docs/transformers/model_doc/clip) space) with the change between the two image captions (as shown in [CLIP-Guided Domain Adaptation of Image Generators](https://huggingface.co/papers/2108.00946)). This is referred to as the "**CLIP directional similarity**".
- Caption 1 corresponds to the input image (image 1) that is to be edited.
- Caption 2 corresponds to the edited image (image 2). It should reflect the edit instruction.
Following is a pictorial overview:

We have prepared a mini dataset to implement this metric. Let's first load the dataset.
```python
from datasets import load_dataset
dataset = load_dataset("sayakpaul/instructpix2pix-demo", split="train")
dataset.features
```
```bash
{'input': Value(dtype='string', id=None),
'edit': Value(dtype='string', id=None),
'output': Value(dtype='string', id=None),
'image': Image(decode=True, id=None)}
```
Here we have:
- `input` is a caption corresponding to the `image`.
- `edit` denotes the edit instruction.
- `output` denotes the modified caption reflecting the `edit` instruction.
Let's take a look at a sample.
```python
idx = 0
print(f"Original caption: {dataset[idx]['input']}")
print(f"Edit instruction: {dataset[idx]['edit']}")
print(f"Modified caption: {dataset[idx]['output']}")
```
```bash
Original caption: 2. FAROE ISLANDS: An archipelago of 18 mountainous isles in the North Atlantic Ocean between Norway and Iceland, the Faroe Islands has 'everything you could hope for', according to Big 7 Travel. It boasts 'crystal clear waterfalls, rocky cliffs that seem to jut out of nowhere and velvety green hills'
Edit instruction: make the isles all white marble
Modified caption: 2. WHITE MARBLE ISLANDS: An archipelago of 18 mountainous white marble isles in the North Atlantic Ocean between Norway and Iceland, the White Marble Islands has 'everything you could hope for', according to Big 7 Travel. It boasts 'crystal clear waterfalls, rocky cliffs that seem to jut out of nowhere and velvety green hills'
```
And here is the image:
```python
dataset[idx]["image"]
```

We will first edit the images of our dataset with the edit instruction and compute the directional similarity.
Let's first load the [`StableDiffusionInstructPix2PixPipeline`]:
```python
from diffusers import StableDiffusionInstructPix2PixPipeline
instruct_pix2pix_pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")
```
Now, we perform the edits:
```python
import numpy as np
def edit_image(input_image, instruction):
image = instruct_pix2pix_pipeline(
instruction,
image=input_image,
output_type="np",
generator=generator,
).images[0]
return image
input_images = []
original_captions = []
modified_captions = []
edited_images = []
for idx in range(len(dataset)):
input_image = dataset[idx]["image"]
edit_instruction = dataset[idx]["edit"]
edited_image = edit_image(input_image, edit_instruction)
input_images.append(np.array(input_image))
original_captions.append(dataset[idx]["input"])
modified_captions.append(dataset[idx]["output"])
edited_images.append(edited_image)
```
To measure the directional similarity, we first load CLIP's image and text encoders:
```python
from transformers import (
CLIPTokenizer,
CLIPTextModelWithProjection,
CLIPVisionModelWithProjection,
CLIPImageProcessor,
)
clip_id = "openai/clip-vit-large-patch14"
tokenizer = CLIPTokenizer.from_pretrained(clip_id)
text_encoder = CLIPTextModelWithProjection.from_pretrained(clip_id).to("cuda")
image_processor = CLIPImageProcessor.from_pretrained(clip_id)
image_encoder = CLIPVisionModelWithProjection.from_pretrained(clip_id).to("cuda")
```
Notice that we are using a particular CLIP checkpoint, i.e., `openai/clip-vit-large-patch14`. This is because the Stable Diffusion pre-training was performed with this CLIP variant. For more details, refer to the [documentation](https://huggingface.co/docs/transformers/model_doc/clip).
Next, we prepare a PyTorch `nn.Module` to compute directional similarity:
```python
import torch.nn as nn
import torch.nn.functional as F
class DirectionalSimilarity(nn.Module):
def __init__(self, tokenizer, text_encoder, image_processor, image_encoder):
super().__init__()
self.tokenizer = tokenizer
self.text_encoder = text_encoder
self.image_processor = image_processor
self.image_encoder = image_encoder
def preprocess_image(self, image):
image = self.image_processor(image, return_tensors="pt")["pixel_values"]
return {"pixel_values": image.to("cuda")}
def tokenize_text(self, text):
inputs = self.tokenizer(
text,
max_length=self.tokenizer.model_max_length,
padding="max_length",
truncation=True,
return_tensors="pt",
)
return {"input_ids": inputs.input_ids.to("cuda")}
def encode_image(self, image):
preprocessed_image = self.preprocess_image(image)
image_features = self.image_encoder(**preprocessed_image).image_embeds
image_features = image_features / image_features.norm(dim=1, keepdim=True)
return image_features
def encode_text(self, text):
tokenized_text = self.tokenize_text(text)
text_features = self.text_encoder(**tokenized_text).text_embeds
text_features = text_features / text_features.norm(dim=1, keepdim=True)
return text_features
def compute_directional_similarity(self, img_feat_one, img_feat_two, text_feat_one, text_feat_two):
sim_direction = F.cosine_similarity(img_feat_two - img_feat_one, text_feat_two - text_feat_one)
return sim_direction
def forward(self, image_one, image_two, caption_one, caption_two):
img_feat_one = self.encode_image(image_one)
img_feat_two = self.encode_image(image_two)
text_feat_one = self.encode_text(caption_one)
text_feat_two = self.encode_text(caption_two)
directional_similarity = self.compute_directional_similarity(
img_feat_one, img_feat_two, text_feat_one, text_feat_two
)
return directional_similarity
```
Let's put `DirectionalSimilarity` to use now.
```python
dir_similarity = DirectionalSimilarity(tokenizer, text_encoder, image_processor, image_encoder)
scores = []
for i in range(len(input_images)):
original_image = input_images[i]
original_caption = original_captions[i]
edited_image = edited_images[i]
modified_caption = modified_captions[i]
similarity_score = dir_similarity(original_image, edited_image, original_caption, modified_caption)
scores.append(float(similarity_score.detach().cpu()))
print(f"CLIP directional similarity: {np.mean(scores)}")
# CLIP directional similarity: 0.0797976553440094
```
Like the CLIP Score, the higher the CLIP directional similarity, the better it is.
It should be noted that the `StableDiffusionInstructPix2PixPipeline` exposes two arguments, namely `image_guidance_scale` and `guidance_scale`, that let you control the quality of the final edited image. We encourage you to experiment with these two arguments and see the impact of that on the directional similarity.
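As a hypothetical starting point (the values below are examples only), you could re-run an edit with different scales and re-compute the metric:
```python
# Higher image_guidance_scale keeps the edit closer to the input image.
edited_image = instruct_pix2pix_pipeline(
    dataset[0]["edit"],
    image=dataset[0]["image"],
    image_guidance_scale=1.5,
    guidance_scale=7.5,
    generator=generator,
    output_type="np",
).images[0]
```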
We can extend the idea of this metric to measure how similar the original image and edited version are. To do that, we can just do `F.cosine_similarity(img_feat_two, img_feat_one)`. For these kinds of edits, we would still want the primary semantics of the images to be preserved as much as possible, i.e., a high similarity score.
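A minimal sketch, reusing the `dir_similarity` module's encoders from above on the first sample:
```python
# Cosine similarity between the original and edited image embeddings
# (higher means the edit preserved more of the original image's semantics).
img_feat_one = dir_similarity.encode_image(input_images[0])
img_feat_two = dir_similarity.encode_image(edited_images[0])
image_image_similarity = F.cosine_similarity(img_feat_two, img_feat_one)
print(float(image_image_similarity.detach().cpu()))
```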
We can use these metrics for similar pipelines such as the [`StableDiffusionPix2PixZeroPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/pix2pix_zero#diffusers.StableDiffusionPix2PixZeroPipeline).
<Tip>
Both CLIP score and CLIP direction similarity rely on the CLIP model, which can make the evaluations biased.
</Tip>
***Extending metrics like IS, FID (discussed later), or KID can be difficult*** when the model under evaluation was pre-trained on a large image-captioning dataset (such as the [LAION-5B dataset](https://laion.ai/blog/laion-5b/)). This is because underlying these metrics is an InceptionNet (pre-trained on the ImageNet-1k dataset) used for extracting intermediate image features. The pre-training dataset of Stable Diffusion may have limited overlap with the pre-training dataset of InceptionNet, so it is not a good candidate here for feature extraction.
***Using the above metrics helps evaluate models that are class-conditioned. For example, [DiT](https://huggingface.co/docs/diffusers/main/en/api/pipelines/dit). It was pre-trained conditioned on the ImageNet-1k classes.***
### Class-conditioned image generation
Class-conditioned generative models are usually pre-trained on a class-labeled dataset such as [ImageNet-1k](https://huggingface.co/datasets/imagenet-1k). Popular metrics for evaluating these models include Fréchet Inception Distance (FID), Kernel Inception Distance (KID), and Inception Score (IS). In this document, we focus on FID ([Heusel et al.](https://huggingface.co/papers/1706.08500)). We show how to compute it with the [`DiTPipeline`](https://huggingface.co/docs/diffusers/api/pipelines/dit), which uses the [DiT model](https://huggingface.co/papers/2212.09748) under the hood.
FID aims to measure how similar two datasets of images are. As per [this resource](https://mmgeneration.readthedocs.io/en/latest/quick_run.html#fid):
> Fréchet Inception Distance is a measure of similarity between two datasets of images. It was shown to correlate well with the human judgment of visual quality and is most often used to evaluate the quality of samples of Generative Adversarial Networks. FID is calculated by computing the Fréchet distance between two Gaussians fitted to feature representations of the Inception network.
These two datasets are essentially the dataset of real images and the dataset of fake images (generated images in our case). FID is usually calculated with two large datasets. However, for this document, we will work with two mini datasets.
Let's first download a few images from the ImageNet-1k training set:
```python
from zipfile import ZipFile
import requests
def download(url, local_filepath):
r = requests.get(url)
with open(local_filepath, "wb") as f:
f.write(r.content)
return local_filepath
dummy_dataset_url = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/sample-imagenet-images.zip"
local_filepath = download(dummy_dataset_url, dummy_dataset_url.split("/")[-1])
with ZipFile(local_filepath, "r") as zipper:
zipper.extractall(".")
```
```python
from PIL import Image
import os
import numpy as np
dataset_path = "sample-imagenet-images"
image_paths = sorted([os.path.join(dataset_path, x) for x in os.listdir(dataset_path)])
real_images = [np.array(Image.open(path).convert("RGB")) for path in image_paths]
```
These are 10 images from the following ImageNet-1k classes: "cassette_player", "chain_saw" (x2), "church", "gas_pump" (x3), "parachute" (x2), and "tench".
<p align="center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/real-images.png" alt="real-images"><br>
<em>Real images.</em>
</p>
Now that the images are loaded, let's apply some lightweight pre-processing on them to use them for FID calculation.
```python
from torchvision.transforms import functional as F
import torch
def preprocess_image(image):
image = torch.tensor(image).unsqueeze(0)
image = image.permute(0, 3, 1, 2) / 255.0
return F.center_crop(image, (256, 256))
real_images = torch.cat([preprocess_image(image) for image in real_images])
print(real_images.shape)
# torch.Size([10, 3, 256, 256])
```
We now load the [`DiTPipeline`](https://huggingface.co/docs/diffusers/api/pipelines/dit) to generate images conditioned on the above-mentioned classes.
```python
from diffusers import DiTPipeline, DPMSolverMultistepScheduler
dit_pipeline = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
dit_pipeline.scheduler = DPMSolverMultistepScheduler.from_config(dit_pipeline.scheduler.config)
dit_pipeline = dit_pipeline.to("cuda")
seed = 0
generator = torch.manual_seed(seed)
words = [
"cassette player",
"chainsaw",
"chainsaw",
"church",
"gas pump",
"gas pump",
"gas pump",
"parachute",
"parachute",
"tench",
]
class_ids = dit_pipeline.get_label_ids(words)
output = dit_pipeline(class_labels=class_ids, generator=generator, output_type="np")
fake_images = output.images
fake_images = torch.tensor(fake_images)
fake_images = fake_images.permute(0, 3, 1, 2)
print(fake_images.shape)
# torch.Size([10, 3, 256, 256])
```
Now, we can compute the FID using [`torchmetrics`](https://torchmetrics.readthedocs.io/).
```python
from torchmetrics.image.fid import FrechetInceptionDistance
fid = FrechetInceptionDistance(normalize=True)
fid.update(real_images, real=True)
fid.update(fake_images, real=False)
print(f"FID: {float(fid.compute())}")
# FID: 177.7147216796875
```
The lower the FID, the better it is. Several things can influence FID here:
- Number of images (both real and fake)
- Randomness induced in the diffusion process
- Number of inference steps in the diffusion process
- The scheduler being used in the diffusion process
For the last two points, it is, therefore, a good practice to run the evaluation across different seeds and inference steps, and then report an average result.
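For example, a rough sketch of averaging FID over a few seeds, mirroring the generation settings above:
```python
# Average FID over multiple seeds to reduce the variance induced by the diffusion process.
fid_values = []
for seed in (0, 1, 2):
    generator = torch.manual_seed(seed)
    output = dit_pipeline(class_labels=class_ids, generator=generator, output_type="np")
    fake_images = torch.tensor(output.images).permute(0, 3, 1, 2)

    fid = FrechetInceptionDistance(normalize=True)
    fid.update(real_images, real=True)
    fid.update(fake_images, real=False)
    fid_values.append(float(fid.compute()))

print(f"Mean FID over seeds: {sum(fid_values) / len(fid_values)}")
```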
<Tip warning={true}>
FID results tend to be fragile as they depend on a lot of factors:
* The specific Inception model used during computation.
* The implementation accuracy of the computation.
* The image format (not the same if we start from PNGs vs JPGs).
Keeping that in mind, FID is often most useful when comparing similar runs, but it is
hard to reproduce paper results unless the authors carefully disclose the FID
measurement code.
These points apply to other related metrics too, such as KID and IS.
</Tip>
As a final step, let's visually inspect the `fake_images`.
<p align="center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/fake-images.png" alt="fake-images"><br>
<em>Fake images.</em>
</p>
| diffusers/docs/source/en/conceptual/evaluation.md/0 | {
"file_path": "diffusers/docs/source/en/conceptual/evaluation.md",
"repo_id": "diffusers",
"token_count": 8470
} | 115 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Overview
> [!WARNING]
> Modular Diffusers is under active development and its API may change.
Modular Diffusers is a unified pipeline system that simplifies your workflow with *pipeline blocks*.
- Blocks are reusable and you only need to create new blocks that are unique to your pipeline.
- Blocks can be mixed and matched to adapt to or create a pipeline for a specific workflow or multiple workflows.
The Modular Diffusers docs are organized as shown below.
## Quickstart
- A [quickstart](./quickstart) demonstrating how to implement an example workflow with Modular Diffusers.
## ModularPipelineBlocks
- [States](./modular_diffusers_states) explains how data is shared and communicated between blocks and [`ModularPipeline`].
- [ModularPipelineBlocks](./pipeline_block) is the most basic unit of a [`ModularPipeline`] and this guide shows you how to create one.
- [SequentialPipelineBlocks](./sequential_pipeline_blocks) is a type of block that chains multiple blocks so they run one after another, passing data along the chain. This guide shows you how to create [`~modular_pipelines.SequentialPipelineBlocks`] and how they connect and work together.
- [LoopSequentialPipelineBlocks](./loop_sequential_pipeline_blocks) is a type of block that runs a series of blocks in a loop. This guide shows you how to create [`~modular_pipelines.LoopSequentialPipelineBlocks`].
- [AutoPipelineBlocks](./auto_pipeline_blocks) is a type of block that automatically chooses which blocks to run based on the input. This guide shows you how to create [`~modular_pipelines.AutoPipelineBlocks`].
## ModularPipeline
- [ModularPipeline](./modular_pipeline) shows you how to create and convert pipeline blocks into an executable [`ModularPipeline`].
- [ComponentsManager](./components_manager) shows you how to manage and reuse components across multiple pipelines.
- [Guiders](./guiders) shows you how to use different guidance methods in the pipeline. | diffusers/docs/source/en/modular_diffusers/overview.md/0 | {
"file_path": "diffusers/docs/source/en/modular_diffusers/overview.md",
"repo_id": "diffusers",
"token_count": 658
} | 116 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Compile and offloading quantized models
Optimizing models often involves trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it also increases memory consumption since it needs to store the outputs of intermediate attention layers. A more balanced optimization strategy combines quantizing a model, [torch.compile](./fp16#torchcompile) and various [offloading methods](./memory#offloading).
> [!TIP]
> Check the [torch.compile](./fp16#torchcompile) guide to learn more about compilation and how it can be applied here. For example, regional compilation can significantly reduce compilation time without giving up any speedups.
For image generation, combining quantization and [model offloading](./memory#model-offloading) can often give the best trade-off between quality, speed, and memory. Group offloading is not as effective for image generation because it is usually not possible to *fully* overlap data transfer if the compute kernel finishes faster. This results in some communication overhead between the CPU and GPU.
For video generation, combining quantization and [group-offloading](./memory#group-offloading) tends to be better because video models are more compute-bound.
The table below provides a comparison of optimization strategy combinations and their impact on latency and memory-usage for Flux.
| combination | latency (s) | memory-usage (GB) |
|---|---|---|
| quantization | 32.602 | 14.9453 |
| quantization, torch.compile | 25.847 | 14.9448 |
| quantization, torch.compile, model CPU offloading | 32.312 | 12.2369 |
<small>These results are benchmarked on Flux with a RTX 4090. The transformer and text_encoder components are quantized. Refer to the [benchmarking script](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d) if you're interested in evaluating your own model.</small>
This guide will show you how to compile and offload a quantized model with [bitsandbytes](../quantization/bitsandbytes#torchcompile). Make sure you are using [PyTorch nightly](https://pytorch.org/get-started/locally/) and the latest version of bitsandbytes.
```bash
pip install -U bitsandbytes
```
## Quantization and torch.compile
Start by [quantizing](../quantization/overview) a model to reduce the memory required for storage and [compiling](./fp16#torchcompile) it to accelerate inference.
Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `capture_dynamic_output_shape_ops = True` to handle dynamic outputs when compiling bitsandbytes models.
```py
import torch
from diffusers import DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig
torch._dynamo.config.capture_dynamic_output_shape_ops = True
# quantize
pipeline_quant_config = PipelineQuantizationConfig(
quant_backend="bitsandbytes_4bit",
quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
components_to_quantize=["transformer", "text_encoder_2"],
)
pipeline = DiffusionPipeline.from_pretrained(
"black-forest-labs/FLUX.1-dev",
quantization_config=pipeline_quant_config,
torch_dtype=torch.bfloat16,
).to("cuda")
# compile
pipeline.transformer.to(memory_format=torch.channels_last)
pipeline.transformer.compile(mode="max-autotune", fullgraph=True)
pipeline("""
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
).images[0]
```
## Quantization, torch.compile, and offloading
In addition to quantization and torch.compile, try offloading if you need to reduce memory-usage further. Offloading moves various layers or model components from the CPU to the GPU as needed for computations.
Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `cache_size_limit` during offloading to avoid excessive recompilation and set `capture_dynamic_output_shape_ops = True` to handle dynamic outputs when compiling bitsandbytes models.
<hfoptions id="offloading">
<hfoption id="model CPU offloading">
[Model CPU offloading](./memory#model-offloading) moves an individual pipeline component, like the transformer model, to the GPU when it is needed for computation. Otherwise, it is offloaded to the CPU.
```py
import torch
from diffusers import DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig
torch._dynamo.config.cache_size_limit = 1000
torch._dynamo.config.capture_dynamic_output_shape_ops = True
# quantize
pipeline_quant_config = PipelineQuantizationConfig(
quant_backend="bitsandbytes_4bit",
quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
components_to_quantize=["transformer", "text_encoder_2"],
)
pipeline = DiffusionPipeline.from_pretrained(
"black-forest-labs/FLUX.1-dev",
quantization_config=pipeline_quant_config,
torch_dtype=torch.bfloat16,
).to("cuda")
# model CPU offloading
pipeline.enable_model_cpu_offload()
# compile
pipeline.transformer.compile()
pipeline(
"cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain"
).images[0]
```
</hfoption>
<hfoption id="group offloading">
[Group offloading](./memory#group-offloading) moves the internal layers of an individual pipeline component, like the transformer model, to the GPU for computation and offloads it when it's not required. At the same time, it uses the [CUDA stream](./memory#cuda-stream) feature to prefetch the next layer for execution.
By overlapping computation and data transfer, it is faster than model CPU offloading while also saving memory.
```py
# pip install ftfy
import torch
from diffusers import AutoModel, DiffusionPipeline
from diffusers.hooks import apply_group_offloading
from diffusers.utils import export_to_video
from diffusers.quantizers import PipelineQuantizationConfig
from transformers import UMT5EncoderModel
torch._dynamo.config.cache_size_limit = 1000
torch._dynamo.config.capture_dynamic_output_shape_ops = True
# quantize
pipeline_quant_config = PipelineQuantizationConfig(
quant_backend="bitsandbytes_4bit",
quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
components_to_quantize=["transformer", "text_encoder"],
)
text_encoder = UMT5EncoderModel.from_pretrained(
"Wan-AI/Wan2.1-T2V-14B-Diffusers", subfolder="text_encoder", torch_dtype=torch.bfloat16
)
pipeline = DiffusionPipeline.from_pretrained(
"Wan-AI/Wan2.1-T2V-14B-Diffusers",
quantization_config=pipeline_quant_config,
torch_dtype=torch.bfloat16,
).to("cuda")
# group offloading
onload_device = torch.device("cuda")
offload_device = torch.device("cpu")
pipeline.transformer.enable_group_offload(
onload_device=onload_device,
offload_device=offload_device,
offload_type="leaf_level",
use_stream=True,
non_blocking=True
)
pipeline.vae.enable_group_offload(
onload_device=onload_device,
offload_device=offload_device,
offload_type="leaf_level",
use_stream=True,
non_blocking=True
)
apply_group_offloading(
pipeline.text_encoder,
onload_device=onload_device,
offload_type="leaf_level",
use_stream=True,
non_blocking=True
)
# compile
pipeline.transformer.compile()
prompt = """
The camera rushes from far to near in a low-angle shot,
revealing a white ferret on a log. It plays, leaps into the water, and emerges, as the camera zooms in
for a close-up. Water splashes berry bushes nearby, while moss, snow, and leaves blanket the ground.
Birch trees and a light blue sky frame the scene, with ferns in the foreground. Side lighting casts dynamic
shadows and warm highlights. Medium composition, front view, low angle, with depth of field.
"""
negative_prompt = """
Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality,
low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured,
misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards
"""
output = pipeline(
prompt=prompt,
negative_prompt=negative_prompt,
num_frames=81,
guidance_scale=5.0,
).frames[0]
export_to_video(output, "output.mp4", fps=16)
```
</hfoption>
</hfoptions> | diffusers/docs/source/en/optimization/speed-memory-optims.md/0 | {
"file_path": "diffusers/docs/source/en/optimization/speed-memory-optims.md",
"repo_id": "diffusers",
"token_count": 2839
} | 117 |
<!--Copyright 2025 Custom Diffusion authors The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Custom Diffusion
[Custom Diffusion](https://huggingface.co/papers/2212.04488) is a training technique for personalizing image generation models. Like Textual Inversion, DreamBooth, and LoRA, Custom Diffusion only requires a few (~4-5) example images. This technique works by only training weights in the cross-attention layers, and it uses a special word to represent the newly learned concept. Custom Diffusion is unique because it can also learn multiple concepts at the same time.
If you're training on a GPU with limited vRAM, you should try enabling xFormers with `--enable_xformers_memory_efficient_attention` for faster training with lower vRAM requirements (16GB). To save even more memory, add `--set_grads_to_none` in the training argument to set the gradients to `None` instead of zero (this option can cause some issues, so if you experience any, try removing this parameter).
This guide will explore the [train_custom_diffusion.py](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion/train_custom_diffusion.py) script to help you become more familiar with it, and how you can adapt it for your own use-case.
Before running the script, make sure you install the library from source:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Navigate to the example folder with the training script and install the required dependencies:
```bash
cd examples/custom_diffusion
pip install -r requirements.txt
pip install clip-retrieval
```
<Tip>
🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.
</Tip>
Initialize an 🤗 Accelerate environment:
```bash
accelerate config
```
To set up a default 🤗 Accelerate environment without choosing any configurations:
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell, like a notebook, you can use:
```py
from accelerate.utils import write_basic_config
write_basic_config()
```
Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.
<Tip>
The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion/train_custom_diffusion.py) and let us know if you have any questions or concerns.
</Tip>
## Script parameters
The training script contains all the parameters to help you customize your training run. These are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L319) function. The function comes with default values, but you can also set your own values in the training command if you'd like.
For example, to change the resolution of the input image:
```bash
accelerate launch train_custom_diffusion.py \
--resolution=256
```
Many of the basic parameters are described in the [DreamBooth](dreambooth#script-parameters) training guide, so this guide focuses on the parameters unique to Custom Diffusion:
- `--freeze_model`: freezes the key and value parameters in the cross-attention layer; the default is `crossattn_kv`, but you can set it to `crossattn` to train all the parameters in the cross-attention layer
- `--concepts_list`: to learn multiple concepts, provide a path to a JSON file containing the concepts
- `--modifier_token`: a special word used to represent the learned concept
- `--initializer_token`: a special word used to initialize the embeddings of the `modifier_token`
### Prior preservation loss
Prior preservation loss is a method that uses a model's own generated samples to help it learn how to generate more diverse images. Because these generated sample images belong to the same class as the images you provided, they help the model retain what it has learned about the class and how it can use what it already knows about the class to make new compositions.
Many of the parameters for prior preservation loss are described in the [DreamBooth](dreambooth#prior-preservation-loss) training guide.
### Regularization
Custom Diffusion includes training the target images with a small set of real images to prevent overfitting. As you can imagine, this can be easy to do when you're only training on a few images! Download 200 real images with `clip_retrieval`. The `class_prompt` should be the same category as the target images. These images are stored in `class_data_dir`.
```bash
python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200
```
To enable regularization, add the following parameters:
- `--with_prior_preservation`: whether to use prior preservation loss
- `--prior_loss_weight`: controls the influence of the prior preservation loss on the model
- `--real_prior`: whether to use a small set of real images to prevent overfitting
```bash
accelerate launch train_custom_diffusion.py \
--with_prior_preservation \
--prior_loss_weight=1.0 \
--class_data_dir="./real_reg/samples_cat" \
--class_prompt="cat" \
--real_prior=True \
```
## Training script
<Tip>
A lot of the code in the Custom Diffusion training script is similar to the [DreamBooth](dreambooth#training-script) script. This guide instead focuses on the code that is relevant to Custom Diffusion.
</Tip>
The Custom Diffusion training script has two dataset classes:
- [`CustomDiffusionDataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L165): preprocesses the images, class images, and prompts for training
- [`PromptDataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L148): prepares the prompts for generating class images
Next, the `modifier_token` is [added to the tokenizer](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L811), converted to token ids, and the token embeddings are resized to account for the new `modifier_token`. Then the `modifier_token` embeddings are initialized with the embeddings of the `initializer_token`. All parameters in the text encoder are frozen, except for the token embeddings since this is what the model is trying to learn to associate with the concepts.
```py
params_to_freeze = itertools.chain(
text_encoder.text_model.encoder.parameters(),
text_encoder.text_model.final_layer_norm.parameters(),
text_encoder.text_model.embeddings.position_embedding.parameters(),
)
freeze_params(params_to_freeze)
```
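For illustration, the token handling described above looks roughly like the following sketch (the token strings and variable names here are hypothetical, not the script's exact code):
```py
# Add the modifier token, resize the embedding matrix, and copy over the
# initializer token's embedding so training starts from a meaningful vector.
tokenizer.add_tokens(["<new1>"])  # hypothetical modifier token
modifier_token_id = tokenizer.convert_tokens_to_ids("<new1>")
initializer_token_id = tokenizer.convert_tokens_to_ids("ktn")  # hypothetical initializer token
text_encoder.resize_token_embeddings(len(tokenizer))

token_embeds = text_encoder.get_input_embeddings().weight.data
token_embeds[modifier_token_id] = token_embeds[initializer_token_id]
```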
Now you'll need to add the [Custom Diffusion weights](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L911C3-L911C3) to the attention layers. This is a really important step for getting the shape and size of the attention weights correct, and for setting the appropriate number of attention processors in each UNet block.
```py
st = unet.state_dict()
for name, _ in unet.attn_processors.items():
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
if name.startswith("mid_block"):
hidden_size = unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = unet.config.block_out_channels[block_id]
layer_name = name.split(".processor")[0]
weights = {
"to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"],
"to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"],
}
if train_q_out:
weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"]
weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"]
weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"]
if cross_attention_dim is not None:
custom_diffusion_attn_procs[name] = attention_class(
train_kv=train_kv,
train_q_out=train_q_out,
hidden_size=hidden_size,
cross_attention_dim=cross_attention_dim,
).to(unet.device)
custom_diffusion_attn_procs[name].load_state_dict(weights)
else:
custom_diffusion_attn_procs[name] = attention_class(
train_kv=False,
train_q_out=False,
hidden_size=hidden_size,
cross_attention_dim=cross_attention_dim,
)
del st
unet.set_attn_processor(custom_diffusion_attn_procs)
custom_diffusion_layers = AttnProcsLayers(unet.attn_processors)
```
The [optimizer](https://github.com/huggingface/diffusers/blob/84cd9e8d01adb47f046b1ee449fc76a0c32dc4e2/examples/custom_diffusion/train_custom_diffusion.py#L982) is initialized to update the cross-attention layer parameters:
```py
optimizer = optimizer_class(
itertools.chain(text_encoder.get_input_embeddings().parameters(), custom_diffusion_layers.parameters())
if args.modifier_token is not None
else custom_diffusion_layers.parameters(),
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
```
In the [training loop](https://github.com/huggingface/diffusers/blob/84cd9e8d01adb47f046b1ee449fc76a0c32dc4e2/examples/custom_diffusion/train_custom_diffusion.py#L1048), it is important to only update the embeddings for the concept you're trying to learn. This means setting the gradients of all the other token embeddings to zero:
```py
if args.modifier_token is not None:
if accelerator.num_processes > 1:
grads_text_encoder = text_encoder.module.get_input_embeddings().weight.grad
else:
grads_text_encoder = text_encoder.get_input_embeddings().weight.grad
index_grads_to_zero = torch.arange(len(tokenizer)) != modifier_token_id[0]
for i in range(len(modifier_token_id[1:])):
index_grads_to_zero = index_grads_to_zero & (
torch.arange(len(tokenizer)) != modifier_token_id[i]
)
grads_text_encoder.data[index_grads_to_zero, :] = grads_text_encoder.data[
index_grads_to_zero, :
].fill_(0)
```
## Launch the script
Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script!
In this guide, you'll download and use these example [cat images](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip). You can also create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide).
Set the environment variable `MODEL_NAME` to a model id on the Hub or a path to a local model, `INSTANCE_DIR` to the path where you just downloaded the cat images to, and `OUTPUT_DIR` to where you want to save the model. You'll use `<new1>` as the special word to tie the newly learned embeddings to. The script creates and saves model checkpoints and a pytorch_custom_diffusion_weights.bin file to your repository.
To monitor training progress with Weights and Biases, add the `--report_to=wandb` parameter to the training command and specify a validation prompt with `--validation_prompt`. This is useful for debugging and saving intermediate results.
<Tip>
If you're training on human faces, the Custom Diffusion team has found the following parameters to work well:
- `--learning_rate=5e-6`
- `--max_train_steps` can be anywhere between 1000 and 2000
- `--freeze_model=crossattn`
- use at least 15-20 images to train with
</Tip>
<hfoptions id="training-inference">
<hfoption id="single concept">
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
export INSTANCE_DIR="./data/cat"
accelerate launch train_custom_diffusion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--output_dir=$OUTPUT_DIR \
--class_data_dir=./real_reg/samples_cat/ \
--with_prior_preservation \
--real_prior \
--prior_loss_weight=1.0 \
--class_prompt="cat" \
--num_class_images=200 \
--instance_prompt="photo of a <new1> cat" \
--resolution=512 \
--train_batch_size=2 \
--learning_rate=1e-5 \
--lr_warmup_steps=0 \
--max_train_steps=250 \
--scale_lr \
--hflip \
--modifier_token "<new1>" \
--validation_prompt="<new1> cat sitting in a bucket" \
--report_to="wandb" \
--push_to_hub
```
</hfoption>
<hfoption id="multiple concepts">
Custom Diffusion can also learn multiple concepts if you provide a [JSON](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with some details about each concept it should learn.
Run clip-retrieval to collect some real images to use for regularization:
```bash
pip install clip-retrieval
python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200
```
Then you can launch the script:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
accelerate launch train_custom_diffusion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--output_dir=$OUTPUT_DIR \
--concepts_list=./concept_list.json \
--with_prior_preservation \
--real_prior \
--prior_loss_weight=1.0 \
--resolution=512 \
--train_batch_size=2 \
--learning_rate=1e-5 \
--lr_warmup_steps=0 \
--max_train_steps=500 \
--num_class_images=200 \
--scale_lr \
--hflip \
--modifier_token "<new1>+<new2>" \
--push_to_hub
```
</hfoption>
</hfoptions>
Once training is finished, you can use your new Custom Diffusion model for inference.
<hfoptions id="training-inference">
<hfoption id="single concept">
```py
import torch
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16,
).to("cuda")
pipeline.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
pipeline.load_textual_inversion("path-to-save-model", weight_name="<new1>.bin")
image = pipeline(
"<new1> cat sitting in a bucket",
num_inference_steps=100,
guidance_scale=6.0,
eta=1.0,
).images[0]
image.save("cat.png")
```
</hfoption>
<hfoption id="multiple concepts">
```py
import torch
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16,
).to("cuda")
model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"
pipeline.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
pipeline.load_textual_inversion(model_id, weight_name="<new1>.bin")
pipeline.load_textual_inversion(model_id, weight_name="<new2>.bin")
image = pipeline(
"the <new1> cat sculpture in the style of a <new2> wooden pot",
num_inference_steps=100,
guidance_scale=6.0,
eta=1.0,
).images[0]
image.save("multi-subject.png")
```
</hfoption>
</hfoptions>
## Next steps
Congratulations on training a model with Custom Diffusion! 🎉 To learn more:
- Read the [Multi-Concept Customization of Text-to-Image Diffusion](https://www.cs.cmu.edu/~custom-diffusion/) blog post to learn more details about the experimental results from the Custom Diffusion team. | diffusers/docs/source/en/training/custom_diffusion.md/0 | {
"file_path": "diffusers/docs/source/en/training/custom_diffusion.md",
"repo_id": "diffusers",
"token_count": 5514
} | 118 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
[[open-in-colab]]
# Train a diffusion model
Unconditional image generation is a popular application of diffusion models that generates images that look like those in the dataset used for training. Typically, the best results are obtained from finetuning a pretrained model on a specific dataset. You can find many of these checkpoints on the [Hub](https://huggingface.co/search/full-text?q=unconditional-image-generation&type=model), but if you can't find one you like, you can always train your own!
This tutorial will teach you how to train a [`UNet2DModel`] from scratch on a subset of the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset to generate your own 🦋 butterflies 🦋.
<Tip>
💡 This training tutorial is based on the [Training with 🧨 Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) notebook. For additional details and context about diffusion models like how they work, check out the notebook!
</Tip>
Before you begin, make sure you have 🤗 Datasets installed to load and preprocess image datasets, and 🤗 Accelerate, to simplify training on any number of GPUs. The following command will also install [TensorBoard](https://www.tensorflow.org/tensorboard) to visualize training metrics (you can also use [Weights & Biases](https://docs.wandb.ai/) to track your training).
```py
# uncomment to install the necessary libraries in Colab
#!pip install diffusers[training]
```
We encourage you to share your model with the community, and in order to do that, you'll need to log in to your Hugging Face account (create one [here](https://hf.co/join) if you don't already have one!). You can log in from a notebook and enter your token when prompted. Make sure your token has the write role.
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
Or log in from the terminal:
```bash
hf auth login
```
Since the model checkpoints are quite large, install [Git-LFS](https://git-lfs.com/) to version these large files:
```bash
!sudo apt -qq install git-lfs
!git config --global credential.helper store
```
## Training configuration
For convenience, create a `TrainingConfig` class containing the training hyperparameters (feel free to adjust them):
```py
>>> from dataclasses import dataclass
>>> @dataclass
... class TrainingConfig:
... image_size = 128 # the generated image resolution
... train_batch_size = 16
... eval_batch_size = 16 # how many images to sample during evaluation
... num_epochs = 50
... gradient_accumulation_steps = 1
... learning_rate = 1e-4
... lr_warmup_steps = 500
... save_image_epochs = 10
... save_model_epochs = 30
... mixed_precision = "fp16" # `no` for float32, `fp16` for automatic mixed precision
... output_dir = "ddpm-butterflies-128" # the model name locally and on the HF Hub
... push_to_hub = True # whether to upload the saved model to the HF Hub
... hub_model_id = "<your-username>/<my-awesome-model>" # the name of the repository to create on the HF Hub
... hub_private_repo = None
... overwrite_output_dir = True # overwrite the old model when re-running the notebook
... seed = 0
>>> config = TrainingConfig()
```
## Load the dataset
You can easily load the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset with the 🤗 Datasets library:
```py
>>> from datasets import load_dataset
>>> config.dataset_name = "huggan/smithsonian_butterflies_subset"
>>> dataset = load_dataset(config.dataset_name, split="train")
```
<Tip>
💡 You can find additional datasets from the [HugGan Community Event](https://huggingface.co/huggan) or you can use your own dataset by creating a local [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder). Set `config.dataset_name` to the repository id of the dataset if it is from the HugGan Community Event, or `imagefolder` if you're using your own images.
</Tip>
🤗 Datasets uses the [`~datasets.Image`] feature to automatically decode the image data and load it as a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html) which we can visualize:
```py
>>> import matplotlib.pyplot as plt
>>> fig, axs = plt.subplots(1, 4, figsize=(16, 4))
>>> for i, image in enumerate(dataset[:4]["image"]):
... axs[i].imshow(image)
... axs[i].set_axis_off()
>>> fig.show()
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_ds.png"/>
</div>
The images are all different sizes though, so you'll need to preprocess them first:
* `Resize` changes the image size to the one defined in `config.image_size`.
* `RandomHorizontalFlip` augments the dataset by randomly mirroring the images.
* `Normalize` is important to rescale the pixel values into a [-1, 1] range, which is what the model expects.
```py
>>> from torchvision import transforms
>>> preprocess = transforms.Compose(
... [
... transforms.Resize((config.image_size, config.image_size)),
... transforms.RandomHorizontalFlip(),
... transforms.ToTensor(),
... transforms.Normalize([0.5], [0.5]),
... ]
... )
```
Use 🤗 Datasets' [`~datasets.Dataset.set_transform`] method to apply the `preprocess` function on the fly during training:
```py
>>> def transform(examples):
... images = [preprocess(image.convert("RGB")) for image in examples["image"]]
... return {"images": images}
>>> dataset.set_transform(transform)
```
Feel free to visualize the images again to confirm that they've been resized. Now you're ready to wrap the dataset in a [DataLoader](https://pytorch.org/docs/stable/data#torch.utils.data.DataLoader) for training!
```py
>>> import torch
>>> train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True)
```
## Create a UNet2DModel
Pretrained models in 🧨 Diffusers are easily created from their model class with the parameters you want. For example, to create a [`UNet2DModel`]:
```py
>>> from diffusers import UNet2DModel
>>> model = UNet2DModel(
... sample_size=config.image_size, # the target image resolution
... in_channels=3, # the number of input channels, 3 for RGB images
... out_channels=3, # the number of output channels
... layers_per_block=2, # how many ResNet layers to use per UNet block
... block_out_channels=(128, 128, 256, 256, 512, 512), # the number of output channels for each UNet block
... down_block_types=(
... "DownBlock2D", # a regular ResNet downsampling block
... "DownBlock2D",
... "DownBlock2D",
... "DownBlock2D",
... "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention
... "DownBlock2D",
... ),
... up_block_types=(
... "UpBlock2D", # a regular ResNet upsampling block
... "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention
... "UpBlock2D",
... "UpBlock2D",
... "UpBlock2D",
... "UpBlock2D",
... ),
... )
```
It is often a good idea to quickly check the sample image shape matches the model output shape:
```py
>>> sample_image = dataset[0]["images"].unsqueeze(0)
>>> print("Input shape:", sample_image.shape)
Input shape: torch.Size([1, 3, 128, 128])
>>> print("Output shape:", model(sample_image, timestep=0).sample.shape)
Output shape: torch.Size([1, 3, 128, 128])
```
Great! Next, you'll need a scheduler to add some noise to the image.
## Create a scheduler
The scheduler behaves differently depending on whether you're using the model for training or inference. During inference, the scheduler generates an image from the noise. During training, the scheduler takes a model output - or a sample - from a specific point in the diffusion process and applies noise to the image according to a *noise schedule* and an *update rule*.
Let's take a look at the [`DDPMScheduler`] and use the `add_noise` method to add some random noise to the `sample_image` from before:
```py
>>> import torch
>>> from PIL import Image
>>> from diffusers import DDPMScheduler
>>> noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
>>> noise = torch.randn(sample_image.shape)
>>> timesteps = torch.LongTensor([50])
>>> noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps)
>>> Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0])
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/noisy_butterfly.png"/>
</div>
The training objective of the model is to predict the noise added to the image. The loss at this step can be calculated by:
```py
>>> import torch.nn.functional as F
>>> noise_pred = model(noisy_image, timesteps).sample
>>> loss = F.mse_loss(noise_pred, noise)
```
## Train the model
By now, you have most of the pieces to start training the model and all that's left is putting everything together.
First, you'll need an optimizer and a learning rate scheduler:
```py
>>> from diffusers.optimization import get_cosine_schedule_with_warmup
>>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
>>> lr_scheduler = get_cosine_schedule_with_warmup(
... optimizer=optimizer,
... num_warmup_steps=config.lr_warmup_steps,
... num_training_steps=(len(train_dataloader) * config.num_epochs),
... )
```
Then, you'll need a way to evaluate the model. For evaluation, you can use the [`DDPMPipeline`] to generate a batch of sample images and save it as a grid:
```py
>>> from diffusers import DDPMPipeline
>>> from diffusers.utils import make_image_grid
>>> import os
>>> def evaluate(config, epoch, pipeline):
... # Sample some images from random noise (this is the backward diffusion process).
... # The default pipeline output type is `List[PIL.Image]`
... images = pipeline(
... batch_size=config.eval_batch_size,
... generator=torch.Generator(device='cpu').manual_seed(config.seed), # Use a separate torch generator to avoid rewinding the random state of the main training loop
... ).images
... # Make a grid out of the images
... image_grid = make_image_grid(images, rows=4, cols=4)
... # Save the images
... test_dir = os.path.join(config.output_dir, "samples")
... os.makedirs(test_dir, exist_ok=True)
... image_grid.save(f"{test_dir}/{epoch:04d}.png")
```
Now you can wrap all these components together in a training loop with 🤗 Accelerate for easy TensorBoard logging, gradient accumulation, and mixed precision training. To upload the model to the Hub, write a function to get your repository name and information and then push it to the Hub.
<Tip>
💡 The training loop below may look intimidating and long, but it'll be worth it later when you launch your training in just one line of code! If you can't wait and want to start generating images, feel free to copy and run the code below. You can always come back and examine the training loop more closely later, like when you're waiting for your model to finish training. 🤗
</Tip>
```py
>>> from accelerate import Accelerator
>>> from huggingface_hub import create_repo, upload_folder
>>> from tqdm.auto import tqdm
>>> from pathlib import Path
>>> import os
>>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler):
... # Initialize accelerator and tensorboard logging
... accelerator = Accelerator(
... mixed_precision=config.mixed_precision,
... gradient_accumulation_steps=config.gradient_accumulation_steps,
... log_with="tensorboard",
... project_dir=os.path.join(config.output_dir, "logs"),
... )
... if accelerator.is_main_process:
... if config.output_dir is not None:
... os.makedirs(config.output_dir, exist_ok=True)
... if config.push_to_hub:
... repo_id = create_repo(
... repo_id=config.hub_model_id or Path(config.output_dir).name, exist_ok=True
... ).repo_id
... accelerator.init_trackers("train_example")
... # Prepare everything
... # There is no specific order to remember, you just need to unpack the
... # objects in the same order you gave them to the prepare method.
... model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
... model, optimizer, train_dataloader, lr_scheduler
... )
... global_step = 0
... # Now you train the model
... for epoch in range(config.num_epochs):
... progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process)
... progress_bar.set_description(f"Epoch {epoch}")
... for step, batch in enumerate(train_dataloader):
... clean_images = batch["images"]
... # Sample noise to add to the images
... noise = torch.randn(clean_images.shape, device=clean_images.device)
... bs = clean_images.shape[0]
... # Sample a random timestep for each image
... timesteps = torch.randint(
... 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device,
... dtype=torch.int64
... )
... # Add noise to the clean images according to the noise magnitude at each timestep
... # (this is the forward diffusion process)
... noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)
... with accelerator.accumulate(model):
... # Predict the noise residual
... noise_pred = model(noisy_images, timesteps, return_dict=False)[0]
... loss = F.mse_loss(noise_pred, noise)
... accelerator.backward(loss)
... if accelerator.sync_gradients:
... accelerator.clip_grad_norm_(model.parameters(), 1.0)
... optimizer.step()
... lr_scheduler.step()
... optimizer.zero_grad()
... progress_bar.update(1)
... logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
... progress_bar.set_postfix(**logs)
... accelerator.log(logs, step=global_step)
... global_step += 1
... # After each epoch you optionally sample some demo images with evaluate() and save the model
... if accelerator.is_main_process:
... pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler)
... if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1:
... evaluate(config, epoch, pipeline)
... if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1:
... if config.push_to_hub:
... upload_folder(
... repo_id=repo_id,
... folder_path=config.output_dir,
... commit_message=f"Epoch {epoch}",
... ignore_patterns=["step_*", "epoch_*"],
... )
... else:
... pipeline.save_pretrained(config.output_dir)
```
Phew, that was quite a bit of code! But you're finally ready to launch the training with 🤗 Accelerate's [`~accelerate.notebook_launcher`] function. Pass the function the training loop, all the training arguments, and the number of processes (you can change this value to the number of GPUs available to you) to use for training:
```py
>>> from accelerate import notebook_launcher
>>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler)
>>> notebook_launcher(train_loop, args, num_processes=1)
```
Once training is complete, take a look at the final 🦋 images 🦋 generated by your diffusion model!
```py
>>> import glob
>>> sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png"))
>>> Image.open(sample_images[-1])
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_final.png"/>
</div>
## Next steps
Unconditional image generation is one example of a task that can be trained. You can explore other tasks and training techniques by visiting the [🧨 Diffusers Training Examples](../training/overview) page. Here are some examples of what you can learn:
* [Textual Inversion](../training/text_inversion), an algorithm that teaches a model a specific visual concept and integrates it into the generated image.
* [DreamBooth](../training/dreambooth), a technique for generating personalized images of a subject given several input images of the subject.
* [Guide](../training/text2image) to finetuning a Stable Diffusion model on your own dataset.
* [Guide](../training/lora) to using LoRA, a memory-efficient technique for finetuning really large models faster.
| diffusers/docs/source/en/tutorials/basic_training.md/0 | {
"file_path": "diffusers/docs/source/en/tutorials/basic_training.md",
"repo_id": "diffusers",
"token_count": 6239
} | 119 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
[[open-in-colab]]
# Trajectory Consistency Distillation-LoRA
Trajectory Consistency Distillation (TCD) enables a model to generate higher quality and more detailed images with fewer steps. Moreover, owing to the effective error mitigation during the distillation process, TCD demonstrates superior performance even under conditions of large inference steps.
The major advantages of TCD are:
- Better than Teacher: TCD demonstrates superior generative quality at both small and large inference steps and exceeds the performance of [DPM-Solver++(2S)](../../api/schedulers/multistep_dpm_solver) with Stable Diffusion XL (SDXL). There is no additional discriminator or LPIPS supervision included during TCD training.
- Flexible Inference Steps: The inference steps for TCD sampling can be freely adjusted without adversely affecting the image quality.
- Freely change detail level: During inference, the level of detail in the image can be adjusted with a single hyperparameter, *gamma*.
> [!TIP]
> For more technical details of TCD, please refer to the [paper](https://huggingface.co/papers/2402.19159) or official [project page](https://mhh0318.github.io/tcd/).
For large models like SDXL, TCD is trained with [LoRA](https://huggingface.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) to reduce memory usage. This is also useful because you can reuse LoRAs between different finetuned models, as long as they share the same base model, without further training.
This guide will show you how to perform inference with TCD-LoRAs for a variety of tasks like text-to-image and inpainting, as well as how you can easily combine TCD-LoRAs with other adapters. Choose one of the supported base models and its corresponding TCD-LoRA checkpoint from the table below to get started.
| Base model | TCD-LoRA checkpoint |
|-------------------------------------------------------------------------------------------------|----------------------------------------------------------------|
| [stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) | [TCD-SD15](https://huggingface.co/h1t/TCD-SD15-LoRA) |
| [stable-diffusion-2-1-base](https://huggingface.co/stabilityai/stable-diffusion-2-1-base) | [TCD-SD21-base](https://huggingface.co/h1t/TCD-SD21-base-LoRA) |
| [stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) | [TCD-SDXL](https://huggingface.co/h1t/TCD-SDXL-LoRA) |
Make sure you have [PEFT](https://github.com/huggingface/peft) installed for better LoRA support.
```bash
pip install -U peft
```
## General tasks
In this guide, let's use the [`StableDiffusionXLPipeline`] and the [`TCDScheduler`]. Use the [`~StableDiffusionPipeline.load_lora_weights`] method to load the SDXL-compatible TCD-LoRA weights.
A few tips to keep in mind for TCD-LoRA inference are to:
- Keep the `num_inference_steps` between 4 and 50
- Set `eta` (used to control stochasticity at each step) between 0 and 1. You should use a higher `eta` when increasing the number of inference steps, but the downside is that a larger `eta` in [`TCDScheduler`] leads to blurrier images. A value of 0.3 is recommended to produce good results.
<hfoptions id="tasks">
<hfoption id="text-to-image">
```python
import torch
from diffusers import StableDiffusionXLPipeline, TCDScheduler
device = "cuda"
base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"
pipe = StableDiffusionXLPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to(device)
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(tcd_lora_id)
pipe.fuse_lora()
prompt = "Painting of the orange cat Otto von Garfield, Count of Bismarck-Schรถnhausen, Duke of Lauenburg, Minister-President of Prussia. Depicted wearing a Prussian Pickelhaube and eating his favorite meal - lasagna."
image = pipe(
prompt=prompt,
num_inference_steps=4,
guidance_scale=0,
eta=0.3,
generator=torch.Generator(device=device).manual_seed(0),
).images[0]
```

</hfoption>
<hfoption id="inpainting">
```python
import torch
from diffusers import AutoPipelineForInpainting, TCDScheduler
from diffusers.utils import load_image, make_image_grid
device = "cuda"
base_model_id = "diffusers/stable-diffusion-xl-1.0-inpainting-0.1"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"
pipe = AutoPipelineForInpainting.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to(device)
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(tcd_lora_id)
pipe.fuse_lora()
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = load_image(img_url).resize((1024, 1024))
mask_image = load_image(mask_url).resize((1024, 1024))
prompt = "a tiger sitting on a park bench"
image = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
num_inference_steps=8,
guidance_scale=0,
eta=0.3,
strength=0.99, # make sure to use `strength` below 1.0
generator=torch.Generator(device=device).manual_seed(0),
).images[0]
grid_image = make_image_grid([init_image, mask_image, image], rows=1, cols=3)
```

</hfoption>
</hfoptions>
## Community models
TCD-LoRA also works with many community finetuned models and plugins. For example, load the [animagine-xl-3.0](https://huggingface.co/cagliostrolab/animagine-xl-3.0) checkpoint which is a community finetuned version of SDXL for generating anime images.
```python
import torch
from diffusers import StableDiffusionXLPipeline, TCDScheduler
device = "cuda"
base_model_id = "cagliostrolab/animagine-xl-3.0"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"
pipe = StableDiffusionXLPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to(device)
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(tcd_lora_id)
pipe.fuse_lora()
prompt = "A man, clad in a meticulously tailored military uniform, stands with unwavering resolve. The uniform boasts intricate details, and his eyes gleam with determination. Strands of vibrant, windswept hair peek out from beneath the brim of his cap."
image = pipe(
prompt=prompt,
num_inference_steps=8,
guidance_scale=0,
eta=0.3,
generator=torch.Generator(device=device).manual_seed(0),
).images[0]
```

TCD-LoRA also supports other LoRAs trained on different styles. For example, let's load the [TheLastBen/Papercut_SDXL](https://huggingface.co/TheLastBen/Papercut_SDXL) LoRA and fuse it with the TCD-LoRA with the [`~loaders.UNet2DConditionLoadersMixin.set_adapters`] method.
> [!TIP]
> Check out the [Merge LoRAs](merge_loras) guide to learn more about efficient merging methods.
```python
import torch
from diffusers import StableDiffusionXLPipeline
from diffusers import TCDScheduler
device = "cuda"
base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"
styled_lora_id = "TheLastBen/Papercut_SDXL"
pipe = StableDiffusionXLPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to(device)
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(tcd_lora_id, adapter_name="tcd")
pipe.load_lora_weights(styled_lora_id, adapter_name="style")
pipe.set_adapters(["tcd", "style"], adapter_weights=[1.0, 1.0])
prompt = "papercut of a winter mountain, snow"
image = pipe(
prompt=prompt,
num_inference_steps=4,
guidance_scale=0,
eta=0.3,
generator=torch.Generator(device=device).manual_seed(0),
).images[0]
```

## Adapters
TCD-LoRA is very versatile, and it can be combined with other adapter types like ControlNets, IP-Adapter, and AnimateDiff.
<hfoptions id="adapters">
<hfoption id="ControlNet">
### Depth ControlNet
```python
import torch
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor, DPTForDepthEstimation
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
from diffusers.utils import load_image, make_image_grid
from diffusers import TCDScheduler
device = "cuda"
depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(device)
feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
def get_depth_map(image):
image = feature_extractor(images=image, return_tensors="pt").pixel_values.to(device)
with torch.no_grad(), torch.autocast(device):
depth_map = depth_estimator(image).predicted_depth
depth_map = torch.nn.functional.interpolate(
depth_map.unsqueeze(1),
size=(1024, 1024),
mode="bicubic",
align_corners=False,
)
depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)
depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)
depth_map = (depth_map - depth_min) / (depth_max - depth_min)
image = torch.cat([depth_map] * 3, dim=1)
image = image.permute(0, 2, 3, 1).cpu().numpy()[0]
image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))
return image
base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
controlnet_id = "diffusers/controlnet-depth-sdxl-1.0"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"
controlnet = ControlNetModel.from_pretrained(
controlnet_id,
torch_dtype=torch.float16,
variant="fp16",
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
base_model_id,
controlnet=controlnet,
torch_dtype=torch.float16,
variant="fp16",
)
pipe.enable_model_cpu_offload()
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(tcd_lora_id)
pipe.fuse_lora()
prompt = "stormtrooper lecture, photorealistic"
image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-depth/resolve/main/images/stormtrooper.png")
depth_image = get_depth_map(image)
controlnet_conditioning_scale = 0.5 # recommended for good generalization
image = pipe(
prompt,
image=depth_image,
num_inference_steps=4,
guidance_scale=0,
eta=0.3,
controlnet_conditioning_scale=controlnet_conditioning_scale,
generator=torch.Generator(device=device).manual_seed(0),
).images[0]
grid_image = make_image_grid([depth_image, image], rows=1, cols=2)
```

### Canny ControlNet
```python
import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
from diffusers.utils import load_image, make_image_grid
from diffusers import TCDScheduler
device = "cuda"
base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
controlnet_id = "diffusers/controlnet-canny-sdxl-1.0"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"
controlnet = ControlNetModel.from_pretrained(
controlnet_id,
torch_dtype=torch.float16,
variant="fp16",
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
base_model_id,
controlnet=controlnet,
torch_dtype=torch.float16,
variant="fp16",
)
pipe.enable_model_cpu_offload()
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(tcd_lora_id)
pipe.fuse_lora()
prompt = "ultrarealistic shot of a furry blue bird"
canny_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
controlnet_conditioning_scale = 0.5 # recommended for good generalization
image = pipe(
prompt,
image=canny_image,
num_inference_steps=4,
guidance_scale=0,
eta=0.3,
controlnet_conditioning_scale=controlnet_conditioning_scale,
generator=torch.Generator(device=device).manual_seed(0),
).images[0]
grid_image = make_image_grid([canny_image, image], rows=1, cols=2)
```

<Tip>
The inference parameters in this example might not work for all examples, so we recommend trying different values for the `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale`, and `cross_attention_kwargs` parameters and choosing the one that works best.
</Tip>
</hfoption>
<hfoption id="IP-Adapter">
This example shows how to use the TCD-LoRA with the [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter/tree/main) and SDXL.
```python
import torch
from diffusers import StableDiffusionXLPipeline
from diffusers.utils import load_image, make_image_grid
from ip_adapter import IPAdapterXL
from diffusers import TCDScheduler
device = "cuda"
base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
image_encoder_path = "sdxl_models/image_encoder"
ip_ckpt = "sdxl_models/ip-adapter_sdxl.bin"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"
pipe = StableDiffusionXLPipeline.from_pretrained(
base_model_path,
torch_dtype=torch.float16,
variant="fp16"
)
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(tcd_lora_id)
pipe.fuse_lora()
ip_model = IPAdapterXL(pipe, image_encoder_path, ip_ckpt, device)
ref_image = load_image("https://raw.githubusercontent.com/tencent-ailab/IP-Adapter/main/assets/images/woman.png").resize((512, 512))
prompt = "best quality, high quality, wearing sunglasses"
image = ip_model.generate(
pil_image=ref_image,
prompt=prompt,
scale=0.5,
num_samples=1,
num_inference_steps=4,
guidance_scale=0,
eta=0.3,
seed=0,
)[0]
grid_image = make_image_grid([ref_image, image], rows=1, cols=2)
```

</hfoption>
<hfoption id="AnimateDiff">
[`AnimateDiff`] allows animating images using Stable Diffusion models. TCD-LoRA can substantially accelerate the process without degrading image quality, and animations produced with TCD-LoRA and AnimateDiff tend to have a more lucid outcome.
```python
import torch
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
from diffusers import TCDScheduler
from diffusers.utils import export_to_gif
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5")
pipe = AnimateDiffPipeline.from_pretrained(
"frankjoshua/toonyou_beta6",
motion_adapter=adapter,
).to("cuda")
# set TCDScheduler
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
# load TCD LoRA
pipe.load_lora_weights("h1t/TCD-SD15-LoRA", adapter_name="tcd")
pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-in", weight_name="diffusion_pytorch_model.safetensors", adapter_name="motion-lora")
pipe.set_adapters(["tcd", "motion-lora"], adapter_weights=[1.0, 1.2])
prompt = "best quality, masterpiece, 1girl, looking at viewer, blurry background, upper body, contemporary, dress"
generator = torch.manual_seed(0)
frames = pipe(
prompt=prompt,
num_inference_steps=5,
guidance_scale=0,
cross_attention_kwargs={"scale": 1},
num_frames=24,
eta=0.3,
generator=generator
).frames[0]
export_to_gif(frames, "animation.gif")
```

</hfoption>
</hfoptions> | diffusers/docs/source/en/using-diffusers/inference_with_tcd_lora.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/inference_with_tcd_lora.md",
"repo_id": "diffusers",
"token_count": 6012
} | 120 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# JAX/Flax
[[open-in-colab]]
🤗 Diffusers supports Flax for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform. This guide shows you how to run inference with Stable Diffusion using JAX/Flax.
Before you begin, make sure you have the necessary libraries installed:
```py
# uncomment to install the necessary libraries in Colab
#!pip install -q jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy
#!pip install -q diffusers
```
You should also make sure you're using a TPU backend. While JAX does not run exclusively on TPUs, you'll get the best performance on a TPU because each server has 8 TPU accelerators working in parallel.
If you are running this guide in Colab, select *Runtime* in the menu above, select the option *Change runtime type*, and then select *TPU* under the *Hardware accelerator* setting. Import JAX and quickly check whether you're using a TPU:
```python
import jax
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
num_devices = jax.device_count()
device_type = jax.devices()[0].device_kind
print(f"Found {num_devices} JAX devices of type {device_type}.")
assert "TPU" in device_type, (
    "Available device is not a TPU, please select TPU from Runtime > Change runtime type > Hardware accelerator"
)
# Found 8 JAX devices of type Cloud TPU.
```
Great, now you can import the rest of the dependencies you'll need:
```python
import jax.numpy as jnp
from jax import pmap
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline
```
## Load a model
Flax is a functional framework, so models are stateless and parameters are stored outside of them. Loading a pretrained Flax pipeline returns *both* the pipeline and the model weights (or parameters). In this guide, you'll use `bfloat16`, a more efficient half-float type that is supported by TPUs (you can also use `float32` for full precision if you want).
```python
dtype = jnp.bfloat16
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
variant="bf16",
dtype=dtype,
)
```
## Inference
TPUs usually have 8 devices working in parallel, so let's use the same prompt for each device. This means you can perform inference on 8 devices at once, with each device generating one image. As a result, you'll get 8 images in the same amount of time it takes for one chip to generate a single image!
<Tip>
Learn more details in the [How does parallelization work?](#how-does-parallelization-work) section.
</Tip>
After replicating the prompt, get the tokenized text ids by calling the `prepare_inputs` function on the pipeline. The length of the tokenized text is set to 77 tokens as required by the configuration of the underlying CLIP text model.
```python
prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic"
prompt = [prompt] * jax.device_count()
prompt_ids = pipeline.prepare_inputs(prompt)
prompt_ids.shape
# (8, 77)
```
Model parameters and inputs have to be replicated across the 8 parallel devices. The parameters dictionary is replicated with [`flax.jax_utils.replicate`](https://flax.readthedocs.io/en/latest/api_reference/flax.jax_utils.html#flax.jax_utils.replicate) which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`.
```python
# parameters
p_params = replicate(params)
# arrays
prompt_ids = shard(prompt_ids)
prompt_ids.shape
# (8, 1, 77)
```
This shape means each one of the 8 devices receives as an input a `jnp` array with shape `(1, 77)`, where `1` is the batch size per device. On TPUs with sufficient memory, you could have a batch size larger than `1` if you want to generate multiple images (per chip) at once.
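As a rough sketch, a per-device batch size of 2 (an illustrative value — memory permitting) would simply mean preparing twice as many prompts before sharding:
```python
# hypothetical example: 2 prompts per device instead of 1
batch_per_device = 2
prompts = [prompt[0]] * jax.device_count() * batch_per_device  # reuse the prompt string from above
batched_ids = pipeline.prepare_inputs(prompts)  # (16, 77)
batched_ids = shard(batched_ids)  # (8, 2, 77): 8 devices x 2 prompts each
```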
Next, create a random number generator to pass to the generation function. This is standard procedure in Flax, which is very serious and opinionated about random numbers. All functions that deal with random numbers are expected to receive a generator to ensure reproducibility, even when you're training across multiple distributed devices.
The helper function below uses a seed to initialize a random number generator. As long as you use the same seed, you'll get the exact same results. Feel free to use different seeds when exploring results later in the guide.
```python
def create_key(seed=0):
return jax.random.PRNGKey(seed)
```
The helper function, or `rng`, is split 8 times so each device receives a different generator and generates a different image.
```python
rng = create_key(0)
rng = jax.random.split(rng, jax.device_count())
```
To take advantage of JAX's optimized speed on a TPU, pass `jit=True` to the pipeline to compile the JAX code into an efficient representation and to ensure the model runs in parallel across the 8 devices.
<Tip warning={true}>
You need to ensure all your inputs have the same shape in subsequent calls, otherwise JAX will need to recompile the code which is slower.
</Tip>
The first inference run takes more time because it needs to compile the code, but subsequent calls (even with different inputs) are much faster. For example, it took more than a minute to compile on a TPU v2-8, but then it takes about **7s** on a future inference run!
```py
%%time
images = pipeline(prompt_ids, p_params, rng, jit=True)[0]
# CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s
# Wall time: 1min 29s
```
The returned array has shape `(8, 1, 512, 512, 3)` which should be reshaped to remove the second dimension and get 8 images of `512 × 512 × 3`. Then you can use the [`~utils.numpy_to_pil`] function to convert the arrays into images.
```python
from diffusers.utils import make_image_grid
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
make_image_grid(images, rows=2, cols=4)
```

## Using different prompts
You don't necessarily have to use the same prompt on all devices. For example, to generate 8 different prompts:
```python
prompts = [
"Labrador in the style of Hokusai",
"Painting of a squirrel skating in New York",
"HAL-9000 in the style of Van Gogh",
"Times Square under water, with fish and a dolphin swimming around",
"Ancient Roman fresco showing a man working on his laptop",
"Close-up photograph of young black woman against urban background, high quality, bokeh",
"Armchair in the shape of an avocado",
"Clown astronaut in space, with Earth in the background",
]
prompt_ids = pipeline.prepare_inputs(prompts)
prompt_ids = shard(prompt_ids)
images = pipeline(prompt_ids, p_params, rng, jit=True).images
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
make_image_grid(images, 2, 4)
```

## How does parallelization work?
The Flax pipeline in 🤗 Diffusers automatically compiles the model and runs it in parallel on all available devices. Let's take a closer look at how that process works.
JAX parallelization can be done in multiple ways. The easiest one revolves around using the [`jax.pmap`](https://jax.readthedocs.io/en/latest/_autosummary/jax.pmap.html) function to achieve single-program multiple-data (SPMD) parallelization. It means running several copies of the same code, each on different data inputs. More sophisticated approaches are possible, and you can go over to the JAX [documentation](https://jax.readthedocs.io/en/latest/index.html) to explore this topic in more detail if you are interested!
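If you haven't used `pmap` before, here is a minimal, self-contained sketch of the SPMD idea (independent of the pipeline): the same function runs on every device, each on its own slice of the leading axis.
```python
import jax
import jax.numpy as jnp

# one row per device; pmap maps the function over the leading axis in parallel
xs = jnp.arange(jax.device_count() * 4).reshape(jax.device_count(), 4)
doubled = jax.pmap(lambda x: x * 2)(xs)
print(doubled.shape)  # (num_devices, 4)
```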
`jax.pmap` does two things:
1. Compiles (or "`jit`s") the code, similar to `jax.jit()`. Compilation doesn't happen when you call `pmap`; it happens only the first time the `pmap`ped function is called.
2. Ensures the compiled code runs in parallel on all available devices.
To demonstrate, call `pmap` on the pipeline's `_generate` method (this is a private method that generates images and may be renamed or removed in future releases of ๐ค Diffusers):
```python
p_generate = pmap(pipeline._generate)
```
After calling `pmap`, the prepared function `p_generate` will:
1. Make a copy of the underlying function, `pipeline._generate`, on each device.
2. Send each device a different portion of the input arguments (this is why it's necessary to call the *shard* function). In this case, `prompt_ids` has shape `(8, 1, 77)` so the array is split into 8 and each copy of `_generate` receives an input with shape `(1, 77)`.
The most important thing to pay attention to here is the batch size (1 in this example), and the input dimensions that make sense for your code. You don't have to change anything else to make the code work in parallel.
The first time you call the pipeline takes more time, but the calls afterward are much faster. The `block_until_ready` function is used to correctly measure inference time because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use that in your code; blocking occurs automatically when you want to use the result of a computation that has not yet been materialized.
```py
%%time
images = p_generate(prompt_ids, p_params, rng)
images = images.block_until_ready()
# CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s
# Wall time: 1min 15s
```
Check your image dimensions to see if they're correct:
```python
images.shape
# (8, 1, 512, 512, 3)
```
## Resources
To learn more about how JAX works with Stable Diffusion, you may be interested in reading:
* [Accelerating Stable Diffusion XL Inference with JAX on Cloud TPU v5e](https://hf.co/blog/sdxl_jax)
| diffusers/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md",
"repo_id": "diffusers",
"token_count": 3095
} | 121 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Stable diffusion XL
Stable Diffusion XL was proposed in [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://huggingface.co/papers/2307.01952) by Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach.
The abstract from the paper is:
*We present SDXL, a latent diffusion model for text-to-image synthesis. Compared to previous versions of Stable Diffusion, SDXL leverages a three times larger UNet backbone: the increase in model parameters comes from using more attention blocks and a larger cross-attention context, since SDXL uses a second text encoder. We design multiple novel conditioning schemes and train SDXL on multiple aspect ratios. We also introduce a refinement model that improves the visual fidelity of samples generated by SDXL using a post-hoc image-to-image technique. SDXL shows drastically improved performance compared to previous versions of Stable Diffusion and achieves results competitive with black-box state-of-the-art image generators.*
## Tips
- Stable Diffusion XL works especially well with images between 768 and 1024.
- Stable Diffusion XL can pass a different prompt to each of the text encoders it was trained on, as shown below. We can even pass different parts of the same prompt to the text encoders.
- The Stable Diffusion XL output image can be improved by making use of a refiner, as shown below.
### Available checkpoints:
- *Text-to-Image (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) with [`StableDiffusionXLPipeline`]
- *Image-to-Image / refiner (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-refiner-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) with [`StableDiffusionXLImg2ImgPipeline`]
## Usage example
Before using SDXL, make sure to install `transformers`, `accelerate`, `safetensors` and `invisible_watermark`.
You can install the libraries as follows:
```sh
pip install transformers
pip install accelerate
pip install safetensors
pip install invisible-watermark>=0.2.0
```
### Watermarker
We recommend adding an invisible watermark to images generated with Stable Diffusion XL, which can help downstream applications identify whether the content was machine-generated. To do so, install the [invisible_watermark library](https://pypi.org/project/invisible-watermark/) via:
```sh
pip install invisible-watermark>=0.2.0
```
Once the `invisible-watermark` library is installed, the watermarker will be used **by default**.
If you have other provisions for generating or deploying images safely, you can disable the watermarker as follows:
```py
pipe = StableDiffusionXLPipeline.from_pretrained(..., add_watermarker=False)
```
### Text-to-Image
For *text-to-image*, you can use SDXL as follows:
```py
from diffusers import StableDiffusionXLPipeline
import torch
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt=prompt).images[0]
```
### Image-to-image
For *image-to-image*, you can use SDXL as follows:
```py
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe = pipe.to("cuda")
url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
init_image = load_image(url).convert("RGB")
prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt, image=init_image).images[0]
```
### Inpainting
For *inpainting*, you can use SDXL as follows:
```py
import torch
from diffusers import StableDiffusionXLInpaintPipeline
from diffusers.utils import load_image
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = load_image(img_url).convert("RGB")
mask_image = load_image(mask_url).convert("RGB")
prompt = "A majestic tiger sitting on a bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80).images[0]
```
### Refining the image output
In addition to the [base model checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), Stable Diffusion XL also includes a [refiner checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) that is specialized in denoising low-noise-stage images to generate images with improved high-frequency quality. This refiner checkpoint can be used as a "second stage" pipeline after running the base checkpoint to improve image quality.
When using the refiner, you can easily:
- 1.) use the base model and refiner as an *ensemble of denoisers*, as first proposed in [eDiff-I](https://research.nvidia.com/labs/dir/eDiff-I/), or
- 2.) simply run the refiner in [SDEdit](https://huggingface.co/papers/2108.01073) fashion after the base model.
**Note**: The idea of using the SD-XL base and refiner as an ensemble of denoisers was first proposed by community contributors, who also helped shape the following `diffusers` implementation:
- [SytanSD](https://github.com/SytanSD)
- [bghira](https://github.com/bghira)
- [Birch-san](https://github.com/Birch-san)
- [AmericanPresidentJimmyCarter](https://github.com/AmericanPresidentJimmyCarter)
#### 1.) Ensemble of Denoisers
When using the base and refiner models as an ensemble of denoisers, the base model should serve as the expert for the high-noise diffusion stage and the refiner as the expert for the low-noise diffusion stage.
The advantage of 1.) over 2.) is that it is significantly faster overall since fewer denoising steps are needed. The drawback is that the output of the base model cannot be inspected on its own, as it still contains a significant amount of noise.
To use the base model and refiner as an ensemble of denoisers, we have to define the span of timesteps over which each of them should denoise: the high-noise stage (*i.e.* the base model) and the low-noise stage (*i.e.* the refiner model).
The interval is set with the base model's [`denoising_end`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.denoising_end) and the refiner model's [`denoising_start`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_start).
Both `denoising_end` and `denoising_start` should be passed as a float between 0 and 1.
When passed, they are defined as a proportion of the discrete timesteps defined by the model's scheduler.
Note that this will override `strength` if it is also declared, since the number of denoising steps is determined by the discrete timesteps the model was trained on and the declared fractional cutoff.
Let's look at an example.
First, load the two pipelines. The text encoders and variational autoencoder are the same, so you don't have to load them again for the refiner.
```py
from diffusers import DiffusionPipeline
import torch
base = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
base.to("cuda")
refiner = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0",
text_encoder_2=base.text_encoder_2,
vae=base.vae,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
)
refiner.to("cuda")
```
Now we define the number of inference steps and the point up to which the model should be run through the high-noise denoising stage (*i.e.* the base model).
```py
n_steps = 40
high_noise_frac = 0.8
```
The Stable Diffusion XL base model was trained on timesteps 0-999, and the Stable Diffusion XL refiner was finetuned from the base model on the low-noise timesteps 0-199. Therefore, we use the base model for the first 800 timesteps (high noise) and the refiner for the last 200 timesteps (low noise). Accordingly, `high_noise_frac` is set to 0.8, so that all steps 200-999 (the first 80% of the denoising timesteps) are performed by the base model and steps 0-199 (the last 20% of the denoising timesteps) are performed by the refiner model.
Remember, the denoising procedure starts at **high-value** (high-noise) timesteps and ends at **low-value** (low-noise) timesteps.
Let's run the two pipelines now. Make sure to set `denoising_end` and `denoising_start` to the same value and keep `num_inference_steps` constant. Also remember that the output of the base model should be in latent space:
```py
prompt = "A majestic lion jumping from a big stone at night"
image = base(
prompt=prompt,
num_inference_steps=n_steps,
denoising_end=high_noise_frac,
output_type="latent",
).images
image = refiner(
prompt=prompt,
num_inference_steps=n_steps,
denoising_start=high_noise_frac,
image=image,
).images[0]
```
Let's have a look at the images.
| Original image | Ensemble of Denoisers |
|---|---|
|  | 
If you had run the base model alone for the same 40 steps, the image would have lacked detail (for example, the lion's eyes and nose).
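For reference, a base-only run for that comparison could look like this (reusing `base`, `prompt`, and `n_steps` from above):
```py
image = base(prompt=prompt, num_inference_steps=n_steps).images[0]
```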
<Tip>
The ensemble-of-denoisers approach works well with all available schedulers!
</Tip>
#### 2.) Refining the image output from a fully denoised base image
In typical [`StableDiffusionImg2ImgPipeline`] fashion, the fully denoised image generated by the base model can be further improved using the [refiner checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0).
For this, you simply run the refiner as a normal image-to-image pipeline after the "base" text-to-image pipeline. You can leave the output of the base model in latent space.
```py
from diffusers import DiffusionPipeline
import torch
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
refiner = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0",
text_encoder_2=pipe.text_encoder_2,
vae=pipe.vae,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
)
refiner.to("cuda")
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt=prompt, output_type="latent").images[0]
image = refiner(prompt=prompt, image=image[None, :]).images[0]
```
| Original image | Refined image |
|---|---|
|  |  |
<Tip>
The refiner can also be used very well in an inpainting setting. To do so, just make sure you use the [`StableDiffusionXLInpaintPipeline`] class, as shown below.
</Tip>
To use the refiner for inpainting in the ensemble-of-denoisers setting, you can do the following:
```py
from diffusers import StableDiffusionXLInpaintPipeline
from diffusers.utils import load_image
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
refiner = StableDiffusionXLInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0",
text_encoder_2=pipe.text_encoder_2,
vae=pipe.vae,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
)
refiner.to("cuda")
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = load_image(img_url).convert("RGB")
mask_image = load_image(mask_url).convert("RGB")
prompt = "A majestic tiger sitting on a bench"
num_inference_steps = 75
high_noise_frac = 0.7
image = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
num_inference_steps=num_inference_steps,
denoising_start=high_noise_frac,
output_type="latent",
).images
image = refiner(
prompt=prompt,
image=image,
mask_image=mask_image,
num_inference_steps=num_inference_steps,
denoising_start=high_noise_frac,
).images[0]
```
To use the refiner for inpainting in the standard SDE-style setting, simply remove `denoising_end` and `denoising_start` and choose a smaller number of inference steps for the refiner.
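A minimal sketch of that standard setting, reusing the pipelines and images loaded above (the step counts are illustrative, not tuned values):
```py
image = pipe(
    prompt=prompt,
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=75,
).images[0]

image = refiner(
    prompt=prompt,
    image=image,
    mask_image=mask_image,
    num_inference_steps=30,
).images[0]
```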
### Loading single checkpoint files / original file format
You can load the original file format into the `diffusers` format with [`~diffusers.loaders.FromSingleFileMixin.from_single_file`]:
```py
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
import torch
pipe = StableDiffusionXLPipeline.from_single_file(
"./sd_xl_base_1.0.safetensors", torch_dtype=torch.float16
)
pipe.to("cuda")
refiner = StableDiffusionXLImg2ImgPipeline.from_single_file(
"./sd_xl_refiner_1.0.safetensors", torch_dtype=torch.float16
)
refiner.to("cuda")
```
### Optimizing memory with model offloading
If you run into out-of-memory errors, we recommend using [`StableDiffusionXLPipeline.enable_model_cpu_offload`]:
```diff
- pipe.to("cuda")
+ pipe.enable_model_cpu_offload()
```
and
```diff
- refiner.to("cuda")
+ refiner.enable_model_cpu_offload()
```
### Speeding up inference with `torch.compile`
You can speed up inference by using `torch.compile`, which typically gives roughly a 20% speed-up.
```diff
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+ refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
```
### Running with `torch < 2.0`
**Note**: If you want to run Stable Diffusion XL with `torch` versions below 2.0, please use xFormers attention:
```sh
pip install xformers
```
```diff
+pipe.enable_xformers_memory_efficient_attention()
+refiner.enable_xformers_memory_efficient_attention()
```
## StableDiffusionXLPipeline
[[autodoc]] StableDiffusionXLPipeline
- all
- __call__
## StableDiffusionXLImg2ImgPipeline
[[autodoc]] StableDiffusionXLImg2ImgPipeline
- all
- __call__
## StableDiffusionXLInpaintPipeline
[[autodoc]] StableDiffusionXLInpaintPipeline
- all
- __call__
### Passing a different prompt to each text encoder
Stable Diffusion XL was trained on two text encoders. The default behavior is to pass the same prompt to both. However, as [some users](https://github.com/huggingface/diffusers/issues/4004#issuecomment-1627764201) have noted, quality can improve when a different prompt is passed to each text encoder. To do so, pass `prompt_2` and `negative_prompt_2` in addition to `prompt` and `negative_prompt`. The original prompt (`prompt`) and negative prompt (`negative_prompt`) are then passed to `text_encoder` (in the official SDXL 0.9/1.0 releases this is [OpenAI CLIP-ViT/L-14](https://huggingface.co/openai/clip-vit-large-patch14)), while `prompt_2` and `negative_prompt_2` are passed to `text_encoder_2` (in the official SDXL 0.9/1.0 releases this is [OpenCLIP-ViT/bigG-14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
```py
from diffusers import StableDiffusionXLPipeline
import torch
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
# prompt is passed to OAI CLIP-ViT/L-14
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# prompt_2 is passed to OpenCLIP-ViT/bigG-14
prompt_2 = "monet painting"
image = pipe(prompt=prompt, prompt_2=prompt_2).images[0]
``` | diffusers/docs/source/ko/api/pipelines/stable_diffusion/stable_diffusion_xl.md/0 | {
"file_path": "diffusers/docs/source/ko/api/pipelines/stable_diffusion/stable_diffusion_xl.md",
"repo_id": "diffusers",
"token_count": 10964
} | 122 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Installing xFormers
We recommend using [xFormers](https://github.com/facebookresearch/xformers) for both inference and training.
In our own tests, the optimizations performed in the attention blocks yielded both faster speed and reduced memory consumption.
Starting with xFormers version `0.0.16`, released in January 2023, it can easily be installed from the pre-built pip wheels:
```bash
pip install xformers
```
<Tip>
The xFormers pip package requires a recent version of PyTorch (1.13.1 for xFormers 0.0.16). If you need to use an older version of PyTorch, we recommend installing xFormers from source following the [project instructions](https://github.com/facebookresearch/xformers#installing-xformers).
</Tip>
Once xFormers is installed, you can call `enable_xformers_memory_efficient_attention()` on a pipeline for faster inference and reduced memory consumption, as described [here](fp16#memory-efficient-attention).
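As a minimal sketch (the checkpoint below is only an example; any diffusers pipeline can be used the same way):
```py
import torch
from diffusers import DiffusionPipeline

# Example checkpoint; substitute whichever pipeline you are actually working with.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
)
pipe.to("cuda")
pipe.enable_xformers_memory_efficient_attention()

image = pipe("an astronaut riding a horse on the moon").images[0]
```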
<Tip warning={true}>
According to [this issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training (fine-tuning or DreamBooth) on some GPUs. If you run into this problem, install the development version referenced in that comment; one possible way is sketched after this tip.
</Tip>
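One way to get a development build is to install directly from the GitHub repository. This is only a sketch and an assumption about what the referenced comment suggests; it builds xFormers from source and typically requires a matching CUDA toolchain, so check the linked comment for the exact command recommended there.
```bash
pip install -v -U git+https://github.com/facebookresearch/xformers.git@main#egg=xformers
```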
| diffusers/docs/source/ko/optimization/xformers.md/0 | {
"file_path": "diffusers/docs/source/ko/optimization/xformers.md",
"repo_id": "diffusers",
"token_count": 1049
} | 123 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Overview
Welcome to 🧨 Diffusers! If you're new to diffusion models and generative AI and want to learn more, you've come to the right place. These tutorials are designed to give you a gentle introduction to diffusion models and to help you understand the library fundamentals (the core components and how 🧨 Diffusers is meant to be used).
You'll learn how to use a pipeline for inference to rapidly generate things, and then deconstruct that pipeline so you can use the library as a modular toolbox to build your own diffusion system. In the next lesson, you'll learn how to train your own diffusion model to generate what you want.
After completing the tutorials, you'll have gained the skills you need to start exploring the library on your own and apply it to your own projects and applications.
Feel free to join our community on [Discord](https://discord.com/invite/JfAtkvEtRb) or the [forums](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) to connect and collaborate with other users and developers!
Let's start diffusing! 🧨 | diffusers/docs/source/ko/tutorials/tutorial_overview.md/0 | {
"file_path": "diffusers/docs/source/ko/tutorials/tutorial_overview.md",
"repo_id": "diffusers",
"token_count": 1209
} | 124 |
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# 🧨 Stable Diffusion in JAX / Flax!
[[open-in-colab]]
🤗 Hugging Face [Diffusers](https://github.com/huggingface/diffusers) supports Flax since version 0.5.1! This allows for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform.
This notebook shows how to run inference using JAX / Flax. If you want more details about how Stable Diffusion works, or if you want to run it on GPU, please refer to [this notebook](https://huggingface.co/docs/diffusers/stable_diffusion).
First, make sure you are using a TPU backend. If you are running this notebook in Colab, select Runtime in the menu, then select the "Change runtime type" option, and choose TPU under the Hardware accelerator setting.
Note that JAX is not exclusive to TPUs, but you'll get the most benefit on that hardware because each TPU server has 8 TPU accelerators working in parallel.
## Setup
First, make sure diffusers is installed.
```bash
!pip install jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy
!pip install diffusers
```
```python
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
import jax
```
```python
num_devices = jax.device_count()
device_type = jax.devices()[0].device_kind
print(f"Found {num_devices} JAX devices of type {device_type}.")
assert (
"TPU" in device_type
), "Available device is not a TPU, please select TPU from Edit > Notebook settings > Hardware accelerator"
```
```python out
Found 8 JAX devices of type Cloud TPU.
```
Then we import all the dependencies.
```python
import numpy as np
import jax
import jax.numpy as jnp
from pathlib import Path
from jax import pmap
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from PIL import Image
from huggingface_hub import notebook_login
from diffusers import FlaxStableDiffusionPipeline
```
## Loading the model
TPU devices support `bfloat16`, an efficient half-float type. We'll use it for our tests, but you can also use `float32` for full precision instead.
```python
dtype = jnp.bfloat16
```
Flax is a functional framework, so models are stateless and parameters are stored outside of them. Loading the pre-trained Flax pipeline returns both the pipeline itself and the model weights (or parameters). We are using a bf16 version of the weights, which leads to type warnings that you can safely ignore.
```python
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
variant="bf16",
dtype=dtype,
)
```
## Inference
Since TPUs usually have 8 devices working in parallel, we'll replicate our prompt as many times as there are devices. Then we'll perform inference on the 8 devices at once, each responsible for generating one image. Thus, we'll get 8 images in the same amount of time it takes one chip to generate a single one.
After replicating the prompt, we obtain the tokenized text IDs by invoking the `prepare_inputs` function of the pipeline. The length of the tokenized text is set to 77 tokens, as required by the configuration of the underlying CLIP text model.
```python
prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic"
prompt = [prompt] * jax.device_count()
prompt_ids = pipeline.prepare_inputs(prompt)
prompt_ids.shape
```
```python out
(8, 77)
```
### Replication and parallelization
Model parameters and inputs have to be replicated across the 8 parallel devices. The parameters dictionary is replicated using `flax.jax_utils.replicate`, which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`.
```python
p_params = replicate(params)
```
```python
prompt_ids = shard(prompt_ids)
prompt_ids.shape
```
```python out
(8, 1, 77)
```
This shape means that each one of the 8 devices will receive as input a `jnp` array with shape `(1, 77)`, where `1` is the batch size per device. On TPUs with sufficient memory, it could be larger than `1` if we wanted to generate multiple images (per chip) at once.
We are almost ready to generate images! We just need to create a random number generator to pass to the generation function. This is the standard procedure in Flax, which is very serious and opinionated about random numbers: all functions that deal with random numbers are expected to receive a generator. This ensures reproducibility, even when we are training across multiple distributed devices.
The helper function below uses a seed to initialize a random number generator. As long as we use the same seed, we'll get exactly the same results. Feel free to use different seeds when exploring results later in the notebook.
```python
def create_key(seed=0):
    return jax.random.PRNGKey(seed)
```
We obtain the rng and then "split" it 8 times so each device receives a different generator. Therefore, each device will create a different image, and the full process is reproducible.
```python
rng = create_key(0)
rng = jax.random.split(rng, jax.device_count())
```
JAX code can be compiled into an efficient representation that runs very fast. However, we need to ensure that all inputs have the same shape in subsequent calls; otherwise, JAX has to recompile the code, and we wouldn't be able to take advantage of the optimized speed.
The Flax pipeline can compile the code for us if we pass `jit = True` as an argument. It also ensures that the model runs in parallel on the 8 available devices.
The first time we run the following cell it will take a long time to compile, but subsequent calls (even with different inputs) will be much faster. For example, it took more than a minute to compile on a TPU v2-8 when we tested, but subsequent inference runs take about 7 seconds.
```
%%time
images = pipeline(prompt_ids, p_params, rng, jit=True)[0]
```
```python out
CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s
Wall time: 1min 29s
```
The returned array has shape `(8, 1, 512, 512, 3)`. We reshape it to get rid of the second dimension and obtain 8 images of `512 × 512 × 3`, then convert them to PIL.
```python
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
```
### Visualization
Let's create a helper function to display the images in a grid.
```python
def image_grid(imgs, rows, cols):
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
```
```python
image_grid(images, 2, 4)
```

## Using different prompts
We don't have to replicate the same prompt on all devices. We can do whatever we want: generate 2 prompts 4 times each, or even generate 8 different prompts at once. Let's do that!
First, we'll refactor the input preparation code into a handy function:
```python
prompts = [
"Labrador in the style of Hokusai",
"Painting of a squirrel skating in New York",
"HAL-9000 in the style of Van Gogh",
"Times Square under water, with fish and a dolphin swimming around",
"Ancient Roman fresco showing a man working on his laptop",
"Close-up photograph of young black woman against urban background, high quality, bokeh",
"Armchair in the shape of an avocado",
"Clown astronaut in space, with Earth in the background",
]
```
```python
prompt_ids = pipeline.prepare_inputs(prompts)
prompt_ids = shard(prompt_ids)
images = pipeline(prompt_ids, p_params, rng, jit=True).images
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
image_grid(images, 2, 4)
```

## How does parallelization work?
We said before that the `diffusers` Flax pipeline automatically compiles the model and runs it in parallel on all available devices. We'll now briefly look inside that process to show how it works.
JAX parallelization can be done in multiple ways. The easiest one revolves around using the `jax.pmap` function to achieve single-program, multiple-data (SPMD) parallelization: we run several copies of the same code, each on different data inputs. More sophisticated approaches are possible; if you are interested, we invite you to explore this topic in the [JAX documentation](https://jax.readthedocs.io/en/latest/index.html) and the [`pjit` pages](https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html?highlight=pjit)!
`jax.pmap` does two things for us:
- Compiles (or `jit`s) the code, as if we had invoked `jax.jit()`. This does not happen when we call `pmap`, but the first time the pmapped function is invoked.
- Ensures the compiled code runs in parallel on all available devices.
To show how it works, we `pmap` the `_generate` method of the pipeline, the private method that runs image generation. Please note that this method may be renamed or removed in future releases of `diffusers`.
```python
p_generate = pmap(pipeline._generate)
```
After using `pmap`, the prepared function `p_generate` will conceptually do the following:
* Invoke a copy of the underlying function `pipeline._generate` on each device.
* Send each device a different portion of the input arguments. That's what sharding is used for. In our case, `prompt_ids` has shape `(8, 1, 77)`; the array is split into 8 parts, and each copy of `_generate` receives an input with shape `(1, 77)`.
We can write `_generate` completely ignoring the fact that it will be invoked in parallel. We just care about the batch size (`1` in this example) and the dimensions that make sense for our code, and we don't have to change anything for it to work in parallel.
As with the pipeline call, the first time we run the following cell it will take a while, but subsequent runs will be much faster.
```
%%time
images = p_generate(prompt_ids, p_params, rng)
images = images.block_until_ready()
images.shape
```
```python out
CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s
Wall time: 1min 15s
```
```python
images.shape
```
```python out
(8, 1, 512, 512, 3)
```
We use `block_until_ready()` to correctly measure inference time because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use it in your own code; blocking happens automatically when you use the result of a computation that has not yet materialized. | diffusers/docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.md/0 | {
"file_path": "diffusers/docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.md",
"repo_id": "diffusers",
"token_count": 8474
} | 125 |
# Hybrid Inference API Reference
## Remote decoding
[[autodoc]] utils.remote_utils.remote_decode
## Remote encoding
[[autodoc]] utils.remote_utils.remote_encode | diffusers/docs/source/zh/hybrid_inference/api_reference.md/0 | {
"file_path": "diffusers/docs/source/zh/hybrid_inference/api_reference.md",
"repo_id": "diffusers",
"token_count": 83
} | 126 |