# Generated content DO NOT EDIT
from .. import onnx

ONNXModel = onnx.ONNXModel
ONNXTensorDescription = onnx.ONNXTensorDescription
candle/candle-pyo3/py_src/candle/onnx/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/onnx/__init__.py", "repo_id": "candle", "token_count": 46 }
57
import candle
from candle import Tensor
from candle.nn import Linear


def test_linear_layer_can_be_constructed():
    linear = Linear(10, 10)
    assert linear is not None


def test_linear_layer_can_forward_a_singular_input():
    linear = Linear(384, 1536)
    input_tensor = candle.randn((8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (8, 1536)


def test_linear_layer_can_forward_a_batched_input():
    linear = Linear(384, 1536)
    input_tensor = candle.randn((16, 8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (16, 8, 1536)


def test_quantized_linear_layer_can_forward_a_singular_input():
    linear = Linear(384, 1536)
    linear.weight = linear.weight.quantize("q4_0")
    input_tensor = candle.randn((8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (8, 1536)


def test_quantized_linear_layer_can_forward_a_batched_input():
    linear = Linear(384, 1536)
    linear.weight = linear.weight.quantize("q4_0")
    input_tensor = candle.randn((16, 8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (16, 8, 1536)
candle/candle-pyo3/tests/bindings/test_linear.py/0
{ "file_path": "candle/candle-pyo3/tests/bindings/test_linear.py", "repo_id": "candle", "token_count": 431 }
58
//! Implementation of the ChatGLM2/3 models from THUDM. //! //! - 💻 [Github](https://github.com/THUDM/ChatGLM3) ChatGLM3: Advancing Multilingual Conversational Language Models with High-Quality Data //! - 💻 [Github](https://github.com/THUDM/ChatGLM2-6B) ChatGLM2-6B. //! use crate::models::with_tracing::{linear_b as linear, Linear}; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug, Clone)] pub struct Config { pub num_layers: usize, pub padded_vocab_size: usize, pub hidden_size: usize, pub ffn_hidden_size: usize, pub kv_channels: usize, pub num_attention_heads: usize, pub seq_length: usize, pub layernorm_epsilon: f64, pub rmsnorm: bool, pub apply_residual_connection_post_layernorm: bool, pub post_layer_norm: bool, pub add_bias_linear: bool, pub add_qkv_bias: bool, pub bias_dropout_fusion: bool, pub multi_query_attention: bool, pub multi_query_group_num: usize, pub apply_query_key_layer_scaling: bool, pub attention_softmax_in_fp32: bool, pub fp32_residual_connection: bool, } impl Config { pub fn glm3_6b() -> Self { Self { num_layers: 28, padded_vocab_size: 65024, hidden_size: 4096, ffn_hidden_size: 13696, kv_channels: 128, num_attention_heads: 32, seq_length: 8192, layernorm_epsilon: 1e-5, rmsnorm: true, apply_residual_connection_post_layernorm: false, post_layer_norm: true, add_bias_linear: false, add_qkv_bias: true, bias_dropout_fusion: true, multi_query_attention: true, multi_query_group_num: 2, apply_query_key_layer_scaling: true, attention_softmax_in_fp32: true, fp32_residual_connection: false, } } } #[derive(Debug, Clone)] struct RotaryEmbedding { cache: Tensor, } impl RotaryEmbedding { fn new(cfg: &Config, dtype: DType, dev: &Device) -> Result<Self> { let rotary_dim = cfg.kv_channels; let n_elem = rotary_dim / 2; let inv_freq: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / 10_000f64.powf(i as f64 / n_elem as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, cfg.seq_length as u32, dev)? .to_dtype(dtype)? .reshape((cfg.seq_length, 1))?; let freqs = t.matmul(&inv_freq)?; let cache = Tensor::stack(&[&freqs.cos()?, &freqs.sin()?], D::Minus1)?; Ok(Self { cache }) } fn apply(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (seqlen, _b, np, _hn) = xs.dims4()?; let cache = self.cache.narrow(0, seqlen_offset, seqlen)?; let rot_dim = cache.dim(D::Minus2)? * 2; let (xs, xs_pass) = ( xs.narrow(D::Minus1, 0, rot_dim)?, xs.narrow(D::Minus1, rot_dim, rot_dim)?, ); let xshaped = xs.reshape((seqlen, (), np, rot_dim / 2, 2))?; let cache = cache.reshape((seqlen, (), 1, rot_dim / 2, 2))?; let (xshaped0, xshaped1) = ( xshaped.i((.., .., .., .., 0))?, xshaped.i((.., .., .., .., 1))?, ); let (cache0, cache1) = (cache.i((.., .., .., .., 0))?, cache.i((.., .., .., .., 1))?); let xs_out = Tensor::stack( &[ (xshaped0.broadcast_mul(&cache0)? - xshaped1.broadcast_mul(&cache1)?)?, (xshaped1.broadcast_mul(&cache0)? 
+ xshaped0.broadcast_mul(&cache1)?)?, ], D::Minus1, )?; let xs_out = xs_out.flatten_from(3)?; Tensor::cat(&[xs_out, xs_pass], D::Minus1) } } #[derive(Debug, Clone)] struct CoreAttention { coeff: Option<f64>, norm_factor: f64, } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } impl CoreAttention { fn new(layer_number: usize, cfg: &Config) -> Result<Self> { let norm_factor = (cfg.kv_channels as f64).sqrt(); let (norm_factor, coeff) = if cfg.apply_query_key_layer_scaling { let coeff = f64::max(1.0, layer_number as f64); (norm_factor * coeff, Some(coeff)) } else { (norm_factor, None) }; Ok(Self { coeff, norm_factor }) } fn forward( &self, query_layer: &Tensor, key_layer: &Tensor, value_layer: &Tensor, attention_mask: &Option<Tensor>, ) -> Result<Tensor> { let output_size = ( query_layer.dim(1)?, // b query_layer.dim(2)?, // np query_layer.dim(0)?, // sq key_layer.dim(0)?, // sk ); let query_layer = query_layer.reshape((output_size.2, output_size.0 * output_size.1, ()))?; let key_layer = key_layer.reshape((output_size.3, output_size.0 * output_size.1, ()))?; let matmul_result = Tensor::matmul( &query_layer.transpose(0, 1)?, &key_layer.transpose(0, 1)?.transpose(1, 2)?, )?; let matmul_result = (matmul_result / self.norm_factor)?.reshape(output_size)?; let matmul_result = match self.coeff { None => matmul_result, Some(coeff) => (matmul_result * coeff)?, }; let attention_scores = match attention_mask { Some(mask) => masked_fill( &matmul_result, &mask.broadcast_left((matmul_result.dim(0)?, matmul_result.dim(1)?))?, f32::NEG_INFINITY, )?, None => matmul_result, }; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; let output_size = ( value_layer.dim(1)?, value_layer.dim(2)?, query_layer.dim(0)?, value_layer.dim(3)?, ); let value_layer = value_layer.reshape((value_layer.dim(0)?, output_size.0 * output_size.1, ()))?; let attention_probs = attention_probs.reshape((output_size.0 * output_size.1, output_size.2, ()))?; let context_layer = Tensor::matmul(&attention_probs, &value_layer.transpose(0, 1)?)?; let context_layer = context_layer.reshape(output_size)?; let context_layer = context_layer.permute((2, 0, 1, 3))?.contiguous()?; context_layer.flatten_from(D::Minus2) } } #[derive(Debug, Clone)] struct SelfAttention { query_key_value: Linear, core_attention: CoreAttention, dense: Linear, multi_query_attention: bool, num_attention_heads_per_partition: usize, num_multi_query_groups_per_partition: usize, hidden_size_per_attention_head: usize, kv_cache: Option<(Tensor, Tensor)>, } impl SelfAttention { fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let projection_size = cfg.kv_channels * cfg.num_attention_heads; let hidden_size_per_attention_head = projection_size / cfg.num_attention_heads; let qkv_hidden_size = if cfg.multi_query_attention { projection_size + 2 * hidden_size_per_attention_head * cfg.multi_query_group_num } else { 3 * projection_size }; let query_key_value = linear( cfg.hidden_size, qkv_hidden_size, cfg.add_bias_linear || cfg.add_qkv_bias, vb.pp("query_key_value"), )?; let core_attention = CoreAttention::new(layer_number, cfg)?; let dense = linear( cfg.hidden_size, cfg.hidden_size, cfg.add_bias_linear, vb.pp("dense"), )?; Ok(Self { query_key_value, core_attention, dense, multi_query_attention: cfg.multi_query_attention, 
num_attention_heads_per_partition: cfg.num_attention_heads, num_multi_query_groups_per_partition: cfg.multi_query_group_num, hidden_size_per_attention_head: cfg.kv_channels, kv_cache: None, }) } fn reset_kv_cache(&mut self) { self.kv_cache = None } fn forward( &mut self, xs: &Tensor, attention_mask: &Option<Tensor>, rotary_emb: &RotaryEmbedding, ) -> Result<Tensor> { let mixed_x_layer = xs.apply(&self.query_key_value)?; if !self.multi_query_attention { candle::bail!("only multi_query_attention=true is supported") } let hpa = self.hidden_size_per_attention_head; let query_layer = mixed_x_layer.narrow(D::Minus1, 0, self.num_attention_heads_per_partition * hpa)?; let key_layer = mixed_x_layer.narrow( D::Minus1, self.num_attention_heads_per_partition * hpa, self.num_multi_query_groups_per_partition * hpa, )?; let value_layer = mixed_x_layer.narrow( D::Minus1, self.num_attention_heads_per_partition * hpa + self.num_multi_query_groups_per_partition * hpa, self.num_multi_query_groups_per_partition * hpa, )?; let query_layer = query_layer.reshape(( query_layer.dim(0)?, query_layer.dim(1)?, self.num_attention_heads_per_partition, hpa, ))?; let key_layer = key_layer.reshape(( key_layer.dim(0)?, key_layer.dim(1)?, self.num_multi_query_groups_per_partition, hpa, ))?; let value_layer = value_layer.reshape(( value_layer.dim(0)?, value_layer.dim(1)?, self.num_multi_query_groups_per_partition, hpa, ))?; // Rotary embeddings. let seqlen_offset = match &self.kv_cache { None => 0, Some((prev_k, _)) => prev_k.dim(0)?, }; let query_layer = rotary_emb.apply(&query_layer, seqlen_offset)?; let key_layer = rotary_emb.apply(&key_layer, seqlen_offset)?; // KV cache. let (key_layer, value_layer) = match &self.kv_cache { None => (key_layer, value_layer), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &key_layer], 0)?; let v = Tensor::cat(&[prev_v, &value_layer], 0)?; (k, v) } }; self.kv_cache = Some((key_layer.clone(), value_layer.clone())); // Repeat KV. let ratio = self.num_attention_heads_per_partition / self.num_multi_query_groups_per_partition; let key_layer = { let (d0, d1, d2, d3) = key_layer.dims4()?; key_layer .unsqueeze(D::Minus2)? .expand((d0, d1, d2, ratio, d3))? .reshape(( d0, d1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ))? }; let value_layer = { let (d0, d1, d2, d3) = value_layer.dims4()?; value_layer .unsqueeze(D::Minus2)? .expand((d0, d1, d2, ratio, d3))? .reshape(( d0, d1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ))? }; let context_layer = self.core_attention .forward(&query_layer, &key_layer, &value_layer, attention_mask)?; let output = context_layer.apply(&self.dense)?; Ok(output) } } #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone)] struct MLP { dense_h_to_4h: Linear, dense_4h_to_h: Linear, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense_h_to_4h = linear( cfg.hidden_size, cfg.ffn_hidden_size * 2, cfg.add_bias_linear, vb.pp("dense_h_to_4h"), )?; let dense_4h_to_h = linear( cfg.ffn_hidden_size, cfg.hidden_size, cfg.add_bias_linear, vb.pp("dense_4h_to_h"), )?; Ok(Self { dense_4h_to_h, dense_h_to_4h, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense_h_to_4h)? .apply(&candle_nn::Activation::Swiglu)? 
.apply(&self.dense_4h_to_h) } } #[derive(Debug, Clone)] struct Block { input_layernorm: candle_nn::LayerNorm, self_attention: SelfAttention, post_attention_layernorm: candle_nn::LayerNorm, mlp: MLP, apply_residual_connection_post_layernorm: bool, } impl Block { fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let input_layernorm = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("input_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("input_layernorm"), )? }; let post_attention_layernorm = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("post_attention_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("post_attention_layernorm"), )? }; let self_attention = SelfAttention::new(layer_number, cfg, vb.pp("self_attention"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { input_layernorm, self_attention, post_attention_layernorm, mlp, apply_residual_connection_post_layernorm: cfg.apply_residual_connection_post_layernorm, }) } fn reset_kv_cache(&mut self) { self.self_attention.reset_kv_cache() } fn forward( &mut self, xs: &Tensor, attention_mask: &Option<Tensor>, rotary_emb: &RotaryEmbedding, ) -> Result<Tensor> { let layernorm_output = xs.apply(&self.input_layernorm)?; let attention_output = self.self_attention .forward(&layernorm_output, attention_mask, rotary_emb)?; let residual = if self.apply_residual_connection_post_layernorm { &layernorm_output } else { xs }; let layernorm_input = (residual + attention_output)?; let layernorm_output = layernorm_input.apply(&self.post_attention_layernorm)?; let mlp_output = layernorm_output.apply(&self.mlp)?; let residual = if self.apply_residual_connection_post_layernorm { &layernorm_output } else { &layernorm_input }; mlp_output + residual } } #[derive(Debug, Clone)] struct Transformer { layers: Vec<Block>, final_layernorm: Option<candle_nn::LayerNorm>, rotary_emb: RotaryEmbedding, } impl Transformer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_l = vb.pp("layers"); let mut layers = Vec::with_capacity(cfg.num_layers); for layer_index in 0..cfg.num_layers { let block = Block::new(layer_index + 1, cfg, vb_l.pp(layer_index))?; layers.push(block) } let final_layernorm = if cfg.post_layer_norm { let ln = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("final_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("final_layernorm"), )? }; Some(ln) } else { None }; let rotary_emb = RotaryEmbedding::new(cfg, vb.dtype(), vb.device())?; Ok(Self { layers, final_layernorm, rotary_emb, }) } fn reset_kv_cache(&mut self) { for block in self.layers.iter_mut() { block.reset_kv_cache() } } fn forward(&mut self, xs: &Tensor, attention_mask: &Option<Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for block in self.layers.iter_mut() { xs = block.forward(&xs, attention_mask, &self.rotary_emb)? 
} match self.final_layernorm.as_ref() { None => Ok(xs), Some(ln) => xs.apply(ln), } } } #[derive(Debug, Clone)] struct Embedding { word_embeddings: candle_nn::Embedding, fp32_residual_connection: bool, } impl Embedding { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let word_embeddings = candle_nn::embedding( cfg.padded_vocab_size, cfg.hidden_size, vb.pp("word_embeddings"), )?; Ok(Self { word_embeddings, fp32_residual_connection: cfg.fp32_residual_connection, }) } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.word_embeddings.forward(xs)?.transpose(0, 1)?; // b,s,h -> s,b,h if self.fp32_residual_connection { xs.to_dtype(candle::DType::F32) } else { xs.contiguous() } } } #[derive(Debug, Clone)] pub struct Model { embedding: Embedding, encoder: Transformer, output_layer: Linear, } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("transformer"); let embedding = Embedding::new(cfg, vb.pp("embedding"))?; let encoder = Transformer::new(cfg, vb.pp("encoder"))?; let output_layer = linear( cfg.hidden_size, cfg.padded_vocab_size, false, vb.pp("output_layer"), )?; Ok(Self { embedding, encoder, output_layer, }) } pub fn reset_kv_cache(&mut self) { self.encoder.reset_kv_cache() } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let (_b_size, seq_len) = xs.dims2()?; let input_embeds = xs.apply(&self.embedding)?; let attention_mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.device())?) }; let xs = self.encoder.forward(&input_embeds, &attention_mask)?; let lm_logits = xs.i(seq_len - 1)?.apply(&self.output_layer)?; Ok(lm_logits) } }
candle/candle-transformers/src/models/chatglm.rs/0
{ "file_path": "candle/candle-transformers/src/models/chatglm.rs", "repo_id": "candle", "token_count": 10447 }
59
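Because each `SelfAttention` block above keeps its own `kv_cache`, incremental decoding feeds the whole prompt once and then a single token per step. A minimal greedy-decoding sketch against this module; the weights path, the `greedy_decode` name, and argmax sampling are assumptions, while `Config::glm3_6b`, `Model::new`, `forward`, and `reset_kv_cache` come straight from the file:

```rust
use candle::{DType, Device, IndexOp, Tensor, D};
use candle_nn::VarBuilder;
use candle_transformers::models::chatglm::{Config, Model};

fn greedy_decode(weights: &str, prompt: &[u32], steps: usize) -> candle::Result<Vec<u32>> {
    let device = Device::Cpu;
    // Hypothetical checkpoint; Model::new expects names under a "transformer" prefix.
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &device)? };
    let mut model = Model::new(&Config::glm3_6b(), vb)?;
    let mut tokens = prompt.to_vec();
    // First call sees the full prompt; later calls pass one token and rely
    // on the KV cache kept inside each SelfAttention block.
    let mut input = Tensor::new(prompt, &device)?.unsqueeze(0)?;
    for _ in 0..steps {
        let logits = model.forward(&input)?; // (batch, vocab), last position only
        let next = logits.i(0)?.argmax(D::Minus1)?.to_scalar::<u32>()?;
        tokens.push(next);
        input = Tensor::new(&[next], &device)?.unsqueeze(0)?;
    }
    model.reset_kv_cache(); // clear state before reusing the model
    Ok(tokens)
}
```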
//! Implementation of the DINOv2 models from Meta Research. //! //! This module implements the DINOv2 vision transformer model from Meta AI Research. //! DINOv2 is a self-supervised learning model that can learn visual features //! without using any labeled data. See: ["DINOv2: Learning Robust Visual Features without Supervision"](https://github.com/facebookresearch/dinov2) //! //! ## Running an example with color map and CUDA //! //! ```bash //! cargo run \ //! --features cuda,depth_anything_v2 \ //! --package candle-examples \ //! --example depth_anything_v2 \ //! -- --color-map \ //! --image candle-examples/examples/yolo-v8/assets/bike.jpg //! ``` //! //! ## Running as an ImageNet classifier //! //! The model returns the probability for the image to belong to each of the 1000 ImageNet categories. //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width=640> //! </div> //! //! ```bash //! cargo run \ //! --example dinov2 \ //! --release \ //! -- --image candle-examples/examples/yolo-v8/assets/bike.jpg //! //! > mountain bike, all-terrain bike, off-roader: 43.67% //! > bicycle-built-for-two, tandem bicycle, tandem: 33.20% //! > crash helmet : 13.23% //! > unicycle, monocycle : 2.44% //! > maillot : 2.42% //! ``` //! use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; const IMG_SIZE: usize = 518; const PATCH_SIZE: usize = 14; const NUM_CLASSES: usize = 1000; fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> { if bias { candle_nn::linear(in_dim, out_dim, vb) } else { candle_nn::linear_no_bias(in_dim, out_dim, vb) } } #[derive(Debug)] struct Attention { qkv: Linear, proj: Linear, num_heads: usize, scale: f64, } impl Attention { fn new( vb: VarBuilder, dim: usize, num_heads: usize, qkv_bias: bool, proj_bias: bool, ) -> Result<Self> { let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?; let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?; let scale = 1. / ((dim / num_heads) as f64).sqrt(); Ok(Self { qkv, proj, num_heads, scale, }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b, n, c) = xs.dims3()?; let qkv = self .qkv .forward(xs)? .reshape((b, n, 3, self.num_heads, c / self.num_heads))? .transpose(1, 2)? // 02134 .transpose(0, 1)? // 20134 .transpose(2, 3)?; // 20314 let q = (qkv.i(0)? 
* self.scale)?; let k = qkv.i(1)?.contiguous()?; let v = qkv.i(2)?.contiguous()?; let attn = candle_nn::ops::softmax(&q.matmul(&k.t()?)?, D::Minus1)?; let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?; self.proj.forward(&attn) } } #[derive(Debug)] struct LayerScale { gamma: Tensor, } impl LayerScale { fn new(vb: VarBuilder, dim: usize) -> Result<Self> { let gamma = vb.get(dim, "gamma")?; Ok(Self { gamma }) } } impl Module for LayerScale { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.broadcast_mul(&self.gamma) } } #[derive(Debug)] struct Mlp { fc1: Linear, fc2: Linear, } impl Mlp { fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> { let out_features = in_features; let fc1 = linear(vb.pp("fc1"), in_features, hidden_features, bias)?; let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?; Ok(Self { fc1, fc2 }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?.gelu()?; self.fc2.forward(&xs) } } #[derive(Debug)] struct Block { norm1: LayerNorm, attn: Attention, ls1: LayerScale, norm2: LayerNorm, mlp: Mlp, ls2: LayerScale, } impl Block { fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> { let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?; let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true)?; let ls1 = LayerScale::new(vb.pp("ls1"), dim)?; let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?; let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?; let ls2 = LayerScale::new(vb.pp("ls2"), dim)?; Ok(Self { norm1, attn, ls1, norm2, mlp, ls2, }) } } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = self .ls1 .forward(&self.attn.forward(&self.norm1.forward(xs)?)?)?; let xs = (xs + residual)?; let residual = &xs; let xs = self .ls2 .forward(&self.mlp.forward(&self.norm2.forward(&xs)?)?)?; xs + residual } } #[derive(Debug)] struct PatchEmbed { proj: candle_nn::Conv2d, patch_size: (usize, usize), num_patches: usize, } impl PatchEmbed { fn new( vb: VarBuilder, img_size: usize, patch_size: usize, in_chans: usize, embed_dim: usize, ) -> Result<Self> { let config = candle_nn::Conv2dConfig { stride: patch_size, ..Default::default() }; let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?; let num_patches = (img_size / patch_size) * (img_size / patch_size); Ok(Self { proj, patch_size: (patch_size, patch_size), num_patches, }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _c, h, w) = xs.dims4()?; let (patch_h, patch_w) = self.patch_size; if (h % patch_h) != 0 { candle::bail!("image height {h} is not a multiple of patch height {patch_h}") } if (w % patch_w) != 0 { candle::bail!("image width {w} is not a multiple of patch width {patch_w}") } let xs = self.proj.forward(xs)?; let (b, c, h, w) = xs.dims4()?; // flatten embeddings. 
xs.reshape((b, c, h * w))?.transpose(1, 2) } } #[derive(Debug)] pub struct DinoVisionTransformer { patch_embed: PatchEmbed, cls_token: Tensor, pos_embed: Tensor, blocks: Vec<Block>, norm: LayerNorm, head: Linear, } impl DinoVisionTransformer { pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> { let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?; let cls_token = vb.get((1, 1, embed_dim), "cls_token")?; let num_tokens = 1; let pos_embed = vb.get( (1, patch_embed.num_patches + num_tokens, embed_dim), "pos_embed", )?; let head = linear(vb.pp("head"), 2 * embed_dim, NUM_CLASSES, true)?; let norm = layer_norm(embed_dim, 1e-5, vb.pp("norm"))?; let vb_b = vb.pp("blocks"); let blocks = (0..depth) .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads)) .collect::<Result<Vec<_>>>()?; Ok(Self { patch_embed, cls_token, pos_embed, blocks, norm, head, }) } fn interpolate_pos_encoding(&self, xs: &Tensor, w: usize, h: usize) -> Result<Tensor> { let npatch = xs.dim(1)? - 1; let n = self.pos_embed.dim(1)? - 1; let sqrt_n = (n as f64).sqrt(); if npatch == n && w == h { return Ok(xs.clone()); } let class_pos_embed = self.pos_embed.i((.., ..1))?; let patch_pos_embed = self.pos_embed.i((.., 1..))?; let dim = xs.dim(D::Minus1)?; let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1); let patch_pos_embed = patch_pos_embed .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))? .transpose(2, 3)? .transpose(1, 2)?; // This uses bicubic interpolation in the original implementation. let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?; let el_count = patch_pos_embed.shape().elem_count(); let patch_pos_embed = patch_pos_embed .transpose(1, 2)? .transpose(2, 3)? .reshape((1, el_count / dim, dim))?; Tensor::cat(&[&class_pos_embed, &patch_pos_embed], 1) } fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _nc, w, h) = xs.dims4()?; let xs = self.patch_embed.forward(xs)?; let xs = Tensor::cat(&[&self.cls_token, &xs], 1)?; &xs + &self.interpolate_pos_encoding(&xs, w, h)? } fn get_intermediate_layers_not_chunked( &self, xs: &Tensor, blocks_to_take: &[usize], ) -> Result<Vec<Tensor>> { let mut xs = self.prepare_tokens_with_mask(xs)?; let mut output = Vec::new(); for (i, blk) in self.blocks.iter().enumerate() { xs = blk.forward(&xs)?; if blocks_to_take.contains(&i) { output.push(xs.clone()); } } if output.len() != blocks_to_take.len() { candle::bail!( "only {} / {} blocks found", output.len(), blocks_to_take.len() ); } Ok(output) } pub fn get_intermediate_layers( &self, xs: &Tensor, blocks_to_take: &[usize], reshape: bool, return_class_token: bool, norm: bool, ) -> Result<Tensor> { let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?; let outputs = if norm { outputs .iter() .map(|out| self.norm.forward(out)) .collect::<Result<Vec<_>>>()? } else { outputs }; let class_tokens = outputs .iter() .map(|out| out.i((.., 0))) .collect::<Result<Vec<_>>>()?; let outputs = outputs .iter() .map(|out| out.i((.., 1..))) .collect::<Result<Vec<_>>>()?; let outputs = if reshape { let (b, _c, w, h) = xs.dims4()?; let patch_size = self.patch_embed.patch_size.0; let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size)); outputs .iter() .map(|out| { out.reshape((b, w / patch_size, h / patch_size, num_channels))? .transpose(2, 3)? .transpose(1, 2) }) .collect::<Result<Vec<_>>>()? 
} else { outputs }; let outputs = if return_class_token { outputs .iter() .zip(class_tokens.iter()) .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1)) .collect::<Result<Vec<_>>>()? } else { outputs }; Tensor::stack(&outputs[..], 0) } } impl Module for DinoVisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.prepare_tokens_with_mask(xs)?; for blk in self.blocks.iter() { xs = blk.forward(&xs)? } let xs = self.norm.forward(&xs)?; let xs_norm_clstoken = xs.i((.., 0))?; let xs_norm_patchtokens = xs.i((.., 1..))?.mean(1)?; let xs = Tensor::cat(&[xs_norm_clstoken, xs_norm_patchtokens], D::Minus1)?; self.head.forward(&xs) } } pub fn vit_small(vb: VarBuilder) -> Result<DinoVisionTransformer> { DinoVisionTransformer::new(vb, 12, 384, 6) }
candle/candle-transformers/src/models/dinov2.rs/0
{ "file_path": "candle/candle-transformers/src/models/dinov2.rs", "repo_id": "candle", "token_count": 6312 }
60
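`DinoVisionTransformer` implements `Module`, so the ImageNet-classifier path is a single forward pass over a `(batch, 3, 518, 518)` tensor. A minimal sketch, assuming a hypothetical safetensors checkpoint for the ViT-S/14 variant (here a random input stands in for a preprocessed image):

```rust
use candle::{DType, Device, Tensor, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::dinov2;

fn main() -> candle::Result<()> {
    let device = Device::Cpu;
    // Placeholder checkpoint name; any ViT-S/14 weights in candle layout work.
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["dinov2_vits14.safetensors"], DType::F32, &device)?
    };
    let model = dinov2::vit_small(vb)?; // depth 12, embed_dim 384, 6 heads
    // IMG_SIZE = 518 and PATCH_SIZE = 14 as defined in the module above.
    let img = Tensor::randn(0f32, 1f32, (1, 3, 518, 518), &device)?;
    let logits = model.forward(&img)?; // (1, 1000) ImageNet logits
    let probs = candle_nn::ops::softmax(&logits, D::Minus1)?;
    println!("{:?}", probs.dims()); // [1, 1000]
    Ok(())
}
```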
//! Gemma LLM architecture (Google) inference implementation. //! //! See ["Introducing Gemma 3: The most capable model you can run on a single GPU or TPU"](https://blog.google/technology/developers/gemma-3/) //! //! Based on implementations from HuggingFace transformers. use std::sync::Arc; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{linear_b as linear, Activation, Linear, VarBuilder}; #[derive(serde::Deserialize, Debug, Clone)] pub struct Config { pub attention_bias: bool, pub head_dim: usize, pub hidden_activation: Activation, pub hidden_size: usize, pub intermediate_size: usize, pub num_attention_heads: usize, pub num_hidden_layers: usize, pub num_key_value_heads: usize, pub rms_norm_eps: f64, pub rope_theta: f64, pub rope_local_base_freq: f64, pub vocab_size: usize, pub final_logit_softcapping: Option<f64>, pub attn_logit_softcapping: Option<f64>, pub query_pre_attn_scalar: usize, pub sliding_window: usize, pub sliding_window_pattern: usize, pub max_position_embeddings: usize, } #[derive(Debug, Clone)] struct RmsNorm { weight: Tensor, eps: f64, } impl RmsNorm { fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let weight = vb.get(dim, "weight")?; Ok(Self { weight, eps }) } } impl Module for RmsNorm { fn forward(&self, x: &Tensor) -> Result<Tensor> { let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = x.dim(D::Minus1)?; let x = x.to_dtype(internal_dtype)?; let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?; x_normed .to_dtype(x_dtype)? .broadcast_mul(&(&self.weight + 1.0)?) } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new( dtype: DType, cfg: &Config, dev: &Device, sliding_window: Option<usize>, ) -> Result<Self> { let dim = cfg.head_dim; let max_seq_len = cfg.max_position_embeddings; let rope_freq = if sliding_window.is_some() { cfg.rope_local_base_freq } else { cfg.rope_theta }; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / rope_freq.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? 
.reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("gate_proj"))?; let up_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("up_proj"))?; let down_proj = linear(intermediate_sz, hidden_sz, false, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_activation, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] enum KvCache { Normal(candle_nn::kv_cache::KvCache), Rotating(candle_nn::kv_cache::RotatingKvCache), } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, q_norm: RmsNorm, k_norm: RmsNorm, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, attn_logit_softcapping: Option<f64>, rotary_emb: Arc<RotaryEmbedding>, kv_cache: KvCache, use_flash_attn: bool, } impl Attention { fn new( rotary_emb: Arc<RotaryEmbedding>, use_flash_attn: bool, cfg: &Config, sliding_window: Option<usize>, vb: VarBuilder, ) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = cfg.head_dim; let bias = cfg.attention_bias; let q_proj = linear(hidden_sz, num_heads * head_dim, bias, vb.pp("q_proj"))?; let k_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("k_proj"))?; let v_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("v_proj"))?; let o_proj = linear(num_heads * head_dim, hidden_sz, bias, vb.pp("o_proj"))?; let q_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("q_norm"))?; let k_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("k_norm"))?; let kv_cache = if let Some(sliding_window) = sliding_window { KvCache::Rotating(candle_nn::kv_cache::RotatingKvCache::new(2, sliding_window)) } else { KvCache::Normal(candle_nn::kv_cache::KvCache::new( 2, cfg.max_position_embeddings, )) }; Ok(Self { q_proj, k_proj, v_proj, o_proj, q_norm, k_norm, num_heads, num_kv_heads, num_kv_groups, head_dim, attn_logit_softcapping: cfg.attn_logit_softcapping, rotary_emb, kv_cache, use_flash_attn, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? 
.transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let query_states = self.q_norm.forward(&query_states)?; let key_states = self.k_norm.forward(&key_states)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &mut self.kv_cache { KvCache::Normal(cache) => cache.append(&key_states, &value_states)?, KvCache::Rotating(cache) => cache.append(&key_states, &value_states)?, }; let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = if self.use_flash_attn { // flash-attn expects (b_sz, seq_len, nheads, head_dim) let q = query_states.transpose(1, 2)?; let k = key_states.transpose(1, 2)?; let v = value_states.transpose(1, 2)?; let scale = 1f32 / (self.head_dim as f32).sqrt(); flash_attn(&q, &k, &v, scale, attention_mask.is_some())?.transpose(1, 2)? } else { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match self.attn_logit_softcapping { None => attn_weights, Some(sc) => ((attn_weights / sc)?.tanh()? * sc)?, }; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, ()))? .apply(&self.o_proj) } fn clear_kv_cache(&mut self) { match &mut self.kv_cache { KvCache::Normal(c) => c.reset(), KvCache::Rotating(c) => c.reset(), } } } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, pre_feedforward_layernorm: RmsNorm, post_feedforward_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, sliding_window: Option<usize>, } impl DecoderLayer { fn new( use_flash_attn: bool, cfg: &Config, vb: VarBuilder, sliding_window: Option<usize>, ) -> Result<Self> { let rotary_emb = Arc::new(RotaryEmbedding::new( vb.dtype(), cfg, vb.device(), sliding_window, )?); let self_attn = Attention::new( rotary_emb, use_flash_attn, cfg, sliding_window, vb.pp("self_attn"), )?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let pre_feedforward_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("pre_feedforward_layernorm"), )?; let post_feedforward_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_feedforward_layernorm"), )?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, pre_feedforward_layernorm, post_feedforward_layernorm, post_attention_layernorm, sliding_window, }) } fn forward( &mut self, xs: &Tensor, attention_mask: 
Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = xs.apply(&self.post_attention_layernorm)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.pre_feedforward_layernorm)?; let xs = xs.apply(&self.mlp)?; let xs = xs.apply(&self.post_feedforward_layernorm)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } fn prepare_decoder_attention_mask( b_size: usize, tgt_len: usize, seqlen_offset: usize, sliding_window: Option<usize>, dtype: DType, device: &Device, ) -> Result<Tensor> { let mask: Vec<_> = if let Some(sliding_window) = sliding_window { (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect() } else { (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0f32 })) .collect() }; let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(dtype) } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, final_logit_softcapping: Option<f64>, device: Device, dtype: DType, hidden_size: usize, sliding_window: usize, } impl Model { pub fn new(use_flash_attn: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let sliding_window = (layer_idx + 1) % cfg.sliding_window_pattern > 0; let layer = DecoderLayer::new( use_flash_attn, cfg, vb_l.pp(layer_idx), sliding_window.then_some(cfg.sliding_window), )?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = Linear::new(embed_tokens.embeddings().clone(), None); Ok(Self { embed_tokens, layers, norm, lm_head, final_logit_softcapping: cfg.final_logit_softcapping, device: vb.device().clone(), dtype: vb.dtype(), hidden_size: cfg.hidden_size, sliding_window: cfg.sliding_window, }) } fn create_attention_masks( &self, batch_size: usize, seq_len: usize, seqlen_offset: usize, ) -> Result<(Option<Tensor>, Option<Tensor>)> { if seq_len <= 1 { return Ok((None, None)); } let mask = prepare_decoder_attention_mask( batch_size, seq_len, seqlen_offset, None, self.dtype, &self.device, )?; let sliding_mask = prepare_decoder_attention_mask( batch_size, seq_len, seqlen_offset, Some(self.sliding_window), self.dtype, &self.device, )?; Ok((Some(mask), Some(sliding_mask))) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let xs = self.embed_tokens.forward(input_ids)?; let mut xs = (xs * (self.hidden_size as f64).sqrt())?; let (attention_mask, sliding_attention_mask) = self.create_attention_masks(b_size, seq_len, seqlen_offset)?; for layer in self.layers.iter_mut() { let mask = if layer.sliding_window.is_some() { &sliding_attention_mask } else { &attention_mask }; xs = layer.forward(&xs, mask.as_ref(), seqlen_offset)? 
} let logits = xs .narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head)?; let logits = match self.final_logit_softcapping { None => logits, Some(sc) => ((logits / sc)?.tanh()? * sc)?, }; Ok(logits) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
candle/candle-transformers/src/models/gemma3.rs/0
{ "file_path": "candle/candle-transformers/src/models/gemma3.rs", "repo_id": "candle", "token_count": 9001 }
61
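The per-layer `KvCache` means a generation loop processes the prompt at `seqlen_offset` 0 and then feeds one token at a time while the offset tracks how many positions are already cached. A hedged sketch; the `decode` name and greedy argmax are assumptions, and `Config` would normally be deserialized from the checkpoint's `config.json` (it derives `serde::Deserialize`):

```rust
use candle::{IndexOp, Tensor, D};
use candle_nn::VarBuilder;
use candle_transformers::models::gemma3::{Config, Model};

fn decode(cfg: &Config, vb: VarBuilder, prompt: &[u32], steps: usize) -> candle::Result<Vec<u32>> {
    let device = vb.device().clone();
    let mut model = Model::new(/* use_flash_attn */ false, cfg, vb)?;
    let mut tokens = prompt.to_vec();
    let mut offset = 0usize;
    for _ in 0..steps {
        // After the first pass only the new tail is fed; seqlen_offset tells
        // the rotary embedding and the masks how much is already cached.
        let input = Tensor::new(&tokens[offset..], &device)?.unsqueeze(0)?;
        let logits = model.forward(&input, offset)?; // (1, 1, vocab), last position only
        let next = logits.i((0, 0))?.argmax(D::Minus1)?.to_scalar::<u32>()?;
        offset = tokens.len();
        tokens.push(next);
    }
    model.clear_kv_cache();
    Ok(tokens)
}
```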
// Copyright (c) Kyutai, all rights reserved. // This source code is licensed under the license found in the // LICENSE file in the root directory of this source tree. use candle::{Module, Result, StreamTensor, StreamingModule, Tensor, D}; use candle_nn::{Conv1d, VarBuilder}; #[allow(clippy::enum_variant_names)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Norm { WeightNorm, SpectralNorm, TimeGroupNorm, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum PadMode { Constant, Reflect, Replicate, } // Applies weight norm for inference by recomputing the weight tensor. This // does not apply to training. // https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html fn conv1d_weight_norm( in_c: usize, out_c: usize, kernel_size: usize, bias: bool, config: candle_nn::Conv1dConfig, vb: VarBuilder, ) -> Result<Conv1d> { let weight = if vb.contains_tensor("weight") { vb.get((out_c, in_c, kernel_size), "weight")? } else { let weight_g = vb.get((out_c, 1, 1), "weight_g")?; let weight_v = vb.get((out_c, in_c, kernel_size), "weight_v")?; let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?; weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)? }; let bias = if bias { Some(vb.get(out_c, "bias")?) } else { None }; Ok(Conv1d::new(weight, bias, config)) } #[derive(Debug, Clone)] pub struct NormConv1d { conv: Conv1d, norm: Option<candle_nn::GroupNorm>, span: tracing::Span, } impl NormConv1d { #[allow(clippy::too_many_arguments)] pub fn new( in_c: usize, out_c: usize, k_size: usize, causal: bool, norm: Option<Norm>, bias: bool, cfg: candle_nn::Conv1dConfig, vb: VarBuilder, ) -> Result<Self> { let conv = match norm { None | Some(Norm::TimeGroupNorm) => { if bias { candle_nn::conv1d(in_c, out_c, k_size, cfg, vb.pp("conv"))? } else { candle_nn::conv1d_no_bias(in_c, out_c, k_size, cfg, vb.pp("conv"))? } } Some(Norm::WeightNorm) => { conv1d_weight_norm(in_c, out_c, k_size, bias, cfg, vb.pp("conv"))? } Some(Norm::SpectralNorm) => candle::bail!("SpectralNorm is not supported yet."), }; let norm = match norm { None | Some(Norm::WeightNorm) | Some(Norm::SpectralNorm) => None, Some(Norm::TimeGroupNorm) => { if causal { candle::bail!("GroupNorm doesn't support causal evaluation.") } let norm = candle_nn::group_norm(1, out_c, 1e-5, vb.pp("norm"))?; Some(norm) } }; Ok(Self { conv, norm, span: tracing::span!(tracing::Level::TRACE, "norm-conv1d"), }) } } impl Module for NormConv1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = xs.apply(&self.conv)?; match self.norm.as_ref() { None => Ok(xs), Some(norm) => xs.apply(norm), } } } #[derive(Debug, Clone)] pub struct NormConvTranspose1d { ws: Tensor, bs: Option<Tensor>, k_size: usize, stride: usize, groups: usize, norm: Option<candle_nn::GroupNorm>, span: tracing::Span, } impl NormConvTranspose1d { #[allow(clippy::too_many_arguments)] pub fn new( in_c: usize, out_c: usize, k_size: usize, causal: bool, norm: Option<Norm>, bias: bool, stride: usize, groups: usize, vb: VarBuilder, ) -> Result<Self> { let vb = vb.pp("conv"); let bs = if bias { Some(vb.get(out_c, "bias")?) } else { None }; let ws = match norm { None | Some(Norm::TimeGroupNorm) => vb.get((in_c, out_c / groups, k_size), "weight")?, Some(Norm::WeightNorm) => { if vb.contains_tensor("weight") { vb.get((in_c, out_c, k_size), "weight")? 
} else { let weight_g = vb.get((in_c, 1, 1), "weight_g")?; let weight_v = vb.get((in_c, out_c, k_size), "weight_v")?; let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?; weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)? } } Some(Norm::SpectralNorm) => candle::bail!("SpectralNorm is not supported yet."), }; let (ws, groups) = if groups == out_c && in_c == out_c { let eye = Tensor::eye(out_c, ws.dtype(), ws.device())?; let ws = ws .repeat((1, out_c, 1))? .mul(&eye.unsqueeze(2)?.repeat((1, 1, k_size))?)?; (ws, 1) } else { (ws, groups) }; let norm = match norm { None | Some(Norm::WeightNorm) | Some(Norm::SpectralNorm) => None, Some(Norm::TimeGroupNorm) => { if causal { candle::bail!("GroupNorm doesn't support causal evaluation.") } let norm = candle_nn::group_norm(1, out_c, 1e-5, vb.pp("norm"))?; Some(norm) } }; Ok(Self { ws, bs, k_size, stride, groups, norm, span: tracing::span!(tracing::Level::TRACE, "norm-conv-tr1d"), }) } } impl Module for NormConvTranspose1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); // conv-transpose1d seems to be broken on metal after enough iterations. Causing // the following error: // _status < MTLCommandBufferStatusCommitted > // -[IOGPUMetalCommandBuffer setCurrentCommandEncoder:] // This is now fixed in candle. let xs = Tensor::conv_transpose1d(xs, &self.ws, 0, 0, self.stride, 1, self.groups)?; let xs = match &self.bs { None => xs, Some(bias) => { let b = bias.dims1()?; let bias = bias.reshape((1, b, 1))?; xs.broadcast_add(&bias)? } }; match self.norm.as_ref() { None => Ok(xs), Some(norm) => xs.apply(norm), } } } fn get_extra_padding_for_conv1d( xs: &Tensor, k_size: usize, stride: usize, padding_total: usize, ) -> Result<usize> { let len = xs.dim(D::Minus1)?; let n_frames = (len + padding_total).saturating_sub(k_size) as f64 / stride as f64 + 1.0; let ideal_len = ((n_frames.ceil() as usize - 1) * stride + k_size).saturating_sub(padding_total); Ok(ideal_len.saturating_sub(len)) } fn pad1d(xs: &Tensor, pad_l: usize, pad_r: usize, mode: PadMode) -> Result<Tensor> { match mode { PadMode::Constant => xs.pad_with_zeros(D::Minus1, pad_l, pad_r), PadMode::Reflect => candle::bail!("pad-mode 'reflect' is not supported"), PadMode::Replicate => xs.pad_with_same(D::Minus1, pad_l, pad_r), } } fn unpad1d(xs: &Tensor, unpad_l: usize, unpad_r: usize) -> Result<Tensor> { let len = xs.dim(D::Minus1)?; if len < unpad_l + unpad_r { candle::bail!("unpad1d: tensor len {len} is too low, {unpad_l} + {unpad_r}") } xs.narrow(D::Minus1, unpad_l, len - (unpad_l + unpad_r)) } #[derive(Debug, Clone)] pub struct StreamableConv1d { conv: NormConv1d, causal: bool, pad_mode: PadMode, state_prev_xs: StreamTensor, left_pad_applied: bool, kernel_size: usize, span: tracing::Span, } impl StreamableConv1d { #[allow(clippy::too_many_arguments)] pub fn new( in_c: usize, out_c: usize, k_size: usize, stride: usize, dilation: usize, groups: usize, bias: bool, causal: bool, norm: Option<Norm>, pad_mode: PadMode, vb: VarBuilder, ) -> Result<Self> { let cfg = candle_nn::Conv1dConfig { padding: 0, stride, dilation, groups, cudnn_fwd_algo: None, }; let conv = NormConv1d::new(in_c, out_c, k_size, causal, norm, bias, cfg, vb)?; if k_size < stride { candle::bail!("kernel-size {k_size} is smaller than stride {stride}") } Ok(Self { conv, causal, pad_mode, state_prev_xs: StreamTensor::empty(), left_pad_applied: false, kernel_size: k_size, span: tracing::span!(tracing::Level::TRACE, "streamable-conv1d"), }) } } impl Module for StreamableConv1d { fn forward(&self, 
xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_b, _t, _c) = xs.dims3()?; let k_size = self.conv.conv.weight().dim(D::Minus1)?; let conv_cfg = self.conv.conv.config(); // Effective kernel size with dilations. let k_size = (k_size - 1) * conv_cfg.dilation + 1; let padding_total = k_size - conv_cfg.stride; let extra_padding = get_extra_padding_for_conv1d(xs, k_size, conv_cfg.stride, padding_total)?; let xs = if self.causal { pad1d(xs, padding_total, extra_padding, self.pad_mode)? } else { let padding_right = padding_total / 2; let padding_left = padding_total - padding_right; pad1d( xs, padding_left, padding_right + extra_padding, self.pad_mode, )? }; xs.apply(&self.conv) } } impl StreamingModule for StreamableConv1d { fn reset_state(&mut self) { self.state_prev_xs.reset(); self.left_pad_applied = false; } fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { let _enter = self.span.enter(); let xs = match xs.as_option() { None => return Ok(().into()), Some(xs) => xs.clone(), }; let xs = if self.left_pad_applied { xs } else { self.left_pad_applied = true; let k_size = self.conv.conv.weight().dim(D::Minus1)?; let conv_cfg = self.conv.conv.config(); let k_size = (k_size - 1) * conv_cfg.dilation + 1; let padding_total = k_size - conv_cfg.stride; pad1d(&xs, padding_total, 0, self.pad_mode)? }; let cfg = self.conv.conv.config(); let stride = cfg.stride; let dilation = cfg.dilation; let kernel = (self.kernel_size - 1) * dilation + 1; let xs = StreamTensor::cat2(&self.state_prev_xs, &xs.into(), D::Minus1)?; let seq_len = xs.seq_len(D::Minus1)?; let num_frames = (seq_len + stride).saturating_sub(kernel) / stride; if num_frames > 0 { let offset = num_frames * stride; self.state_prev_xs = xs.narrow(D::Minus1, offset, seq_len - offset)?; let in_l = (num_frames - 1) * stride + kernel; let xs = xs.narrow(D::Minus1, 0, in_l)?; // We apply the underlying convtr directly rather than through forward so as // not to apply any padding here. xs.apply(&self.conv.conv) } else { self.state_prev_xs = xs; Ok(StreamTensor::empty()) } } } #[derive(Debug, Clone)] pub struct StreamableConvTranspose1d { convtr: NormConvTranspose1d, causal: bool, state_prev_ys: StreamTensor, kernel_size: usize, span: tracing::Span, } impl StreamableConvTranspose1d { #[allow(clippy::too_many_arguments)] pub fn new( in_c: usize, out_c: usize, k_size: usize, stride: usize, groups: usize, bias: bool, causal: bool, norm: Option<Norm>, vb: VarBuilder, ) -> Result<Self> { let convtr = NormConvTranspose1d::new(in_c, out_c, k_size, causal, norm, bias, stride, groups, vb)?; Ok(Self { convtr, causal, kernel_size: k_size, state_prev_ys: StreamTensor::empty(), span: tracing::span!(tracing::Level::TRACE, "streamable-conv-tr1d"), }) } } impl Module for StreamableConvTranspose1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let k_size = self.convtr.k_size; let stride = self.convtr.stride; let padding_total = k_size.saturating_sub(stride); let xs = xs.apply(&self.convtr)?; if self.causal { // This corresponds to trim_right_ratio = 1. 
unpad1d(&xs, 0, padding_total) } else { let padding_right = padding_total / 2; let padding_left = padding_total - padding_right; unpad1d(&xs, padding_left, padding_right) } } } impl StreamingModule for StreamableConvTranspose1d { fn reset_state(&mut self) { self.state_prev_ys.reset() } fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { let _enter = self.span.enter(); let xs = match xs.as_option() { Some(xs) => xs, None => return Ok(StreamTensor::empty()), }; let stride = self.convtr.stride; // We apply the underlying convtr directly rather than through forward so as // not to apply any padding here. let ys = self.convtr.forward(xs)?; let ot = ys.dim(D::Minus1)?; let ys = match self.state_prev_ys.as_option() { None => ys, Some(prev_ys) => { let pt = prev_ys.dim(D::Minus1)?; // Remove the bias as it will be applied multiple times. let prev_ys = match &self.convtr.bs { None => prev_ys.clone(), Some(bias) => { let bias = bias.reshape((1, (), 1))?; prev_ys.broadcast_sub(&bias)? } }; let ys1 = (ys.narrow(D::Minus1, 0, pt)? + prev_ys)?; let ys2 = ys.narrow(D::Minus1, pt, ot - pt)?; Tensor::cat(&[ys1, ys2], D::Minus1)? } }; let invalid_steps = self.kernel_size - stride; let (ys, prev_ys) = StreamTensor::from(ys).split(D::Minus1, ot - invalid_steps)?; self.state_prev_ys = prev_ys; Ok(ys) } } #[derive(Debug, Clone)] pub struct ConvDownsample1d { conv: StreamableConv1d, } impl ConvDownsample1d { pub fn new( stride: usize, dim: usize, causal: bool, learnt: bool, vb: VarBuilder, ) -> Result<Self> { if !learnt { candle::bail!("only learnt=true is supported") } let conv = StreamableConv1d::new( /* in_c */ dim, /* out_c */ dim, /* k_size_c */ 2 * stride, /* stride */ stride, /* dilation */ 1, /* groups */ 1, // channel_wise = false /* bias */ false, /* causal */ causal, /* norm */ None, /* pad_mode */ PadMode::Replicate, vb, )?; Ok(Self { conv }) } } impl Module for ConvDownsample1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.conv) } } impl StreamingModule for ConvDownsample1d { fn reset_state(&mut self) { self.conv.reset_state() } fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { self.conv.step(xs) } } #[derive(Debug, Clone)] pub struct ConvTrUpsample1d { convtr: StreamableConvTranspose1d, } impl ConvTrUpsample1d { pub fn new( stride: usize, dim: usize, causal: bool, learnt: bool, vb: VarBuilder, ) -> Result<Self> { if !learnt { candle::bail!("only learnt=true is supported") } let convtr = StreamableConvTranspose1d::new( dim, dim, /* k_size */ 2 * stride, /* stride */ stride, /* groups */ dim, /* bias */ false, /* causal */ causal, /* norm */ None, vb, )?; Ok(Self { convtr }) } } impl Module for ConvTrUpsample1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.convtr) } } impl StreamingModule for ConvTrUpsample1d { fn reset_state(&mut self) { self.convtr.reset_state() } fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { self.convtr.step(xs) } } #[cfg(test)] mod tests { use super::*; use candle::IndexOp; fn run_conv1d( k_size: usize, stride: usize, dilation: usize, step_size: usize, len: usize, bias: bool, ) -> Result<()> { // TODO: We should ensure for the seed to be constant when running these tests. 
let dev = &candle::Device::Cpu; let vm = candle_nn::VarMap::new(); let vb = VarBuilder::from_varmap(&vm, candle::DType::F32, dev); let conv1d = StreamableConv1d::new( /* in_c */ 2, /* out_c */ 3, /* k_size */ k_size, /* stride */ stride, /* dilation */ dilation, /* groups */ 1, /* bias */ bias, /* causal */ true, /* norm */ None, /* pad_mode */ PadMode::Constant, vb, )?; let xs = Tensor::randn(0f32, 1., (1, 2, step_size * len), dev)?; let ys = conv1d.forward(&xs)?; let mut conv1d = conv1d; let mut ys_steps = vec![]; for idx in 0..len { let xs = xs.i((.., .., step_size * idx..step_size * (idx + 1)))?; let ys = conv1d.step(&xs.into())?; if let Some(ys) = ys.as_option() { ys_steps.push(ys.clone()) } } let ys_steps = Tensor::cat(&ys_steps, D::Minus1)?; let diff = (&ys - &ys_steps)? .abs()? .flatten_all()? .max(0)? .to_vec0::<f32>()?; if diff > 1e-5 { println!("{xs}"); println!("{ys}"); println!("{ys_steps}"); candle::bail!("larger diff than expected {diff}") } Ok(()) } fn run_conv_tr1d( k_size: usize, stride: usize, step_size: usize, len: usize, bias: bool, ) -> Result<()> { // TODO: We should ensure for the seed to be constant when running these tests. let dev = &candle::Device::Cpu; let vm = candle_nn::VarMap::new(); let vb = VarBuilder::from_varmap(&vm, candle::DType::F32, dev); let conv1d = StreamableConvTranspose1d::new( /* in_c */ 2, /* out_c */ 3, /* k_size */ k_size, /* stride */ stride, /* groups */ 1, /* bias */ bias, /* causal */ true, /* norm */ None, vb, )?; let xs = Tensor::randn(0f32, 1., (1, 2, step_size * len), dev)?; let ys = conv1d.forward(&xs)?; let mut conv1d = conv1d; let mut ys_steps = vec![]; for idx in 0..len { let xs = xs.i((.., .., step_size * idx..step_size * (idx + 1)))?; let ys = conv1d.step(&xs.into())?; if let Some(ys) = ys.as_option() { ys_steps.push(ys.clone()) } } let ys_steps = Tensor::cat(&ys_steps, D::Minus1)?; let diff = (&ys - &ys_steps)? .abs()? .flatten_all()? .max(0)? .to_vec0::<f32>()?; if diff > 1e-5 { println!("{xs}"); println!("{ys}"); println!("{ys_steps}"); candle::bail!("larger diff than expected {diff}") } Ok(()) } #[test] fn conv1d() -> Result<()> { for step_size in [1, 2, 3] { for bias in [false, true] { run_conv1d(1, 1, 1, step_size, 5, bias)?; run_conv1d(2, 1, 1, step_size, 5, bias)?; run_conv1d(2, 2, 1, step_size, 6, bias)?; run_conv1d(3, 2, 1, step_size, 8, bias)?; run_conv1d(3, 2, 2, step_size, 8, bias)?; } } Ok(()) } #[test] fn conv_tr1d() -> Result<()> { for step_size in [1, 2, 3] { for bias in [false, true] { run_conv_tr1d(1, 1, step_size, 5, bias)?; run_conv_tr1d(2, 1, step_size, 5, bias)?; run_conv_tr1d(3, 1, step_size, 5, bias)?; run_conv_tr1d(3, 2, step_size, 5, bias)?; } } Ok(()) } }
candle/candle-transformers/src/models/mimi/conv.rs/0
{ "file_path": "candle/candle-transformers/src/models/mimi/conv.rs", "repo_id": "candle", "token_count": 11137 }
62
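The in-file tests verify that chunk-by-chunk `step` calls reproduce a single `forward`. The same `StreamingModule` API lets streaming blocks be chained; a small sketch, assuming freshly initialized weights via `VarMap` (as in those tests) and that the module is reachable at `models::mimi::conv`:

```rust
use candle::{DType, Device, StreamingModule, Tensor};
use candle_nn::{VarBuilder, VarMap};
use candle_transformers::models::mimi::conv::{ConvDownsample1d, ConvTrUpsample1d};

fn main() -> candle::Result<()> {
    let dev = Device::Cpu;
    let vm = VarMap::new();
    let vb = VarBuilder::from_varmap(&vm, DType::F32, &dev);
    // Learnt stride-2 downsample followed by the matching transposed upsample.
    let mut down = ConvDownsample1d::new(/* stride */ 2, /* dim */ 4, /* causal */ true, /* learnt */ true, vb.pp("down"))?;
    let mut up = ConvTrUpsample1d::new(2, 4, true, true, vb.pp("up"))?;
    for _ in 0..8 {
        // Stream (batch, channels, time) chunks; outputs appear once each
        // block has buffered enough samples for a full kernel.
        let chunk = Tensor::randn(0f32, 1f32, (1, 4, 2), &dev)?;
        let ys = down.step(&chunk.into())?;
        let ys = up.step(&ys)?;
        if let Some(ys) = ys.as_option() {
            println!("{:?}", ys.dims());
        }
    }
    Ok(())
}
```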
//! # MobileOne //! //! MobileOne inference implementation based on timm and candle-repvgg //! //! See ["MobileOne: An Improved One millisecond Mobile Backbone"](https://arxiv.org/abs/2206.04040) use candle::{DType, Result, Tensor, D}; use candle_nn::{ batch_norm, conv2d, conv2d_no_bias, linear, ops::sigmoid, BatchNorm, Conv2d, Conv2dConfig, Func, VarBuilder, }; struct StageConfig { blocks: usize, channels: usize, } // The architecture in the paper has 6 stages. The timm implementation uses an equivalent form // by concatenating the 5th stage (starts with stride 1) to the previous one. const STAGES: [StageConfig; 5] = [ StageConfig { blocks: 1, channels: 64, }, StageConfig { blocks: 2, channels: 64, }, StageConfig { blocks: 8, channels: 128, }, StageConfig { blocks: 10, channels: 256, }, StageConfig { blocks: 1, channels: 512, }, ]; #[derive(Clone)] pub struct Config { /// overparameterization factor k: usize, /// per-stage channel number multipliers alphas: [f32; 5], } impl Config { pub fn s0() -> Self { Self { k: 4, alphas: [0.75, 0.75, 1.0, 1.0, 2.0], } } pub fn s1() -> Self { Self { k: 1, alphas: [1.5, 1.5, 1.5, 2.0, 2.5], } } pub fn s2() -> Self { Self { k: 1, alphas: [1.5, 1.5, 2.0, 2.5, 4.0], } } pub fn s3() -> Self { Self { k: 1, alphas: [2.0, 2.0, 2.5, 3.0, 4.0], } } pub fn s4() -> Self { Self { k: 1, alphas: [3.0, 3.0, 3.5, 3.5, 4.0], } } } // SE blocks are used in the last stages of the s4 variant. fn squeeze_and_excitation( in_channels: usize, squeeze_channels: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { ..Default::default() }; let fc1 = conv2d(in_channels, squeeze_channels, 1, conv2d_cfg, vb.pp("fc1"))?; let fc2 = conv2d(squeeze_channels, in_channels, 1, conv2d_cfg, vb.pp("fc2"))?; Ok(Func::new(move |xs| { let residual = xs; let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?; let xs = sigmoid(&xs.apply(&fc1)?.relu()?.apply(&fc2)?)?; residual.broadcast_mul(&xs) })) } // fuses a convolutional kernel and a batchnorm layer into a convolutional layer // based on the _fuse_bn_tensor method in timm // see https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L602 fn fuse_conv_bn(weights: &Tensor, bn: BatchNorm) -> Result<(Tensor, Tensor)> { let (gamma, beta) = bn.weight_and_bias().unwrap(); let mu = bn.running_mean(); let sigma = (bn.running_var() + bn.eps())?.sqrt(); let gps = (gamma / sigma)?; let bias = (beta - mu * &gps)?; let weights = weights.broadcast_mul(&gps.reshape(((), 1, 1, 1))?)?; Ok((weights, bias)) } // A mobileone block has a different training time and inference time architecture. // The latter is a simple and efficient equivalent transformation of the former // realized by a structural reparameterization technique, where convolutions // along with identity branches and batchnorm layers are fused into a single convolution. 
#[allow(clippy::too_many_arguments)] fn mobileone_block( has_identity: bool, k: usize, dim: usize, stride: usize, padding: usize, groups: usize, kernel: usize, in_channels: usize, out_channels: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { stride, padding, groups, ..Default::default() }; let mut w = Tensor::zeros( (out_channels, in_channels / groups, kernel, kernel), DType::F32, vb.device(), )?; let mut b = Tensor::zeros(dim, DType::F32, vb.device())?; // k is the training-time overparameterization factor, larger than 1 only in the s0 variant for i in 0..k { let conv_kxk_bn = batch_norm(dim, 1e-5, vb.pp(format!("conv_kxk.{i}.bn")))?; let conv_kxk = conv2d_no_bias( in_channels, out_channels, kernel, conv2d_cfg, vb.pp(format!("conv_kxk.{i}.conv")), )?; let (wk, bk) = fuse_conv_bn(conv_kxk.weight(), conv_kxk_bn)?; w = (w + wk)?; b = (b + bk)?; } if kernel > 1 { let conv_scale_bn = batch_norm(dim, 1e-5, vb.pp("conv_scale.bn"))?; let conv_scale = conv2d_no_bias( in_channels, out_channels, 1, conv2d_cfg, vb.pp("conv_scale.conv"), )?; let (mut ws, bs) = fuse_conv_bn(conv_scale.weight(), conv_scale_bn)?; // resize to 3x3 ws = ws.pad_with_zeros(D::Minus1, 1, 1)?; ws = ws.pad_with_zeros(D::Minus2, 1, 1)?; w = (w + ws)?; b = (b + bs)?; } // Use SE blocks if present (last layers of the s4 variant) let se = squeeze_and_excitation(out_channels, out_channels / 16, vb.pp("attn")); // read and reparameterize the identity bn into wi and bi if has_identity { let identity_bn = batch_norm(dim, 1e-5, vb.pp("identity"))?; let mut weights: Vec<f32> = vec![0.0; w.elem_count()]; let id = in_channels / groups; // See https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L809 for i in 0..in_channels { if kernel > 1 { weights[i * kernel * kernel + 4] = 1.0; } else { weights[i * (id + 1)] = 1.0; } } let weights = &Tensor::from_vec(weights, w.shape(), w.device())?; let (wi, bi) = fuse_conv_bn(weights, identity_bn)?; w = (w + wi)?; b = (b + bi)?; } let reparam_conv = Conv2d::new(w, Some(b), conv2d_cfg); Ok(Func::new(move |xs| { let mut xs = xs.apply(&reparam_conv)?; if let Ok(f) = &se { xs = xs.apply(f)?; } xs = xs.relu()?; Ok(xs) })) } // Get the number of output channels per stage taking into account the multipliers fn output_channels_per_stage(cfg: &Config, stage: usize) -> usize { let channels = STAGES[stage].channels as f32; let alpha = cfg.alphas[stage]; match stage { 0 => std::cmp::min(64, (channels * alpha) as usize), _ => (channels * alpha) as usize, } } // Each stage is made of blocks. The first layer always downsamples with stride 2. // All but the first block have a residual connection. 
fn mobileone_stage(cfg: &Config, idx: usize, vb: VarBuilder) -> Result<Func<'static>> { let nblocks = STAGES[idx].blocks; let mut blocks = Vec::with_capacity(nblocks); let mut in_channels = output_channels_per_stage(cfg, idx - 1); for block_idx in 0..nblocks { let out_channels = output_channels_per_stage(cfg, idx); let (has_identity, stride) = if block_idx == 0 { (false, 2) } else { (true, 1) }; // depthwise convolution layer blocks.push(mobileone_block( has_identity, cfg.k, in_channels, stride, 1, in_channels, 3, in_channels, in_channels, vb.pp(block_idx * 2), )?); // pointwise convolution layer blocks.push(mobileone_block( has_identity, cfg.k, out_channels, 1, // stride 0, // padding 1, // groups 1, // kernel in_channels, out_channels, vb.pp(block_idx * 2 + 1), )?); in_channels = out_channels; } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for block in blocks.iter() { xs = xs.apply(block)? } Ok(xs) })) } // Build a mobileone model for a given configuration. fn mobileone_model( config: &Config, nclasses: Option<usize>, vb: VarBuilder, ) -> Result<Func<'static>> { let cls = match nclasses { None => None, Some(nclasses) => { let outputs = output_channels_per_stage(config, 4); let linear = linear(outputs, nclasses, vb.pp("head.fc"))?; Some(linear) } }; let stem_dim = output_channels_per_stage(config, 0); let stem = mobileone_block(false, 1, stem_dim, 2, 1, 1, 3, 3, stem_dim, vb.pp("stem"))?; let vb = vb.pp("stages"); let stage1 = mobileone_stage(config, 1, vb.pp(0))?; let stage2 = mobileone_stage(config, 2, vb.pp(1))?; let stage3 = mobileone_stage(config, 3, vb.pp(2))?; let stage4 = mobileone_stage(config, 4, vb.pp(3))?; Ok(Func::new(move |xs| { let xs = xs .apply(&stem)? .apply(&stage1)? .apply(&stage2)? .apply(&stage3)? .apply(&stage4)? .mean(D::Minus2)? .mean(D::Minus1)?; match &cls { None => Ok(xs), Some(cls) => xs.apply(cls), } })) } pub fn mobileone(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { mobileone_model(cfg, Some(nclasses), vb) } pub fn mobileone_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { mobileone_model(cfg, None, vb) }
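
/// A minimal usage sketch: build an s0 classifier from safetensors weights and
/// run a single image through it. The weight file name, the timm-style tensor
/// naming inside it, and the 224x224 input size are illustrative assumptions.
#[allow(dead_code)]
fn classify_sketch(weights: &std::path::Path) -> Result<Tensor> {
    let device = candle::Device::Cpu;
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &device)?
    };
    // 1000 classes as in ImageNet; adjust to match the checkpoint's head.
    let model = mobileone(&Config::s0(), 1000, vb)?;
    let image = Tensor::zeros((1, 3, 224, 224), DType::F32, &device)?;
    image.apply(&model) // logits of shape (1, 1000)
}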
candle/candle-transformers/src/models/mobileone.rs/0
{ "file_path": "candle/candle-transformers/src/models/mobileone.rs", "repo_id": "candle", "token_count": 4729 }
63
//! Microsoft Phi-3 model implementation
//!
//! See Phi model details at:
//! - [Phi-3 Mini 4k Instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct)
//!
//! The Phi series are decoder-only transformers designed for code and language tasks.
//! Key characteristics:
//! - Decoder-only transformer architecture
//! - RoPE embeddings, with optional LongRope scaling for extended context windows
//! - RMS normalization
//! - Fused QKV and gate/up projections
//! - Gated feed-forward layers (SiLU activation for Phi-3)
//!
//! References:
//! - [Hugging Face Implementation](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/modeling_phi3.py)
//!
// This implementation is based on:
// https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/modeling_phi3.py
use crate::models::with_tracing::{linear_no_bias as linear, Linear, RmsNorm};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
use std::sync::Arc;

#[derive(Debug, Clone, serde::Deserialize)]
pub enum RopeScalingType {
    #[serde(rename = "longrope")]
    LongRope,
}

#[derive(Debug, Clone, serde::Deserialize)]
pub struct RopeScaling {
    pub short_factor: Vec<f32>,
    pub long_factor: Vec<f32>,
    #[serde(rename = "type")]
    pub type_: RopeScalingType,
}

// https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/config.json
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_act: candle_nn::Activation,
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub num_key_value_heads: usize,
    pub rms_norm_eps: f64,
    pub rope_theta: f64,
    pub bos_token_id: Option<u32>,
    pub eos_token_id: Option<u32>,
    pub rope_scaling: Option<RopeScaling>,
    pub max_position_embeddings: usize,
    pub original_max_position_embeddings: Option<usize>,
    pub partial_rotary_factor: Option<f64>,
    #[serde(default)]
    pub tie_word_embeddings: bool,
}

impl Config {
    pub fn head_dim(&self) -> usize {
        self.hidden_size / self.num_attention_heads
    }
}

#[derive(Debug, Clone)]
pub struct RotaryEmbedding {
    partial_dim: Option<usize>,
    sin: Tensor,
    cos: Tensor,
}

impl RotaryEmbedding {
    pub fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let partial_dim = cfg
            .partial_rotary_factor
            .as_ref()
            .map(|v| (v * cfg.head_dim() as f64) as usize);
        let dim = partial_dim.unwrap_or(cfg.head_dim());
        let freqs = match cfg.rope_scaling.as_ref() {
            None => {
                let max_seq_len = cfg.max_position_embeddings;
                let inv_freq: Vec<_> = (0..dim)
                    .step_by(2)
                    .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
                    .collect();
                let inv_freq = Tensor::from_vec(inv_freq, (1, ()), dev)?.to_dtype(dtype)?;
                let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
                    .to_dtype(dtype)?
                    .reshape((max_seq_len, 1))?;
                t.matmul(&inv_freq)?
            }
            Some(rope_scaling) => {
                let inv_freq_s: Vec<_> = (0..dim)
                    .step_by(2)
                    .zip(rope_scaling.short_factor.iter())
                    .map(|(i, &f)| f / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
                    .collect();
                let inv_freq_s = Tensor::from_vec(inv_freq_s, (1, ()), dev)?.to_dtype(dtype)?;
                let max_seq_len = cfg.max_position_embeddings;
                match cfg.original_max_position_embeddings {
                    None => {
                        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
                            .to_dtype(dtype)?
                            .reshape((max_seq_len, 1))?;
                        t.matmul(&inv_freq_s)?
                    }
                    Some(original_max_seq_len) => {
                        let t_s = Tensor::arange(0u32, original_max_seq_len as u32, dev)?
                            .to_dtype(dtype)?
.reshape((original_max_seq_len, 1))?; let freq_s = t_s.matmul(&inv_freq_s)?; let inv_freq_l: Vec<_> = (0..dim) .step_by(2) .zip(rope_scaling.long_factor.iter()) .map(|(i, &f)| f / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_l = Tensor::from_vec(inv_freq_l, (1, ()), dev)?.to_dtype(dtype)?; let t_l = Tensor::arange(original_max_seq_len as u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape(((), 1))?; let freq_l = t_l.matmul(&inv_freq_l)?; Tensor::cat(&[&freq_s, &freq_l], 0)? } } } }; Ok(Self { partial_dim, sin: freqs.sin()?, cos: freqs.cos()?, }) } fn rope(&self, xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let x = match self.partial_dim { None => candle_nn::rotary_emb::rope(&xs.contiguous()?, cos, sin)?, Some(dim) => { let xs_rot = xs.i((.., .., .., ..dim))?.contiguous()?; let xs_pass = xs.i((.., .., .., dim..))?; let xs_rot = candle_nn::rotary_emb::rope(&xs_rot, cos, sin)?; Tensor::cat(&[&xs_rot, &xs_pass], D::Minus1)?.contiguous()? } }; Ok(x) } pub fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = self.rope(&q.contiguous()?, &cos, &sin)?; let k_embed = self.rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] struct Attention { qkv_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let head_dim = cfg.head_dim(); let op_size = num_heads * head_dim + 2 * num_kv_heads * head_dim; let qkv_proj = linear(cfg.hidden_size, op_size, vb.pp("qkv_proj"))?; let o_proj = linear(num_heads * head_dim, cfg.hidden_size, vb.pp("o_proj"))?; Ok(Self { qkv_proj, o_proj, rotary_emb, kv_cache: None, num_heads, num_kv_heads, num_kv_groups: num_heads / num_kv_heads, head_dim, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let qkv = self.qkv_proj.forward(xs)?; let query_pos = self.num_heads * self.head_dim; let query_states = qkv.narrow(D::Minus1, 0, query_pos)?; let key_states = qkv.narrow(D::Minus1, query_pos, self.num_kv_heads * self.head_dim)?; let value_states = qkv.narrow( D::Minus1, query_pos + self.num_kv_heads * self.head_dim, self.num_kv_heads * self.head_dim, )?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? 
.transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, ()))? .apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct Mlp { gate_up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, i_size: usize, } impl Mlp { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_size = cfg.hidden_size; let i_size = cfg.intermediate_size; let gate_up_proj = linear(hidden_size, 2 * i_size, vb.pp("gate_up_proj"))?; let down_proj = linear(i_size, hidden_size, vb.pp("down_proj"))?; Ok(Self { gate_up_proj, down_proj, act_fn: cfg.hidden_act, i_size, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let up_states = xs.apply(&self.gate_up_proj)?; let gate = up_states.narrow(D::Minus1, 0, self.i_size)?; let up_states = up_states.narrow(D::Minus1, self.i_size, self.i_size)?; let up_states = (up_states * gate.apply(&self.act_fn))?; up_states.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: Mlp, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = Mlp::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut 
layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = if cfg.tie_word_embeddings { Linear::from_weights(embed_tokens.embeddings().clone(), None) } else { linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))? }; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
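
/// A minimal greedy-decoding sketch (illustrative only): the tokenizer,
/// sampling and stop-token handling are omitted, and `cfg`/`vb` are assumed to
/// come from the model's `config.json` and safetensors weights.
#[allow(dead_code)]
fn greedy_decode_sketch(
    cfg: &Config,
    vb: VarBuilder,
    prompt: &[u32],
    steps: usize,
) -> Result<Vec<u32>> {
    let device = vb.device().clone();
    let mut model = Model::new(cfg, vb)?;
    let mut tokens = prompt.to_vec();
    let mut offset = 0;
    for _ in 0..steps {
        // Only feed the tokens that are not yet in the kv cache.
        let input = Tensor::new(&tokens[offset..], &device)?.unsqueeze(0)?;
        let logits = model.forward(&input, offset)?; // (1, 1, vocab_size)
        offset = tokens.len();
        let next = logits.flatten_all()?.argmax(0)?.to_scalar::<u32>()?;
        tokens.push(next);
    }
    Ok(tokens)
}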
candle/candle-transformers/src/models/phi3.rs/0
{ "file_path": "candle/candle-transformers/src/models/phi3.rs", "repo_id": "candle", "token_count": 8089 }
64
//! Qwen2 model implementation with quantization support.
//!
//! Qwen2 is a chat-optimized language model; this module runs inference from
//! GGUF quantized weights for reduced memory usage and faster inference.
//!
//! Key characteristics:
//! - Grouped-query attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for GGUF quantization formats (e.g. `Q4_K`, `Q8_0`)
//!
//! References:
//! - [Model Card](https://huggingface.co/Qwen/Qwen2)
//!
use crate::{quantized_nn::RmsNorm, utils::repeat_kv};
use candle::{
    quantized::{gguf_file, QMatMul},
    DType, Device, IndexOp, Result, Tensor,
};
use candle_nn::{Embedding, Module};
use std::collections::HashMap;

#[derive(Debug, Clone)]
struct Mlp {
    feed_forward_w1: QMatMul,
    feed_forward_w2: QMatMul,
    feed_forward_w3: QMatMul,
}

impl Module for Mlp {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let w1 = self.feed_forward_w1.forward(xs)?;
        let w3 = self.feed_forward_w3.forward(xs)?;
        self.feed_forward_w2
            .forward(&(candle_nn::ops::silu(&w1)? * w3)?)
    }
}

#[derive(Debug, Clone)]
struct LayerWeights {
    attention_wq: QMatMul,
    attention_wk: QMatMul,
    attention_wv: QMatMul,
    attention_bq: Tensor,
    attention_bk: Tensor,
    attention_bv: Tensor,
    attention_wo: QMatMul,
    attention_norm: RmsNorm,
    mlp: Mlp,
    ffn_norm: RmsNorm,
    n_head: usize,
    n_kv_head: usize,
    head_dim: usize,
    cos: Tensor,
    sin: Tensor,
    neg_inf: Tensor,
    kv_cache: Option<(Tensor, Tensor)>,
    span_attn: tracing::Span,
    span_rot: tracing::Span,
    span_mlp: tracing::Span,
}

fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> {
    let shape = mask.shape();
    let m = mask.where_cond(&on_true.broadcast_as(shape.dims())?, on_false)?;
    Ok(m)
}

impl LayerWeights {
    fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
        let _enter = self.span_rot.enter();
        let (_b_sz, _n_head, seq_len, _n_embd) = x.dims4()?;
        let cos = self.cos.narrow(0, index_pos, seq_len)?;
        let sin = self.sin.narrow(0, index_pos, seq_len)?;
        candle_nn::rotary_emb::rope(&x.contiguous()?, &cos, &sin)
    }

    fn forward_attn(
        &mut self,
        x: &Tensor,
        mask: Option<&Tensor>,
        index_pos: usize,
    ) -> Result<Tensor> {
        let _enter = self.span_attn.enter();
        let (b_sz, seq_len, n_embd) = x.dims3()?;
        let q = self.attention_wq.forward(x)?;
        let k = self.attention_wk.forward(x)?;
        let v = self.attention_wv.forward(x)?;

        let q = q.broadcast_add(&self.attention_bq)?;
        let k = k.broadcast_add(&self.attention_bk)?;
        let v = v.broadcast_add(&self.attention_bv)?;

        let q = q
            .reshape((b_sz, seq_len, self.n_head, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let k = k
            .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;
        let v = v
            .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
            .transpose(1, 2)?
            .contiguous()?;

        let q = self.apply_rotary_emb(&q, index_pos)?;
        let k = self.apply_rotary_emb(&k, index_pos)?;

        let (k, v) = match &self.kv_cache {
            None => (k, v),
            Some((k_cache, v_cache)) => {
                if index_pos == 0 {
                    (k, v)
                } else {
                    let k = Tensor::cat(&[k_cache, &k], 2)?;
                    let v = Tensor::cat(&[v_cache, &v], 2)?;
                    (k, v)
                }
            }
        };
        self.kv_cache = Some((k.clone(), v.clone()));

        // Repeat the k/v heads to match the number of query heads (grouped-query attention).
        let k = repeat_kv(k, self.n_head / self.n_kv_head)?;
        let v = repeat_kv(v, self.n_head / self.n_kv_head)?;

        let att = (q.matmul(&k.t()?)?
/ (self.head_dim as f64).sqrt())?; let att = match mask { None => att, Some(mask) => { let mask = mask.broadcast_as(att.shape())?; masked_fill(&att, &mask, &self.neg_inf)? } }; let att = candle_nn::ops::softmax_last_dim(&att)?; // Convert to contiguous as matmul doesn't support strided vs for now. let y = att.matmul(&v.contiguous()?)?; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.attention_wo.forward(&y)?; Ok(y) } } pub struct ModelWeights { tok_embeddings: Embedding, layers: Vec<LayerWeights>, norm: RmsNorm, output: QMatMul, masks: HashMap<usize, Tensor>, span: tracing::Span, span_output: tracing::Span, } fn precomput_freqs_cis( head_dim: usize, freq_base: f32, context_length: usize, device: &Device, ) -> Result<(Tensor, Tensor)> { let theta: Vec<_> = (0..head_dim) .step_by(2) .map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), device)?; let idx_theta = Tensor::arange(0, context_length as u32, device)? .to_dtype(DType::F32)? .reshape((context_length, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; let cos = idx_theta.cos()?; let sin = idx_theta.sin()?; Ok((cos, sin)) } impl ModelWeights { pub fn from_gguf<R: std::io::Seek + std::io::Read>( ct: gguf_file::Content, reader: &mut R, device: &Device, ) -> Result<Self> { let md_get = |s: &str| match ct.metadata.get(s) { None => candle::bail!("cannot find {s} in metadata"), Some(v) => Ok(v), }; let head_count = md_get("qwen2.attention.head_count")?.to_u32()? as usize; let head_count_kv = md_get("qwen2.attention.head_count_kv")?.to_u32()? as usize; let embedding_length = md_get("qwen2.embedding_length")?.to_u32()? as usize; let context_length = md_get("qwen2.context_length")?.to_u32()? as usize; let block_count = md_get("qwen2.block_count")?.to_u32()? as usize; let rms_norm_eps = md_get("qwen2.attention.layer_norm_rms_epsilon")?.to_f32()? as f64; let rope_freq_base = md_get("qwen2.rope.freq_base") .and_then(|m| m.to_f32()) .unwrap_or(10000f32); let head_dim = embedding_length / head_count; let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?; let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?; let tok_embeddings = tok_embeddings.dequantize(device)?; let norm = RmsNorm::from_qtensor( ct.tensor(reader, "output_norm.weight", device)?, rms_norm_eps, )?; let output = match ct.tensor(reader, "output.weight", device) { Ok(v) => QMatMul::from_qtensor(v)?, _ => { // use tie_word_embeddings QMatMul::from_qtensor(ct.tensor(reader, "token_embd.weight", device)?)? 
} }; let (cos, sin) = precomput_freqs_cis(head_dim, rope_freq_base, context_length, device)?; let mut layers = Vec::with_capacity(block_count); for layer_idx in 0..block_count { let prefix = format!("blk.{layer_idx}"); let attention_wq = ct.tensor(reader, &format!("{prefix}.attn_q.weight"), device)?; let attention_wk = ct.tensor(reader, &format!("{prefix}.attn_k.weight"), device)?; let attention_wv = ct.tensor(reader, &format!("{prefix}.attn_v.weight"), device)?; let attention_bq = ct.tensor(reader, &format!("{prefix}.attn_q.bias"), device)?; let attention_bk = ct.tensor(reader, &format!("{prefix}.attn_k.bias"), device)?; let attention_bv = ct.tensor(reader, &format!("{prefix}.attn_v.bias"), device)?; let attention_wo = ct.tensor(reader, &format!("{prefix}.attn_output.weight"), device)?; let mlp = { let feed_forward_w1 = ct.tensor(reader, &format!("{prefix}.ffn_gate.weight"), device)?; let feed_forward_w2 = ct.tensor(reader, &format!("{prefix}.ffn_down.weight"), device)?; let feed_forward_w3 = ct.tensor(reader, &format!("{prefix}.ffn_up.weight"), device)?; Mlp { feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?, feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?, feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?, } }; let attention_norm = ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?; let ffn_norm = ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?; let span_attn = tracing::span!(tracing::Level::TRACE, "attn"); let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot"); let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp"); layers.push(LayerWeights { attention_wq: QMatMul::from_qtensor(attention_wq)?, attention_wk: QMatMul::from_qtensor(attention_wk)?, attention_wv: QMatMul::from_qtensor(attention_wv)?, attention_bq: attention_bq.dequantize(device)?, attention_bk: attention_bk.dequantize(device)?, attention_bv: attention_bv.dequantize(device)?, attention_wo: QMatMul::from_qtensor(attention_wo)?, attention_norm: RmsNorm::from_qtensor(attention_norm, rms_norm_eps)?, cos: cos.clone(), sin: sin.clone(), mlp, ffn_norm: RmsNorm::from_qtensor(ffn_norm, rms_norm_eps)?, n_head: head_count, n_kv_head: head_count_kv, head_dim, neg_inf: neg_inf.clone(), kv_cache: None, span_attn, span_rot, span_mlp, }); } let span = tracing::span!(tracing::Level::TRACE, "model"); let span_output = tracing::span!(tracing::Level::TRACE, "output"); Ok(Self { tok_embeddings: Embedding::new(tok_embeddings, embedding_length), layers, norm, output, masks: HashMap::new(), span, span_output, }) } fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> { if let Some(mask) = self.masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), device)?; self.masks.insert(t, mask.clone()); Ok(mask) } } pub fn forward(&mut self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let (_b_sz, seq_len) = x.dims2()?; let mask = if seq_len == 1 { None } else { Some(self.mask(seq_len, x.device())?) 
}; let _enter = self.span.enter(); let mut layer_in = self.tok_embeddings.forward(x)?; for layer in self.layers.iter_mut() { let x = layer_in; let residual = &x; let x = layer.attention_norm.forward(&x)?; let attn = layer.forward_attn(&x, mask.as_ref(), index_pos)?; let x = (attn + residual)?; // MLP let _enter = layer.span_mlp.enter(); let residual = &x; let x = layer.ffn_norm.forward(&x)?; let x = layer.mlp.forward(&x)?; let x = (x + residual)?; layer_in = x } let x = self.norm.forward(&layer_in)?; let x = x.i((.., seq_len - 1, ..))?; let _enter = self.span_output.enter(); self.output.forward(&x) } }
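
/// A loading sketch: read a qwen2-architecture GGUF file and compute logits
/// for a token sequence. The file path is an assumption; any qwen2 GGUF
/// exposing the metadata keys used above should work.
#[allow(dead_code)]
fn load_and_forward_sketch(
    path: &std::path::Path,
    device: &Device,
    tokens: &[u32],
) -> Result<Tensor> {
    let mut file = std::fs::File::open(path)?;
    let content = gguf_file::Content::read(&mut file)?;
    let mut model = ModelWeights::from_gguf(content, &mut file, device)?;
    let input = Tensor::new(tokens, device)?.unsqueeze(0)?;
    model.forward(&input, 0) // logits for the last position, shape (1, vocab_size)
}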
candle/candle-transformers/src/models/quantized_qwen2.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_qwen2.rs", "repo_id": "candle", "token_count": 6400 }
65
//! Segformer model implementation for semantic segmentation and image classification. //! //! Segformer is a transformer-based model designed for vision tasks. It uses a hierarchical //! structure that progressively generates features at different scales. //! //! Key characteristics: //! - Efficient self-attention with sequence reduction //! - Hierarchical feature generation //! - Mix-FFN for local and global feature interaction //! - Lightweight all-MLP decode head //! //! References: //! - [SegFormer Paper](https://arxiv.org/abs/2105.15203) //! - [Model Card](https://huggingface.co/nvidia/mit-b0) //! use crate::models::with_tracing::{conv2d, linear, Conv2d, Linear}; use candle::{Context, Module, ModuleT, Result, Tensor, D}; use candle_nn::{conv2d_no_bias, layer_norm, Activation, Conv2dConfig, VarBuilder}; use serde::Deserialize; use std::collections::HashMap; // https://github.com/huggingface/transformers/blob/main/src/transformers/models/segformer/configuration_segformer.py #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { #[serde(default)] pub id2label: HashMap<String, String>, pub num_channels: usize, pub num_encoder_blocks: usize, pub depths: Vec<usize>, pub sr_ratios: Vec<usize>, pub hidden_sizes: Vec<usize>, pub patch_sizes: Vec<usize>, pub strides: Vec<usize>, pub num_attention_heads: Vec<usize>, pub mlp_ratios: Vec<usize>, pub hidden_act: candle_nn::Activation, pub layer_norm_eps: f64, pub decoder_hidden_size: usize, } #[derive(Debug, Clone)] struct SegformerOverlapPatchEmbeddings { projection: Conv2d, layer_norm: candle_nn::LayerNorm, } impl SegformerOverlapPatchEmbeddings { fn new( config: &Config, patch_size: usize, stride: usize, num_channels: usize, hidden_size: usize, vb: VarBuilder, ) -> Result<Self> { let projection = conv2d( num_channels, hidden_size, patch_size, Conv2dConfig { stride, padding: patch_size / 2, ..Default::default() }, vb.pp("proj"), )?; let layer_norm = candle_nn::layer_norm(hidden_size, config.layer_norm_eps, vb.pp("layer_norm"))?; Ok(Self { projection, layer_norm, }) } } impl Module for SegformerOverlapPatchEmbeddings { fn forward(&self, x: &Tensor) -> Result<Tensor> { let embeddings = self.projection.forward(x)?; let shape = embeddings.shape(); // [B, C, H, W] -> [B, H * W, C] let embeddings = embeddings.flatten_from(2)?.transpose(1, 2)?; let embeddings = self.layer_norm.forward(&embeddings)?; // [B, H * W, C] -> [B, C, H, W] let embeddings = embeddings.transpose(1, 2)?.reshape(shape)?; Ok(embeddings) } } #[derive(Debug, Clone)] struct SegformerEfficientSelfAttention { num_attention_heads: usize, attention_head_size: usize, query: Linear, key: Linear, value: Linear, sr: Option<Conv2d>, layer_norm: Option<layer_norm::LayerNorm>, } impl SegformerEfficientSelfAttention { fn new( config: &Config, hidden_size: usize, num_attention_heads: usize, sequence_reduction_ratio: usize, vb: VarBuilder, ) -> Result<Self> { if hidden_size % num_attention_heads != 0 { candle::bail!( "The hidden size {} is not a multiple of the number of attention heads {}", hidden_size, num_attention_heads ) } let attention_head_size = hidden_size / num_attention_heads; let all_head_size = num_attention_heads * attention_head_size; let query = linear(hidden_size, all_head_size, vb.pp("query"))?; let key = linear(hidden_size, all_head_size, vb.pp("key"))?; let value = linear(hidden_size, all_head_size, vb.pp("value"))?; let (sr, layer_norm) = if sequence_reduction_ratio > 1 { ( Some(conv2d( hidden_size, hidden_size, sequence_reduction_ratio, Conv2dConfig { stride: 
sequence_reduction_ratio, ..Default::default() }, vb.pp("sr"), )?), Some(candle_nn::layer_norm( hidden_size, config.layer_norm_eps, vb.pp("layer_norm"), )?), ) } else { (None, None) }; Ok(Self { num_attention_heads, attention_head_size, query, key, value, sr, layer_norm, }) } fn transpose_for_scores(&self, hidden_states: Tensor) -> Result<Tensor> { let (batch, seq_length, _) = hidden_states.shape().dims3()?; let new_shape = &[ batch, seq_length, self.num_attention_heads, self.attention_head_size, ]; let hidden_states = hidden_states.reshape(new_shape)?; let hidden_states = hidden_states.permute((0, 2, 1, 3))?; Ok(hidden_states) } } impl Module for SegformerEfficientSelfAttention { fn forward(&self, x: &Tensor) -> Result<Tensor> { // [B, C, H, W] -> [B, H * W, C] let hidden_states = x.flatten_from(2)?.permute((0, 2, 1))?; let query = self .transpose_for_scores(self.query.forward(&hidden_states)?)? .contiguous()?; let hidden_states = if let (Some(sr), Some(layer_norm)) = (&self.sr, &self.layer_norm) { let hidden_states = sr.forward(x)?; // [B, C, H, W] -> [B, H * W, C] let hidden_states = hidden_states.flatten_from(2)?.permute((0, 2, 1))?; layer_norm.forward(&hidden_states)? } else { // already [B, H * W, C] hidden_states }; // standard self-attention let key = self .transpose_for_scores(self.key.forward(&hidden_states)?)? .contiguous()?; let value = self .transpose_for_scores(self.value.forward(&hidden_states)?)? .contiguous()?; let attention_scores = (query.matmul(&key.t()?)? / f64::sqrt(self.attention_head_size as f64))?; let attention_scores = candle_nn::ops::softmax_last_dim(&attention_scores)?; let result = attention_scores.matmul(&value)?; let result = result.permute((0, 2, 1, 3))?.contiguous()?; result.flatten_from(D::Minus2) } } #[derive(Debug, Clone)] struct SegformerSelfOutput { dense: Linear, } impl SegformerSelfOutput { fn new(hidden_size: usize, vb: VarBuilder) -> Result<Self> { let dense = linear(hidden_size, hidden_size, vb.pp("dense"))?; Ok(Self { dense }) } } impl Module for SegformerSelfOutput { fn forward(&self, x: &Tensor) -> Result<Tensor> { self.dense.forward(x) } } #[derive(Debug, Clone)] struct SegformerAttention { attention: SegformerEfficientSelfAttention, output: SegformerSelfOutput, } impl SegformerAttention { fn new( config: &Config, hidden_size: usize, num_attention_heads: usize, sequence_reduction_ratio: usize, vb: VarBuilder, ) -> Result<Self> { let attention = SegformerEfficientSelfAttention::new( config, hidden_size, num_attention_heads, sequence_reduction_ratio, vb.pp("self"), )?; let output = SegformerSelfOutput::new(hidden_size, vb.pp("output"))?; Ok(Self { attention, output }) } } impl Module for SegformerAttention { fn forward(&self, x: &Tensor) -> Result<Tensor> { let attention_output = self.attention.forward(x)?; self.output.forward(&attention_output) } } #[derive(Debug, Clone)] struct SegformerDWConv { dw_conv: Conv2d, } impl SegformerDWConv { fn new(dim: usize, vb: VarBuilder) -> Result<Self> { let dw_conv = conv2d( dim, dim, 3, Conv2dConfig { stride: 1, padding: 1, groups: dim, ..Default::default() }, vb.pp("dwconv"), )?; Ok(Self { dw_conv }) } } impl Module for SegformerDWConv { fn forward(&self, x: &Tensor) -> Result<Tensor> { self.dw_conv.forward(x) } } #[derive(Debug, Clone)] struct SegformerMixFFN { dense1: Linear, dw_conv: SegformerDWConv, act: Activation, dense2: Linear, } impl SegformerMixFFN { fn new( config: &Config, in_features: usize, hidden_features: usize, out_features: usize, vb: VarBuilder, ) -> Result<Self> { let dense1 = 
linear(in_features, hidden_features, vb.pp("dense1"))?; let dw_conv = SegformerDWConv::new(hidden_features, vb.pp("dwconv"))?; let act = config.hidden_act; let dense2 = linear(hidden_features, out_features, vb.pp("dense2"))?; Ok(Self { dense1, dw_conv, act, dense2, }) } } impl Module for SegformerMixFFN { fn forward(&self, x: &Tensor) -> Result<Tensor> { let (batch, _, height, width) = x.shape().dims4()?; let hidden_states = self .dense1 .forward(&x.flatten_from(2)?.permute((0, 2, 1))?)?; let channels = hidden_states.dim(2)?; let hidden_states = self.dw_conv.forward( &hidden_states .permute((0, 2, 1))? .reshape((batch, channels, height, width))?, )?; let hidden_states = self.act.forward(&hidden_states)?; let hidden_states = self .dense2 .forward(&hidden_states.flatten_from(2)?.permute((0, 2, 1))?)?; let channels = hidden_states.dim(2)?; hidden_states .permute((0, 2, 1))? .reshape((batch, channels, height, width)) } } #[derive(Debug, Clone)] struct SegformerLayer { layer_norm_1: candle_nn::LayerNorm, attention: SegformerAttention, layer_norm_2: candle_nn::LayerNorm, mlp: SegformerMixFFN, } impl SegformerLayer { fn new( config: &Config, hidden_size: usize, num_attention_heads: usize, sequence_reduction_ratio: usize, mlp_ratio: usize, vb: VarBuilder, ) -> Result<Self> { let layer_norm_1 = layer_norm(hidden_size, config.layer_norm_eps, vb.pp("layer_norm_1"))?; let attention = SegformerAttention::new( config, hidden_size, num_attention_heads, sequence_reduction_ratio, vb.pp("attention"), )?; let layer_norm_2 = layer_norm(hidden_size, config.layer_norm_eps, vb.pp("layer_norm_2"))?; let mlp = SegformerMixFFN::new( config, hidden_size, hidden_size * mlp_ratio, hidden_size, vb.pp("mlp"), )?; Ok(Self { layer_norm_1, attention, layer_norm_2, mlp, }) } } impl Module for SegformerLayer { fn forward(&self, x: &Tensor) -> Result<Tensor> { let shape = x.shape().dims4()?; // [B, C, H, W] -> [B, H * W, C] let hidden_states = x.flatten_from(2)?.permute((0, 2, 1))?; let layer_norm_output = self.layer_norm_1.forward(&hidden_states)?; let layer_norm_output = layer_norm_output.permute((0, 2, 1))?.reshape(shape)?; // attention takes in [B, C, H, W] in order to properly do conv2d (and output [B, H * W, C]) let attention_output = self.attention.forward(&layer_norm_output)?; let hidden_states = (attention_output + hidden_states)?; let layer_norm_output = self.layer_norm_2.forward(&hidden_states)?; let mlp_output = self .mlp .forward(&layer_norm_output.permute((0, 2, 1))?.reshape(shape)?)?; hidden_states.permute((0, 2, 1))?.reshape(shape)? 
+ mlp_output } } #[derive(Debug, Clone)] struct SegformerEncoder { /// config file config: Config, /// a list of embeddings patch_embeddings: Vec<SegformerOverlapPatchEmbeddings>, /// a list of attention blocks, each consisting of layers blocks: Vec<Vec<SegformerLayer>>, /// a final list of layer norms layer_norms: Vec<candle_nn::LayerNorm>, } impl SegformerEncoder { fn new(config: Config, vb: VarBuilder) -> Result<Self> { let mut patch_embeddings = Vec::with_capacity(config.num_encoder_blocks); let mut blocks = Vec::with_capacity(config.num_encoder_blocks); let mut layer_norms = Vec::with_capacity(config.num_encoder_blocks); for i in 0..config.num_encoder_blocks { let patch_size = config.patch_sizes[i]; let stride = config.strides[i]; let hidden_size = config.hidden_sizes[i]; let num_channels = if i == 0 { config.num_channels } else { config.hidden_sizes[i - 1] }; patch_embeddings.push(SegformerOverlapPatchEmbeddings::new( &config, patch_size, stride, num_channels, hidden_size, vb.pp(format!("patch_embeddings.{i}")), )?); let mut layers = Vec::with_capacity(config.depths[i]); for j in 0..config.depths[i] { let sequence_reduction_ratio = config.sr_ratios[i]; let num_attention_heads = config.num_attention_heads[i]; let mlp_ratio = config.mlp_ratios[i]; layers.push(SegformerLayer::new( &config, hidden_size, num_attention_heads, sequence_reduction_ratio, mlp_ratio, vb.pp(format!("block.{i}.{j}")), )?); } blocks.push(layers); layer_norms.push(layer_norm( hidden_size, config.layer_norm_eps, vb.pp(format!("layer_norm.{i}")), )?); } Ok(Self { config, patch_embeddings, blocks, layer_norms, }) } } impl ModuleWithHiddenStates for SegformerEncoder { fn forward(&self, x: &Tensor) -> Result<Vec<Tensor>> { let mut all_hidden_states = Vec::with_capacity(self.config.num_encoder_blocks); let mut hidden_states = x.clone(); for i in 0..self.config.num_encoder_blocks { hidden_states = self.patch_embeddings[i].forward(&hidden_states)?; for layer in &self.blocks[i] { hidden_states = layer.forward(&hidden_states)?; } let shape = hidden_states.shape().dims4()?; hidden_states = self.layer_norms[i].forward(&hidden_states.flatten_from(2)?.permute((0, 2, 1))?)?; hidden_states = hidden_states.permute((0, 2, 1))?.reshape(shape)?; all_hidden_states.push(hidden_states.clone()); } Ok(all_hidden_states) } } #[derive(Debug, Clone)] struct SegformerModel { encoder: SegformerEncoder, } impl SegformerModel { fn new(config: &Config, vb: VarBuilder) -> Result<Self> { let encoder = SegformerEncoder::new(config.clone(), vb.pp("encoder"))?; Ok(Self { encoder }) } } impl ModuleWithHiddenStates for SegformerModel { fn forward(&self, x: &Tensor) -> Result<Vec<Tensor>> { self.encoder.forward(x) } } #[derive(Debug, Clone)] struct SegformerMLP { proj: Linear, } impl SegformerMLP { fn new(config: &Config, input_dim: usize, vb: VarBuilder) -> Result<Self> { let proj = linear(input_dim, config.decoder_hidden_size, vb.pp("proj"))?; Ok(Self { proj }) } } impl Module for SegformerMLP { fn forward(&self, x: &Tensor) -> Result<Tensor> { self.proj.forward(x) } } #[derive(Debug, Clone)] struct SegformerDecodeHead { linear_c: Vec<SegformerMLP>, linear_fuse: candle_nn::Conv2d, batch_norm: candle_nn::BatchNorm, classifier: candle_nn::Conv2d, } impl SegformerDecodeHead { fn new(config: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> { let mut linear_c = Vec::with_capacity(config.num_encoder_blocks); for i in 0..config.num_encoder_blocks { let hidden_size = config.hidden_sizes[i]; linear_c.push(SegformerMLP::new( config, hidden_size, 
vb.pp(format!("linear_c.{i}")), )?); } let linear_fuse = conv2d_no_bias( config.decoder_hidden_size * config.num_encoder_blocks, config.decoder_hidden_size, 1, Conv2dConfig::default(), vb.pp("linear_fuse"), )?; let batch_norm = candle_nn::batch_norm( config.decoder_hidden_size, config.layer_norm_eps, vb.pp("batch_norm"), )?; let classifier = conv2d_no_bias( config.decoder_hidden_size, num_labels, 1, Conv2dConfig::default(), vb.pp("classifier"), )?; Ok(Self { linear_c, linear_fuse, batch_norm, classifier, }) } fn forward(&self, encoder_hidden_states: &[Tensor]) -> Result<Tensor> { if encoder_hidden_states.len() != self.linear_c.len() { candle::bail!( "The number of encoder hidden states {} is not equal to the number of linear layers {}", encoder_hidden_states.len(), self.linear_c.len() ) } // most fine layer let (_, _, upsample_height, upsample_width) = encoder_hidden_states[0].shape().dims4()?; let mut hidden_states = Vec::with_capacity(self.linear_c.len()); for (hidden_state, mlp) in encoder_hidden_states.iter().zip(&self.linear_c) { let (batch, _, height, width) = hidden_state.shape().dims4()?; let hidden_state = mlp.forward(&hidden_state.flatten_from(2)?.permute((0, 2, 1))?)?; let hidden_state = hidden_state.permute((0, 2, 1))?.reshape(( batch, hidden_state.dim(2)?, height, width, ))?; let hidden_state = hidden_state.upsample_nearest2d(upsample_height, upsample_width)?; hidden_states.push(hidden_state); } hidden_states.reverse(); let hidden_states = Tensor::cat(&hidden_states, 1)?; let hidden_states = self.linear_fuse.forward(&hidden_states)?; let hidden_states = self.batch_norm.forward_t(&hidden_states, false)?; let hidden_states = hidden_states.relu()?; self.classifier.forward(&hidden_states) } } trait ModuleWithHiddenStates { fn forward(&self, xs: &Tensor) -> Result<Vec<Tensor>>; } #[derive(Debug, Clone)] pub struct SemanticSegmentationModel { segformer: SegformerModel, decode_head: SegformerDecodeHead, } impl SemanticSegmentationModel { pub fn new(config: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> { let segformer = SegformerModel::new(config, vb.pp("segformer"))?; let decode_head = SegformerDecodeHead::new(config, num_labels, vb.pp("decode_head"))?; Ok(Self { segformer, decode_head, }) } } impl Module for SemanticSegmentationModel { fn forward(&self, x: &Tensor) -> Result<Tensor> { let hidden_states = self.segformer.forward(x)?; self.decode_head.forward(&hidden_states) } } #[derive(Debug, Clone)] pub struct ImageClassificationModel { segformer: SegformerModel, classifier: Linear, } impl ImageClassificationModel { pub fn new(config: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> { let segformer = SegformerModel::new(config, vb.pp("segformer"))?; let classifier = linear(config.decoder_hidden_size, num_labels, vb.pp("classifier"))?; Ok(Self { segformer, classifier, }) } } impl Module for ImageClassificationModel { fn forward(&self, x: &Tensor) -> Result<Tensor> { let all_hidden_states = self.segformer.forward(x)?; let hidden_states = all_hidden_states.last().context("no last")?; let hidden_states = hidden_states.flatten_from(2)?.permute((0, 2, 1))?; let mean = hidden_states.mean(1)?; self.classifier.forward(&mean) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_config_json_load() { let raw_json = r#"{ "architectures": [ "SegformerForImageClassification" ], "attention_probs_dropout_prob": 0.0, "classifier_dropout_prob": 0.1, "decoder_hidden_size": 256, "depths": [ 2, 2, 2, 2 ], "downsampling_rates": [ 1, 4, 8, 16 ], "drop_path_rate": 0.1, 
"hidden_act": "gelu", "hidden_dropout_prob": 0.0, "hidden_sizes": [ 32, 64, 160, 256 ], "image_size": 224, "initializer_range": 0.02, "layer_norm_eps": 1e-06, "mlp_ratios": [ 4, 4, 4, 4 ], "model_type": "segformer", "num_attention_heads": [ 1, 2, 5, 8 ], "num_channels": 3, "num_encoder_blocks": 4, "patch_sizes": [ 7, 3, 3, 3 ], "sr_ratios": [ 8, 4, 2, 1 ], "strides": [ 4, 2, 2, 2 ], "torch_dtype": "float32", "transformers_version": "4.12.0.dev0" }"#; let config: Config = serde_json::from_str(raw_json).unwrap(); assert_eq!(vec![4, 2, 2, 2], config.strides); assert_eq!(1e-6, config.layer_norm_eps); } }
candle/candle-transformers/src/models/segformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/segformer.rs", "repo_id": "candle", "token_count": 11539 }
66
//! Stable Diffusion
//!
//! Stable Diffusion is a latent text-to-image diffusion model capable of
//! generating photo-realistic images given any text input.
//!
//! - 💻 [Original Repository](https://github.com/CompVis/stable-diffusion)
//! - 🤗 [Hugging Face](https://huggingface.co/runwayml/stable-diffusion-v1-5)
//! - The default scheduler for the v1.5, v2.1 and XL 1.0 versions is the
//!   Denoising Diffusion Implicit Model (DDIM) scheduler. The original paper and
//!   some code can be found in the [associated repo](https://github.com/ermongroup/ddim).
//!   The default scheduler for the XL Turbo version is the Euler Ancestral scheduler.
//!
//! # Example
//!
//! <div align=center>
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg" alt="rusty robot holding a candle" width=320>
//! </div>
//!
//! _"A rusty robot holding a fire torch in its hand."_ Generated by Stable Diffusion XL using Rust and [candle](https://github.com/huggingface/candle).
//!
//! ```bash
//! # example running with cuda
//! # see the candle-examples/examples/stable-diffusion for all options
//! cargo run --example stable-diffusion --release --features=cuda,cudnn \
//!   -- --prompt "a cosmonaut on a horse (hd, realistic, high-def)"
//!
//! # with sd-turbo
//! cargo run --example stable-diffusion --release --features=cuda,cudnn \
//!   -- --prompt "a cosmonaut on a horse (hd, realistic, high-def)" \
//!   --sd-version turbo
//!
//! # with flash attention.
//! # feature flag: `--features flash-attn`
//! # cli flag: `--use-flash-attn`.
//! # flash-attention-v2 is only compatible with Ampere, Ada, \
//! # or Hopper GPUs (e.g., A100/H100, RTX 3090/4090).
//! cargo run --example stable-diffusion --release --features=cuda,cudnn \
//!   -- --prompt "a cosmonaut on a horse (hd, realistic, high-def)" \
//!   --use-flash-attn
//!
``` pub mod attention; pub mod clip; pub mod ddim; pub mod ddpm; pub mod embeddings; pub mod euler_ancestral_discrete; pub mod resnet; pub mod schedulers; pub mod unet_2d; pub mod unet_2d_blocks; pub mod uni_pc; pub mod utils; pub mod vae; use std::sync::Arc; use candle::{DType, Device, Result}; use candle_nn as nn; use self::schedulers::{Scheduler, SchedulerConfig}; #[derive(Clone, Debug)] pub struct StableDiffusionConfig { pub width: usize, pub height: usize, pub clip: clip::Config, pub clip2: Option<clip::Config>, autoencoder: vae::AutoEncoderKLConfig, unet: unet_2d::UNet2DConditionModelConfig, scheduler: Arc<dyn SchedulerConfig>, } impl StableDiffusionConfig { pub fn v1_5( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, Some(1), 8), bc(640, Some(1), 8), bc(1280, Some(1), 8), bc(1280, None, 8), ], center_input_sample: false, cross_attention_dim: 768, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: false, }; let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 512 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 512 }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { prediction_type: schedulers::PredictionType::Epsilon, ..Default::default() }); StableDiffusionConfig { width, height, clip: clip::Config::v1_5(), clip2: None, autoencoder, scheduler, unet, } } fn v2_1_( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, prediction_type: schedulers::PredictionType, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, Some(1), 5), bc(640, Some(1), 10), bc(1280, Some(1), 20), bc(1280, None, 20), ], center_input_sample: false, cross_attention_dim: 1024, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { prediction_type, ..Default::default() }); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 768 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 768 }; StableDiffusionConfig { 
width, height, clip: clip::Config::v2_1(), clip2: None, autoencoder, scheduler, unet, } } pub fn v2_1( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/scheduler/scheduler_config.json Self::v2_1_( sliced_attention_size, height, width, schedulers::PredictionType::VPrediction, ) } fn sdxl_( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, prediction_type: schedulers::PredictionType, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, None, 5), bc(640, Some(2), 10), bc(1280, Some(10), 20), ], center_input_sample: false, cross_attention_dim: 2048, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { prediction_type, ..Default::default() }); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 1024 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 1024 }; StableDiffusionConfig { width, height, clip: clip::Config::sdxl(), clip2: Some(clip::Config::sdxl2()), autoencoder, scheduler, unet, } } fn sdxl_turbo_( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, prediction_type: schedulers::PredictionType, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/sdxl-turbo/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, None, 5), bc(640, Some(2), 10), bc(1280, Some(10), 20), ], center_input_sample: false, cross_attention_dim: 2048, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/sdxl-turbo/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let scheduler = Arc::new( euler_ancestral_discrete::EulerAncestralDiscreteSchedulerConfig { prediction_type, timestep_spacing: schedulers::TimestepSpacing::Trailing, ..Default::default() }, ); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 512 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 512 }; Self { width, height, clip: clip::Config::sdxl(), clip2: Some(clip::Config::sdxl2()), autoencoder, scheduler, unet, } } pub fn 
sdxl( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { Self::sdxl_( sliced_attention_size, height, width, // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/scheduler/scheduler_config.json schedulers::PredictionType::Epsilon, ) } pub fn sdxl_turbo( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { Self::sdxl_turbo_( sliced_attention_size, height, width, // https://huggingface.co/stabilityai/sdxl-turbo/blob/main/scheduler/scheduler_config.json schedulers::PredictionType::Epsilon, ) } pub fn ssd1b( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, None, 5), bc(640, Some(2), 10), bc(1280, Some(10), 20), ], center_input_sample: false, cross_attention_dim: 2048, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { ..Default::default() }); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 1024 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 1024 }; Self { width, height, clip: clip::Config::ssd1b(), clip2: Some(clip::Config::ssd1b2()), autoencoder, scheduler, unet, } } pub fn build_vae<P: AsRef<std::path::Path>>( &self, vae_weights: P, device: &Device, dtype: DType, ) -> Result<vae::AutoEncoderKL> { let vs_ae = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[vae_weights], dtype, device)? }; // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKL::new(vs_ae, 3, 3, self.autoencoder.clone())?; Ok(autoencoder) } pub fn build_unet<P: AsRef<std::path::Path>>( &self, unet_weights: P, device: &Device, in_channels: usize, use_flash_attn: bool, dtype: DType, ) -> Result<unet_2d::UNet2DConditionModel> { let vs_unet = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[unet_weights], dtype, device)? }; let unet = unet_2d::UNet2DConditionModel::new( vs_unet, in_channels, 4, use_flash_attn, self.unet.clone(), )?; Ok(unet) } pub fn build_scheduler(&self, n_steps: usize) -> Result<Box<dyn Scheduler>> { self.scheduler.build(n_steps) } } pub fn build_clip_transformer<P: AsRef<std::path::Path>>( clip: &clip::Config, clip_weights: P, device: &Device, dtype: DType, ) -> Result<clip::ClipTextTransformer> { let vs = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[clip_weights], dtype, device)? }; let text_model = clip::ClipTextTransformer::new(vs, clip)?; Ok(text_model) }
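
/// A wiring sketch for the main components; the weight file names are
/// illustrative assumptions. See the stable-diffusion example for a full
/// pipeline including text encoding, the denoising loop and VAE decoding.
#[allow(dead_code)]
fn build_components_sketch(device: &Device) -> Result<()> {
    let cfg = StableDiffusionConfig::v1_5(None, None, None);
    let scheduler = cfg.build_scheduler(30)?;
    let vae = cfg.build_vae("vae.safetensors", device, DType::F32)?;
    let unet = cfg.build_unet("unet.safetensors", device, 4, false, DType::F32)?;
    let clip = build_clip_transformer(&cfg.clip, "clip.safetensors", device, DType::F32)?;
    let _ = (scheduler, vae, unet, clip);
    Ok(())
}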
candle/candle-transformers/src/models/stable_diffusion/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/mod.rs", "repo_id": "candle", "token_count": 8553 }
67
pub mod audio; pub mod model; pub mod voxtral_llama; pub use audio::extract_features; pub use model::{ VoxtralCache, VoxtralConfig, VoxtralEncoder, VoxtralEncoderConfig, VoxtralForConditionalGeneration, VoxtralGenerationConfig, VoxtralMultiModalProjector, }; pub use voxtral_llama::{VoxtralLlama, VoxtralLlamaCache, VoxtralLlamaConfig}; pub const N_FFT: usize = 400; pub const HOP_LENGTH: usize = 160; pub const N_MELS: usize = 128;
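
// Assuming the usual 16 kHz input for Whisper-style front ends, these give a
// 25 ms analysis window (400 / 16000 s) and a 10 ms hop (160 / 16000 s), i.e.
// 100 mel frames per second, each with 128 mel bins.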
candle/candle-transformers/src/models/voxtral/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/voxtral/mod.rs", "repo_id": "candle", "token_count": 170 }
68
//! Yi model implementation. //! //! This candle implementation uses a pre-trained Yi decoder-only large language model for inference. //! The model was trained by 01.AI and follows a standard transformer architecture similar to LLaMA. //! //! Original code: //! - 💻 [Yi Model](https://huggingface.co/01-ai/Yi-6B) //! - 💻 [Yi Modeling Code](https://huggingface.co/01-ai/Yi-6B/blob/main/modeling_yi.py) //! - 📝 [Technical Report](https://arxiv.org/abs/2403.04652) Yi: Open Foundation Models by 01.AI //! //! Key characteristics: //! - Multi-head attention with rotary positional embeddings //! - RMS normalization //! - SwiGLU activation in feed-forward layers //! - Grouped-query attention for efficient inference //! use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm}; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use std::sync::Arc; #[derive(Debug, Clone, PartialEq)] pub struct Config { pub(crate) vocab_size: usize, pub(crate) hidden_size: usize, pub(crate) intermediate_size: usize, pub(crate) num_hidden_layers: usize, pub(crate) num_attention_heads: usize, pub(crate) num_key_value_heads: usize, pub(crate) hidden_act: Activation, pub(crate) max_position_embeddings: usize, pub(crate) rms_norm_eps: f64, pub(crate) rope_theta: f64, } impl Config { pub fn config_6b() -> Self { Self { vocab_size: 64000, hidden_size: 4096, intermediate_size: 11008, num_hidden_layers: 32, num_attention_heads: 32, num_key_value_heads: 4, hidden_act: Activation::Silu, max_position_embeddings: 4096, rms_norm_eps: 1e-5, rope_theta: 5_000_000., } } pub fn config_34b() -> Self { Self { vocab_size: 64000, hidden_size: 7168, intermediate_size: 20480, num_hidden_layers: 60, num_attention_heads: 56, num_key_value_heads: 8, hidden_act: Activation::Silu, max_position_embeddings: 4096, rms_norm_eps: 1e-5, rope_theta: 5_000_000., } } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?; let k_embed = (k.broadcast_mul(&cos)? 
+ rotate_half(k)?.broadcast_mul(&sin))?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? 
}; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? .apply(&self.o_proj) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, ln1: RmsNorm, ln2: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let ln1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let ln2 = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, ln1, ln2, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.ln1.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.ln2)?.apply(&self.mlp)?; residual + xs } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } }
candle/candle-transformers/src/models/yi.rs/0
{ "file_path": "candle/candle-transformers/src/models/yi.rs", "repo_id": "candle", "token_count": 6426 }
69
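A short sketch of incremental decoding with the Yi model above: the first forward pass processes the whole prompt at offset 0, and subsequent passes feed one token at a time with the offset set to the number of tokens already in the KV cache. The weight file name and token ids below are placeholders; a real loop would sample the next token from the returned logits.

```rust
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;

fn sketch() -> candle::Result<()> {
    let device = Device::Cpu;
    let cfg = Config::config_6b();
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)?
    };
    let mut model = Model::new(&cfg, vb)?;

    // Prompt pass: offset 0. forward() narrows to the last position, so the
    // logits have shape (batch, 1, vocab_size).
    let prompt = Tensor::new(&[[1u32, 2, 3]], &device)?;
    let _logits = model.forward(&prompt, 0)?;

    // Decode pass: one token, offset by the 3 tokens already cached.
    let next = Tensor::new(&[[42u32]], &device)?;
    let _logits = model.forward(&next, 3)?;
    Ok(())
}
```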
export async function getEmbeddings(
  worker,
  weightsURL,
  tokenizerURL,
  configURL,
  modelID,
  sentences,
  updateStatus = null
) {
  return new Promise((resolve, reject) => {
    worker.postMessage({
      weightsURL,
      tokenizerURL,
      configURL,
      modelID,
      sentences,
    });
    function messageHandler(event) {
      if ("error" in event.data) {
        worker.removeEventListener("message", messageHandler);
        reject(new Error(event.data.error));
      }
      if (event.data.status === "complete") {
        worker.removeEventListener("message", messageHandler);
        resolve(event.data);
      }
      if (updateStatus) updateStatus(event.data);
    }
    worker.addEventListener("message", messageHandler);
  });
}

const MODELS = {
  intfloat_e5_small_v2: {
    base_url: "https://huggingface.co/intfloat/e5-small-v2/resolve/main/",
    search_prefix: "query: ",
    document_prefix: "passage: ",
  },
  intfloat_e5_base_v2: {
    base_url: "https://huggingface.co/intfloat/e5-base-v2/resolve/main/",
    search_prefix: "query: ",
    document_prefix: "passage: ",
  },
  intfloat_multilingual_e5_small: {
    base_url: "https://huggingface.co/intfloat/multilingual-e5-small/resolve/main/",
    search_prefix: "query: ",
    document_prefix: "passage: ",
  },
  sentence_transformers_all_MiniLM_L6_v2: {
    base_url:
      "https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/refs%2Fpr%2F21/",
    search_prefix: "",
    document_prefix: "",
  },
  sentence_transformers_all_MiniLM_L12_v2: {
    base_url:
      "https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2/resolve/refs%2Fpr%2F4/",
    search_prefix: "",
    document_prefix: "",
  },
};

export function getModelInfo(id) {
  return {
    modelURL: MODELS[id].base_url + "model.safetensors",
    configURL: MODELS[id].base_url + "config.json",
    tokenizerURL: MODELS[id].base_url + "tokenizer.json",
    search_prefix: MODELS[id].search_prefix,
    document_prefix: MODELS[id].document_prefix,
  };
}

export function cosineSimilarity(vec1, vec2) {
  const dot = vec1.reduce((acc, val, i) => acc + val * vec2[i], 0);
  const a = Math.sqrt(vec1.reduce((acc, val) => acc + val * val, 0));
  const b = Math.sqrt(vec2.reduce((acc, val) => acc + val * val, 0));
  return dot / (a * b);
}

export async function getWikiText(article) {
  // thanks to wikipedia for the API
  const URL = `https://en.wikipedia.org/w/api.php?action=query&prop=extracts&exlimit=1&titles=${article}&explaintext=1&exsectionformat=plain&format=json&origin=*`;
  return fetch(URL, {
    method: "GET",
    headers: {
      Accept: "application/json",
    },
  })
    .then((r) => r.json())
    .then((data) => {
      const pages = data.query.pages;
      const pageId = Object.keys(pages)[0];
      const extract = pages[pageId].extract;
      if (extract === undefined || extract === "") {
        throw new Error("No article found");
      }
      return extract;
    })
    .catch((error) => console.error("Error:", error));
}
candle/candle-wasm-examples/bert/utils.js/0
{ "file_path": "candle/candle-wasm-examples/bert/utils.js", "repo_id": "candle", "token_count": 1250 }
70
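A hypothetical call site for the helpers above: the worker script path is a placeholder, and the shape of the payload the promise resolves with (assumed here to carry an `embeddings` array) depends on what your worker posts back, so adjust the last two lines to your worker's message format.

```js
const worker = new Worker("./embeddingsWorker.js", { type: "module" });
const info = getModelInfo("intfloat_e5_small_v2");

const result = await getEmbeddings(
  worker,
  info.modelURL,
  info.tokenizerURL,
  info.configURL,
  "intfloat_e5_small_v2",
  [
    info.search_prefix + "what is rust?",
    info.document_prefix + "Rust is a systems programming language.",
  ],
  (data) => console.log(data.status)
);
// Assumed field name; the E5 models require the query:/passage: prefixes above.
console.log(cosineSimilarity(result.embeddings[0], result.embeddings[1]));
```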
cargo build --target wasm32-unknown-unknown --release
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m-quantized.wasm --out-dir build --target web
candle/candle-wasm-examples/t5/build-lib.sh/0
{ "file_path": "candle/candle-wasm-examples/t5/build-lib.sh", "repo_id": "candle", "token_count": 84 }
71
use yew_agent::PublicWorker;

fn main() {
    candle_wasm_example_whisper::Worker::register();
}
candle/candle-wasm-examples/whisper/src/bin/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/bin/worker.rs", "repo_id": "candle", "token_count": 38 }
72
# syntax=docker/dockerfile:1
ARG INCLUDE_DB=false

FROM node:20-slim AS base

ENV PLAYWRIGHT_SKIP_BROWSER_GC=1

# install dotenv-cli
RUN npm install -g dotenv-cli

# switch to a user that works for spaces
RUN userdel -r node
RUN useradd -m -u 1000 user
USER user

ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR /app

# add a .env.local if the user doesn't bind a volume to it
RUN touch /app/.env.local

RUN npm i --no-package-lock --no-save playwright@1.52.0

USER root
RUN mkdir -p /data/models
RUN chown -R 1000:1000 /data/models
RUN apt-get update
RUN apt-get install gnupg curl git cmake clang libgomp1 -y
RUN npx playwright install --with-deps chromium
RUN chown -R 1000:1000 /home/user/.npm
USER user

COPY --chown=1000 .env /app/.env
COPY --chown=1000 entrypoint.sh /app/entrypoint.sh
COPY --chown=1000 gcp-*.json /app/
COPY --chown=1000 package.json /app/package.json
COPY --chown=1000 package-lock.json /app/package-lock.json

RUN chmod +x /app/entrypoint.sh

FROM node:20 AS builder

WORKDIR /app

COPY --link --chown=1000 package-lock.json package.json ./

ARG APP_BASE=
ARG PUBLIC_APP_COLOR=blue
ARG SKIP_LLAMA_CPP_BUILD
ENV BODY_SIZE_LIMIT=15728640
ENV SKIP_LLAMA_CPP_BUILD=$SKIP_LLAMA_CPP_BUILD

RUN --mount=type=cache,target=/app/.npm \
    npm set cache /app/.npm && \
    npm ci

COPY --link --chown=1000 . .

RUN git config --global --add safe.directory /app && \
    npm run build

# mongo image
FROM mongo:7 AS mongo

# image to be used if INCLUDE_DB is false
FROM base AS local_db_false

# image to be used if INCLUDE_DB is true
FROM base AS local_db_true

# copy mongo from the other stage
COPY --from=mongo /usr/bin/mongo* /usr/bin/

ENV MONGODB_URL=mongodb://localhost:27017
USER root
RUN mkdir -p /data/db
RUN chown -R 1000:1000 /data/db
USER user

# final image
FROM local_db_${INCLUDE_DB} AS final

# build arg to determine if the database should be included
ARG INCLUDE_DB=false
ENV INCLUDE_DB=${INCLUDE_DB}

# svelte requires APP_BASE at build time so it must be passed as a build arg
ARG APP_BASE=
# tailwind requires the primary theme to be known at build time so it must be passed as a build arg
ARG PUBLIC_APP_COLOR=blue
ARG PUBLIC_COMMIT_SHA=
ENV PUBLIC_COMMIT_SHA=${PUBLIC_COMMIT_SHA}
ENV BODY_SIZE_LIMIT=15728640
ENV MODELS_STORAGE_PATH=/data/models

#import the build & dependencies
COPY --from=builder --chown=1000 /app/build /app/build
COPY --from=builder --chown=1000 /app/node_modules /app/node_modules

CMD ["/bin/bash", "-c", "/app/entrypoint.sh"]
chat-ui/Dockerfile/0
{ "file_path": "chat-ui/Dockerfile", "repo_id": "chat-ui", "token_count": 991 }
73
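The `INCLUDE_DB` build arg selects which `local_db_*` stage becomes the final image. A couple of hypothetical build invocations (the image tags are placeholders):

```bash
# Standalone image; expects an external MongoDB via MONGODB_URL at runtime:
docker build -t chat-ui .

# Image with MongoDB baked in (selects the local_db_true stage):
docker build -t chat-ui-db --build-arg INCLUDE_DB=true .
```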
{{- if $.Values.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels: {{ include "labels.standard" . | nindent 4 }}
  name: {{ include "name" . }}
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels: {{ include "labels.standard" . | nindent 6 }}
  endpoints:
    - port: metrics
      path: /metrics
      interval: 15s
{{- end }}
chat-ui/chart/templates/service-monitor.yaml/0
{ "file_path": "chat-ui/chart/templates/service-monitor.yaml", "repo_id": "chat-ui", "token_count": 144 }
74
# Ollama

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | No        |
| [Multimodal](../multimodal) | No        |

We also support the Ollama inference server. Spin up a model with

```bash
ollama run mistral
```

Then specify the endpoints like so:

```ini
MODELS=`[
  {
    "name": "Ollama Mistral",
    "chatPromptTemplate": "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s> {{/ifAssistant}}{{/each}}",
    "parameters": {
      "temperature": 0.1,
      "top_p": 0.95,
      "repetition_penalty": 1.2,
      "top_k": 50,
      "truncate": 3072,
      "max_new_tokens": 1024,
      "stop": ["</s>"]
    },
    "endpoints": [
      {
        "type": "ollama",
        "url" : "http://127.0.0.1:11434",
        "ollamaName" : "mistral"
      }
    ]
  }
]`
```
chat-ui/docs/source/configuration/models/providers/ollama.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/ollama.md", "repo_id": "chat-ui", "token_count": 468 }
75
export function clickOutside(element: HTMLElement, callbackFunction: () => void) {
	function onClick(event: MouseEvent) {
		if (!element.contains(event.target as Node)) {
			callbackFunction();
		}
	}

	document.body.addEventListener("click", onClick);

	return {
		update(newCallbackFunction: () => void) {
			callbackFunction = newCallbackFunction;
		},
		destroy() {
			document.body.removeEventListener("click", onClick);
		},
	};
}
chat-ui/src/lib/actions/clickOutside.ts/0
{ "file_path": "chat-ui/src/lib/actions/clickOutside.ts", "repo_id": "chat-ui", "token_count": 144 }
76
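A minimal usage sketch of the action above in a Svelte 5 component; the dropdown markup is illustrative only:

```svelte
<script lang="ts">
	import { clickOutside } from "$lib/actions/clickOutside";

	let open = $state(true);
</script>

{#if open}
	<!-- Clicking anywhere outside this div closes it -->
	<div use:clickOutside={() => (open = false)}>Menu content</div>
{/if}
```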
<script lang="ts"> import CarbonEarth from "~icons/carbon/earth"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import BIMeta from "~icons/bi/meta"; import CarbonCode from "~icons/carbon/code"; import type { Model } from "$lib/types/Model"; interface Props { model: Pick< Model, "name" | "datasetName" | "websiteUrl" | "modelUrl" | "datasetUrl" | "hasInferenceAPI" >; variant?: "light" | "dark"; } let { model, variant = "light" }: Props = $props(); </script> <div class="flex items-center gap-5 rounded-xl bg-gray-100 px-3 py-2 text-xs sm:text-sm {variant === 'dark' ? 'text-gray-600 dark:bg-gray-800 dark:text-gray-300' : 'text-gray-800 dark:bg-gray-100 dark:text-gray-600'}" > <a href={model.modelUrl || "https://huggingface.co/" + model.name} target="_blank" rel="noreferrer" class="flex items-center hover:underline" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" /> Model <div class="max-sm:hidden">&nbsp;page</div></a > {#if model.datasetName || model.datasetUrl} <a href={model.datasetUrl || "https://huggingface.co/datasets/" + model.datasetName} target="_blank" rel="noreferrer" class="flex items-center hover:underline" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" /> Dataset <div class="max-sm:hidden">&nbsp;page</div></a > {/if} {#if model.hasInferenceAPI} <a href={"https://huggingface.co/playground?modelId=" + model.name} target="_blank" rel="noreferrer" class="flex items-center hover:underline" ><CarbonCode class="mr-1.5 shrink-0 text-xs text-gray-400" /> API </a> {/if} {#if model.websiteUrl} <a href={model.websiteUrl} target="_blank" class="ml-auto flex items-center hover:underline" rel="noreferrer" > {#if model.name.startsWith("meta-llama/Meta-Llama")} <BIMeta class="mr-1.5 shrink-0 text-xs text-gray-400" /> Built with Llama {:else} <CarbonEarth class="mr-1.5 shrink-0 text-xs text-gray-400" /> Website {/if} </a> {/if} </div>
chat-ui/src/lib/components/ModelCardMetadata.svelte/0
{ "file_path": "chat-ui/src/lib/components/ModelCardMetadata.svelte", "repo_id": "chat-ui", "token_count": 901 }
77
<script lang="ts"> import ToolLogo from "./ToolLogo.svelte"; import { base } from "$app/paths"; import { browser } from "$app/environment"; import { handleResponse, useAPIClient } from "$lib/APIClient"; interface Props { toolId: string; } let { toolId }: Props = $props(); const client = useAPIClient(); </script> <div class="relative flex items-center justify-center space-x-2 rounded border border-gray-300 bg-gray-200 px-2 py-1" > {#if browser} {#await client.tools({ id: toolId }).get().then(handleResponse) then value} {#key value.color + value.icon} <ToolLogo color={value.color} icon={value.icon} size="sm" /> {/key} <div class="flex flex-col items-center justify-center py-1"> <a href={`${base}/tools/${value._id}`} target="_blank" class="line-clamp-1 truncate font-semibold text-blue-600 hover:underline" >{value.displayName}</a > {#if value.createdByName} <p class="text-center text-xs text-gray-500"> Created by <a class="underline" href="{base}/tools?user={value.createdByName}" target="_blank" >{value.createdByName}</a > </p> {:else} <p class="text-center text-xs text-gray-500">Official HuggingChat tool</p> {/if} </div> {/await} {/if} </div>
chat-ui/src/lib/components/ToolBadge.svelte/0
{ "file_path": "chat-ui/src/lib/components/ToolBadge.svelte", "repo_id": "chat-ui", "token_count": 547 }
78
<script lang="ts"> import MarkdownRenderer from "./MarkdownRenderer.svelte"; import CarbonCaretDown from "~icons/carbon/caret-down"; interface Props { summary: string; content: string; loading?: boolean; } let { summary, content, loading = false }: Props = $props(); let isOpen = $state(loading); $effect(() => { isOpen = loading; }); </script> <details bind:open={isOpen} class="group flex w-fit max-w-full flex-col rounded-xl border border-gray-200 bg-white shadow-sm dark:border-gray-800 dark:bg-gray-900" > <summary class=" grid min-w-72 cursor-pointer select-none grid-cols-[40px,1fr,24px] items-center gap-2.5 rounded-xl p-2 group-open:rounded-b-none hover:bg-gray-500/10" > <div class="relative grid aspect-square place-content-center overflow-hidden rounded-lg bg-gray-100 dark:bg-gray-800" > <div class="grid h-dvh place-items-center"> <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 32 32"> <path class="stroke-gray-600 dark:stroke-gray-400" style="stroke-width: 1.9; fill: none; stroke-linecap: round; stroke-linejoin: round;" d="M16 6v3.33M16 6c0-2.65 3.25-4.3 5.4-2.62 1.2.95 1.6 2.65.95 4.04a3.63 3.63 0 0 1 4.61.16 3.45 3.45 0 0 1 .46 4.37 5.32 5.32 0 0 1 1.87 4.75c-.22 1.66-1.39 3.6-3.07 4.14M16 6c0-2.65-3.25-4.3-5.4-2.62a3.37 3.37 0 0 0-.95 4.04 3.65 3.65 0 0 0-4.6.16 3.37 3.37 0 0 0-.49 4.27 5.57 5.57 0 0 0-1.85 4.85 5.3 5.3 0 0 0 3.07 4.15M16 9.33v17.34m0-17.34c0 2.18 1.82 4 4 4m6.22 7.5c.67 1.3.56 2.91-.27 4.11a4.05 4.05 0 0 1-4.62 1.5c0 1.53-1.05 2.9-2.66 2.9A2.7 2.7 0 0 1 16 26.66m10.22-5.83a4.05 4.05 0 0 0-3.55-2.17m-16.9 2.18a4.05 4.05 0 0 0 .28 4.1c1 1.44 2.92 2.09 4.59 1.5 0 1.52 1.12 2.88 2.7 2.88A2.7 2.7 0 0 0 16 26.67M5.78 20.85a4.04 4.04 0 0 1 3.55-2.18" /> {#if loading} <path class="animate-pulse stroke-purple-700" style="stroke-width: 2; fill: none; stroke-linecap: round; stroke-linejoin: round; stroke-dasharray: 50;" d="M16 6v3.33M16 6c0-2.65 3.25-4.3 5.4-2.62 1.2.95 1.6 2.65.95 4.04a3.63 3.63 0 0 1 4.61.16 3.45 3.45 0 0 1 .46 4.37 5.32 5.32 0 0 1 1.87 4.75c-.22 1.66-1.39 3.6-3.07 4.14M16 6c0-2.65-3.25-4.3-5.4-2.62a3.37 3.37 0 0 0-.95 4.04 3.65 3.65 0 0 0-4.6.16 3.37 3.37 0 0 0-.49 4.27 5.57 5.57 0 0 0-1.85 4.85 5.3 5.3 0 0 0 3.07 4.15M16 9.33v17.34m0-17.34c0 2.18 1.82 4 4 4m6.22 7.5c.67 1.3.56 2.91-.27 4.11a4.05 4.05 0 0 1-4.62 1.5c0 1.53-1.05 2.9-2.66 2.9A2.7 2.7 0 0 1 16 26.66m10.22-5.83a4.05 4.05 0 0 0-3.55-2.17m-16.9 2.18a4.05 4.05 0 0 0 .28 4.1c1 1.44 2.92 2.09 4.59 1.5 0 1.52 1.12 2.88 2.7 2.88A2.7 2.7 0 0 0 16 26.67M5.78 20.85a4.04 4.04 0 0 1 3.55-2.18" > <animate attributeName="stroke-dashoffset" values="0;500" dur="12s" repeatCount="indefinite" /> </path> {/if} </svg> </div> </div> <dl class="leading-4"> <dd class="text-sm">Reasoning</dd> <dt class="flex items-center gap-1 truncate whitespace-nowrap text-[.82rem] text-gray-400" class:animate-pulse={loading} > {summary.length > 33 ? summary.substring(0, 33) + "..." : summary.endsWith("...") ? summary : summary + "..."} </dt> </dl> <CarbonCaretDown class="size-6 text-gray-400 transition-transform group-open:rotate-180" /> </summary> <div class="space-y-4 border-t border-gray-200 px-5 pb-2 pt-2 text-sm text-gray-600 dark:border-gray-800 dark:text-gray-400" > {#key content} <MarkdownRenderer {content} /> {/key} </div> </details> <style> details summary::-webkit-details-marker { display: none; } </style>
chat-ui/src/lib/components/chat/OpenReasoningResults.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/OpenReasoningResults.svelte", "repo_id": "chat-ui", "token_count": 1873 }
79
import type { Migration } from ".";
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
import { ReviewStatus } from "$lib/types/Review";

const updateFeaturedToReview: Migration = {
	_id: new ObjectId("000000000000000000000008"),
	name: "Update featured to review",
	up: async () => {
		const { assistants, tools } = collections;

		// Update assistants
		await assistants.updateMany({ featured: true }, { $set: { review: ReviewStatus.APPROVED } });
		await assistants.updateMany(
			{ featured: { $ne: true } },
			{ $set: { review: ReviewStatus.PRIVATE } }
		);
		await assistants.updateMany({}, { $unset: { featured: "" } });

		// Update tools
		await tools.updateMany({ featured: true }, { $set: { review: ReviewStatus.APPROVED } });
		await tools.updateMany({ featured: { $ne: true } }, { $set: { review: ReviewStatus.PRIVATE } });
		await tools.updateMany({}, { $unset: { featured: "" } });

		return true;
	},
	runEveryTime: false,
};

export default updateFeaturedToReview;
chat-ui/src/lib/migrations/routines/08-update-featured-to-review.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/08-update-featured-to-review.ts", "repo_id": "chat-ui", "token_count": 326 }
80
import { env as publicEnv } from "$env/dynamic/public"; import { env as serverEnv } from "$env/dynamic/private"; import { building } from "$app/environment"; import type { Collection } from "mongodb"; import type { ConfigKey as ConfigKeyType } from "$lib/types/ConfigKey"; import type { Semaphore } from "$lib/types/Semaphore"; import { Semaphores } from "$lib/types/Semaphore"; export type PublicConfigKey = keyof typeof publicEnv; const keysFromEnv = { ...publicEnv, ...serverEnv }; export type ConfigKey = keyof typeof keysFromEnv; class ConfigManager { private keysFromDB: Partial<Record<ConfigKey, string>> = {}; private isInitialized = false; private configCollection: Collection<ConfigKeyType> | undefined; private semaphoreCollection: Collection<Semaphore> | undefined; private lastConfigUpdate: Date | undefined; async init() { if (this.isInitialized) return; if (import.meta.env.MODE === "test") { this.isInitialized = true; return; } const { getCollectionsEarly } = await import("./database"); const collections = await getCollectionsEarly(); this.configCollection = collections.config; this.semaphoreCollection = collections.semaphores; await this.checkForUpdates().then(() => { this.isInitialized = true; }); } get ConfigManagerEnabled() { return serverEnv.ENABLE_CONFIG_MANAGER === "true" && import.meta.env.MODE !== "test"; } get isHuggingChat() { return this.get("PUBLIC_APP_ASSETS") === "huggingchat"; } async checkForUpdates() { if (await this.isConfigStale()) { await this.updateConfig(); } } async isConfigStale(): Promise<boolean> { if (!this.lastConfigUpdate || !this.isInitialized) { return true; } const count = await this.semaphoreCollection?.countDocuments({ key: Semaphores.CONFIG_UPDATE, updatedAt: { $gt: this.lastConfigUpdate }, }); return count !== undefined && count > 0; } async updateConfig() { const configs = (await this.configCollection?.find({}).toArray()) ?? 
[]; this.keysFromDB = configs.reduce( (acc, curr) => { acc[curr.key as ConfigKey] = curr.value; return acc; }, {} as Record<ConfigKey, string> ); this.lastConfigUpdate = new Date(); } get(key: ConfigKey): string { if (!this.ConfigManagerEnabled) { return keysFromEnv[key] || ""; } return this.keysFromDB[key] || keysFromEnv[key] || ""; } async updateSemaphore() { await this.semaphoreCollection?.updateOne( { key: Semaphores.CONFIG_UPDATE }, { $set: { updatedAt: new Date(), }, $setOnInsert: { createdAt: new Date(), }, }, { upsert: true } ); } async set(key: ConfigKey, value: string) { if (!this.ConfigManagerEnabled) throw new Error("Config manager is disabled"); await this.configCollection?.updateOne({ key }, { $set: { value } }, { upsert: true }); this.keysFromDB[key] = value; await this.updateSemaphore(); } async delete(key: ConfigKey) { if (!this.ConfigManagerEnabled) throw new Error("Config manager is disabled"); await this.configCollection?.deleteOne({ key }); delete this.keysFromDB[key]; await this.updateSemaphore(); } async clear() { if (!this.ConfigManagerEnabled) throw new Error("Config manager is disabled"); await this.configCollection?.deleteMany({}); this.keysFromDB = {}; await this.updateSemaphore(); } getPublicConfig() { let config = { ...Object.fromEntries( Object.entries(keysFromEnv).filter(([key]) => key.startsWith("PUBLIC_")) ), } as Record<PublicConfigKey, string>; if (this.ConfigManagerEnabled) { config = { ...config, ...Object.fromEntries( Object.entries(this.keysFromDB).filter(([key]) => key.startsWith("PUBLIC_")) ), }; } const publicEnvKeys = Object.keys(publicEnv); return Object.fromEntries( Object.entries(config).filter(([key]) => publicEnvKeys.includes(key)) ) as Record<PublicConfigKey, string>; } } // Create the instance and initialize it. const configManager = new ConfigManager(); export const ready = (async () => { if (!building) { await configManager.init(); } })(); type ConfigProxy = ConfigManager & { [K in ConfigKey]: string }; export const config: ConfigProxy = new Proxy(configManager, { get(target, prop, receiver) { if (prop in target) { return Reflect.get(target, prop, receiver); } if (typeof prop === "string") { return target.get(prop as ConfigKey); } return undefined; }, set(target, prop, value, receiver) { if (prop in target) { return Reflect.set(target, prop, value, receiver); } if (typeof prop === "string") { target.set(prop as ConfigKey, value); return true; } return false; }, }) as ConfigProxy;
chat-ui/src/lib/server/config.ts/0
{ "file_path": "chat-ui/src/lib/server/config.ts", "repo_id": "chat-ui", "token_count": 1685 }
81
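A short usage sketch of the config proxy above. Reads resolve through the DB layer first, then process env; writes only work when `ENABLE_CONFIG_MANAGER=true`, and the semaphore bump is what lets other replicas notice the change:

```ts
import { config, ready } from "$lib/server/config";

await ready; // ensure DB-backed overrides were loaded before the first read

// Proxy read: DB value first, then process env, then "".
const models = config.MODELS;

// Persist an override and bump the shared semaphore so other replicas
// refresh on their next checkForUpdates() call.
await config.set("PUBLIC_APP_NAME", "My Chat");
```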
import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import type { TextGenerationStreamOutput, TextGenerationStreamToken } from "@huggingface/inference"; import { endpointTgi, endpointTgiParametersSchema } from "./tgi/endpointTgi"; import { z } from "zod"; import endpointAws, { endpointAwsParametersSchema } from "./aws/endpointAws"; import { endpointOAIParametersSchema, endpointOai } from "./openai/endpointOai"; import endpointLlamacpp, { endpointLlamacppParametersSchema } from "./llamacpp/endpointLlamacpp"; import endpointOllama, { endpointOllamaParametersSchema } from "./ollama/endpointOllama"; import endpointVertex, { endpointVertexParametersSchema } from "./google/endpointVertex"; import endpointGenAI, { endpointGenAIParametersSchema } from "./google/endpointGenAI"; import { endpointBedrock, endpointBedrockParametersSchema } from "./aws/endpointBedrock"; import { endpointAnthropic, endpointAnthropicParametersSchema, } from "./anthropic/endpointAnthropic"; import { endpointAnthropicVertex, endpointAnthropicVertexParametersSchema, } from "./anthropic/endpointAnthropicVertex"; import type { Model } from "$lib/types/Model"; import endpointCloudflare, { endpointCloudflareParametersSchema, } from "./cloudflare/endpointCloudflare"; import { endpointCohere, endpointCohereParametersSchema } from "./cohere/endpointCohere"; import endpointLangserve, { endpointLangserveParametersSchema, } from "./langserve/endpointLangserve"; import type { Tool, ToolCall, ToolResult } from "$lib/types/Tool"; import type { ObjectId } from "mongodb"; import { endpointLocal, endpointLocalParametersSchema } from "./local/endpointLocal"; import { endpointInferenceClient, endpointInferenceClientParametersSchema, } from "./inference-client/endpointInferenceClient"; export type EndpointMessage = Omit<Message, "id">; // parameters passed when generating text export interface EndpointParameters { messages: EndpointMessage[]; preprompt?: Conversation["preprompt"]; continueMessage?: boolean; // used to signal that the last message will be extended generateSettings?: Partial<Model["parameters"]>; tools?: Tool[]; toolResults?: ToolResult[]; isMultimodal?: boolean; conversationId?: ObjectId; } interface CommonEndpoint { weight: number; } export type TextGenerationStreamOutputWithToolsAndWebSources = TextGenerationStreamOutput & { token: TextGenerationStreamToken & { toolCalls?: ToolCall[] }; webSources?: { uri: string; title: string }[]; }; // type signature for the endpoint export type Endpoint = ( params: EndpointParameters ) => Promise<AsyncGenerator<TextGenerationStreamOutputWithToolsAndWebSources, void, void>>; // generator function that takes in parameters for defining the endpoint and return the endpoint export type EndpointGenerator<T extends CommonEndpoint> = (parameters: T) => Endpoint; // list of all endpoint generators export const endpoints = { tgi: endpointTgi, anthropic: endpointAnthropic, anthropicvertex: endpointAnthropicVertex, bedrock: endpointBedrock, aws: endpointAws, openai: endpointOai, llamacpp: endpointLlamacpp, ollama: endpointOllama, vertex: endpointVertex, genai: endpointGenAI, cloudflare: endpointCloudflare, cohere: endpointCohere, langserve: endpointLangserve, local: endpointLocal, inferenceClient: endpointInferenceClient, }; export const endpointSchema = z.discriminatedUnion("type", [ endpointAnthropicParametersSchema, endpointAnthropicVertexParametersSchema, endpointAwsParametersSchema, endpointBedrockParametersSchema, 
endpointOAIParametersSchema, endpointTgiParametersSchema, endpointLlamacppParametersSchema, endpointOllamaParametersSchema, endpointVertexParametersSchema, endpointGenAIParametersSchema, endpointCloudflareParametersSchema, endpointCohereParametersSchema, endpointLangserveParametersSchema, endpointLocalParametersSchema, endpointInferenceClientParametersSchema, ]); export default endpoints;
chat-ui/src/lib/server/endpoints/endpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/endpoints.ts", "repo_id": "chat-ui", "token_count": 1200 }
82
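Every endpoint generator registered above produces the same streaming call shape. A sketch of consuming one, assuming a `model` with the `getEndpoint()` helper defined in models.ts; the message literal is simplified, since real `EndpointMessage` objects carry more fields:

```ts
const endpoint: Endpoint = await model.getEndpoint();
const stream = await endpoint({
	messages: [{ from: "user", content: "Hello!" } as EndpointMessage],
	preprompt: "You are a helpful assistant.",
});
for await (const output of stream) {
	// Each output is a TextGenerationStreamOutput, possibly with tool calls
	// or web sources attached.
	if (!output.token.special) process.stdout.write(output.token.text);
}
```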
import type { Conversation } from "$lib/types/Conversation";
import type { MessageFile } from "$lib/types/Message";
import { sha256 } from "$lib/utils/sha256";
import { fileTypeFromBuffer } from "file-type";
import { collections } from "$lib/server/database";

export async function uploadFile(file: File, conv: Conversation): Promise<MessageFile> {
	const sha = await sha256(await file.text());
	const buffer = await file.arrayBuffer();

	// Attempt to detect the mime type of the file, fallback to the uploaded mime
	const mime = await fileTypeFromBuffer(buffer).then((fileType) => fileType?.mime ?? file.type);

	const upload = collections.bucket.openUploadStream(`${conv._id}-${sha}`, {
		metadata: { conversation: conv._id.toString(), mime },
	});

	upload.write((await file.arrayBuffer()) as unknown as Buffer);
	upload.end();

	// only return the filename when upload throws a finish event or a 20s time out occurs
	return new Promise((resolve, reject) => {
		upload.once("finish", () =>
			resolve({ type: "hash", value: sha, mime: file.type, name: file.name })
		);
		upload.once("error", reject);
		setTimeout(() => reject(new Error("Upload timed out")), 20_000);
	});
}
chat-ui/src/lib/server/files/uploadFile.ts/0
{ "file_path": "chat-ui/src/lib/server/files/uploadFile.ts", "repo_id": "chat-ui", "token_count": 364 }
83
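A hypothetical call site for the helper above, inside a request handler where `conv` is a `Conversation` document fetched from MongoDB and `bytes` is the uploaded payload:

```ts
const file = new File([bytes], "report.pdf", { type: "application/pdf" });
const messageFile = await uploadFile(file, conv);
// => { type: "hash", value: "<sha256 of the file text>", mime: "application/pdf", name: "report.pdf" }
```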
import { config } from "$lib/server/config"; import type { ChatTemplateInput } from "$lib/types/Template"; import { compileTemplate } from "$lib/utils/template"; import { z } from "zod"; import endpoints, { endpointSchema, type Endpoint } from "./endpoints/endpoints"; import { endpointTgi } from "./endpoints/tgi/endpointTgi"; import { sum } from "$lib/utils/sum"; import { embeddingModels, validateEmbeddingModelByName } from "./embeddingModels"; import type { PreTrainedTokenizer } from "@huggingface/transformers"; import JSON5 from "json5"; import { getTokenizer } from "$lib/utils/getTokenizer"; import { logger } from "$lib/server/logger"; import { type ToolInput } from "$lib/types/Tool"; import { fetchJSON } from "$lib/utils/fetchJSON"; import { join, dirname } from "path"; import { fileURLToPath } from "url"; import { findRepoRoot } from "./findRepoRoot"; import { Template } from "@huggingface/jinja"; import { readdirSync } from "fs"; export const MODELS_FOLDER = config.MODELS_STORAGE_PATH || join(findRepoRoot(dirname(fileURLToPath(import.meta.url))), "models"); type Optional<T, K extends keyof T> = Pick<Partial<T>, K> & Omit<T, K>; const reasoningSchema = z.union([ z.object({ type: z.literal("regex"), // everything is reasoning, extract the answer from the regex regex: z.string(), }), z.object({ type: z.literal("tokens"), // use beginning and end tokens that define the reasoning portion of the answer beginToken: z.string(), // empty string means the model starts in reasoning mode endToken: z.string(), }), z.object({ type: z.literal("summarize"), // everything is reasoning, summarize the answer }), ]); const modelConfig = z.object({ /** Used as an identifier in DB */ id: z.string().optional(), /** Used to link to the model page, and for inference */ name: z.string().default(""), displayName: z.string().min(1).optional(), description: z.string().min(1).optional(), logoUrl: z.string().url().optional(), websiteUrl: z.string().url().optional(), modelUrl: z.string().url().optional(), tokenizer: z .union([ z.string(), z.object({ tokenizerUrl: z.string().url(), tokenizerConfigUrl: z.string().url(), }), ]) .optional(), datasetName: z.string().min(1).optional(), datasetUrl: z.string().url().optional(), preprompt: z.string().default(""), prepromptUrl: z.string().url().optional(), chatPromptTemplate: z.string().optional(), promptExamples: z .array( z.object({ title: z.string().min(1), prompt: z.string().min(1), }) ) .optional(), endpoints: z.array(endpointSchema).optional(), parameters: z .object({ temperature: z.number().min(0).max(2).optional(), truncate: z.number().int().positive().optional(), max_new_tokens: z.number().int().positive().optional(), stop: z.array(z.string()).optional(), top_p: z.number().positive().optional(), top_k: z.number().positive().optional(), repetition_penalty: z.number().min(-2).max(2).optional(), presence_penalty: z.number().min(-2).max(2).optional(), }) .passthrough() .optional(), multimodal: z.boolean().default(false), multimodalAcceptedMimetypes: z.array(z.string()).optional(), tools: z.boolean().default(false), unlisted: z.boolean().default(false), embeddingModel: validateEmbeddingModelByName(embeddingModels).optional(), /** Used to enable/disable system prompt usage */ systemRoleSupported: z.boolean().default(true), reasoning: reasoningSchema.optional(), }); const ggufModelsConfig = await Promise.all( readdirSync(MODELS_FOLDER) .filter((f) => f.endsWith(".gguf")) .map(async (f) => { return { name: f.replace(".gguf", ""), endpoints: [ { type: "local" as const, 
modelPath: f, }, ], }; }) ); const turnStringIntoLocalModel = z.preprocess((obj: unknown) => { if (typeof obj !== "string") return obj; const name = obj.startsWith("hf:") ? obj.split(":")[1] : obj; const displayName = obj.startsWith("hf:") ? obj.split(":")[1].split("/").slice(0, 2).join("/") : obj.endsWith(".gguf") ? obj.replace(".gguf", "") : obj; const modelPath = obj.includes("/") && !obj.startsWith("hf:") ? `hf:${obj}` : obj; return { name, displayName, endpoints: [ { type: "local", modelPath, }, ], } satisfies z.input<typeof modelConfig>; }, modelConfig); let modelsRaw = z.array(turnStringIntoLocalModel).parse(JSON5.parse(config.MODELS ?? "[]")); if (config.LOAD_GGUF_MODELS === "true" || modelsRaw.length === 0) { const parsedGgufModels = z.array(modelConfig).parse(ggufModelsConfig); modelsRaw = [...modelsRaw, ...parsedGgufModels]; } async function getChatPromptRender( m: z.infer<typeof modelConfig> ): Promise<ReturnType<typeof compileTemplate<ChatTemplateInput>>> { if (m.endpoints?.some((e) => e.type === "local")) { const endpoint = m.endpoints?.find((e) => e.type === "local"); const path = endpoint?.modelPath ?? `hf:${m.id ?? m.name}`; const { resolveModelFile, readGgufFileInfo } = await import("node-llama-cpp"); const modelPath = await resolveModelFile(path, MODELS_FOLDER); const info = await readGgufFileInfo(modelPath, { readTensorInfo: false, }); if (info.metadata.tokenizer.chat_template) { // compile with jinja const jinjaTemplate = new Template(info.metadata.tokenizer.chat_template); return (inputs: ChatTemplateInput) => { return jinjaTemplate.render({ ...m, ...inputs }); }; } } if (m.chatPromptTemplate) { return compileTemplate<ChatTemplateInput>(m.chatPromptTemplate, m); } let tokenizer: PreTrainedTokenizer; try { tokenizer = await getTokenizer(m.tokenizer ?? m.id ?? m.name); } catch (e) { // if fetching the tokenizer fails but it wasnt manually set, use the default template if (!m.tokenizer) { logger.warn( `No tokenizer found for model ${m.name}, using default template. Consider setting tokenizer manually or making sure the model is available on the hub.`, m ); return compileTemplate<ChatTemplateInput>( "{{#if @root.preprompt}}<|im_start|>system\n{{@root.preprompt}}<|im_end|>\n{{/if}}{{#each messages}}{{#ifUser}}<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n{{/ifUser}}{{#ifAssistant}}{{content}}<|im_end|>\n{{/ifAssistant}}{{/each}}", m ); } logger.error( e, `Failed to load tokenizer ${ m.tokenizer ?? m.id ?? m.name } make sure the model is available on the hub and you have access to any gated models.` ); process.exit(); } const renderTemplate = ({ messages, preprompt, tools, continueMessage }: ChatTemplateInput) => { let formattedMessages: { role: string; content: string; tool_calls?: { id: string; tool_call_id: string; output: string }[]; }[] = messages.map((message) => ({ content: message.content, role: message.from, })); if (!m.systemRoleSupported) { const firstSystemMessage = formattedMessages.find((msg) => msg.role === "system"); formattedMessages = formattedMessages.filter((msg) => msg.role !== "system"); if ( firstSystemMessage && formattedMessages.length > 0 && formattedMessages[0].role === "user" ) { formattedMessages[0].content = firstSystemMessage.content + "\n" + formattedMessages[0].content; } } if (preprompt && formattedMessages[0].role !== "system") { formattedMessages = [ { role: m.systemRoleSupported ? 
"system" : "user", content: preprompt, }, ...formattedMessages, ]; } const mappedTools = tools?.map((tool) => { const inputs: Record< string, { type: ToolInput["type"]; description: string; required: boolean; } > = {}; for (const value of tool.inputs) { if (value.paramType !== "fixed") { inputs[value.name] = { type: value.type, description: value.description ?? "", required: value.paramType === "required", }; } } return { name: tool.name, description: tool.description, parameter_definitions: inputs, }; }) ?? []; const output = tokenizer.apply_chat_template(formattedMessages, { tokenize: false, add_generation_prompt: !continueMessage, tools: mappedTools.length ? mappedTools : undefined, }); if (typeof output !== "string") { throw new Error("Failed to apply chat template, the output is not a string"); } return output; }; return renderTemplate; } const processModel = async (m: z.infer<typeof modelConfig>) => ({ ...m, chatPromptRender: await getChatPromptRender(m), id: m.id || m.name, displayName: m.displayName || m.name, preprompt: m.prepromptUrl ? await fetch(m.prepromptUrl).then((r) => r.text()) : m.preprompt, parameters: { ...m.parameters, stop_sequences: m.parameters?.stop }, }); const addEndpoint = (m: Awaited<ReturnType<typeof processModel>>) => ({ ...m, getEndpoint: async (): Promise<Endpoint> => { if (!m.endpoints) { return endpointTgi({ type: "tgi", url: `${config.HF_API_ROOT}/${m.name}`, accessToken: config.HF_TOKEN ?? config.HF_ACCESS_TOKEN, weight: 1, model: m, }); } const totalWeight = sum(m.endpoints.map((e) => e.weight)); let random = Math.random() * totalWeight; for (const endpoint of m.endpoints) { if (random < endpoint.weight) { const args = { ...endpoint, model: m }; switch (args.type) { case "tgi": return endpoints.tgi(args); case "local": return endpoints.local(args); case "inference-client": return endpoints.inferenceClient(args); case "anthropic": return endpoints.anthropic(args); case "anthropic-vertex": return endpoints.anthropicvertex(args); case "bedrock": return endpoints.bedrock(args); case "aws": return await endpoints.aws(args); case "openai": return await endpoints.openai(args); case "llamacpp": return endpoints.llamacpp(args); case "ollama": return endpoints.ollama(args); case "vertex": return await endpoints.vertex(args); case "genai": return await endpoints.genai(args); case "cloudflare": return await endpoints.cloudflare(args); case "cohere": return await endpoints.cohere(args); case "langserve": return await endpoints.langserve(args); default: // for legacy reason return endpoints.tgi(args); } } random -= endpoint.weight; } throw new Error(`Failed to select endpoint`); }, }); const inferenceApiIds = config.isHuggingChat ? await fetchJSON<{ id: string }[]>( "https://huggingface.co/api/models?pipeline_tag=text-generation&inference=warm&filter=conversational" ) .then((arr) => arr?.map((r) => r.id) || []) .catch(() => { logger.error("Failed to fetch inference API ids"); return []; }) : []; export const models = await Promise.all( modelsRaw.map((e) => processModel(e) .then(addEndpoint) .then(async (m) => ({ ...m, hasInferenceAPI: inferenceApiIds.includes(m.id ?? m.name), })) ) ); export type ProcessedModel = (typeof models)[number]; // super ugly but not sure how to make typescript happier export const validModelIdSchema = z.enum(models.map((m) => m.id) as [string, ...string[]]); export const defaultModel = models[0]; // Models that have been deprecated export const oldModels = config.OLD_MODELS ? 
z .array( z.object({ id: z.string().optional(), name: z.string().min(1), displayName: z.string().min(1).optional(), transferTo: validModelIdSchema.optional(), }) ) .parse(JSON5.parse(config.OLD_MODELS)) .map((m) => ({ ...m, id: m.id || m.name, displayName: m.displayName || m.name })) : []; export const validateModel = (_models: BackendModel[]) => { // Zod enum function requires 2 parameters return z.enum([_models[0].id, ..._models.slice(1).map((m) => m.id)]); }; // if `TASK_MODEL` is string & name of a model in `MODELS`, then we use `MODELS[TASK_MODEL]`, else we try to parse `TASK_MODEL` as a model config itself export const taskModel = addEndpoint( config.TASK_MODEL ? ((models.find((m) => m.name === config.TASK_MODEL) || (await processModel(modelConfig.parse(JSON5.parse(config.TASK_MODEL))))) ?? defaultModel) : defaultModel ); export type BackendModel = Optional< typeof defaultModel, "preprompt" | "parameters" | "multimodal" | "unlisted" | "tools" | "hasInferenceAPI" >;
chat-ui/src/lib/server/models.ts/0
{ "file_path": "chat-ui/src/lib/server/models.ts", "repo_id": "chat-ui", "token_count": 4827 }
84
import type { ConfigTool } from "$lib/types/Tool";
import { ObjectId } from "mongodb";
import { runWebSearch } from "../../websearch/runWebSearch";

const websearch: ConfigTool = {
	_id: new ObjectId("00000000000000000000000A"),
	type: "config",
	description: "Search the web for up-to-date answers to the user's query.",
	color: "blue",
	icon: "wikis",
	displayName: "Web Search",
	name: "websearch",
	endpoint: null,
	inputs: [
		{
			name: "query",
			type: "str",
			description:
				"A search query which will be used to fetch the most relevant snippets regarding the user's query.",
			paramType: "required",
		},
	],
	outputComponent: null,
	outputComponentIdx: null,
	showOutput: false,
	async *call({ query }, { conv, assistant, messages }) {
		const webSearchToolResults = yield* runWebSearch(conv, messages, assistant?.rag, String(query));

		const webSearchContext = webSearchToolResults?.contextSources
			.map(({ context }, idx) => `Source [${idx + 1}]\n${context.trim()}`)
			.join("\n\n----------\n\n");

		return {
			outputs: [
				{
					websearch:
						webSearchContext +
						"\n\nWhen answering the question, you must reference the sources you used inline by wrapping the index in brackets like this: [1]. If multiple sources are used, you must reference each one of them without commas like this: [1][2][3]. This information was fetched live from the web, today on " +
						new Date().toLocaleDateString("en-US", {
							year: "numeric",
							month: "long",
							day: "numeric",
						}),
				},
			],
			display: false,
		};
	},
};

export default websearch;
chat-ui/src/lib/server/tools/web/search.ts/0
{ "file_path": "chat-ui/src/lib/server/tools/web/search.ts", "repo_id": "chat-ui", "token_count": 569 }
85
export interface SerializedHTMLElement {
	tagName: string;
	attributes: Record<string, string>;
	content: (SerializedHTMLElement | string)[];
}
chat-ui/src/lib/server/websearch/scrape/types.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/scrape/types.ts", "repo_id": "chat-ui", "token_count": 46 }
86
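For illustration, a value matching the interface above; `content` interleaves text nodes and nested elements:

```ts
const link: SerializedHTMLElement = {
	tagName: "a",
	attributes: { href: "https://example.com" },
	content: ["Example ", { tagName: "b", attributes: {}, content: ["site"] }],
};
```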
import { writable } from "svelte/store";

export const loginModalOpen = writable(false);
chat-ui/src/lib/stores/loginModal.ts/0
{ "file_path": "chat-ui/src/lib/stores/loginModal.ts", "repo_id": "chat-ui", "token_count": 28 }
87
import type { ObjectId } from "mongodb";

export interface MigrationResult {
	_id: ObjectId;
	name: string;
	status: "success" | "failure" | "ongoing";
}
chat-ui/src/lib/types/MigrationResult.ts/0
{ "file_path": "chat-ui/src/lib/types/MigrationResult.ts", "repo_id": "chat-ui", "token_count": 53 }
88
/**
 * Chunk array into arrays of length at most `chunkSize`
 *
 * @param chunkSize must be greater than or equal to 1
 */
export function chunk<T extends unknown[] | string>(arr: T, chunkSize: number): T[] {
	if (isNaN(chunkSize) || chunkSize < 1) {
		throw new RangeError("Invalid chunk size: " + chunkSize);
	}

	if (!arr.length) {
		return [];
	}

	// Small optimization to not chunk buffers unless needed
	if (arr.length <= chunkSize) {
		return [arr];
	}

	return range(Math.ceil(arr.length / chunkSize)).map((i) => {
		return arr.slice(i * chunkSize, (i + 1) * chunkSize);
	}) as T[];
}

function range(n: number, b?: number): number[] {
	return b
		? Array(b - n)
				.fill(0)
				.map((_, i) => n + i)
		: Array(n)
				.fill(0)
				.map((_, i) => i);
}
chat-ui/src/lib/utils/chunk.ts/0
{ "file_path": "chat-ui/src/lib/utils/chunk.ts", "repo_id": "chat-ui", "token_count": 295 }
89
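The helper works on both arrays and strings, which its examples make clearest:

```ts
chunk([1, 2, 3, 4, 5], 2); // => [[1, 2], [3, 4], [5]]
chunk("abcdef", 4); // => ["abcd", "ef"]
chunk([], 3); // => []
chunk([1, 2], 0); // => throws RangeError("Invalid chunk size: 0")
```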
import type { MessageFile } from "$lib/types/Message"; import { type MessageUpdate, type MessageStreamUpdate, type MessageToolCallUpdate, MessageToolUpdateType, MessageUpdateType, type MessageToolUpdate, type MessageWebSearchUpdate, type MessageWebSearchGeneralUpdate, type MessageWebSearchSourcesUpdate, type MessageWebSearchErrorUpdate, MessageWebSearchUpdateType, type MessageToolErrorUpdate, type MessageToolResultUpdate, } from "$lib/types/MessageUpdate"; import { page } from "$app/state"; export const isMessageWebSearchUpdate = (update: MessageUpdate): update is MessageWebSearchUpdate => update.type === MessageUpdateType.WebSearch; export const isMessageWebSearchGeneralUpdate = ( update: MessageUpdate ): update is MessageWebSearchGeneralUpdate => isMessageWebSearchUpdate(update) && update.subtype === MessageWebSearchUpdateType.Update; export const isMessageWebSearchSourcesUpdate = ( update: MessageUpdate ): update is MessageWebSearchSourcesUpdate => isMessageWebSearchUpdate(update) && update.subtype === MessageWebSearchUpdateType.Sources; export const isMessageWebSearchErrorUpdate = ( update: MessageUpdate ): update is MessageWebSearchErrorUpdate => isMessageWebSearchUpdate(update) && update.subtype === MessageWebSearchUpdateType.Error; export const isMessageToolUpdate = (update: MessageUpdate): update is MessageToolUpdate => update.type === MessageUpdateType.Tool; export const isMessageToolCallUpdate = (update: MessageUpdate): update is MessageToolCallUpdate => isMessageToolUpdate(update) && update.subtype === MessageToolUpdateType.Call; export const isMessageToolResultUpdate = ( update: MessageUpdate ): update is MessageToolResultUpdate => isMessageToolUpdate(update) && update.subtype === MessageToolUpdateType.Result; export const isMessageToolErrorUpdate = (update: MessageUpdate): update is MessageToolErrorUpdate => isMessageToolUpdate(update) && update.subtype === MessageToolUpdateType.Error; type MessageUpdateRequestOptions = { base: string; inputs?: string; messageId?: string; isRetry: boolean; isContinue: boolean; webSearch: boolean; tools?: Array<string>; files?: MessageFile[]; }; export async function fetchMessageUpdates( conversationId: string, opts: MessageUpdateRequestOptions, abortSignal: AbortSignal ): Promise<AsyncGenerator<MessageUpdate>> { const abortController = new AbortController(); abortSignal.addEventListener("abort", () => abortController.abort()); const form = new FormData(); const optsJSON = JSON.stringify({ inputs: opts.inputs, id: opts.messageId, is_retry: opts.isRetry, is_continue: opts.isContinue, web_search: opts.webSearch, tools: opts.tools, }); opts.files?.forEach((file) => { const name = file.type + ";" + file.name; form.append("files", new File([file.value], name, { type: file.mime })); }); form.append("data", optsJSON); const response = await fetch(`${opts.base}/conversation/${conversationId}`, { method: "POST", body: form, signal: abortController.signal, }); if (!response.ok) { const errorMessage = await response .json() .then((obj) => obj.message) .catch(() => `Request failed with status code ${response.status}: ${response.statusText}`); throw Error(errorMessage); } if (!response.body) { throw Error("Body not defined"); } if (!(page.data.publicConfig.PUBLIC_SMOOTH_UPDATES === "true")) { return endpointStreamToIterator(response, abortController); } return smoothAsyncIterator( streamMessageUpdatesToFullWords(endpointStreamToIterator(response, abortController)) ); } async function* endpointStreamToIterator( response: Response, abortController: 
AbortController ): AsyncGenerator<MessageUpdate> { const reader = response.body?.pipeThrough(new TextDecoderStream()).getReader(); if (!reader) throw Error("Response for endpoint had no body"); // Handle any cases where we must abort reader.closed.then(() => abortController.abort()); // Handle logic for aborting abortController.signal.addEventListener("abort", () => reader.cancel()); // ex) If the last response is => {"type": "stream", "token": // It should be => {"type": "stream", "token": "Hello"} = prev_input_chunk + "Hello"} let prevChunk = ""; while (!abortController.signal.aborted) { const { done, value } = await reader.read(); if (done) { abortController.abort(); break; } if (!value) continue; const { messageUpdates, remainingText } = parseMessageUpdates(prevChunk + value); prevChunk = remainingText; for (const messageUpdate of messageUpdates) yield messageUpdate; } } function parseMessageUpdates(value: string): { messageUpdates: MessageUpdate[]; remainingText: string; } { const inputs = value.split("\n"); const messageUpdates: MessageUpdate[] = []; for (const input of inputs) { try { messageUpdates.push(JSON.parse(input) as MessageUpdate); } catch (error) { // in case of parsing error, we return what we were able to parse if (error instanceof SyntaxError) { return { messageUpdates, remainingText: inputs.at(-1) ?? "", }; } } } return { messageUpdates, remainingText: "" }; } /** * Emits all the message updates immediately that aren't "stream" type * Emits a concatenated "stream" type message update once it detects a full word * Example: "what" " don" "'t" => "what" " don't" * Only supports latin languages, ignores others */ async function* streamMessageUpdatesToFullWords( iterator: AsyncGenerator<MessageUpdate> ): AsyncGenerator<MessageUpdate> { let bufferedStreamUpdates: MessageStreamUpdate[] = []; const endAlphanumeric = /[a-zA-Z0-9À-ž'`]+$/; const beginnningAlphanumeric = /^[a-zA-Z0-9À-ž'`]+/; for await (const messageUpdate of iterator) { if (messageUpdate.type !== "stream") { yield messageUpdate; continue; } bufferedStreamUpdates.push(messageUpdate); let lastIndexEmitted = 0; for (let i = 1; i < bufferedStreamUpdates.length; i++) { const prevEndsAlphanumeric = endAlphanumeric.test(bufferedStreamUpdates[i - 1].token); const currBeginsAlphanumeric = beginnningAlphanumeric.test(bufferedStreamUpdates[i].token); const shouldCombine = prevEndsAlphanumeric && currBeginsAlphanumeric; const combinedTooMany = i - lastIndexEmitted >= 5; if (shouldCombine && !combinedTooMany) continue; // Combine tokens together and emit yield { type: MessageUpdateType.Stream, token: bufferedStreamUpdates .slice(lastIndexEmitted, i) .map((_) => _.token) .join(""), }; lastIndexEmitted = i; } bufferedStreamUpdates = bufferedStreamUpdates.slice(lastIndexEmitted); } for (const messageUpdate of bufferedStreamUpdates) yield messageUpdate; } /** * Attempts to smooth out the time between values emitted by an async iterator * by waiting for the average time between values to emit the next value */ async function* smoothAsyncIterator<T>(iterator: AsyncGenerator<T>): AsyncGenerator<T> { const eventTarget = new EventTarget(); let done = false; const valuesBuffer: T[] = []; const valueTimesMS: number[] = []; const next = async () => { const obj = await iterator.next(); if (obj.done) { done = true; } else { valuesBuffer.push(obj.value); valueTimesMS.push(performance.now()); next(); } eventTarget.dispatchEvent(new Event("next")); }; next(); let timeOfLastEmitMS = performance.now(); while (!done || valuesBuffer.length > 0) 
{ // Only consider the last X times between tokens const sampledTimesMS = valueTimesMS.slice(-30); // Get the total time spent in abnormal periods const anomalyThresholdMS = 2000; const anomalyDurationMS = sampledTimesMS .map((time, i, times) => time - times[i - 1]) .slice(1) .filter((time) => time > anomalyThresholdMS) .reduce((a, b) => a + b, 0); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const totalTimeMSBetweenValues = sampledTimesMS.at(-1)! - sampledTimesMS[0]; const timeMSBetweenValues = totalTimeMSBetweenValues - anomalyDurationMS; const averageTimeMSBetweenValues = Math.min( 200, timeMSBetweenValues / (sampledTimesMS.length - 1) ); const timeSinceLastEmitMS = performance.now() - timeOfLastEmitMS; // Emit after waiting duration or cancel if "next" event is emitted const gotNext = await Promise.race([ sleep(Math.max(5, averageTimeMSBetweenValues - timeSinceLastEmitMS)), waitForEvent(eventTarget, "next"), ]); // Go to next iteration so we can re-calculate when to emit if (gotNext) continue; // Nothing in buffer to emit if (valuesBuffer.length === 0) continue; // Emit timeOfLastEmitMS = performance.now(); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion yield valuesBuffer.shift()!; } } const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); const waitForEvent = (eventTarget: EventTarget, eventName: string) => new Promise<boolean>((resolve) => eventTarget.addEventListener(eventName, () => resolve(true), { once: true }) );
chat-ui/src/lib/utils/messageUpdates.ts/0
{ "file_path": "chat-ui/src/lib/utils/messageUpdates.ts", "repo_id": "chat-ui", "token_count": 2913 }
90
import { v4 } from "uuid";
import type { Tree, TreeId, NewNode, TreeNode } from "./tree";

export function addChildren<T>(conv: Tree<T>, message: NewNode<T>, parentId?: TreeId): TreeId {
	// if this is the first message we just push it
	if (conv.messages.length === 0) {
		const messageId = v4();
		conv.rootMessageId = messageId;
		conv.messages.push({
			...message,
			ancestors: [],
			id: messageId,
		} as TreeNode<T>);
		return messageId;
	}

	if (!parentId) {
		throw new Error("You need to specify a parentId if this is not the first message");
	}

	const messageId = v4();
	if (!conv.rootMessageId) {
		// legacy conversation without a root message: we can only append to the last message
		if (parentId !== conv.messages[conv.messages.length - 1].id) {
			throw new Error("This is a legacy conversation, you can only append to the last message");
		}
		conv.messages.push({ ...message, id: messageId } as TreeNode<T>);
		return messageId;
	}

	const ancestors = [...(conv.messages.find((m) => m.id === parentId)?.ancestors ?? []), parentId];
	conv.messages.push({
		...message,
		ancestors,
		id: messageId,
		children: [],
	} as TreeNode<T>);

	const parent = conv.messages.find((m) => m.id === parentId);

	if (parent) {
		if (parent.children) {
			parent.children.push(messageId);
		} else parent.children = [messageId];
	}

	return messageId;
}
chat-ui/src/lib/utils/tree/addChildren.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/addChildren.ts", "repo_id": "chat-ui", "token_count": 486 }
91
<script lang="ts"> import { goto } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/state"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; const publicConfig = usePublicConfig(); import ChatWindow from "$lib/components/chat/ChatWindow.svelte"; import { ERROR_MESSAGES, error } from "$lib/stores/errors"; import { pendingMessage } from "$lib/stores/pendingMessage"; import { useSettingsStore } from "$lib/stores/settings.js"; import { findCurrentModel } from "$lib/utils/models"; import { onMount } from "svelte"; let { data } = $props(); let loading = $state(false); let files: File[] = $state([]); const settings = useSettingsStore(); async function createConversation(message: string) { try { loading = true; // check if $settings.activeModel is a valid model // else check if it's an assistant, and use that model // else use the first model const validModels = data.models.map((model) => model.id); let model; if (validModels.includes($settings.activeModel)) { model = $settings.activeModel; } else { if (data.assistant?.modelId && validModels.includes(data.assistant.modelId)) { model = data.assistant.modelId; } else { model = data.models[0].id; } } const res = await fetch(`${base}/conversation`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ model, preprompt: $settings.customPrompts[$settings.activeModel], assistantId: data.assistant?._id, }), }); if (!res.ok) { const errorMessage = (await res.json()).message || ERROR_MESSAGES.default; error.set(errorMessage); console.error("Error while creating conversation: ", errorMessage); return; } const { conversationId } = await res.json(); // Ugly hack to use a store as temp storage, feel free to improve ^^ pendingMessage.set({ content: message, files, }); // invalidateAll to update list of conversations await goto(`${base}/conversation/${conversationId}`, { invalidateAll: true }); } catch (err) { error.set((err as Error).message || ERROR_MESSAGES.default); console.error(err); } finally { loading = false; } } onMount(() => { // check if there's a ?q query param with a message const query = page.url.searchParams.get("q"); if (query) createConversation(query); }); let currentModel = $derived( findCurrentModel( [...data.models, ...data.oldModels], !$settings.assistants.includes($settings.activeModel) ? $settings.activeModel : data.assistant?.modelId ) ); </script> <svelte:head> <title>{publicConfig.PUBLIC_APP_NAME}</title> </svelte:head> <ChatWindow on:message={(ev) => createConversation(ev.detail)} {loading} assistant={data.assistant} {currentModel} models={data.models} bind:files />
chat-ui/src/routes/+page.svelte/0
{ "file_path": "chat-ui/src/routes/+page.svelte", "repo_id": "chat-ui", "token_count": 1078 }
92
import { base } from "$app/paths";
import { collections } from "$lib/server/database";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { z } from "zod";
import { config } from "$lib/server/config";
import { sendSlack } from "$lib/server/sendSlack";
import type { Tool } from "$lib/types/Tool";

export async function POST({ params, request, locals, url }) {
	// is there already a report from this user for this tool?
	const report = await collections.reports.findOne({
		createdBy: locals.user?._id ?? locals.sessionId,
		object: "tool",
		contentId: new ObjectId(params.toolId),
	});

	if (report) {
		return error(400, "Already reported");
	}

	const { reason } = z.object({ reason: z.string().min(1).max(128) }).parse(await request.json());

	if (!reason) {
		return error(400, "Invalid report reason");
	}

	const { acknowledged } = await collections.reports.insertOne({
		_id: new ObjectId(),
		contentId: new ObjectId(params.toolId),
		object: "tool",
		createdBy: locals.user?._id ?? locals.sessionId,
		createdAt: new Date(),
		updatedAt: new Date(),
		reason,
	});

	if (!acknowledged) {
		return error(500, "Failed to report tool");
	}

	if (config.WEBHOOK_URL_REPORT_ASSISTANT) {
		const prefixUrl = config.PUBLIC_SHARE_PREFIX || `${config.PUBLIC_ORIGIN || url.origin}${base}`;
		const toolUrl = `${prefixUrl}/tools/${params.toolId}`;

		const tool = await collections.tools.findOne<Pick<Tool, "displayName" | "name">>(
			{ _id: new ObjectId(params.toolId) },
			{ projection: { displayName: 1, name: 1 } }
		);

		const username = locals.user?.username;

		await sendSlack(
			`🔴 Tool <${toolUrl}|${tool?.displayName ?? tool?.name}> reported by ${
				username ? `<http://hf.co/${username}|${username}>` : "non-logged in user"
			}.\n\n> ${reason}`
		);
	}

	return new Response("Tool reported", { status: 200 });
}
chat-ui/src/routes/api/tools/[toolId]/report/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/tools/[toolId]/report/+server.ts", "repo_id": "chat-ui", "token_count": 668 }
93
import { config } from "$lib/server/config"; import { startOfHour } from "date-fns"; import { authCondition, requiresUser } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { models, validModelIdSchema } from "$lib/server/models"; import { ERROR_MESSAGES } from "$lib/stores/errors"; import type { Message } from "$lib/types/Message"; import { error } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { z } from "zod"; import { MessageReasoningUpdateType, MessageUpdateStatus, MessageUpdateType, type MessageUpdate, } from "$lib/types/MessageUpdate"; import { uploadFile } from "$lib/server/files/uploadFile"; import { convertLegacyConversation } from "$lib/utils/tree/convertLegacyConversation"; import { isMessageId } from "$lib/utils/tree/isMessageId"; import { buildSubtree } from "$lib/utils/tree/buildSubtree.js"; import { addChildren } from "$lib/utils/tree/addChildren.js"; import { addSibling } from "$lib/utils/tree/addSibling.js"; import { usageLimits } from "$lib/server/usageLimits"; import { MetricsServer } from "$lib/server/metrics"; import { textGeneration } from "$lib/server/textGeneration"; import type { TextGenerationContext } from "$lib/server/textGeneration/types"; import { logger } from "$lib/server/logger.js"; import { documentParserToolId } from "$lib/utils/toolIds.js"; export async function POST({ request, locals, params, getClientAddress }) { const id = z.string().parse(params.id); const convId = new ObjectId(id); const promptedAt = new Date(); const userId = locals.user?._id ?? locals.sessionId; // check user if (!userId) { error(401, "Unauthorized"); } // check if the user has access to the conversation const convBeforeCheck = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (convBeforeCheck && !convBeforeCheck.rootMessageId) { const res = await collections.conversations.updateOne( { _id: convId, }, { $set: { ...convBeforeCheck, ...convertLegacyConversation(convBeforeCheck), }, } ); if (!res.acknowledged) { error(500, "Failed to convert conversation"); } } const conv = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (!conv) { error(404, "Conversation not found"); } // register the event for ratelimiting await collections.messageEvents.insertOne({ type: "message", userId, createdAt: new Date(), expiresAt: new Date(Date.now() + 60_000), ip: getClientAddress(), }); const messagesBeforeLogin = config.MESSAGES_BEFORE_LOGIN ? parseInt(config.MESSAGES_BEFORE_LOGIN) : 0; // guest mode check if (!locals.user?._id && requiresUser && messagesBeforeLogin) { const totalMessages = ( await collections.conversations .aggregate([ { $match: { ...authCondition(locals), "messages.from": "assistant" } }, { $project: { messages: 1 } }, { $limit: messagesBeforeLogin + 1 }, { $unwind: "$messages" }, { $match: { "messages.from": "assistant" } }, { $count: "messages" }, ]) .toArray() )[0]?.messages ?? 
0; if (totalMessages > messagesBeforeLogin) { error(429, "Exceeded number of messages before login"); } } if (usageLimits?.messagesPerMinute) { // check if the user is rate limited const nEvents = Math.max( await collections.messageEvents.countDocuments({ userId, type: "message", expiresAt: { $gt: new Date() }, }), await collections.messageEvents.countDocuments({ ip: getClientAddress(), type: "message", expiresAt: { $gt: new Date() }, }) ); if (nEvents > usageLimits.messagesPerMinute) { error(429, ERROR_MESSAGES.rateLimited); } } if (usageLimits?.messages && conv.messages.length > usageLimits.messages) { error( 429, `This conversation has more than ${usageLimits.messages} messages. Start a new one to continue` ); } // fetch the model const model = models.find((m) => m.id === conv.model); if (!model) { error(410, "Model not available anymore"); } // finally parse the content of the request const form = await request.formData(); const json = form.get("data"); if (!json || typeof json !== "string") { error(400, "Invalid request"); } const { inputs: newPrompt, id: messageId, is_retry: isRetry, is_continue: isContinue, web_search: webSearch, tools: toolsPreferences, } = z .object({ id: z.string().uuid().refine(isMessageId).optional(), // parent message id to append to for a normal message, or the message id for a retry/continue inputs: z.optional( z .string() .min(1) .transform((s) => s.replace(/\r\n/g, "\n")) ), is_retry: z.optional(z.boolean()), is_continue: z.optional(z.boolean()), web_search: z.optional(z.boolean()), tools: z.array(z.string()).optional(), files: z.optional( z.array( z.object({ type: z.literal("base64").or(z.literal("hash")), name: z.string(), value: z.string(), mime: z.string(), }) ) ), }) .parse(JSON.parse(json)); const inputFiles = await Promise.all( form .getAll("files") .filter((entry): entry is File => entry instanceof File && entry.size > 0) .map(async (file) => { const [type, ...name] = file.name.split(";"); return { type: z.literal("base64").or(z.literal("hash")).parse(type), value: await file.text(), mime: file.type, name: name.join(";"), }; }) ); // Check for PDF files in the input const hasPdfFiles = inputFiles?.some((file) => file.mime === "application/pdf") ?? false; // Check for existing PDF files in the conversation const hasPdfInConversation = conv.messages?.some((msg) => msg.files?.some((file) => file.mime === "application/pdf")) ?? false; if (usageLimits?.messageLength && (newPrompt?.length ?? 0) > usageLimits.messageLength) { error(400, "Message too long."); } // each file is either: // base64 string requiring upload to the server // hash pointing to an existing file const hashFiles = inputFiles?.filter((file) => file.type === "hash") ?? []; const b64Files = inputFiles ?.filter((file) => file.type !== "hash") .map((file) => { const blob = Buffer.from(file.value, "base64"); return new File([blob], file.name, { type: file.mime }); }) ?? 
[]; // check sizes // todo: make configurable if (b64Files.some((file) => file.size > 10 * 1024 * 1024)) { error(413, "File too large, should be <10MB"); } const uploadedFiles = await Promise.all(b64Files.map((file) => uploadFile(file, conv))).then( (files) => [...files, ...hashFiles] ); // we will append tokens to the content of this message let messageToWriteToId: Message["id"] | undefined = undefined; // used for building the prompt, subtree of the conversation that goes from the latest message to the root let messagesForPrompt: Message[] = []; if (isContinue && messageId) { // if it's the last message and we continue then we build the prompt up to the last message // we will strip the end tokens afterwards when the prompt is built if ((conv.messages.find((msg) => msg.id === messageId)?.children?.length ?? 0) > 0) { error(400, "Can only continue the last message"); } messageToWriteToId = messageId; messagesForPrompt = buildSubtree(conv, messageId); } else if (isRetry && messageId) { // two cases, if we're retrying a user message with a newPrompt set, // it means we're editing a user message // if we're retrying on an assistant message, newPrompt cannot be set // it means we're retrying the last assistant message for a new answer const messageToRetry = conv.messages.find((message) => message.id === messageId); if (!messageToRetry) { error(404, "Message not found"); } if (messageToRetry.from === "user" && newPrompt) { // add a sibling to this message from the user, with the alternative prompt // add a children to that sibling, where we can write to const newUserMessageId = addSibling( conv, { from: "user", content: newPrompt, files: uploadedFiles, createdAt: new Date(), updatedAt: new Date(), }, messageId ); messageToWriteToId = addChildren( conv, { from: "assistant", content: "", createdAt: new Date(), updatedAt: new Date(), }, newUserMessageId ); messagesForPrompt = buildSubtree(conv, newUserMessageId); } else if (messageToRetry.from === "assistant") { // we're retrying an assistant message, to generate a new answer // just add a sibling to the assistant answer where we can write to messageToWriteToId = addSibling( conv, { from: "assistant", content: "", createdAt: new Date(), updatedAt: new Date() }, messageId ); messagesForPrompt = buildSubtree(conv, messageId); messagesForPrompt.pop(); // don't need the latest assistant message in the prompt since we're retrying it } } else { // just a normal linear conversation, so we add the user message // and the blank assistant message back to back const newUserMessageId = addChildren( conv, { from: "user", content: newPrompt ?? 
"", files: uploadedFiles, createdAt: new Date(), updatedAt: new Date(), }, messageId ); messageToWriteToId = addChildren( conv, { from: "assistant", content: "", createdAt: new Date(), updatedAt: new Date(), }, newUserMessageId ); // build the prompt from the user message messagesForPrompt = buildSubtree(conv, newUserMessageId); } const messageToWriteTo = conv.messages.find((message) => message.id === messageToWriteToId); if (!messageToWriteTo) { error(500, "Failed to create message"); } if (messagesForPrompt.length === 0) { error(500, "Failed to create prompt"); } // update the conversation with the new messages await collections.conversations.updateOne( { _id: convId }, { $set: { messages: conv.messages, title: conv.title, updatedAt: new Date() } } ); let doneStreaming = false; let lastTokenTimestamp: undefined | Date = undefined; // we now build the stream const stream = new ReadableStream({ async start(controller) { messageToWriteTo.updates ??= []; async function update(event: MessageUpdate) { if (!messageToWriteTo || !conv) { throw Error("No message or conversation to write events to"); } // Add token to content or skip if empty if (event.type === MessageUpdateType.Stream) { if (event.token === "") return; messageToWriteTo.content += event.token; // add to token total MetricsServer.getMetrics().model.tokenCountTotal.inc({ model: model?.id }); // if this is the first token, add to time to first token if (!lastTokenTimestamp) { MetricsServer.getMetrics().model.timeToFirstToken.observe( { model: model?.id }, Date.now() - promptedAt.getTime() ); lastTokenTimestamp = new Date(); } // add to time per token MetricsServer.getMetrics().model.timePerOutputToken.observe( { model: model?.id }, Date.now() - (lastTokenTimestamp ?? promptedAt).getTime() ); lastTokenTimestamp = new Date(); } else if ( event.type === MessageUpdateType.Reasoning && event.subtype === MessageReasoningUpdateType.Stream ) { messageToWriteTo.reasoning ??= ""; messageToWriteTo.reasoning += event.token; } // Set the title else if (event.type === MessageUpdateType.Title) { conv.title = event.title; await collections.conversations.updateOne( { _id: convId }, { $set: { title: conv?.title, updatedAt: new Date() } } ); } // Set the final text and the interrupted flag else if (event.type === MessageUpdateType.FinalAnswer) { messageToWriteTo.interrupted = event.interrupted; messageToWriteTo.content = initialMessageContent + event.text; // add to latency MetricsServer.getMetrics().model.latency.observe( { model: model?.id }, Date.now() - promptedAt.getTime() ); } // Add file else if (event.type === MessageUpdateType.File) { messageToWriteTo.files = [ ...(messageToWriteTo.files ?? 
[]), { type: "hash", name: event.name, value: event.sha, mime: event.mime }, ]; } // Append to the persistent message updates if it's not a stream update if ( event.type !== MessageUpdateType.Stream && !( event.type === MessageUpdateType.Status && event.status === MessageUpdateStatus.KeepAlive ) && !( event.type === MessageUpdateType.Reasoning && event.subtype === MessageReasoningUpdateType.Stream ) ) { messageToWriteTo?.updates?.push(event); } // Avoid remote keylogging attack executed by watching packet lengths // by padding the text with null chars to a fixed length // https://cdn.arstechnica.net/wp-content/uploads/2024/03/LLM-Side-Channel.pdf if (event.type === MessageUpdateType.Stream) { event = { ...event, token: event.token.padEnd(16, "\0") }; } // Send the update to the client controller.enqueue(JSON.stringify(event) + "\n"); // Send 4096 of spaces to make sure the browser doesn't blocking buffer that holding the response if (event.type === MessageUpdateType.FinalAnswer) { controller.enqueue(" ".repeat(4096)); } } await collections.conversations.updateOne( { _id: convId }, { $set: { title: conv.title, updatedAt: new Date() } } ); messageToWriteTo.updatedAt = new Date(); let hasError = false; const initialMessageContent = messageToWriteTo.content; try { const ctx: TextGenerationContext = { model, endpoint: await model.getEndpoint(), conv, messages: messagesForPrompt, assistant: undefined, isContinue: isContinue ?? false, webSearch: webSearch ?? false, toolsPreference: [ ...(toolsPreferences ?? []), ...(hasPdfFiles || hasPdfInConversation ? [documentParserToolId] : []), // Add document parser tool if PDF files are present ], promptedAt, ip: getClientAddress(), username: locals.user?.username, }; // run the text generation and send updates to the client for await (const event of textGeneration(ctx)) await update(event); } catch (e) { hasError = true; await update({ type: MessageUpdateType.Status, status: MessageUpdateStatus.Error, message: (e as Error).message, }); logger.error(e); } finally { // check if no output was generated if (!hasError && messageToWriteTo.content === initialMessageContent) { await update({ type: MessageUpdateType.Status, status: MessageUpdateStatus.Error, message: "No output was generated. 
Something went wrong.", }); } } await collections.conversations.updateOne( { _id: convId }, { $set: { messages: conv.messages, title: conv?.title, updatedAt: new Date() } } ); // used to detect if cancel() is called bc of interrupt or just because the connection closes doneStreaming = true; controller.close(); }, async cancel() { if (doneStreaming) return; await collections.conversations.updateOne( { _id: convId }, { $set: { messages: conv.messages, title: conv.title, updatedAt: new Date() } } ); }, }); if (conv.assistantId) { await collections.assistantStats.updateOne( { assistantId: conv.assistantId, "date.at": startOfHour(new Date()), "date.span": "hour" }, { $inc: { count: 1 } }, { upsert: true } ); } const metrics = MetricsServer.getMetrics(); metrics.model.messagesTotal.inc({ model: model?.id }); // Todo: maybe we should wait for the message to be saved before ending the response - in case of errors return new Response(stream, { headers: { "Content-Type": "application/jsonl", }, }); } export async function DELETE({ locals, params }) { const convId = new ObjectId(params.id); const conv = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (!conv) { error(404, "Conversation not found"); } await collections.conversations.deleteOne({ _id: conv._id }); return new Response(); } export async function PATCH({ request, locals, params }) { const values = z .object({ title: z.string().trim().min(1).max(100).optional(), model: validModelIdSchema.optional(), }) .parse(await request.json()); const convId = new ObjectId(params.id); const conv = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (!conv) { error(404, "Conversation not found"); } // Only include defined values in the update const updateValues = { ...(values.title !== undefined && { title: values.title }), ...(values.model !== undefined && { model: values.model }), }; await collections.conversations.updateOne( { _id: convId, }, { $set: updateValues, } ); return new Response(); }
chat-ui/src/routes/conversation/[id]/+server.ts/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/+server.ts", "repo_id": "chat-ui", "token_count": 6328 }
94
<script lang="ts"> import logo from "../../../../../static/huggingchat/logo.svg?raw"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; const publicConfig = usePublicConfig(); interface Props { name: string; logoUrl: string | undefined; } let { name, logoUrl }: Props = $props(); </script> <div class=" flex h-[648px] w-full flex-col items-center bg-white"> <div class="flex flex-1 flex-col items-center justify-center"> {#if logoUrl} <img class="h-48 w-48" src={logoUrl} alt="avatar" /> {/if} <h1 class="m-0 text-5xl font-bold text-black"> {name} </h1> </div> <div class="flex h-[200px] w-full flex-col items-center justify-center rounded-b-none bg-{publicConfig.PUBLIC_APP_COLOR}-500/10 pb-10 pt-10 text-4xl text-gray-500" style="border-radius: 100% 100% 0 0;" > Try it now {#if publicConfig.isHuggingChat} on {/if} {#if publicConfig.isHuggingChat} <div class="flex flex-row pt-3 text-5xl font-bold text-black"> <div class="mr-5 flex items-center justify-center" id="logo"> <!-- eslint-disable-next-line --> {@html logo} </div> <span>HuggingChat</span> </div> {/if} </div> </div>
chat-ui/src/routes/models/[...model]/thumbnail.png/ModelThumbnail.svelte/0
{ "file_path": "chat-ui/src/routes/models/[...model]/thumbnail.png/ModelThumbnail.svelte", "repo_id": "chat-ui", "token_count": 502 }
95
import { useAPIClient, handleResponse } from "$lib/APIClient"; export const load = async ({ parent, fetch }) => { const client = useAPIClient({ fetch }); const reports = await client.user.reports.get().then(handleResponse); return { assistants: (await parent().then((data) => data.assistants)).map((el) => ({ ...el, reported: reports.some( (r) => r.contentId.toString() === el._id.toString() && r.object === "assistant" ), })), }; };
chat-ui/src/routes/settings/+layout.ts/0
{ "file_path": "chat-ui/src/routes/settings/+layout.ts", "repo_id": "chat-ui", "token_count": 166 }
96
{ "$schema": "https://vega.github.io/schema/vega-lite/v4.json", "data": { "values": "<DVC_METRIC_DATA>" }, "title": "<DVC_METRIC_TITLE>", "mark": "rect", "encoding": { "x": { "field": "<DVC_METRIC_X>", "type": "nominal", "sort": "ascending", "title": "<DVC_METRIC_X_LABEL>" }, "y": { "field": "<DVC_METRIC_Y>", "type": "nominal", "sort": "ascending", "title": "<DVC_METRIC_Y_LABEL>" }, "color": { "aggregate": "count", "type": "quantitative" }, "facet": { "field": "rev", "type": "nominal" } } }
datasets/.dvc/plots/confusion.json/0
{ "file_path": "datasets/.dvc/plots/confusion.json", "repo_id": "datasets", "token_count": 450 }
97
# How to add a new dataset

Add datasets directly to the 🤗 Hugging Face Hub!

You can share your dataset on https://huggingface.co/datasets directly using your account, see the documentation:

* [Create a dataset and upload files on the website](https://huggingface.co/docs/datasets/upload_dataset)
* [Advanced guide using the CLI](https://huggingface.co/docs/datasets/share)
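For example, a minimal sketch of a programmatic upload (the file name and repo id below are placeholders):

```py
from datasets import load_dataset

# load local files into a dataset (csv, json, parquet, ... all work)
dataset = load_dataset("csv", data_files="my_data.csv")

# push it to the Hub under your account (log in first with `huggingface-cli login`)
dataset.push_to_hub("your-username/my-dataset")
```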
datasets/ADD_NEW_DATASET.md/0
{ "file_path": "datasets/ADD_NEW_DATASET.md", "repo_id": "datasets", "token_count": 113 }
98
# Know your dataset There are two types of dataset objects, a regular [`Dataset`] and then an ✨ [`IterableDataset`] ✨. A [`Dataset`] provides fast random access to the rows, and memory-mapping so that loading even large datasets only uses a relatively small amount of device memory. But for really, really big datasets that won't even fit on disk or in memory, an [`IterableDataset`] allows you to access and use the dataset without waiting for it to download completely! This tutorial will show you how to load and access a [`Dataset`] and an [`IterableDataset`]. ## Dataset When you load a dataset split, you'll get a [`Dataset`] object. You can do many things with a [`Dataset`] object, which is why it's important to learn how to manipulate and interact with the data stored inside. This tutorial uses the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset, but feel free to load any dataset you'd like and follow along! ```py >>> from datasets import load_dataset >>> dataset = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="train") ``` ### Indexing A [`Dataset`] contains columns of data, and each column can be a different type of data. The *index*, or axis label, is used to access examples from the dataset. For example, indexing by the row returns a dictionary of an example from the dataset: ```py # Get the first row in the dataset >>> dataset[0] {'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` Use the `-` operator to start from the end of the dataset: ```py # Get the last row in the dataset >>> dataset[-1] {'label': 0, 'text': 'things really get weird , though not particularly scary : the movie is all portent and no content .'} ``` Indexing by the column name returns a list of all the values in the column: ```py >>> dataset["text"] ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .', 'effective but too-tepid biopic', ..., 'things really get weird , though not particularly scary : the movie is all portent and no content .'] ``` You can combine row and column name indexing to return a specific value at a position: ```py >>> dataset[0]["text"] 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .' ``` Indexing order doesn't matter. Indexing by the column name first returns a [`Column`] object that you can index as usual with row indices: ```py >>> import time >>> start_time = time.time() >>> text = dataset[0]["text"] >>> end_time = time.time() >>> print(f"Elapsed time: {end_time - start_time:.4f} seconds") Elapsed time: 0.0031 seconds >>> start_time = time.time() >>> text = dataset["text"][0] >>> end_time = time.time() >>> print(f"Elapsed time: {end_time - start_time:.4f} seconds") Elapsed time: 0.0042 seconds ``` ### Slicing Slicing returns a slice - or subset - of the dataset, which is useful for viewing several rows at once. To slice a dataset, use the `:` operator to specify a range of positions. 
```py # Get the first three rows >>> dataset[:3] {'label': [1, 1, 1], 'text': ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .', 'effective but too-tepid biopic']} # Get rows between three and six >>> dataset[3:6] {'label': [1, 1, 1], 'text': ['if you sometimes like to go to the movies to have fun , wasabi is a good place to start .', "emerges as something rare , an issue movie that's so honest and keenly observed that it doesn't feel like one .", 'the film provides some great insight into the neurotic mindset of all comics -- even those who have reached the absolute top of the game .']} ``` ## IterableDataset An [`IterableDataset`] is loaded when you set the `streaming` parameter to `True` in [`~datasets.load_dataset`]: ```py >>> from datasets import load_dataset >>> iterable_dataset = load_dataset("ethz/food101", split="train", streaming=True) >>> for example in iterable_dataset: ... print(example) ... break {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F5C520>, 'label': 6} ``` You can also create an [`IterableDataset`] from an *existing* [`Dataset`], but it is faster than streaming mode because the dataset is streamed from local files: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="train") >>> iterable_dataset = dataset.to_iterable_dataset() ``` An [`IterableDataset`] progressively iterates over a dataset one example at a time, so you don't have to wait for the whole dataset to download before you can use it. As you can imagine, this is quite useful for large datasets you want to use immediately! ### Indexing An [`IterableDataset`]'s behavior is different from a regular [`Dataset`]. You don't get random access to examples in an [`IterableDataset`]. Instead, you should iterate over its elements, for example, by calling `next(iter())` or with a `for` loop to return the next item from the [`IterableDataset`]: ```py >>> next(iter(iterable_dataset)) {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F59B50>, 'label': 6} >>> for example in iterable_dataset: ... print(example) ... break {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DE82B0>, 'label': 6} ``` But an [`IterableDataset`] supports column indexing that returns an iterable for the column values: ```py >>> next(iter(iterable_dataset["label"])) 6 ``` ### Creating a subset You can return a subset of the dataset with a specific number of examples in it with [`IterableDataset.take`]: ```py # Get first three examples >>> list(iterable_dataset.take(3)) [{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DEE9D0>, 'label': 6}, {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F7479DE8190>, 'label': 6}, {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x383 at 0x7F7479DE8310>, 'label': 6}] ``` But unlike [slicing](access/#slicing), [`IterableDataset.take`] creates a new [`IterableDataset`]. ## Next steps Interested in learning more about the differences between these two types of datasets? 
Learn more about them in the [Differences between `Dataset` and `IterableDataset`](about_mapstyle_vs_iterable) conceptual guide. To get more hands-on with these datasets types, check out the [Process](process) guide to learn how to preprocess a [`Dataset`] or the [Stream](stream) guide to learn how to preprocess an [`IterableDataset`].
datasets/docs/source/access.mdx/0
{ "file_path": "datasets/docs/source/access.mdx", "repo_id": "datasets", "token_count": 2326 }
99
# Load image data

Image datasets have [`Image`] type columns, which contain PIL objects.

<Tip>

To work with image datasets, you need to have the `vision` dependency installed. Check out the [installation](./installation#vision) guide to learn how to install it.

</Tip>

When you load an image dataset and call the image column, the images are decoded as PIL Images:

```py
>>> from datasets import load_dataset, Image

>>> dataset = load_dataset("beans", split="train")
>>> dataset[0]["image"]
```

<Tip warning={true}>

Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding and resampling all the image objects in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset.

</Tip>

For a guide on how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>.

## Local files

You can load a dataset from the image path. Use the [`~Dataset.cast_column`] function to accept a column of image file paths, and decode it into a PIL image with the [`Image`] feature:

```py
>>> from datasets import Dataset, Image

>>> dataset = Dataset.from_dict({"image": ["path/to/image_1", "path/to/image_2", ..., "path/to/image_n"]}).cast_column("image", Image())
>>> dataset[0]["image"]
<PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160>
```

If you only want to load the underlying path to the image dataset without decoding the image object, set `decode=False` in the [`Image`] feature:

```py
>>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False))
>>> dataset[0]["image"]
{'bytes': None,
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/bean_rust/bean_rust_train.29.jpg'}
```

## ImageFolder

You can also load a dataset with an `ImageFolder` dataset builder which does not require writing a custom dataloader. This makes `ImageFolder` ideal for quickly creating and loading image datasets with several thousand images for different vision tasks.

Your image dataset structure should look like this:

```
folder/train/dog/golden_retriever.png
folder/train/dog/german_shepherd.png
folder/train/dog/chihuahua.png

folder/train/cat/maine_coon.png
folder/train/cat/bengal.png
folder/train/cat/birman.png
```

Alternatively, it can include metadata, for example:

```
folder/train/metadata.csv
folder/train/0001.png
folder/train/0002.png
folder/train/0003.png
```

If the dataset follows the `ImageFolder` structure, then you can load it directly with [`load_dataset`]:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("username/dataset_name")
>>> # OR locally:
>>> dataset = load_dataset("/path/to/folder")
```

For local datasets, this is equivalent to passing `imagefolder` manually in [`load_dataset`] and the directory in `data_dir`:

```py
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder")
```

Then you can access the images as `PIL.Image` objects:

```
>>> dataset["train"][0]
{"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160>, "label": 0}

>>> dataset["train"][-1]
{"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E8DAD30>, "label": 1}
```

To ignore the information in the metadata file, set `drop_metadata=True` in [`load_dataset`]:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("username/dataset_with_metadata", drop_metadata=True)
```

If you don't have a metadata file, `ImageFolder` automatically infers the label name from the directory name. If you want to drop automatically created labels, set `drop_labels=True`. In this case, your dataset will only contain an image column:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("username/dataset_without_metadata", drop_labels=True)
```

Finally, the `filters` argument lets you load only a subset of the dataset, based on a condition on the label or the metadata. This is especially useful if the metadata is in Parquet format, since this format enables fast filtering. It is also recommended to use this argument with `streaming=True`, because by default the dataset is fully downloaded before filtering.

```python
>>> filters = [("label", "=", 0)]
>>> dataset = load_dataset("username/dataset_name", streaming=True, filters=filters)
```

<Tip>

For more information about creating your own `ImageFolder` dataset, take a look at the [Create an image dataset](./image_dataset) guide.

</Tip>

## WebDataset

The [WebDataset](https://github.com/webdataset/webdataset) format is based on a folder of TAR archives and is suitable for big image datasets. Because of their size, WebDatasets are generally loaded in streaming mode (using `streaming=True`).

You can load a WebDataset like this:

```python
>>> from datasets import load_dataset

>>> dataset = load_dataset("webdataset", data_dir="/path/to/folder", streaming=True)
```

## Image decoding

By default, images are decoded sequentially as `PIL.Images` when you iterate on a dataset. However, it is possible to speed up the dataset significantly using multithreaded decoding:

```python
>>> import os
>>> num_threads = min(32, (os.cpu_count() or 1) + 4)
>>> dataset = dataset.decode(num_threads=num_threads)
>>> for example in dataset:  # up to 20 times faster !
...     ...
```

You can enable multithreading using `num_threads`. This is especially useful to speed up remote data streaming. However, it can be slower than `num_threads=0` for local data on fast disks.
If you are not interested in the images decoded as `PIL.Images` and would like to access the path/bytes instead, you can disable decoding: ```python >>> dataset = dataset.decode(False) ``` Note: [`IterableDataset.decode`] is only available for streaming datasets at the moment.
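For instance, a minimal sketch of what non-decoded access looks like on a streaming dataset (the exact path and bytes values shown are illustrative):

```python
>>> from datasets import load_dataset

>>> dataset = load_dataset("beans", split="train", streaming=True).decode(False)
>>> next(iter(dataset))["image"]  # a dict of raw data instead of a PIL.Image
{'path': 'bean_rust_train.0.jpg', 'bytes': b'\xff\xd8\xff\xe0...'}
```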
datasets/docs/source/image_load.mdx/0
{ "file_path": "datasets/docs/source/image_load.mdx", "repo_id": "datasets", "token_count": 1851 }
100
# Process 🤗 Datasets provides many tools for modifying the structure and content of a dataset. These tools are important for tidying up a dataset, creating additional columns, converting between features and formats, and much more. This guide will show you how to: - Reorder rows and split the dataset. - Rename and remove columns, and other common column operations. - Apply processing functions to each example in a dataset. - Concatenate datasets. - Apply a custom formatting transform. - Save and export processed datasets. For more details specific to processing other dataset modalities, take a look at the <a class="underline decoration-pink-400 decoration-2 font-semibold" href="./audio_process">process audio dataset guide</a>, the <a class="underline decoration-yellow-400 decoration-2 font-semibold" href="./image_process">process image dataset guide</a>, or the <a class="underline decoration-green-400 decoration-2 font-semibold" href="./nlp_process">process text dataset guide</a>. The examples in this guide use the MRPC dataset, but feel free to load any dataset of your choice and follow along! ```py >>> from datasets import load_dataset >>> dataset = load_dataset("nyu-mll/glue", "mrpc", split="train") ``` <Tip warning={true}> All processing methods in this guide return a new [`Dataset`] object. Modification is not done in-place. Be careful about overriding your previous dataset! </Tip> ## Sort, shuffle, select, split, and shard There are several functions for rearranging the structure of a dataset. These functions are useful for selecting only the rows you want, creating train and test splits, and sharding very large datasets into smaller chunks. ### Sort Use [`~Dataset.sort`] to sort column values according to their numerical values. The provided column must be NumPy compatible. ```py >>> dataset["label"][:10] [1, 0, 1, 0, 1, 1, 0, 1, 0, 0] >>> sorted_dataset = dataset.sort("label") >>> sorted_dataset["label"][:10] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] >>> sorted_dataset["label"][-10:] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` Under the hood, this creates a list of indices that is sorted according to values of the column. This indices mapping is then used to access the right rows in the underlying Arrow table. ### Shuffle The [`~Dataset.shuffle`] function randomly rearranges the column values. You can specify the `generator` parameter in this function to use a different `numpy.random.Generator` if you want more control over the algorithm used to shuffle the dataset. ```py >>> shuffled_dataset = sorted_dataset.shuffle(seed=42) >>> shuffled_dataset["label"][:10] [1, 1, 1, 0, 1, 1, 1, 1, 1, 0] ``` Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping. However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower. This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore. To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping. 
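For example, a minimal sketch (how much speed you recover depends on your dataset size and disk):

```py
>>> shuffled_dataset = sorted_dataset.shuffle(seed=42)
>>> shuffled_dataset = shuffled_dataset.flatten_indices()  # rewrites the shuffled rows to disk and drops the indices mapping
```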
Alternatively, you can switch to an [`IterableDataset`] and leverage its fast approximate shuffling [`IterableDataset.shuffle`]: ```py >>> iterable_dataset = dataset.to_iterable_dataset(num_shards=128) >>> shuffled_iterable_dataset = iterable_dataset.shuffle(seed=42, buffer_size=1000) ``` ### Select and Filter There are two options for filtering rows in a dataset: [`~Dataset.select`] and [`~Dataset.filter`]. - [`~Dataset.select`] returns rows according to a list of indices: ```py >>> small_dataset = dataset.select([0, 10, 20, 30, 40, 50]) >>> len(small_dataset) 6 ``` - [`~Dataset.filter`] returns rows that match a specified condition: ```py >>> start_with_ar = dataset.filter(lambda example: example["sentence1"].startswith("Ar")) >>> len(start_with_ar) 6 >>> start_with_ar["sentence1"] ['Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .', 'Arison said Mann may have been one of the pioneers of the world music movement and he had a deep love of Brazilian music .', 'Arts helped coach the youth on an eighth-grade football team at Lombardi Middle School in Green Bay .', 'Around 9 : 00 a.m. EDT ( 1300 GMT ) , the euro was at $ 1.1566 against the dollar , up 0.07 percent on the day .', "Arguing that the case was an isolated example , Canada has threatened a trade backlash if Tokyo 's ban is not justified on scientific grounds .", 'Artists are worried the plan would harm those who need help most - performers who have a difficult time lining up shows .' ] ``` [`~Dataset.filter`] can also filter by indices if you set `with_indices=True`: ```py >>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True) >>> len(even_dataset) 1834 >>> len(dataset) / 2 1834.0 ``` Unless the list of indices to keep is contiguous, those methods also create an indices mapping under the hood. ### Split The [`~Dataset.train_test_split`] function creates train and test splits if your dataset doesn't already have them. This allows you to adjust the relative proportions or an absolute number of samples in each split. In the example below, use the `test_size` parameter to create a test split that is 10% of the original dataset: ```py >>> dataset.train_test_split(test_size=0.1) {'train': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 3301), 'test': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 367)} >>> 0.1 * len(dataset) 366.8 ``` The splits are shuffled by default, but you can set `shuffle=False` to prevent shuffling. ### Shard 🤗 Datasets supports sharding to divide a very large dataset into a predefined number of chunks. Specify the `num_shards` parameter in [`~Dataset.shard`] to determine the number of shards to split the dataset into. You'll also need to provide the shard you want to return with the `index` parameter. 
For example, the [stanfordnlp/imdb](https://huggingface.co/datasets/stanfordnlp/imdb) dataset has 25000 examples: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("stanfordnlp/imdb", split="train") >>> print(dataset) Dataset({ features: ['text', 'label'], num_rows: 25000 }) ``` After sharding the dataset into four chunks, the first shard will only have 6250 examples: ```py >>> dataset.shard(num_shards=4, index=0) Dataset({ features: ['text', 'label'], num_rows: 6250 }) >>> print(25000/4) 6250.0 ``` ## Rename, remove, cast, and flatten The following functions allow you to modify the columns of a dataset. These functions are useful for renaming or removing columns, changing columns to a new set of features, and flattening nested column structures. ### Rename Use [`~Dataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place. Provide [`~Dataset.rename_column`] with the name of the original column, and the new column name: ```py >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'label', 'idx'], num_rows: 3668 }) >>> dataset = dataset.rename_column("sentence1", "sentenceA") >>> dataset = dataset.rename_column("sentence2", "sentenceB") >>> dataset Dataset({ features: ['sentenceA', 'sentenceB', 'label', 'idx'], num_rows: 3668 }) ``` ### Remove When you need to remove one or more columns, provide the column name to remove to the [`~Dataset.remove_columns`] function. Remove more than one column by providing a list of column names: ```py >>> dataset = dataset.remove_columns("label") >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'idx'], num_rows: 3668 }) >>> dataset = dataset.remove_columns(["sentence1", "sentence2"]) >>> dataset Dataset({ features: ['idx'], num_rows: 3668 }) ``` Conversely, [`~Dataset.select_columns`] selects one or more columns to keep and removes the rest. This function takes either one or a list of column names: ```py >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'label', 'idx'], num_rows: 3668 }) >>> dataset = dataset.select_columns(['sentence1', 'sentence2', 'idx']) >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'idx'], num_rows: 3668 }) >>> dataset = dataset.select_columns('idx') >>> dataset Dataset({ features: ['idx'], num_rows: 3668 }) ``` ### Cast The [`~Dataset.cast`] function transforms the feature type of one or more columns. This function accepts your new [`Features`] as its argument. The example below demonstrates how to change the [`ClassLabel`] and [`Value`] features: ```py >>> dataset.features {'sentence1': Value('string'), 'sentence2': Value('string'), 'label': ClassLabel(names=['not_equivalent', 'equivalent']), 'idx': Value('int32')} >>> from datasets import ClassLabel, Value >>> new_features = dataset.features.copy() >>> new_features["label"] = ClassLabel(names=["negative", "positive"]) >>> new_features["idx"] = Value("int64") >>> dataset = dataset.cast(new_features) >>> dataset.features {'sentence1': Value('string'), 'sentence2': Value('string'), 'label': ClassLabel(names=['negative', 'positive']), 'idx': Value('int64')} ``` <Tip> Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value("int32")` to `Value("bool")` if the original column only contains ones and zeros. 
</Tip> Use the [`~Dataset.cast_column`] function to change the feature type of a single column. Pass the column name and its new feature type as arguments: ```py >>> dataset.features {'audio': Audio(sampling_rate=44100, mono=True)} >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) >>> dataset.features {'audio': Audio(sampling_rate=16000, mono=True)} ``` ### Flatten Sometimes a column can be a nested structure of several types. Take a look at the nested structure below from the SQuAD dataset: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rajpurkar/squad", split="train") >>> dataset.features {'id': Value('string'), 'title': Value('string'), 'context': Value('string'), 'question': Value('string'), 'answers': {'text': List(Value('string')), 'answer_start': List(Value('int32'))}} ``` The `answers` field contains two subfields: `text` and `answer_start`. Use the [`~Dataset.flatten`] function to extract the subfields into their own separate columns: ```py >>> flat_dataset = dataset.flatten() >>> flat_dataset Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) ``` Notice how the subfields are now their own independent columns: `answers.text` and `answers.answer_start`. ## Map Some of the more powerful applications of 🤗 Datasets come from using the [`~Dataset.map`] function. The primary purpose of [`~Dataset.map`] is to speed up processing functions. It allows you to apply a processing function to each example in a dataset, independently or in batches. This function can even create new rows and columns. In the following example, prefix each `sentence1` value in the dataset with `'My sentence: '`. Start by creating a function that adds `'My sentence: '` to the beginning of each sentence. The function needs to accept and output a `dict`: ```py >>> def add_prefix(example): ... example["sentence1"] = 'My sentence: ' + example["sentence1"] ... return example ``` Now use [`~Dataset.map`] to apply the `add_prefix` function to the entire dataset: ```py >>> updated_dataset = small_dataset.map(add_prefix) >>> updated_dataset["sentence1"][:5] ['My sentence: Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .', "My sentence: Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .", 'My sentence: They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .', 'My sentence: Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .', ] ``` Let's take a look at another example, except this time, you'll remove a column with [`~Dataset.map`]. When you remove a column, it is only removed after the example has been provided to the mapped function. This allows the mapped function to use the content of the columns before they are removed. Specify the column to remove with the `remove_columns` parameter in [`~Dataset.map`]: ```py >>> updated_dataset = dataset.map(lambda example: {"new_sentence": example["sentence1"]}, remove_columns=["sentence1"]) >>> updated_dataset.column_names ['sentence2', 'label', 'idx', 'new_sentence'] ``` <Tip> 🤗 Datasets also has a [`~Dataset.remove_columns`] function which is faster because it doesn't copy the data of the remaining columns. </Tip> You can also use [`~Dataset.map`] with indices if you set `with_indices=True`. 
The example below adds the index to the beginning of each sentence: ```py >>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, with_indices=True) >>> updated_dataset["sentence2"][:5] ['0: Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .', "1: Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .", "2: On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .", '3: Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .', '4: PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .' ] ``` ### Multiprocessing Multiprocessing significantly speeds up processing by parallelizing processes on the CPU. Set the `num_proc` parameter in [`~Dataset.map`] to set the number of processes to use: ```py >>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, with_indices=True, num_proc=4) ``` The [`~Dataset.map`] also works with the rank of the process if you set `with_rank=True`. This is analogous to the `with_indices` parameter. The `with_rank` parameter in the mapped function goes after the `index` one if it is already present. ```py >>> import torch >>> from multiprocess import set_start_method >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> from datasets import load_dataset >>> >>> # Get an example dataset >>> dataset = load_dataset("fka/awesome-chatgpt-prompts", split="train") >>> >>> # Get an example model and its tokenizer >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B-Chat").eval() >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat") >>> >>> def gpu_computation(batch, rank): ... # Move the model on the right GPU if it's not there already ... device = f"cuda:{(rank or 0) % torch.cuda.device_count()}" ... model.to(device) ... ... # Your big GPU call goes here, for example: ... chats = [[ ... {"role": "system", "content": "You are a helpful assistant."}, ... {"role": "user", "content": prompt} ... ] for prompt in batch["prompt"]] ... texts = [tokenizer.apply_chat_template( ... chat, ... tokenize=False, ... add_generation_prompt=True ... ) for chat in chats] ... model_inputs = tokenizer(texts, padding=True, return_tensors="pt").to(device) ... with torch.no_grad(): ... outputs = model.generate(**model_inputs, max_new_tokens=512) ... batch["output"] = tokenizer.batch_decode(outputs, skip_special_tokens=True) ... return batch >>> >>> if __name__ == "__main__": ... set_start_method("spawn") ... updated_dataset = dataset.map( ... gpu_computation, ... batched=True, ... batch_size=16, ... with_rank=True, ... num_proc=torch.cuda.device_count(), # one process per GPU ... ) ``` The main use-case for rank is to parallelize computation across several GPUs. This requires setting `multiprocess.set_start_method("spawn")`. If you don't you'll receive the following CUDA error: ```bash RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the 'spawn' start method. ``` ### Batch processing The [`~Dataset.map`] function supports working with batches of examples. Operate on batches by setting `batched=True`. The default batch size is 1000, but you can adjust it with the `batch_size` parameter. 
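Before the more involved examples below, here is a minimal batched sketch (the function itself is just for illustration):

```py
>>> def uppercase(batch):
...     # `batch` is a dict of lists, e.g. {"sentence1": [...], "sentence2": [...], ...}
...     return {"sentence1": [sentence.upper() for sentence in batch["sentence1"]]}
>>> uppercased_dataset = dataset.map(uppercase, batched=True, batch_size=500)
```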
Batch processing enables interesting applications such as splitting long sentences into shorter chunks and data augmentation. #### Split long examples When examples are too long, you may want to split them into several smaller chunks. Begin by creating a function that: 1. Splits the `sentence1` field into chunks of 50 characters. 2. Stacks all the chunks together to create the new dataset. ```py >>> def chunk_examples(examples): ... chunks = [] ... for sentence in examples["sentence1"]: ... chunks += [sentence[i:i + 50] for i in range(0, len(sentence), 50)] ... return {"chunks": chunks} ``` Apply the function with [`~Dataset.map`]: ```py >>> chunked_dataset = dataset.map(chunk_examples, batched=True, remove_columns=dataset.column_names) >>> chunked_dataset[:10] {'chunks': ['Amrozi accused his brother , whom he called " the ', 'witness " , of deliberately distorting his evidenc', 'e .', "Yucaipa owned Dominick 's before selling the chain", ' to Safeway in 1998 for $ 2.5 billion .', 'They had published an advertisement on the Interne', 't on June 10 , offering the cargo for sale , he ad', 'ded .', 'Around 0335 GMT , Tab shares were up 19 cents , or', ' 4.4 % , at A $ 4.56 , having earlier set a record']} ``` Notice how the sentences are split into shorter chunks now, and there are more rows in the dataset. ```py >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'label', 'idx'], num_rows: 3668 }) >>> chunked_dataset Dataset({ features: ['chunks'], num_rows: 10470 }) ``` #### Data augmentation The [`~Dataset.map`] function could also be used for data augmentation. The following example generates additional words for a masked token in a sentence. Load and use the [RoBERTA](https://huggingface.co/roberta-base) model in 🤗 Transformers' [FillMaskPipeline](https://huggingface.co/transformers/main_classes/pipelines#transformers.FillMaskPipeline): ```py >>> from random import randint >>> from transformers import pipeline >>> fillmask = pipeline("fill-mask", model="roberta-base") >>> mask_token = fillmask.tokenizer.mask_token >>> smaller_dataset = dataset.filter(lambda e, i: i<100, with_indices=True) ``` Create a function to randomly select a word to mask in the sentence. The function should also return the original sentence and the top two replacements generated by RoBERTA. ```py >>> def augment_data(examples): ... outputs = [] ... for sentence in examples["sentence1"]: ... words = sentence.split(' ') ... K = randint(1, len(words)-1) ... masked_sentence = " ".join(words[:K] + [mask_token] + words[K+1:]) ... predictions = fillmask(masked_sentence) ... augmented_sequences = [predictions[i]["sequence"] for i in range(3)] ... outputs += [sentence] + augmented_sequences ... ... 
...     return {"data": outputs}
```

Use [`~Dataset.map`] to apply the function over the whole dataset:

```py
>>> augmented_dataset = smaller_dataset.map(augment_data, batched=True, remove_columns=dataset.column_names, batch_size=8)
>>> augmented_dataset[:9]["data"]
['Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
 'Amrozi accused his brother, whom he called " the witness ", of deliberately withholding his evidence.',
 'Amrozi accused his brother, whom he called " the witness ", of deliberately suppressing his evidence.',
 'Amrozi accused his brother, whom he called " the witness ", of deliberately destroying his evidence.',
 "Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
 'Yucaipa owned Dominick Stores before selling the chain to Safeway in 1998 for $ 2.5 billion.',
 "Yucaipa owned Dominick's before selling the chain to Safeway in 1998 for $ 2.5 billion.",
 'Yucaipa owned Dominick Pizza before selling the chain to Safeway in 1998 for $ 2.5 billion.'
]
```

For each original sentence, RoBERTa augmented a random word with three alternatives. The original word `distorting` is supplemented by `withholding`, `suppressing`, and `destroying`.

### Asynchronous processing

Asynchronous functions are useful for calling API endpoints in parallel, for example to download content like images or to call a model endpoint. You can define an asynchronous function using the `async` and `await` keywords. Here is an example function that calls a chat model from Hugging Face:

```python
>>> import aiohttp
>>> import asyncio
>>> from huggingface_hub import get_token
>>> sem = asyncio.Semaphore(20)  # max number of simultaneous queries
>>> async def query_model(model, prompt):
...     api_url = f"https://api-inference.huggingface.co/models/{model}/v1/chat/completions"
...     headers = {"Authorization": f"Bearer {get_token()}", "Content-Type": "application/json"}
...     json = {"messages": [{"role": "user", "content": prompt}], "max_tokens": 20, "seed": 42}
...     async with sem, aiohttp.ClientSession() as session, session.post(api_url, headers=headers, json=json) as response:
...         output = await response.json()
...         return {"Output": output["choices"][0]["message"]["content"]}
```

Asynchronous functions run concurrently, which accelerates the process a lot. The same code takes much longer if it runs sequentially, because it does nothing while waiting for each model response. It is generally recommended to use `async` / `await` when your function has to wait for a response from an API, for example, or when it downloads data that can take some time.

Note the presence of a `Semaphore`: it sets the maximum number of queries that can run in parallel. It is recommended to use a `Semaphore` when calling APIs to avoid rate limit errors.

Let's use it to call the [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) model and ask it to return the main topic of each math problem in the [Maxwell-Jia/AIME_2024](https://huggingface.co/Maxwell-Jia/AIME_2024) dataset:

````python
>>> from datasets import load_dataset
>>> ds = load_dataset("Maxwell-Jia/AIME_2024", split="train")
>>> model = "microsoft/Phi-3-mini-4k-instruct"
>>> prompt = 'What is this text mainly about ? Here is the text:\n\n```\n{Problem}\n```\n\nReply using one or two words max, e.g. "The main topic is Linear Algebra".'
>>> async def get_topic(example):
...     return await query_model(model, prompt.format(Problem=example['Problem']))
>>> ds = ds.map(get_topic)
>>> ds[0]
{'ID': '2024-II-4',
 'Problem': 'Let $x,y$ and $z$ be positive real numbers that...',
 'Solution': 'Denote $\\log_2(x) = a$, $\\log_2(y) = b$, and...,
 'Answer': 33,
 'Output': 'The main topic is Logarithms.'}
````

Here, [`Dataset.map`] runs many `get_topic` calls asynchronously, so it doesn't have to wait for every single model response before sending the next query, which would take a lot of time sequentially.

By default, [`Dataset.map`] runs up to one thousand map functions in parallel, so don't forget to set the maximum number of API calls that can run in parallel with a `Semaphore`, otherwise the model could return rate limit errors or be overloaded. For advanced use cases, you can change the maximum number of parallel queries in `datasets.config`.

### Process multiple splits

Many datasets have splits that can be processed simultaneously with [`DatasetDict.map`]. For example, tokenize the `sentence1` field in the train and test splits by:

```py
>>> from datasets import load_dataset

# load all the splits
>>> dataset = load_dataset('nyu-mll/glue', 'mrpc')
>>> encoded_dataset = dataset.map(lambda examples: tokenizer(examples["sentence1"]), batched=True)
>>> encoded_dataset["train"][0]
{'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
'label': 1,
'idx': 0,
'input_ids': [  101,  7277,  2180,  5303,  4806,  1117,  1711,   117,  2292, 1119,  1270,   107,  1103,  7737,   107,   117,  1104,  9938, 4267, 12223, 21811,  1117,  2554,   119,   102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
}
```

### Distributed usage

When you use [`~Dataset.map`] in a distributed setting, you should also use [torch.distributed.barrier](https://pytorch.org/docs/stable/distributed?highlight=barrier#torch.distributed.barrier). This ensures the main process performs the mapping, while the other processes load the results, thereby avoiding duplicate work.

The following example shows how you can use `torch.distributed.barrier` to synchronize the processes:

```py
>>> from datasets import Dataset
>>> import torch.distributed

>>> dataset1 = Dataset.from_dict({"a": [0, 1, 2]})

>>> if training_args.local_rank > 0:
...     print("Waiting for main process to perform the mapping")
...     torch.distributed.barrier()

>>> dataset2 = dataset1.map(lambda x: {"a": x["a"] + 1})

>>> if training_args.local_rank == 0:
...     print("Loading results from main process")
...     torch.distributed.barrier()
```

## Batch

The [`~Dataset.batch`] method allows you to group samples from the dataset into batches. This is particularly useful when you want to create batches of data for training or evaluation, especially when working with deep learning models.
Here's an example of how to use the `batch()` method: ```python >>> from datasets import load_dataset >>> dataset = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="train") >>> batched_dataset = dataset.batch(batch_size=4) >>> batched_dataset[0] {'text': ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .', 'effective but too-tepid biopic', 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'], 'label': [1, 1, 1, 1]} ``` The `batch()` method accepts the following parameters: - `batch_size` (`int`): The number of samples in each batch. - `drop_last_batch` (`bool`, defaults to `False`): Whether to drop the last incomplete batch if the dataset size is not divisible by the batch size. - `num_proc` (`int`, optional, defaults to `None`): The number of processes to use for multiprocessing. If None, no multiprocessing is used. This can significantly speed up batching for large datasets. Note that `Dataset.batch()` returns a new [`Dataset`] where each item is a batch of multiple samples from the original dataset. If you want to process data in batches, you should use a batched [`~Dataset.map`] directly, which applies a function to batches but the output dataset is unbatched. ## Concatenate Separate datasets can be concatenated if they share the same column types. Concatenate datasets with [`concatenate_datasets`]: ```py >>> from datasets import concatenate_datasets, load_dataset >>> stories = load_dataset("ajibawa-2023/General-Stories-Collection", split="train") >>> stories = stories.remove_columns([col for col in stories.column_names if col != "text"]) # only keep the 'text' column >>> wiki = load_dataset("wikimedia/wikipedia", "20220301.en", split="train") >>> wiki = wiki.remove_columns([col for col in wiki.column_names if col != "text"]) # only keep the 'text' column >>> assert stories.features.type == wiki.features.type >>> bert_dataset = concatenate_datasets([stories, wiki]) ``` You can also concatenate two datasets horizontally by setting `axis=1` as long as the datasets have the same number of rows: ```py >>> from datasets import Dataset >>> stories_ids = Dataset.from_dict({"ids": list(range(len(stories)))}) >>> stories_with_ids = concatenate_datasets([stories, stories_ids], axis=1) ``` ### Interleave You can also mix several datasets together by taking alternating examples from each one to create a new dataset. This is known as _interleaving_, which is enabled by the [`interleave_datasets`] function. Both [`interleave_datasets`] and [`concatenate_datasets`] work with regular [`Dataset`] and [`IterableDataset`] objects. Refer to the [Stream](./stream#interleave) guide for an example of how to interleave [`IterableDataset`] objects. You can define sampling probabilities for each of the original datasets to specify how to interleave the datasets. In this case, the new dataset is constructed by getting examples one by one from a random dataset until one of the datasets runs out of samples. 
```py
>>> from datasets import Dataset, interleave_datasets
>>> seed = 42
>>> probabilities = [0.3, 0.5, 0.2]
>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
>>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
>>> dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)
>>> dataset["a"]
[10, 11, 20, 12, 0, 21, 13]
```

You can also specify the `stopping_strategy`. The default strategy, `first_exhausted`, is a subsampling strategy, i.e. the dataset construction is stopped as soon as one of the datasets runs out of samples.

You can specify `stopping_strategy=all_exhausted` to execute an oversampling strategy. In this case, the dataset construction is stopped as soon as every sample in every dataset has been added at least once. In practice, it means that if a dataset is exhausted, iteration returns to the beginning of that dataset until the stop criterion has been reached. Note that if no sampling probabilities are specified, the new dataset will have `max_length_datasets * nb_dataset` samples.

```py
>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
>>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
>>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
>>> dataset["a"]
[0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 20]
```

## Format

The [`~Dataset.with_format`] function changes the format of a column to be compatible with some common data formats. Specify the output you'd like in the `type` parameter. You can also choose which columns you want to format using `columns=`. Formatting is applied on-the-fly.

For example, create PyTorch tensors by setting `type="torch"`:

```py
>>> dataset = dataset.with_format(type="torch")
```

The [`~Dataset.set_format`] function also changes the format of a column, except it runs in-place:

```py
>>> dataset.set_format(type="torch")
```

If you need to reset the dataset to its original format, set the format to `None` (or use [`~Dataset.reset_format`]):

```py
>>> dataset.format
{'type': 'torch', 'format_kwargs': {}, 'columns': [...], 'output_all_columns': False}
>>> dataset = dataset.with_format(None)
>>> dataset.format
{'type': None, 'format_kwargs': {}, 'columns': [...], 'output_all_columns': False}
```

### Tensors formats

Several tensor and array formats are supported. It is generally recommended to use these formats instead of converting outputs of a dataset to tensors or arrays manually, to avoid unnecessary data copies and accelerate data loading.

Here is the list of supported tensor and array formats:

- NumPy: format name is "numpy", for more information see [Using Datasets with NumPy](use_with_numpy)
- PyTorch: format name is "torch", for more information see [Using Datasets with PyTorch](use_with_pytorch)
- TensorFlow: format name is "tensorflow", for more information see [Using Datasets with TensorFlow](use_with_tensorflow)
- JAX: format name is "jax", for more information see [Using Datasets with JAX](use_with_jax)

<Tip>

Check out the [Using Datasets with TensorFlow](use_with_tensorflow#using-totfdataset) guide for more details on how to efficiently create a TensorFlow dataset.
</Tip>

When a dataset is formatted in a tensor or array format, all the data are formatted as tensors or arrays (except for unsupported types like strings, in the case of PyTorch, for example):

```python
>>> ds = Dataset.from_dict({"text": ["foo", "bar"], "tokens": [[0, 1, 2], [3, 4, 5]]})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'text': 'foo', 'tokens': tensor([0, 1, 2])}
>>> ds[:2]
{'text': ['foo', 'bar'], 'tokens': tensor([[0, 1, 2], [3, 4, 5]])}
```

### Tabular formats

You can use a dataframe or table format to optimize data loading and data processing, since these formats generally offer zero-copy operations and transforms written in low-level languages.

Here is the list of supported dataframe and table formats:

- Pandas: format name is "pandas", for more information see [Using Datasets with Pandas](use_with_pandas)
- Polars: format name is "polars", for more information see [Using Datasets with Polars](use_with_polars)
- PyArrow: format name is "arrow", for more information see [Using Datasets with PyArrow](use_with_pyarrow)

When a dataset is formatted in a dataframe or table format, every dataset row or batch of rows is formatted as a dataframe or table, and dataset columns are formatted as a series or array:

```python
>>> ds = Dataset.from_dict({"text": ["foo", "bar"], "label": [0, 1]})
>>> ds = ds.with_format("pandas")
>>> ds[:2]
  text  label
0  foo      0
1  bar      1
```

Those formats make it possible to iterate on the data faster by avoiding data copies, and also enable faster data processing in [`~Dataset.map`] or [`~Dataset.filter`]:

```python
>>> ds = ds.map(lambda df: df.assign(upper_text=df.text.str.upper()), batched=True)
>>> ds[:2]
  text  label upper_text
0  foo      0        FOO
1  bar      1        BAR
```

### Custom format transform

The [`~Dataset.with_transform`] function applies a custom formatting transform on-the-fly. This function replaces any previously specified format. For example, you can use this function to tokenize and pad tokens on-the-fly. Tokenization is only applied when examples are accessed:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> def encode(batch):
...     return tokenizer(batch["sentence1"], batch["sentence2"], padding="longest", truncation=True, max_length=512, return_tensors="pt")
>>> dataset = dataset.with_transform(encode)
>>> dataset.format
{'type': 'custom', 'format_kwargs': {'transform': <function __main__.encode(batch)>}, 'columns': ['idx', 'label', 'sentence1', 'sentence2'], 'output_all_columns': False}
```

There is also [`~Dataset.set_transform`] which does the same but runs in-place.

You can also use the [`~Dataset.with_transform`] function for custom decoding on [`Features`]. The example below uses the [`pydub`](http://pydub.com/) package as an alternative to `torchcodec` decoding:

```py
>>> import numpy as np
>>> from pydub import AudioSegment

>>> audio_dataset_amr = Dataset.from_dict({"audio": ["audio_samples/audio.amr"]})
>>> def decode_audio_with_pydub(batch, sampling_rate=16_000):
...     def pydub_decode_file(audio_path):
...         sound = AudioSegment.from_file(audio_path)
...         if sound.frame_rate != sampling_rate:
...             sound = sound.set_frame_rate(sampling_rate)
...         channel_sounds = sound.split_to_mono()
...         samples = [s.get_array_of_samples() for s in channel_sounds]
...         fp_arr = np.array(samples).T.astype(np.float32)
...         fp_arr /= np.iinfo(samples[0].typecode).max
...         return fp_arr
...
...     batch["audio"] = [pydub_decode_file(audio_path) for audio_path in batch["audio"]]
...     return batch
>>> audio_dataset_amr.set_transform(decode_audio_with_pydub)
```

## Save

Once your dataset is ready, you can save it as a Hugging Face Dataset in Parquet format and reuse it later with [`load_dataset`].

Save your dataset by passing to [`~Dataset.push_to_hub`] the name of the dataset repository on Hugging Face you wish to save it to:

```python
encoded_dataset.push_to_hub("username/my_dataset")
```

You can use multiple processes to upload it in parallel, which is especially useful for speeding up the upload of large datasets:

```python
dataset.push_to_hub("username/my_dataset", num_proc=8)
```

Use the [`load_dataset`] function to reload the dataset (in streaming mode or not):

```python
from datasets import load_dataset

reloaded_dataset = load_dataset("username/my_dataset", streaming=True)
```

Alternatively, you can save it locally in Arrow format on disk. Compared to Parquet, Arrow is uncompressed, which makes it much faster to reload; this is great for local use on disk and ephemeral caching. But since it's larger and carries less metadata, it is slower to upload/download/query than Parquet and less suited for long-term storage. Use the [`~Dataset.save_to_disk`] and [`load_from_disk`] functions to save and reload the dataset from your disk:

```py
>>> encoded_dataset.save_to_disk("path/of/my/dataset/directory")
>>> # later
>>> from datasets import load_from_disk
>>> reloaded_dataset = load_from_disk("path/of/my/dataset/directory")
```

## Export

🤗 Datasets supports exporting as well so you can work with your dataset in other applications. The following table shows currently supported file formats you can export to:

| File type               | Export method                                                        |
| ----------------------- | ------------------------------------------------------------------- |
| CSV                     | [`Dataset.to_csv`]                                                   |
| JSON                    | [`Dataset.to_json`]                                                  |
| Parquet                 | [`Dataset.to_parquet`]                                               |
| SQL                     | [`Dataset.to_sql`]                                                   |
| In-memory Python object | [`Dataset.to_pandas`], [`Dataset.to_polars`] or [`Dataset.to_dict`] |

For example, export your dataset to a CSV file like this:

```py
>>> encoded_dataset.to_csv("path/of/my/dataset.csv")
```
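Similarly, here is a minimal sketch of exporting to JSON with [`Dataset.to_json`], which writes JSON Lines (one JSON object per line) by default; the file path is illustrative:

```py
>>> encoded_dataset.to_json("path/of/my/dataset.jsonl")
```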
datasets/docs/source/process.mdx/0
{ "file_path": "datasets/docs/source/process.mdx", "repo_id": "datasets", "token_count": 12781 }
101
# Use with PyTorch

This document is a quick introduction to using `datasets` with PyTorch, with a particular focus on how to get `torch.Tensor` objects out of our datasets, and how to use a PyTorch `DataLoader` and a Hugging Face `Dataset` with the best performance.

## Dataset format

By default, datasets return regular python objects: integers, floats, strings, lists, etc.

To get PyTorch tensors instead, you can set the format of the dataset to `torch` using [`Dataset.with_format`]:

```py
>>> from datasets import Dataset
>>> data = [[1, 2],[3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([1, 2])}
>>> ds[:2]
{'data': tensor([[1, 2],
         [3, 4]])}
```

<Tip>

A [`Dataset`] object is a wrapper of an Arrow table, which allows fast zero-copy reads from arrays in the dataset to PyTorch tensors.

</Tip>

To load the data as tensors on a GPU, specify the `device` argument:

```py
>>> import torch
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> ds = ds.with_format("torch", device=device)
>>> ds[0]
{'data': tensor([1, 2], device='cuda:0')}
```

### N-dimensional arrays

If your dataset consists of N-dimensional arrays, you will see that by default they are considered as a single tensor if the shape is fixed:

```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]  # fixed shape
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([[1, 2],
         [3, 4]])}
```

```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3]],[[4, 5, 6],[7, 8]]]  # varying shape
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': [tensor([1, 2]), tensor([3])]}
```

However, this logic often requires slow shape comparisons and data copies. To avoid this, you must explicitly use the `Array` feature types (e.g. [`Array2D`]) and specify the shape of your tensors:

```py
>>> from datasets import Dataset, Features, Array2D
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')})
>>> ds = Dataset.from_dict({"data": data}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([[1, 2],
         [3, 4]])}
>>> ds[:2]
{'data': tensor([[[1, 2],
          [3, 4]],

         [[5, 6],
          [7, 8]]])}
```

### Other feature types

[`ClassLabel`] data are properly converted to tensors:

```py
>>> from datasets import Dataset, Features, ClassLabel
>>> labels = [0, 0, 1]
>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
>>> ds = Dataset.from_dict({"label": labels}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[:3]
{'label': tensor([0, 0, 1])}
```

String and binary objects are unchanged, since PyTorch only supports numbers.

The [`Image`] and [`Audio`] feature types are also supported.

<Tip>

To use the [`Image`] feature type, you'll need to install the `vision` extra as `pip install datasets[vision]`.
</Tip>

```py
>>> from datasets import Dataset, Features, Audio, Image
>>> images = ["path/to/image.png"] * 10
>>> features = Features({"image": Image()})
>>> ds = Dataset.from_dict({"image": images}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]["image"].shape
torch.Size([512, 512, 4])
>>> ds[0]
{'image': tensor([[[255, 215, 106, 255],
         [255, 215, 106, 255],
         ...,
         [255, 255, 255, 255],
         [255, 255, 255, 255]]], dtype=torch.uint8)}
>>> ds[:2]["image"].shape
torch.Size([2, 512, 512, 4])
>>> ds[:2]
{'image': tensor([[[[255, 215, 106, 255],
          [255, 215, 106, 255],
          ...,
          [255, 255, 255, 255],
          [255, 255, 255, 255]]]], dtype=torch.uint8)}
```

<Tip>

To use the [`Audio`] feature type, you'll need to install the `audio` extra as `pip install datasets[audio]`.

</Tip>

```py
>>> from datasets import Dataset, Features, Audio, Image
>>> audio = ["path/to/audio.wav"] * 10
>>> features = Features({"audio": Audio()})
>>> ds = Dataset.from_dict({"audio": audio}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]["audio"]["array"]
tensor([ 6.1035e-05,  1.5259e-05,  1.6785e-04,  ..., -1.5259e-05,
        -1.5259e-05,  1.5259e-05])
>>> ds[0]["audio"]["sampling_rate"]
tensor(44100)
```

## Data loading

Like `torch.utils.data.Dataset` objects, a [`Dataset`] can be passed directly to a PyTorch `DataLoader`:

```py
>>> import numpy as np
>>> from datasets import Dataset
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(16)
>>> label = np.random.randint(0, 2, size=16)
>>> ds = Dataset.from_dict({"data": data, "label": label}).with_format("torch")
>>> dataloader = DataLoader(ds, batch_size=4)
>>> for batch in dataloader:
...     print(batch)
{'data': tensor([0.0047, 0.4979, 0.6726, 0.8105]), 'label': tensor([0, 1, 0, 1])}
{'data': tensor([0.4832, 0.2723, 0.4259, 0.2224]), 'label': tensor([0, 0, 0, 0])}
{'data': tensor([0.5837, 0.3444, 0.4658, 0.6417]), 'label': tensor([0, 1, 0, 0])}
{'data': tensor([0.7022, 0.1225, 0.7228, 0.8259]), 'label': tensor([1, 1, 1, 1])}
```

### Optimize data loading

There are several ways you can increase the speed at which your data is loaded, which can save you time, especially if you are working with large datasets. PyTorch offers parallelized data loading, batched retrieval of indices instead of fetching them individually, and streaming to iterate over the dataset without downloading it to disk.

#### Use multiple workers

You can parallelize data loading with the `num_workers` argument of a PyTorch `DataLoader` and get a higher throughput.

Under the hood, the `DataLoader` starts `num_workers` processes. Each process reloads the dataset passed to the `DataLoader` and is used to query examples. Reloading the dataset inside a worker doesn't fill up your RAM, since it simply memory-maps the dataset again from your disk.

```py
>>> import numpy as np
>>> from datasets import Dataset, load_from_disk
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(10_000)
>>> Dataset.from_dict({"data": data}).save_to_disk("my_dataset")
>>> ds = load_from_disk("my_dataset").with_format("torch")
>>> dataloader = DataLoader(ds, batch_size=32, num_workers=4)
```

### Stream data

Stream a dataset by loading it as an [`IterableDataset`]. This allows you to progressively iterate over a remote dataset without downloading it to disk, or over local data files. Learn more about which type of dataset is best for your use case in the [choosing between a regular dataset or an iterable dataset](./about_mapstyle_vs_iterable) guide.
An iterable dataset from `datasets` inherits from `torch.utils.data.IterableDataset` so you can pass it to a `torch.utils.data.DataLoader`:

```py
>>> import numpy as np
>>> from datasets import Dataset, load_dataset
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(10_000)
>>> Dataset.from_dict({"data": data}).push_to_hub("<username>/my_dataset")  # Upload to the Hugging Face Hub
>>> my_iterable_dataset = load_dataset("<username>/my_dataset", streaming=True, split="train")
>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32)
```

If the dataset is split into several shards (i.e. if the dataset consists of multiple data files), then you can stream in parallel using `num_workers`:

```py
>>> my_iterable_dataset = load_dataset("deepmind/code_contests", streaming=True, split="train")
>>> my_iterable_dataset.num_shards
39
>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32, num_workers=4)
```

In this case each worker is given a subset of the list of shards to stream from.

### Checkpoint and resume

If you need a DataLoader that you can checkpoint and resume in the middle of training, you can use the `StatefulDataLoader` from [torchdata](https://github.com/pytorch/data):

```py
>>> from torchdata.stateful_dataloader import StatefulDataLoader
>>> my_iterable_dataset = load_dataset("deepmind/code_contests", streaming=True, split="train")
>>> dataloader = StatefulDataLoader(my_iterable_dataset, batch_size=32, num_workers=4)
>>> # save in the middle of training
>>> state_dict = dataloader.state_dict()
>>> # and resume later
>>> dataloader.load_state_dict(state_dict)
```

This is possible thanks to [`IterableDataset.state_dict`] and [`IterableDataset.load_state_dict`].

### Distributed

To split your dataset across your training nodes, you can use [`datasets.distributed.split_dataset_by_node`]:

```python
import os
from datasets.distributed import split_dataset_by_node

ds = split_dataset_by_node(ds, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
```

This works for both map-style datasets and iterable datasets. The dataset is split for the node at rank `rank` in a pool of nodes of size `world_size`.

For map-style datasets: Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.

For iterable datasets: If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.num_shards % world_size == 0`), then the shards are evenly assigned across the nodes, which is the most efficient. Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.

This can also be combined with a `torch.utils.data.DataLoader` if you want each node to use multiple workers to load the data.
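For instance, here is a minimal sketch that combines the streaming, `split_dataset_by_node`, and `DataLoader` snippets above (assuming the launcher sets the usual `RANK` and `WORLD_SIZE` environment variables):

```python
import os

from datasets import load_dataset
from datasets.distributed import split_dataset_by_node
from torch.utils.data import DataLoader

# Stream the dataset, keep only this node's share of the shards,
# then let several workers stream those shards in parallel.
ds = load_dataset("deepmind/code_contests", streaming=True, split="train")
ds = split_dataset_by_node(ds, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
dataloader = DataLoader(ds, batch_size=32, num_workers=4)
```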
datasets/docs/source/use_with_pytorch.mdx/0
{ "file_path": "datasets/docs/source/use_with_pytorch.mdx", "repo_id": "datasets", "token_count": 3446 }
102
from argparse import ArgumentParser from typing import Optional from datasets.commands import BaseDatasetsCLICommand from datasets.hub import delete_from_hub def _command_factory(args): return DeleteFromHubCommand( args.dataset_id, args.config_name, args.token, args.revision, ) class DeleteFromHubCommand(BaseDatasetsCLICommand): @staticmethod def register_subcommand(parser): parser: ArgumentParser = parser.add_parser("delete_from_hub", help="Delete dataset config from the Hub") parser.add_argument( "dataset_id", help="source dataset ID, e.g. USERNAME/DATASET_NAME or ORGANIZATION/DATASET_NAME" ) parser.add_argument("config_name", help="config name to delete") parser.add_argument("--token", help="access token to the Hugging Face Hub") parser.add_argument("--revision", help="source revision") parser.set_defaults(func=_command_factory) def __init__( self, dataset_id: str, config_name: str, token: Optional[str], revision: Optional[str], ): self._dataset_id = dataset_id self._config_name = config_name self._token = token self._revision = revision def run(self) -> None: _ = delete_from_hub(self._dataset_id, self._config_name, revision=self._revision, token=self._token)
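# A sketch of how this command is typically invoked (the `datasets-cli` entry point is
# assumed here, and the token value below is illustrative):
#
#   datasets-cli delete_from_hub USERNAME/DATASET_NAME CONFIG_NAME --token hf_xxx
#
# or programmatically, mirroring what the argument parser does:
#
#   DeleteFromHubCommand("USERNAME/DATASET_NAME", "CONFIG_NAME", token=None, revision=None).run()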
datasets/src/datasets/commands/delete_from_hub.py/0
{ "file_path": "datasets/src/datasets/commands/delete_from_hub.py", "repo_id": "datasets", "token_count": 562 }
103
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.download_config import DownloadConfig from ..table import array_cast from ..utils.file_utils import is_local_path, xopen from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _IMAGE_COMPRESSION_FORMATS: Optional[list[str]] = None _NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _VALID_IMAGE_ARRAY_DTPYES = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class Image: """Image [`Feature`] to read image data from an image file. Input: The Image feature accepts as input: - A `str`: Absolute path to the image file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the image file to the archive file. - `bytes`: Bytes of the image file. This is useful for parquet or webdataset files which embed image files. - An `np.ndarray`: NumPy array representing an image. - A `PIL.Image.Image`: PIL image object. Output: The Image features output data as `PIL.Image.Image` objects. Args: mode (`str`, *optional*): The mode to convert the image to. If `None`, the native mode of the image is used. decode (`bool`, defaults to `True`): Whether to decode the image data. If `False`, returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`. Examples: ```py >>> from datasets import load_dataset, Image >>> ds = load_dataset("AI-Lab-Makerere/beans", split="train") >>> ds.features["image"] Image(decode=True, id=None) >>> ds[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0> >>> ds = ds.cast_column('image', Image(decode=False)) {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'} ``` """ mode: Optional[str] = None decode: bool = True id: Optional[str] = field(default=None, repr=False) # Automatically constructed dtype: ClassVar[str] = "PIL.Image.Image" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Image", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, bytearray, dict, np.ndarray, "PIL.Image.Image"]) -> dict: """Encode example into a format for Arrow. Args: value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`): Data passed as input to Image feature. 
Returns: `dict` with "path" and "bytes" fields """ if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if isinstance(value, list): value = np.array(value) if isinstance(value, str): return {"path": value, "bytes": None} elif isinstance(value, (bytes, bytearray)): return {"path": None, "bytes": value} elif isinstance(value, np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(value) elif isinstance(value, PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(value) elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image": """Decode example image file into image data. Args: value (`str` or `dict`): A string with the absolute image file path, a dictionary with keys: - `path`: String with absolute or relative image file path. - `bytes`: The bytes of the image file. token_per_repo_id (`dict`, *optional*): To access and decode image files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`). Returns: `PIL.Image.Image` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.") if config.PIL_AVAILABLE: import PIL.Image import PIL.ImageOps else: raise ImportError("To support decoding images, please install 'Pillow'.") if token_per_repo_id is None: token_per_repo_id = {} path, bytes_ = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.") else: if is_local_path(path): image = PIL.Image.open(path) else: source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) source_url_fields = string_to_dict(source_url, pattern) token = ( token_per_repo_id.get(source_url_fields["repo_id"]) if source_url_fields is not None else None ) download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: bytes_ = BytesIO(f.read()) image = PIL.Image.open(bytes_) else: image = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None: image = PIL.ImageOps.exif_transpose(image) if self.mode and self.mode != image.mode: image = image.convert(self.mode) return image def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]: """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.""" from .features import Value return ( self if self.decode else { "bytes": Value("binary"), "path": Value("string"), } ) def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray: """Cast an Arrow array to the Image arrow storage type. 
The Arrow types that can be converted to the Image pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the image bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter - `pa.list(*)` - it must contain the image array data Args: storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. """ if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_list(storage.type): bytes_array = pa.array( [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), ) path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays( [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray, token_per_repo_id=None) -> pa.StructArray: """Embed image files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" if token_per_repo_id is None: token_per_repo_id = {} @no_op_if_value_is_null def path_to_bytes(path): source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) source_url_fields = string_to_dict(source_url, pattern) token = token_per_repo_id.get(source_url_fields["repo_id"]) if source_url_fields is not None else None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: return f.read() bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type) def list_image_compression_formats() -> list[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys())) return _IMAGE_COMPRESSION_FORMATS def image_to_bytes(image: "PIL.Image.Image") -> bytes: """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression.""" buffer = BytesIO() if image.format in list_image_compression_formats(): format = image.format else: format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF" image.save(buffer, format=format) return buffer.getvalue() def encode_pil_image(image: "PIL.Image.Image") -> dict: if hasattr(image, "filename") and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(image)} def encode_np_array(array: np.ndarray) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") dtype = array.dtype dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER dtype_kind = dtype.kind dtype_itemsize = dtype.itemsize dest_dtype = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: if dtype_kind not in ["u", "i"]: raise TypeError( f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." ) dest_dtype = np.dtype("|u1") if dtype != dest_dtype: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: dest_dtype = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize) if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES: dest_dtype = np.dtype(dtype_str) warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"Cannot downcast dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" ) image = PIL.Image.fromarray(array.astype(dest_dtype)) return {"path": None, "bytes": image_to_bytes(image)} def objects_to_list_of_image_dicts( objs: Union[list[str], list[dict], list[np.ndarray], list["PIL.Image.Image"]], ) -> list[dict]: """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`.""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if objs: _, obj = first_non_null_value(objs) if isinstance(obj, str): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(obj, np.ndarray): obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array) return [obj_to_image_dict_func(obj) for obj in objs] elif isinstance(obj, PIL.Image.Image): obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image) return [obj_to_image_dict_func(obj) for obj in objs] else: return objs else: return objs
datasets/src/datasets/features/image.py/0
{ "file_path": "datasets/src/datasets/features/image.py", "repo_id": "datasets", "token_count": 7271 }
104
# Copyright 2020 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """List and inspect datasets.""" import os from collections.abc import Mapping, Sequence from typing import Optional, Union from .download.download_config import DownloadConfig from .download.download_manager import DownloadMode from .download.streaming_download_manager import StreamingDownloadManager from .info import DatasetInfo from .load import ( dataset_module_factory, get_dataset_builder_class, load_dataset_builder, ) from .utils.logging import get_logger from .utils.version import Version logger = get_logger(__name__) class SplitsNotFoundError(ValueError): pass def get_dataset_infos( path: str, data_files: Optional[Union[dict, list, str]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, **config_kwargs, ): """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict. Args: path (`str`): path to the dataset repository. Can be either: - a local path to the dataset directory containing the data files, e.g. `'./dataset/squad'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'rajpurkar/squad'`, `'nyu-mll/glue'` or``'openai/webtext'` revision (`Union[str, datasets.Version]`, *optional*): If specified, the dataset module will be loaded from the datasets repository at this version. By default: - it is set to the local version of the lib. - it will also try to load it from the main branch if it's not available at the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. data_files (`Union[Dict, List, str]`, *optional*): Defining the data_files of the dataset configuration. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. **config_kwargs (additional keyword arguments): Optional attributes for builder class which will override the attributes if supplied. 
Example: ```py >>> from datasets import get_dataset_infos >>> get_dataset_infos('cornell-movie-review-data/rotten_tomatoes') {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...} ``` """ config_names = get_dataset_config_names( path=path, revision=revision, download_config=download_config, download_mode=download_mode, data_files=data_files, token=token, ) return { config_name: get_dataset_config_info( path=path, config_name=config_name, data_files=data_files, download_config=download_config, download_mode=download_mode, revision=revision, token=token, **config_kwargs, ) for config_name in config_names } def get_dataset_config_names( path: str, revision: Optional[Union[str, Version]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, data_files: Optional[Union[dict, list, str]] = None, **download_kwargs, ): """Get the list of available config names for a particular dataset. Args: path (`str`): path to the dataset repository. Can be either: - a local path to the dataset directory containing the data files, e.g. `'./dataset/squad'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'rajpurkar/squad'`, `'nyu-mll/glue'` or``'openai/webtext'` revision (`Union[str, datasets.Version]`, *optional*): If specified, the dataset module will be loaded from the datasets repository at this version. By default: - it is set to the local version of the lib. - it will also try to load it from the main branch if it's not available at the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. data_files (`Union[Dict, List, str]`, *optional*): Defining the data_files of the dataset configuration. **download_kwargs (additional keyword arguments): Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied, for example `token`. Example: ```py >>> from datasets import get_dataset_config_names >>> get_dataset_config_names("nyu-mll/glue") ['cola', 'sst2', 'mrpc', 'qqp', 'stsb', 'mnli', 'mnli_mismatched', 'mnli_matched', 'qnli', 'rte', 'wnli', 'ax'] ``` """ dataset_module = dataset_module_factory( path, revision=revision, download_config=download_config, download_mode=download_mode, data_files=data_files, **download_kwargs, ) builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path)) return list(builder_cls.builder_configs.keys()) or [ dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default") ] def get_dataset_default_config_name( path: str, revision: Optional[Union[str, Version]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, data_files: Optional[Union[dict, list, str]] = None, **download_kwargs, ) -> Optional[str]: """Get the default config name for a particular dataset. Can return None only if the dataset has multiple configurations and no default configuration. Args: path (`str`): path to the dataset repository. 
Can be either: - a local path to the dataset directory containing the data files, e.g. `'./dataset/squad'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'rajpurkar/squad'`, `'nyu-mll/glue'` or``'openai/webtext'` revision (`Union[str, datasets.Version]`, *optional*): If specified, the dataset module will be loaded from the datasets repository at this version. By default: - it is set to the local version of the lib. - it will also try to load it from the main branch if it's not available at the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. data_files (`Union[Dict, List, str]`, *optional*): Defining the data_files of the dataset configuration. **download_kwargs (additional keyword arguments): Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied, for example `token`. Returns: Optional[str]: the default config name if there is one Example: ```py >>> from datasets import get_dataset_default_config_name >>> get_dataset_default_config_name("openbookqa") 'main' ``` """ dataset_module = dataset_module_factory( path, revision=revision, download_config=download_config, download_mode=download_mode, data_files=data_files, **download_kwargs, ) builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path)) builder_configs = list(builder_cls.builder_configs.keys()) if builder_configs: default_config_name = builder_configs[0] if len(builder_configs) == 1 else None else: default_config_name = "default" return builder_cls.DEFAULT_CONFIG_NAME or default_config_name def get_dataset_config_info( path: str, config_name: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, **config_kwargs, ) -> DatasetInfo: """Get the meta information (DatasetInfo) about a dataset for a particular config Args: path (`str`): path to the dataset repository. Can be either: - a local path to the dataset directory containing the data files, e.g. `'./dataset/squad'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'rajpurkar/squad'`, `'nyu-mll/glue'` or``'openai/webtext'` config_name (:obj:`str`, optional): Defining the name of the dataset configuration. data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s). download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters. download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode. revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. 
token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If True, or not specified, will get token from `"~/.huggingface"`. **config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied. """ builder = load_dataset_builder( path, name=config_name, data_files=data_files, download_config=download_config, download_mode=download_mode, revision=revision, token=token, **config_kwargs, ) info = builder.info if info.splits is None: download_config = download_config.copy() if download_config else DownloadConfig() if token is not None: download_config.token = token builder._check_manual_download( StreamingDownloadManager(base_path=builder.base_path, download_config=download_config) ) try: info.splits = { split_generator.name: {"name": split_generator.name, "dataset_name": path} for split_generator in builder._split_generators( StreamingDownloadManager(base_path=builder.base_path, download_config=download_config) ) } except Exception as err: raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err return info def get_dataset_split_names( path: str, config_name: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, **config_kwargs, ): """Get the list of available splits for a particular config and dataset. Args: path (`str`): path to the dataset repository. Can be either: - a local path to the dataset directory containing the data files, e.g. `'./dataset/squad'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'rajpurkar/squad'`, `'nyu-mll/glue'` or``'openai/webtext'` config_name (`str`, *optional*): Defining the name of the dataset configuration. data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. revision ([`Version`] or `str`, *optional*): Version of the dataset to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. **config_kwargs (additional keyword arguments): Optional attributes for builder class which will override the attributes if supplied. Example: ```py >>> from datasets import get_dataset_split_names >>> get_dataset_split_names('cornell-movie-review-data/rotten_tomatoes') ['train', 'validation', 'test'] ``` """ info = get_dataset_config_info( path, config_name=config_name, data_files=data_files, download_config=download_config, download_mode=download_mode, revision=revision, token=token, **config_kwargs, ) return list(info.splits.keys())
datasets/src/datasets/inspect.py/0
{ "file_path": "datasets/src/datasets/inspect.py", "repo_id": "datasets", "token_count": 5953 }
105
import itertools from dataclasses import dataclass from typing import Optional import pyarrow as pa import datasets from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class ArrowConfig(datasets.BuilderConfig): """BuilderConfig for Arrow.""" features: Optional[datasets.Features] = None def __post_init__(self): super().__post_init__() class Arrow(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = ArrowConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") dl_manager.download_config.extract_on_the_fly = True data_files = dl_manager.download_and_extract(self.config.data_files) splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] # Infer features if they are stored in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(files): with open(file, "rb") as f: try: reader = pa.ipc.open_stream(f) except (OSError, pa.lib.ArrowInvalid): reader = pa.ipc.open_file(f) self.info.features = datasets.Features.from_arrow_schema(reader.schema) break splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.info.features.arrow_schema) return pa_table def _generate_tables(self, files): for file_idx, file in enumerate(itertools.chain.from_iterable(files)): with open(file, "rb") as f: try: try: batches = pa.ipc.open_stream(f) except (OSError, pa.lib.ArrowInvalid): reader = pa.ipc.open_file(f) batches = (reader.get_batch(i) for i in range(reader.num_record_batches)) for batch_idx, record_batch in enumerate(batches): pa_table = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
datasets/src/datasets/packaged_modules/arrow/arrow.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/arrow/arrow.py", "repo_id": "datasets", "token_count": 1641 }
106
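The builder above probes each file for the Arrow IPC *stream* format and falls back to the IPC *file* format. A standalone sketch of that probe, assuming only `pyarrow` is installed (`open_arrow_batches` and `sample.arrow` are hypothetical names):

```py
import pyarrow as pa

def open_arrow_batches(path):
    # Mirror the loader's fallback: try the IPC stream format first,
    # then the IPC file format if the stream header is not found.
    with open(path, "rb") as f:
        try:
            return list(pa.ipc.open_stream(f))  # iterating yields RecordBatch objects
        except (OSError, pa.lib.ArrowInvalid):
            f.seek(0)  # rewind past whatever the failed probe consumed
            reader = pa.ipc.open_file(f)
            return [reader.get_batch(i) for i in range(reader.num_record_batches)]

# batches = open_arrow_batches("sample.arrow")
```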
import io import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import pyarrow.json as paj import datasets import datasets.config from datasets.table import table_cast from datasets.utils.file_utils import readline logger = datasets.utils.logging.get_logger(__name__) def ujson_dumps(*args, **kwargs): try: return pd.io.json.ujson_dumps(*args, **kwargs) except AttributeError: # Before pandas-2.2.0, ujson_dumps was renamed to dumps: import ujson_dumps as dumps return pd.io.json.dumps(*args, **kwargs) def ujson_loads(*args, **kwargs): try: return pd.io.json.ujson_loads(*args, **kwargs) except AttributeError: # Before pandas-2.2.0, ujson_loads was renamed to loads: import ujson_loads as loads return pd.io.json.loads(*args, **kwargs) def pandas_read_json(path_or_buf, **kwargs): if datasets.config.PANDAS_VERSION.major >= 2: kwargs["dtype_backend"] = "pyarrow" return pd.read_json(path_or_buf, **kwargs) @dataclass class JsonConfig(datasets.BuilderConfig): """BuilderConfig for JSON.""" features: Optional[datasets.Features] = None encoding: str = "utf-8" encoding_errors: Optional[str] = None field: Optional[str] = None use_threads: bool = True # deprecated block_size: Optional[int] = None # deprecated chunksize: int = 10 << 20 # 10MB newlines_in_values: Optional[bool] = None def __post_init__(self): super().__post_init__() class Json(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = JsonConfig def _info(self): if self.config.block_size is not None: logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead") self.config.chunksize = self.config.block_size if self.config.use_threads is not True: logger.warning( "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." 
) if self.config.newlines_in_values is not None: raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported") return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") dl_manager.download_config.extract_on_the_fly = True data_files = dl_manager.download_and_extract(self.config.data_files) splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): type = self.config.features.arrow_schema.field(column_name).type pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type)) # convert to string when needed for i, column_name in enumerate(pa_table.column_names): if pa.types.is_struct(pa_table[column_name].type) and self.config.features.get( column_name, None ) == datasets.Value("string"): jsonl = ( pa_table[column_name] .to_pandas(types_mapper=pd.ArrowDtype) .to_json(orient="records", lines=True) ) string_array = pa.array( ("{" + x.rstrip() for x in ("\n" + jsonl).split("\n{") if x), type=pa.string() ) pa_table = pa_table.set_column(i, column_name, string_array) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.config.features.arrow_schema) return pa_table def _generate_tables(self, files): for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # If the file is one json object and if we need to look at the items in one specific field if self.config.field is not None: with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: dataset = ujson_loads(f.read()) # We keep only the field we are interested in dataset = dataset[self.config.field] df = pandas_read_json(io.StringIO(ujson_dumps(dataset))) if df.columns.tolist() == [0]: df.columns = list(self.config.features) if self.config.features else ["text"] pa_table = pa.Table.from_pandas(df, preserve_index=False) yield file_idx, self._cast_table(pa_table) # If the file has one json object per line else: with open(file, "rb") as f: batch_idx = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small block_size = max(self.config.chunksize // 32, 16 << 10) encoding_errors = ( self.config.encoding_errors if self.config.encoding_errors is not None else "strict" ) while True: batch = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(f) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8") try: while True: try: pa_table = paj.read_json( io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(e, 
pa.ArrowInvalid) and "straddling" not in str(e) or block_size > len(batch) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( file, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f: df = pandas_read_json(f) except ValueError: logger.error(f"Failed to load JSON from file '{file}' with error {type(e)}: {e}") raise e if df.columns.tolist() == [0]: df.columns = list(self.config.features) if self.config.features else ["text"] try: pa_table = pa.Table.from_pandas(df, preserve_index=False) except pa.ArrowInvalid as e: logger.error( f"Failed to convert pandas DataFrame to Arrow Table from file '{file}' with error {type(e)}: {e}" ) raise ValueError( f"Failed to convert pandas DataFrame to Arrow Table from file {file}." ) from None yield file_idx, self._cast_table(pa_table) break yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1
datasets/src/datasets/packaged_modules/json/json.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/json/json.py", "repo_id": "datasets", "token_count": 4992 }
107
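The core of the JSON-lines path above is the `block_size` retry loop: pyarrow fails when a single JSON object straddles a block boundary, so the loader doubles the block size and tries again. A minimal sketch of just that loop, with illustrative names:

```py
import io

import pyarrow as pa
import pyarrow.json as paj

def parse_json_batch(batch: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
        except (pa.ArrowInvalid, pa.ArrowNotImplementedError):
            if block_size > len(batch):
                raise  # growing the block cannot help anymore
            block_size *= 2  # retry with a larger block, as the loader above does
```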
# # Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved. # This file comes from the WebDataset library. # See the LICENSE file for licensing terms (BSD-style). # """ Binary tensor encodings for PyTorch and NumPy. This defines efficient binary encodings for tensors. The format is 8 byte aligned and can be used directly for computations when transmitted, say, via RDMA. The format is supported by WebDataset with the `.ten` filename extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used for fast tensor storage with LMDB and in disk files (which can be memory mapped). Data is encoded as a series of chunks: - magic number (int64) - length in bytes (int64) - bytes (multiple of 64 bytes long) Arrays are a header chunk followed by a data chunk. Header chunks have the following structure: - dtype (int64) - 8 byte array name - ndim (int64) - dim[0] - dim[1] - ... """ import struct import sys import numpy as np def bytelen(a): """Determine the length of a in bytes.""" if hasattr(a, "nbytes"): return a.nbytes elif isinstance(a, (bytearray, bytes)): return len(a) else: raise ValueError(a, "cannot determine nbytes") def bytedata(a): """Return the raw data corresponding to a.""" if isinstance(a, (bytearray, bytes, memoryview)): return a elif hasattr(a, "data"): return a.data else: raise ValueError(a, "cannot return bytedata") # tables for converting between long/short NumPy dtypes long_to_short = """ float16 f2 float32 f4 float64 f8 int8 i1 int16 i2 int32 i4 int64 i8 uint8 u1 uint16 u2 uint32 u4 uint64 u8 """.strip() long_to_short = [x.split() for x in long_to_short.split("\n")] long_to_short = {x[0]: x[1] for x in long_to_short} short_to_long = {v: k for k, v in long_to_short.items()} def check_acceptable_input_type(data, allow64): """Check that the data has an acceptable type for tensor encoding. 
:param data: array :param allow64: allow 64 bit types """ for a in data: if a.dtype.name not in long_to_short: raise ValueError("unsupported datatype") if not allow64 and a.dtype.name in ["float64", "int64", "uint64"]: raise ValueError("64 bit datatypes not allowed unless explicitly enabled") def str64(s): """Convert a string to an int64.""" s = s + "\0" * (8 - len(s)) s = s.encode("ascii") return struct.unpack("@q", s)[0] def unstr64(i): """Convert an int64 to a string.""" b = struct.pack("@q", i) return b.decode("ascii").strip("\0") def check_infos(data, infos, required_infos=None): """Verify the info strings.""" if required_infos is False or required_infos is None: return data if required_infos is True: return data, infos if not isinstance(required_infos, (tuple, list)): raise ValueError("required_infos must be tuple or list") for required, actual in zip(required_infos, infos): if required != actual: raise ValueError(f"actual info {actual} doesn't match required info {required}") return data def encode_header(a, info=""): """Encode an array header as a byte array.""" if a.ndim >= 10: raise ValueError("too many dimensions") if a.nbytes != np.prod(a.shape) * a.itemsize: raise ValueError("mismatch between size and shape") if a.dtype.name not in long_to_short: raise ValueError("unsupported array type") header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape) return bytedata(np.array(header, dtype="i8")) def decode_header(h): """Decode a byte array into an array header.""" h = np.frombuffer(h, dtype="i8") if unstr64(h[0]) not in short_to_long: raise ValueError("unsupported array type") dtype = np.dtype(short_to_long[unstr64(h[0])]) info = unstr64(h[1]) rank = int(h[2]) shape = tuple(h[3 : 3 + rank]) return shape, dtype, info def encode_list(l, infos=None): # noqa: E741 """Given a list of arrays, encode them into a list of byte arrays.""" if infos is None: infos = [""] else: if len(l) != len(infos): raise ValueError(f"length of list {l} must match length of infos {infos}") result = [] for i, a in enumerate(l): header = encode_header(a, infos[i % len(infos)]) result += [header, bytedata(a)] return result def decode_list(l, infos=False): # noqa: E741 """Given a list of byte arrays, decode them into arrays.""" result = [] infos0 = [] for header, data in zip(l[::2], l[1::2]): shape, dtype, info = decode_header(header) a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape) result += [a] infos0 += [info] return check_infos(result, infos0, infos) magic_str = "~TenBin~" magic = str64(magic_str) magic_bytes = unstr64(magic).encode("ascii") def roundup(n, k=64): """Round up to the next multiple of k (64 by default).""" return k * ((n + k - 1) // k) def encode_chunks(l): # noqa: E741 """Encode a list of chunks into a single byte array, with lengths and magics.""" size = sum(16 + roundup(b.nbytes) for b in l) result = bytearray(size) offset = 0 for b in l: result[offset : offset + 8] = magic_bytes offset += 8 result[offset : offset + 8] = struct.pack("@q", b.nbytes) offset += 8 result[offset : offset + bytelen(b)] = b offset += roundup(bytelen(b)) return result def decode_chunks(buf): """Decode a byte array into a list of chunks.""" result = [] offset = 0 total = bytelen(buf) while offset < total: if magic_bytes != buf[offset : offset + 8]: raise ValueError("magic bytes mismatch") offset += 8 nbytes = struct.unpack("@q", buf[offset : offset + 8])[0] offset += 8 b = buf[offset : offset + nbytes] offset += roundup(nbytes) result.append(b) return result def encode_buffer(l, infos=None): 
# noqa: E741 """Encode a list of arrays into a single byte array.""" if not isinstance(l, list): raise ValueError("requires list") return encode_chunks(encode_list(l, infos=infos)) def decode_buffer(buf, infos=False): """Decode a byte array into a list of arrays.""" return decode_list(decode_chunks(buf), infos=infos) def write_chunk(stream, buf): """Write a byte chunk to the stream with magics, length, and padding.""" nbytes = bytelen(buf) stream.write(magic_bytes) stream.write(struct.pack("@q", nbytes)) stream.write(bytedata(buf)) padding = roundup(nbytes) - nbytes if padding > 0: stream.write(b"\0" * padding) def read_chunk(stream): """Read a byte chunk from a stream with magics, length, and padding.""" magic = stream.read(8) if magic == b"": return None if magic != magic_bytes: raise ValueError("magic number does not match") nbytes = stream.read(8) nbytes = struct.unpack("@q", nbytes)[0] if nbytes < 0: raise ValueError("negative nbytes") data = stream.read(nbytes) padding = roundup(nbytes) - nbytes if padding > 0: stream.read(padding) return data def write(stream, l, infos=None): # noqa: E741 """Write a list of arrays to a stream, with magics, length, and padding.""" for chunk in encode_list(l, infos=infos): write_chunk(stream, chunk) def read(stream, n=sys.maxsize, infos=False): """Read a list of arrays from a stream, with magics, length, and padding.""" chunks = [] for _ in range(n): header = read_chunk(stream) if header is None: break data = read_chunk(stream) if data is None: raise ValueError("premature EOF") chunks += [header, data] return decode_list(chunks, infos=infos) def save(fname, *args, infos=None, nocheck=False): """Save a list of arrays to a file, with magics, length, and padding.""" if not nocheck and not fname.endswith(".ten"): raise ValueError("file name should end in .ten") with open(fname, "wb") as stream: write(stream, args, infos=infos) def load(fname, infos=False, nocheck=False): """Read a list of arrays from a file, with magics, length, and padding.""" if not nocheck and not fname.endswith(".ten"): raise ValueError("file name should end in .ten") with open(fname, "rb") as stream: return read(stream, infos=infos)
datasets/src/datasets/packaged_modules/webdataset/_tenbin.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/webdataset/_tenbin.py", "repo_id": "datasets", "token_count": 3409 }
108
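A roundtrip sketch for the tenbin helpers above, assuming the module's `save`/`load` are in scope; note that info strings must fit in 8 ASCII bytes, and the file name is illustrative:

```py
import numpy as np

a = np.arange(12, dtype="float32").reshape(3, 4)
save("arrays.ten", a, infos=["batch"])        # the .ten suffix is enforced unless nocheck=True
(b,), infos = load("arrays.ten", infos=True)  # infos=True also returns the info strings
assert np.array_equal(a, b) and infos == ["batch"]
```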
"""Contains utilities to flag a feature as "experimental" in datasets.""" import warnings from functools import wraps from typing import Callable def experimental(fn: Callable) -> Callable: """Decorator to flag a feature as experimental. An experimental feature trigger a warning when used as it might be subject to breaking changes in the future. Args: fn (`Callable`): The function to flag as experimental. Returns: `Callable`: The decorated function. Example: ```python >>> from datasets.utils import experimental >>> @experimental ... def my_function(): ... print("Hello world!") >>> my_function() UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future. Hello world! ``` """ @wraps(fn) def _inner_fn(*args, **kwargs): warnings.warn( (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."), UserWarning, ) return fn(*args, **kwargs) return _inner_fn
datasets/src/datasets/utils/experimental.py/0
{ "file_path": "datasets/src/datasets/utils/experimental.py", "repo_id": "datasets", "token_count": 386 }
109
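A sketch of opting out of the warning on the caller side, assuming the decorator above is in scope; `risky` is a made-up function:

```py
import warnings

@experimental
def risky():
    return 42

with warnings.catch_warnings():
    warnings.simplefilter("ignore", UserWarning)
    assert risky() == 42  # runs silently; the warning is suppressed only in this block
```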
import numpy as np def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int: """Return the number of possible shards according to the input gen_kwargs""" # Having lists of different sizes makes sharding ambiguous, raise an error in this case # until we decide how to define sharding without ambiguity for users lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)} if len(set(lists_lengths.values())) > 1: raise RuntimeError( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items()) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) max_length = max(lists_lengths.values(), default=0) return max(1, max_length) def _distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]: """ Get the range of shard indices per job. If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard. The shards indices order is preserved: e.g. all the first shards are given the first job. Moreover all the jobs are given approximately the same number of shards. Example: ```python >>> _distribute_shards(2, max_num_jobs=4) [range(0, 1), range(1, 2)] >>> _distribute_shards(10, max_num_jobs=3) [range(0, 4), range(4, 7), range(7, 10)] ``` """ shards_indices_per_group = [] for group_idx in range(max_num_jobs): num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 shard_indices = range(start, start + num_shards_to_add) shards_indices_per_group.append(shard_indices) return shards_indices_per_group def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> list[dict]: """Split the gen_kwargs into `max_num_jobs` gen_kwargs""" # Having lists of different sizes makes sharding ambiguous, raise an error in this case num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs) if num_shards == 1: return [dict(gen_kwargs)] else: shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(value, list) else value for key, value in gen_kwargs.items() } for group_idx in range(len(shard_indices_per_group)) ] def _merge_gen_kwargs(gen_kwargs_list: list[dict]) -> dict: return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key], list) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict: """Return a shuffled copy of the input gen_kwargs""" # We must shuffle all the lists, and lists of the same size must have the same shuffling. # This way entangled lists of (shard, shard_metadata) are still in the right order. 
# First, let's generate the shuffled indices per list size list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)} indices_per_size = {} for size in list_sizes: indices_per_size[size] = list(range(size)) rng.shuffle(indices_per_size[size]) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes shuffled_kwargs = dict(gen_kwargs) for key, value in shuffled_kwargs.items(): if isinstance(value, list): shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]] return shuffled_kwargs
datasets/src/datasets/utils/sharding.py/0
{ "file_path": "datasets/src/datasets/utils/sharding.py", "repo_id": "datasets", "token_count": 1690 }
110
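A worked example of the sharding helpers above: the single list in `gen_kwargs` defines the shards, and scalar values are copied to every job (file names are illustrative):

```py
gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "metadata_path": "meta.json"}

assert _number_of_shards_in_gen_kwargs(gen_kwargs) == 3
# the remainder shard goes to the first job, so the split is 2 + 1
assert _distribute_shards(num_shards=3, max_num_jobs=2) == [range(0, 2), range(2, 3)]
assert _split_gen_kwargs(gen_kwargs, max_num_jobs=2) == [
    {"files": ["a.txt", "b.txt"], "metadata_path": "meta.json"},
    {"files": ["c.txt"], "metadata_path": "meta.json"},
]
```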
from pathlib import Path import pytest from datasets import load_dataset from datasets.packaged_modules.cache.cache import Cache SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_single_config_in_metadata" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_two_configs_in_metadata" SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME = "hf-internal-testing/DatasetWithCapitalLetters" def test_cache(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache" ds = load_dataset(str(text_dir), cache_dir=str(cache_dir)) hash = Path(ds["train"].cache_files[0]["filename"]).parts[-2] cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, hash=hash) reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert list(ds["train"]) == list(reloaded["train"]) def test_cache_streaming(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache_streaming" ds = load_dataset(str(text_dir), cache_dir=str(cache_dir)) hash = Path(ds["train"].cache_files[0]["filename"]).parts[-2] cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, hash=hash) reloaded = cache.as_streaming_dataset() assert list(ds) == list(reloaded) assert list(ds["train"]) == list(reloaded["train"]) def test_cache_auto_hash(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache_auto_hash" ds = load_dataset(str(text_dir), cache_dir=str(cache_dir)) cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto") reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert list(ds["train"]) == list(reloaded["train"]) def test_cache_auto_hash_with_custom_config(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache_auto_hash_with_custom_config" ds = load_dataset(str(text_dir), sample_by="paragraph", cache_dir=str(cache_dir)) another_ds = load_dataset(str(text_dir), cache_dir=str(cache_dir)) cache = Cache( cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto", sample_by="paragraph" ) another_cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto") assert cache.config_id.endswith("paragraph") assert not another_cache.config_id.endswith("paragraph") reloaded = cache.as_dataset() another_reloaded = another_cache.as_dataset() assert list(ds) == list(reloaded) assert list(ds["train"]) == list(reloaded["train"]) assert list(another_ds) == list(another_reloaded) assert list(another_ds["train"]) == list(another_reloaded["train"]) def test_cache_missing(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache_missing" load_dataset(str(text_dir), cache_dir=str(cache_dir)) Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto").download_and_prepare() with pytest.raises(ValueError): Cache(cache_dir=str(cache_dir), dataset_name="missing", version="auto", hash="auto").download_and_prepare() with pytest.raises(ValueError): Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, hash="missing").download_and_prepare() with pytest.raises(ValueError): Cache( cache_dir=str(cache_dir), dataset_name=text_dir.name, config_name="missing", version="auto", hash="auto" ).download_and_prepare() @pytest.mark.integration def test_cache_multi_configs(tmp_path: Path): cache_dir = tmp_path / "test_cache_multi_configs" repo_id = SAMPLE_DATASET_TWO_CONFIG_IN_METADATA dataset_name = repo_id.split("/")[-1] config_name = "v1" ds = load_dataset(repo_id, config_name, cache_dir=str(cache_dir)) cache = Cache( 
cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, config_name=config_name, version="auto", hash="auto", ) reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"]) with pytest.raises(ValueError) as excinfo: Cache( cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, config_name="missing", version="auto", hash="auto", ) assert config_name in str(excinfo.value) @pytest.mark.integration def test_cache_single_config(tmp_path: Path): cache_dir = tmp_path / "test_cache_single_config" repo_id = SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA dataset_name = repo_id.split("/")[-1] config_name = "custom" ds = load_dataset(repo_id, cache_dir=str(cache_dir)) cache = Cache(cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, version="auto", hash="auto") reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"]) cache = Cache( cache_dir=str(cache_dir), dataset_name=dataset_name, config_name=config_name, repo_id=repo_id, version="auto", hash="auto", ) reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"]) with pytest.raises(ValueError) as excinfo: Cache( cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, config_name="missing", version="auto", hash="auto", ) assert config_name in str(excinfo.value) @pytest.mark.integration def test_cache_capital_letters(tmp_path: Path): cache_dir = tmp_path / "test_cache_capital_letters" repo_id = SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME dataset_name = repo_id.split("/")[-1] ds = load_dataset(repo_id, cache_dir=str(cache_dir)) cache = Cache(cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, version="auto", hash="auto") reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"]) cache = Cache( cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, version="auto", hash="auto", ) reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"])
datasets/tests/packaged_modules/test_cache.py/0
{ "file_path": "datasets/tests/packaged_modules/test_cache.py", "repo_id": "datasets", "token_count": 2721 }
111
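A condensed form of the pattern these tests exercise, assuming a dataset was already prepared under `cache_dir`; the paths are illustrative:

```py
from datasets import load_dataset
from datasets.packaged_modules.cache.cache import Cache

ds = load_dataset("./my_text_dir", cache_dir="./cache")  # prepares and caches the dataset
cache = Cache(cache_dir="./cache", dataset_name="my_text_dir", version="auto", hash="auto")
reloaded = cache.as_dataset()  # served from the cached Arrow files, no re-preparation
assert list(ds["train"]) == list(reloaded["train"])
```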
import copy import os from pathlib import Path from typing import List from unittest.mock import patch import fsspec import pytest from fsspec.registry import _registry as _fsspec_registry from fsspec.spec import AbstractFileSystem from datasets.data_files import ( DataFilesDict, DataFilesList, DataFilesPatternsDict, DataFilesPatternsList, _get_data_files_patterns, _is_inside_unrequested_special_dir, _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir, get_data_patterns, resolve_pattern, ) from datasets.fingerprint import Hasher _TEST_PATTERNS = ["*", "**", "**/*", "*.txt", "data/*", "**/*.txt", "**/train.txt"] _FILES_TO_IGNORE = {".dummy", "README.md", "dummy_data.zip", "dataset_infos.json"} _DIRS_TO_IGNORE = {"data/.dummy_subdir", "__pycache__"} _TEST_PATTERNS_SIZES = { "*": 0, "**": 4, "**/*": 4, "*.txt": 0, "data/*": 2, "data/**": 4, "**/*.txt": 4, "**/train.txt": 2, } _TEST_URL = "https://raw.githubusercontent.com/huggingface/datasets/9675a5a1e7b99a86f9c250f6ea5fa5d1e6d5cc7d/setup.py" @pytest.fixture def complex_data_dir(tmp_path): data_dir = tmp_path / "complex_data_dir" data_dir.mkdir() (data_dir / "data").mkdir() with open(data_dir / "data" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "data" / "test.txt", "w") as f: f.write("bar\n" * 10) with open(data_dir / "README.md", "w") as f: f.write("This is a readme") with open(data_dir / ".dummy", "w") as f: f.write("this is a dummy file that is not a data file") (data_dir / "data" / "subdir").mkdir() with open(data_dir / "data" / "subdir" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "data" / "subdir" / "test.txt", "w") as f: f.write("bar\n" * 10) (data_dir / "data" / ".dummy_subdir").mkdir() with open(data_dir / "data" / ".dummy_subdir" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "data" / ".dummy_subdir" / "test.txt", "w") as f: f.write("bar\n" * 10) (data_dir / "__pycache__").mkdir() with open(data_dir / "__pycache__" / "script.py", "w") as f: f.write("foo\n" * 10) return str(data_dir) def is_relative_to(path, *other): # A built-in method in Python 3.9+ try: path.relative_to(*other) return True except ValueError: return False @pytest.fixture def pattern_results(complex_data_dir): # We use fsspec glob as a reference for data files resolution from patterns. # This is the same as dask for example. 
# # /!\ Here are some behaviors specific to fsspec glob that are different from glob.glob, Path.glob, Path.match or fnmatch: # - '*' matches only first level items # - '**' matches all items # - '**/*' matches all at least second level items # # More generally: # - '*' matches any character except a forward-slash (to match just the file or directory name) # - '**' matches any character including a forward-slash / return { pattern: sorted( Path(os.path.abspath(path)).as_posix() for path in fsspec.filesystem("file").glob(os.path.join(complex_data_dir, pattern)) if Path(path).name not in _FILES_TO_IGNORE and not any( is_relative_to(Path(path), os.path.join(complex_data_dir, dir_path)) for dir_path in _DIRS_TO_IGNORE ) and Path(path).is_file() ) for pattern in _TEST_PATTERNS } @pytest.fixture def hub_dataset_repo_path(tmpfs, complex_data_dir): for path in Path(complex_data_dir).rglob("*"): if path.is_file(): with tmpfs.open(path.relative_to(complex_data_dir).as_posix(), "wb") as f: f.write(path.read_bytes()) yield "tmp://" @pytest.fixture def hub_dataset_repo_patterns_results(hub_dataset_repo_path, complex_data_dir, pattern_results): return { pattern: [ hub_dataset_repo_path + Path(path).relative_to(complex_data_dir).as_posix() for path in pattern_results[pattern] ] for pattern in pattern_results } def test_is_inside_unrequested_special_dir(complex_data_dir, pattern_results): # usual patterns outside special dir work fine for pattern, result in pattern_results.items(): if result: matched_rel_path = str(Path(result[0]).relative_to(complex_data_dir)) assert _is_inside_unrequested_special_dir(matched_rel_path, pattern) is False # check behavior for special dir f = _is_inside_unrequested_special_dir assert f("__pycache__/b.txt", "**") is True assert f("__pycache__/b.txt", "*/b.txt") is True assert f("__pycache__/b.txt", "__pycache__/*") is False assert f("__pycache__/__b.txt", "__pycache__/*") is False assert f("__pycache__/__b.txt", "__*/*") is False assert f("__b.txt", "*") is False def test_is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(complex_data_dir, pattern_results): # usual patterns outside hidden dir work fine for pattern, result in pattern_results.items(): if result: matched_rel_path = str(Path(result[0]).relative_to(complex_data_dir)) assert _is_inside_unrequested_special_dir(matched_rel_path, pattern) is False # check behavior for hidden dir and file f = _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir assert f(".hidden_file.txt", "**") is True assert f(".hidden_file.txt", ".*") is False assert f(".hidden_dir/a.txt", "**") is True assert f(".hidden_dir/a.txt", ".*/*") is False assert f(".hidden_dir/a.txt", ".hidden_dir/*") is False assert f(".hidden_dir/.hidden_file.txt", "**") is True assert f(".hidden_dir/.hidden_file.txt", ".*/*") is True assert f(".hidden_dir/.hidden_file.txt", ".*/.*") is False assert f(".hidden_dir/.hidden_file.txt", ".hidden_dir/*") is True assert f(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*") is False @pytest.mark.parametrize("pattern", _TEST_PATTERNS) def test_pattern_results_fixture(pattern_results, pattern): assert len(pattern_results[pattern]) == _TEST_PATTERNS_SIZES[pattern] assert all(Path(path).is_file() for path in pattern_results[pattern]) @pytest.mark.parametrize("pattern", _TEST_PATTERNS) def test_resolve_pattern_locally(complex_data_dir, pattern, pattern_results): try: resolved_data_files = resolve_pattern(pattern, complex_data_dir) assert sorted(str(f) for f in resolved_data_files) == pattern_results[pattern] 
except FileNotFoundError: assert len(pattern_results[pattern]) == 0 def test_resolve_pattern_locally_with_dot_in_base_path(complex_data_dir): base_path_with_dot = os.path.join(complex_data_dir, "data", ".dummy_subdir") resolved_data_files = resolve_pattern(os.path.join(base_path_with_dot, "train.txt"), base_path_with_dot) assert len(resolved_data_files) == 1 def test_resolve_pattern_locally_with_absolute_path(tmp_path, complex_data_dir): abs_path = os.path.join(complex_data_dir, "data", "train.txt") resolved_data_files = resolve_pattern(abs_path, str(tmp_path / "blabla")) assert len(resolved_data_files) == 1 def test_resolve_pattern_locally_with_double_dots(tmp_path, complex_data_dir): path_with_double_dots = os.path.join(complex_data_dir, "data", "subdir", "..", "train.txt") resolved_data_files = resolve_pattern(path_with_double_dots, str(tmp_path / "blabla")) assert len(resolved_data_files) == 1 def test_resolve_pattern_locally_returns_hidden_file_only_if_requested(complex_data_dir): with pytest.raises(FileNotFoundError): resolve_pattern("*dummy", complex_data_dir) resolved_data_files = resolve_pattern(".dummy", complex_data_dir) assert len(resolved_data_files) == 1 def test_resolve_pattern_locally_hidden_base_path(tmp_path): hidden = tmp_path / ".test_hidden_base_path" hidden.mkdir() (tmp_path / ".test_hidden_base_path" / "a.txt").touch() resolved_data_files = resolve_pattern("*", str(hidden)) assert len(resolved_data_files) == 1 def test_resolve_pattern_locallyreturns_hidden_dir_only_if_requested(complex_data_dir): with pytest.raises(FileNotFoundError): resolve_pattern("data/*dummy_subdir/train.txt", complex_data_dir) resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", complex_data_dir) assert len(resolved_data_files) == 1 resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", complex_data_dir) assert len(resolved_data_files) == 1 def test_resolve_pattern_locally_returns_special_dir_only_if_requested(complex_data_dir): with pytest.raises(FileNotFoundError): resolve_pattern("data/*dummy_subdir/train.txt", complex_data_dir) resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", complex_data_dir) assert len(resolved_data_files) == 1 resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", complex_data_dir) assert len(resolved_data_files) == 1 def test_resolve_pattern_locally_special_base_path(tmp_path): special = tmp_path / "__test_special_base_path__" special.mkdir() (tmp_path / "__test_special_base_path__" / "a.txt").touch() resolved_data_files = resolve_pattern("*", str(special)) assert len(resolved_data_files) == 1 @pytest.mark.parametrize("pattern,size,extensions", [("**", 4, [".txt"]), ("**", 4, None), ("**", 0, [".blablabla"])]) def test_resolve_pattern_locally_with_extensions(complex_data_dir, pattern, size, extensions): if size > 0: resolved_data_files = resolve_pattern(pattern, complex_data_dir, allowed_extensions=extensions) assert len(resolved_data_files) == size else: with pytest.raises(FileNotFoundError): resolve_pattern(pattern, complex_data_dir, allowed_extensions=extensions) def test_fail_resolve_pattern_locally(complex_data_dir): with pytest.raises(FileNotFoundError): resolve_pattern(complex_data_dir, ["blablabla"]) @pytest.mark.skipif(os.name == "nt", reason="Windows does not support symlinks in the default mode") def test_resolve_pattern_locally_does_not_resolve_symbolic_links(tmp_path, complex_data_dir): (tmp_path / "train_data_symlink.txt").symlink_to(os.path.join(complex_data_dir, "data", "train.txt")) 
resolved_data_files = resolve_pattern("train_data_symlink.txt", str(tmp_path)) assert len(resolved_data_files) == 1 assert Path(resolved_data_files[0]) == tmp_path / "train_data_symlink.txt" def test_resolve_pattern_locally_sorted_files(tmp_path_factory): path = str(tmp_path_factory.mktemp("unsorted_text_files")) unsorted_names = ["0.txt", "2.txt", "3.txt"] for name in unsorted_names: with open(os.path.join(path, name), "w"): pass resolved_data_files = resolve_pattern("*", path) resolved_names = [os.path.basename(data_file) for data_file in resolved_data_files] assert resolved_names == sorted(unsorted_names) @pytest.mark.parametrize("pattern", _TEST_PATTERNS) def test_resolve_pattern_in_dataset_repository(hub_dataset_repo_path, pattern, hub_dataset_repo_patterns_results): try: resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path) assert sorted(str(f) for f in resolved_data_files) == hub_dataset_repo_patterns_results[pattern] except FileNotFoundError: assert len(hub_dataset_repo_patterns_results[pattern]) == 0 @pytest.mark.parametrize( "pattern,size,base_path", [("**", 4, None), ("**", 4, "data"), ("**", 2, "data/subdir"), ("**", 0, "data/subdir2")] ) def test_resolve_pattern_in_dataset_repository_with_base_path(hub_dataset_repo_path, pattern, size, base_path): base_path = hub_dataset_repo_path + (base_path or "") if size > 0: resolved_data_files = resolve_pattern(pattern, base_path) assert len(resolved_data_files) == size else: with pytest.raises(FileNotFoundError): resolve_pattern(pattern, base_path) @pytest.mark.parametrize("pattern,size,extensions", [("**", 4, [".txt"]), ("**", 4, None), ("**", 0, [".blablabla"])]) def test_resolve_pattern_in_dataset_repository_with_extensions(hub_dataset_repo_path, pattern, size, extensions): if size > 0: resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path, allowed_extensions=extensions) assert len(resolved_data_files) == size else: with pytest.raises(FileNotFoundError): resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path, allowed_extensions=extensions) def test_fail_resolve_pattern_in_dataset_repository(hub_dataset_repo_path): with pytest.raises(FileNotFoundError): resolve_pattern("blablabla", hub_dataset_repo_path) def test_resolve_pattern_in_dataset_repository_returns_hidden_file_only_if_requested(hub_dataset_repo_path): with pytest.raises(FileNotFoundError): resolve_pattern("*dummy", hub_dataset_repo_path) resolved_data_files = resolve_pattern(".dummy", hub_dataset_repo_path) assert len(resolved_data_files) == 1 def test_resolve_pattern_in_dataset_repository_hidden_base_path(tmpfs): tmpfs.touch(".hidden/a.txt") resolved_data_files = resolve_pattern("*", base_path="tmp://.hidden") assert len(resolved_data_files) == 1 def test_resolve_pattern_in_dataset_repository_returns_hidden_dir_only_if_requested(hub_dataset_repo_path): with pytest.raises(FileNotFoundError): resolve_pattern("data/*dummy_subdir/train.txt", hub_dataset_repo_path) resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", hub_dataset_repo_path) assert len(resolved_data_files) == 1 resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", hub_dataset_repo_path) assert len(resolved_data_files) == 1 def test_resolve_pattern_in_dataset_repository_returns_special_dir_only_if_requested(hub_dataset_repo_path): with pytest.raises(FileNotFoundError): resolve_pattern("data/*dummy_subdir/train.txt", hub_dataset_repo_path) resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", hub_dataset_repo_path) assert 
len(resolved_data_files) == 1 resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", hub_dataset_repo_path) assert len(resolved_data_files) == 1 def test_resolve_pattern_in_dataset_repository_special_base_path(tmpfs): tmpfs.touch("__special__/a.txt") resolved_data_files = resolve_pattern("*", base_path="tmp://__special__") assert len(resolved_data_files) == 1 @pytest.fixture def dummy_fs(): DummyTestFS = mock_fs(["train.txt", "test.txt"]) _fsspec_registry["mock"] = DummyTestFS _fsspec_registry["dummy"] = DummyTestFS yield del _fsspec_registry["mock"] del _fsspec_registry["dummy"] def test_resolve_pattern_fs(dummy_fs): resolved_data_files = resolve_pattern("mock://train.txt", base_path="") assert resolved_data_files == ["mock://train.txt"] @pytest.mark.parametrize("pattern", _TEST_PATTERNS) def test_DataFilesList_from_patterns_in_dataset_repository_( hub_dataset_repo_path, hub_dataset_repo_patterns_results, pattern ): try: data_files_list = DataFilesList.from_patterns([pattern], hub_dataset_repo_path) assert sorted(data_files_list) == hub_dataset_repo_patterns_results[pattern] assert len(data_files_list.origin_metadata) == len(data_files_list) except FileNotFoundError: assert len(hub_dataset_repo_patterns_results[pattern]) == 0 def test_DataFilesList_from_patterns_locally_with_extra_files(complex_data_dir, text_file): data_files_list = DataFilesList.from_patterns([_TEST_URL, text_file.as_posix()], complex_data_dir) assert list(data_files_list) == [_TEST_URL, text_file.as_posix()] assert len(data_files_list.origin_metadata) == 2 def test_DataFilesList_from_patterns_raises_FileNotFoundError(complex_data_dir): with pytest.raises(FileNotFoundError): DataFilesList.from_patterns(["file_that_doesnt_exist.txt"], complex_data_dir) class TestDataFilesDict: def test_key_order_after_copy(self): data_files = DataFilesDict({"train": "train.csv", "test": "test.csv"}) copied_data_files = copy.deepcopy(data_files) assert list(copied_data_files.keys()) == list(data_files.keys()) # test split order with list() @pytest.mark.parametrize("pattern", _TEST_PATTERNS) def test_DataFilesDict_from_patterns_in_dataset_repository( hub_dataset_repo_path, hub_dataset_repo_patterns_results, pattern ): split_name = "train" try: data_files = DataFilesDict.from_patterns({split_name: [pattern]}, hub_dataset_repo_path) assert all(isinstance(data_files_list, DataFilesList) for data_files_list in data_files.values()) assert sorted(data_files[split_name]) == hub_dataset_repo_patterns_results[pattern] except FileNotFoundError: assert len(hub_dataset_repo_patterns_results[pattern]) == 0 @pytest.mark.parametrize( "pattern,size,base_path,split_name", [ ("**", 4, None, "train"), ("**", 4, "data", "train"), ("**", 2, "data/subdir", "train"), ("**", 0, "data/subdir2", "train"), ], ) def test_DataFilesDict_from_patterns_in_dataset_repository_with_base_path( hub_dataset_repo_path, pattern, size, base_path, split_name ): base_path = hub_dataset_repo_path + (base_path or "") if size > 0: data_files = DataFilesDict.from_patterns({split_name: [pattern]}, base_path=base_path) assert len(data_files[split_name]) == size else: with pytest.raises(FileNotFoundError): resolve_pattern(pattern, base_path) @pytest.mark.parametrize("pattern", _TEST_PATTERNS) def test_DataFilesDict_from_patterns_locally(complex_data_dir, pattern_results, pattern): split_name = "train" try: data_files = DataFilesDict.from_patterns({split_name: [pattern]}, complex_data_dir) assert all(isinstance(data_files_list, DataFilesList) for data_files_list in 
data_files.values()) assert sorted(data_files[split_name]) == pattern_results[pattern] except FileNotFoundError: assert len(pattern_results[pattern]) == 0 def test_DataFilesDict_from_patterns_in_dataset_repository_hashing(hub_dataset_repo_path): patterns = {"train": ["**/train.txt"], "test": ["**/test.txt"]} data_files1 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path) data_files2 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path) assert Hasher.hash(data_files1) == Hasher.hash(data_files2) data_files2 = DataFilesDict(sorted(data_files1.items(), reverse=True)) assert Hasher.hash(data_files1) == Hasher.hash(data_files2) # the tmpfs used to mock the hub repo is based on a local directory # therefore os.stat is used to get the mtime of the data files with patch("os.stat", return_value=os.stat(__file__)): data_files2 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path) assert Hasher.hash(data_files1) != Hasher.hash(data_files2) def test_DataFilesDict_from_patterns_locally_or_remote_hashing(text_file): patterns = {"train": [_TEST_URL], "test": [str(text_file)]} data_files1 = DataFilesDict.from_patterns(patterns) data_files2 = DataFilesDict.from_patterns(patterns) assert Hasher.hash(data_files1) == Hasher.hash(data_files2) data_files2 = DataFilesDict(sorted(data_files1.items(), reverse=True)) assert Hasher.hash(data_files1) == Hasher.hash(data_files2) patterns2 = {"train": [_TEST_URL], "test": [_TEST_URL]} data_files2 = DataFilesDict.from_patterns(patterns2) assert Hasher.hash(data_files1) != Hasher.hash(data_files2) with patch("fsspec.implementations.http._file_info", return_value={}): data_files2 = DataFilesDict.from_patterns(patterns) assert Hasher.hash(data_files1) != Hasher.hash(data_files2) with patch("os.stat", return_value=os.stat(__file__)): data_files2 = DataFilesDict.from_patterns(patterns) assert Hasher.hash(data_files1) != Hasher.hash(data_files2) def test_DataFilesPatternsList(text_file): data_files_patterns = DataFilesPatternsList([str(text_file)], allowed_extensions=[None]) data_files = data_files_patterns.resolve(base_path="") assert data_files == [text_file.as_posix()] assert isinstance(data_files, DataFilesList) data_files_patterns = DataFilesPatternsList([str(text_file)], allowed_extensions=[[".txt"]]) data_files = data_files_patterns.resolve(base_path="") assert data_files == [text_file.as_posix()] assert isinstance(data_files, DataFilesList) data_files_patterns = DataFilesPatternsList([str(text_file).replace(".txt", ".tx*")], allowed_extensions=[None]) data_files = data_files_patterns.resolve(base_path="") assert data_files == [text_file.as_posix()] assert isinstance(data_files, DataFilesList) data_files_patterns = DataFilesPatternsList([Path(text_file).name], allowed_extensions=[None]) data_files = data_files_patterns.resolve(base_path=str(Path(text_file).parent)) assert data_files == [text_file.as_posix()] data_files_patterns = DataFilesPatternsList([str(text_file)], allowed_extensions=[[".zip"]]) with pytest.raises(FileNotFoundError): data_files_patterns.resolve(base_path="") def test_DataFilesPatternsDict(text_file): data_files_patterns_dict = DataFilesPatternsDict( {"train": DataFilesPatternsList([str(text_file)], allowed_extensions=[None])} ) data_files_dict = data_files_patterns_dict.resolve(base_path="") assert data_files_dict == {"train": [text_file.as_posix()]} assert isinstance(data_files_dict, DataFilesDict) assert isinstance(data_files_dict["train"], DataFilesList) def mock_fs(file_paths: List[str]): """ Set up a mock 
filesystem for fsspec containing the provided files Example: ```py >>> DummyTestFS = mock_fs(["data/train.txt", "data.test.txt"]) >>> fs = DummyTestFS() >>> assert fsspec.get_filesystem_class("mock").__name__ == "DummyTestFS" >>> assert type(fs).__name__ == "DummyTestFS" >>> print(fs.glob("**")) ["data", "data/train.txt", "data.test.txt"] ``` """ file_paths = [file_path.split("://")[-1] for file_path in file_paths] dir_paths = { "/".join(file_path.split("/")[: i + 1]) for file_path in file_paths for i in range(file_path.count("/")) } fs_contents = [{"name": dir_path, "type": "directory"} for dir_path in dir_paths] + [ {"name": file_path, "type": "file", "size": 10} for file_path in file_paths ] class DummyTestFS(AbstractFileSystem): protocol = ("mock", "dummy") _fs_contents = fs_contents def ls(self, path, detail=True, refresh=True, **kwargs): if kwargs.pop("strip_proto", True): path = self._strip_protocol(path) files = not refresh and self._ls_from_cache(path) if not files: files = [file for file in self._fs_contents if path == self._parent(file["name"])] files.sort(key=lambda file: file["name"]) self.dircache[path.rstrip("/")] = files if detail: return files return [file["name"] for file in files] return DummyTestFS @pytest.mark.parametrize("base_path", ["", "mock://", "my_dir"]) @pytest.mark.parametrize( "data_file_per_split", [ # === Main cases === # file named after split at the root {"train": "train.txt", "validation": "valid.txt", "test": "test.txt"}, # file named after split in a directory { "train": "data/train.txt", "validation": "data/valid.txt", "test": "data/test.txt", }, # directory named after split { "train": "train/split.txt", "validation": "valid/split.txt", "test": "test/split.txt", }, # sharded splits { "train": [f"data/train_{i}.txt" for i in range(3)], "validation": [f"data/validation_{i}.txt" for i in range(3)], "test": [f"data/test_{i}.txt" for i in range(3)], }, # sharded splits with standard format (+ custom split name) { "train": [f"data/train-0000{i}-of-00003.txt" for i in range(3)], "validation": [f"data/validation-0000{i}-of-00003.txt" for i in range(3)], "test": [f"data/test-0000{i}-of-00003.txt" for i in range(3)], "random": [f"data/random-0000{i}-of-00003.txt" for i in range(3)], }, # === Secondary cases === # Default to train split {"train": "dataset.txt"}, {"train": "data/dataset.txt"}, {"train": ["data/image.jpg", "metadata.jsonl"]}, {"train": ["data/image.jpg", "metadata.csv"]}, # With prefix or suffix in directory or file names {"train": "my_train_dir/dataset.txt"}, {"train": "data/my_train_file.txt"}, {"test": "my_test_dir/dataset.txt"}, {"test": "data/my_test_file.txt"}, {"validation": "my_validation_dir/dataset.txt"}, {"validation": "data/my_validation_file.txt"}, {"train": "train_dir/dataset.txt"}, {"train": "data/train_file.txt"}, {"test": "test_dir/dataset.txt"}, {"test": "data/test_file.txt"}, {"validation": "validation_dir/dataset.txt"}, {"validation": "data/validation_file.txt"}, {"train": "my_train/dataset.txt"}, {"train": "data/my_train.txt"}, {"test": "my_test/dataset.txt"}, {"test": "data/my_test.txt"}, {"validation": "my_validation/dataset.txt"}, {"validation": "data/my_validation.txt"}, # With test<>eval aliases {"test": "eval.txt"}, {"test": "data/eval.txt"}, {"test": "eval/dataset.txt"}, # With valid<>dev aliases {"validation": "dev.txt"}, {"validation": "data/dev.txt"}, {"validation": "dev/dataset.txt"}, # With valid<>val aliases {"validation": "val.txt"}, {"validation": "data/val.txt"}, # With other extensions {"train": 
"train.parquet", "validation": "valid.parquet", "test": "test.parquet"}, # With "dev" or "eval" without separators {"train": "developers_list.txt"}, {"train": "data/seqeval_results.txt"}, {"train": "contest.txt"}, # With supported separators {"test": "my.test.file.txt"}, {"test": "my-test-file.txt"}, {"test": "my_test_file.txt"}, {"test": "my test file.txt"}, {"test": "my-test_file.txt"}, {"test": "test00001.txt"}, # <split>.<split> case {"test": "test/train.txt"}, ], ) def test_get_data_files_patterns(base_path, data_file_per_split): data_file_per_split = {k: v if isinstance(v, list) else [v] for k, v in data_file_per_split.items()} data_file_per_split = { split: [ base_path + ("/" if base_path and base_path[-1] != "/" else "") + file_path for file_path in data_file_per_split[split] ] for split in data_file_per_split } file_paths = sum(data_file_per_split.values(), []) DummyTestFS = mock_fs(file_paths) fs = DummyTestFS() def resolver(pattern): pattern = base_path + ("/" if base_path and base_path[-1] != "/" else "") + pattern return [ file_path[len(fs._strip_protocol(base_path)) :].lstrip("/") for file_path in fs.glob(pattern) if fs.isfile(file_path) ] patterns_per_split = _get_data_files_patterns(resolver) assert list(patterns_per_split.keys()) == list(data_file_per_split.keys()) # Test split order with list() for split, patterns in patterns_per_split.items(): matched = [file_path for pattern in patterns for file_path in resolver(pattern)] expected = [ fs._strip_protocol(file_path)[len(fs._strip_protocol(base_path)) :].lstrip("/") for file_path in data_file_per_split[split] ] assert matched == expected def test_get_data_patterns_from_directory_with_the_word_data_twice(tmp_path): repo_dir = tmp_path / "directory-name-ending-with-the-word-data" # parent directory contains the word "data/" data_dir = repo_dir / "data" data_dir.mkdir(parents=True) data_file = data_dir / "train-00001-of-00009.parquet" data_file.touch() data_file_patterns = get_data_patterns(repo_dir.as_posix()) assert data_file_patterns == {"train": ["data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"]}
datasets/tests/test_data_files.py/0
{ "file_path": "datasets/tests/test_data_files.py", "repo_id": "datasets", "token_count": 12037 }
112
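The glob semantics these tests pin down, condensed into one hedged sketch; the base directory is illustrative, and hidden or `__special__` directories only match when the pattern names them explicitly:

```py
from datasets.data_files import resolve_pattern

base = "/path/to/complex_data_dir"
top_level = resolve_pattern("data/*", base)  # '*' matches direct children of data/ only
recursive = resolve_pattern("**", base)      # '**' recurses over all non-hidden, non-special files
hidden = resolve_pattern("data/.dummy_subdir/train.txt", base)  # must be requested explicitly
```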
import pytest from datasets.exceptions import DatasetNotFoundError from datasets.inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_default_config_name, get_dataset_infos, get_dataset_split_names, ) pytestmark = pytest.mark.integration @pytest.mark.parametrize( "path, config_name, expected_splits", [ ("rajpurkar/squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "default", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ], ) def test_get_dataset_config_info(path, config_name, expected_splits): info = get_dataset_config_info(path, config_name=config_name) assert info.config_name == config_name assert list(info.splits.keys()) == expected_splits def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data): info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token) assert list(info.splits.keys()) == ["train"] @pytest.mark.parametrize( "path, config_name, expected_exception", [ ("paws", None, ValueError), # non-existing, gated, private: ("hf-internal-testing/non-existing-dataset", "default", DatasetNotFoundError), ("hf-internal-testing/gated_dataset_with_data_files", "default", DatasetNotFoundError), ("hf-internal-testing/private_dataset_with_data_files", "default", DatasetNotFoundError), ], ) def test_get_dataset_config_info_raises(path, config_name, expected_exception): with pytest.raises(expected_exception): get_dataset_config_info(path, config_name=config_name) @pytest.mark.parametrize( "path, expected", [ ("amirveyseh/acronym_identification", ["default"]), ("rajpurkar/squad", ["plain_text"]), ("dalle-mini/wit", ["default"]), ("hf-internal-testing/librispeech_asr_dummy", ["clean"]), ("hf-internal-testing/audiofolder_no_configs_in_metadata", ["default"]), ("hf-internal-testing/audiofolder_single_config_in_metadata", ["custom"]), ("hf-internal-testing/audiofolder_two_configs_in_metadata", ["v1", "v2"]), ], ) def test_get_dataset_config_names(path, expected): config_names = get_dataset_config_names(path) assert config_names == expected @pytest.mark.parametrize( "path, expected", [ ("amirveyseh/acronym_identification", "default"), ("rajpurkar/squad", "plain_text"), ("dalle-mini/wit", "default"), ("hf-internal-testing/librispeech_asr_dummy", "clean"), ("hf-internal-testing/audiofolder_no_configs_in_metadata", "default"), ("hf-internal-testing/audiofolder_single_config_in_metadata", "custom"), ("hf-internal-testing/audiofolder_two_configs_in_metadata", None), ], ) def test_get_dataset_default_config_name(path, expected): default_config_name = get_dataset_default_config_name(path) if expected: assert default_config_name == expected else: assert default_config_name is None @pytest.mark.parametrize( "path, expected_configs, expected_splits_in_first_config", [ ("rajpurkar/squad", ["plain_text"], ["train", "validation"]), ("dalle-mini/wit", ["default"], ["train"]), ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]), ], ) def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config): infos = get_dataset_infos(path) assert list(infos.keys()) == expected_configs expected_config = expected_configs[0] assert expected_config in infos info = infos[expected_config] assert info.config_name == expected_config assert 
list(info.splits.keys()) == expected_splits_in_first_config @pytest.mark.parametrize( "path, expected_config, expected_splits", [ ("rajpurkar/squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "default", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ], ) def test_get_dataset_split_names(path, expected_config, expected_splits): assert get_dataset_split_names(path, config_name=expected_config) == expected_splits @pytest.mark.parametrize( "path, config_name, expected_exception", [ ("paws", None, ValueError), ], ) def test_get_dataset_split_names_error(path, config_name, expected_exception): with pytest.raises(expected_exception): get_dataset_split_names(path, config_name=config_name)
datasets/tests/test_inspect.py/0
{ "file_path": "datasets/tests/test_inspect.py", "repo_id": "datasets", "token_count": 1987 }
113
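As the parametrized cases above encode, non-existing, gated and private repos all surface the same error; a sketch of handling it on the caller side:

```py
from datasets.exceptions import DatasetNotFoundError
from datasets.inspect import get_dataset_config_info

try:
    get_dataset_config_info("hf-internal-testing/non-existing-dataset", config_name="default")
except DatasetNotFoundError:
    ...  # missing, gated and private datasets are indistinguishable to the caller
```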
import asyncio import importlib.metadata import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config def parse_flag_from_env(key, default=False): try: value = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _value = default else: # KEY is set, convert it to True or False. try: _value = strtobool(value) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no.") return _value _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) _run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False) _run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True) _run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio require_sndfile = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Dill-cloudpickle compatibility require_dill_gt_0_3_2 = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows require_not_windows = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) require_faiss = pytest.mark.skipif(find_spec("faiss") is None or sys.platform == "win32", reason="test requires faiss") require_moto = pytest.mark.skipif(find_spec("moto") is None, reason="test requires moto") require_numpy1_on_windows = pytest.mark.skipif( version.parse(importlib.metadata.version("numpy")) >= version.parse("2.0.0") and sys.platform == "win32", reason="test requires numpy < 2.0 on windows", ) def require_regex(test_case): """ Decorator marking a test that requires regex. These tests are skipped when Regex isn't installed. """ try: import regex # noqa except ImportError: test_case = unittest.skip("test requires regex")(test_case) return test_case def require_elasticsearch(test_case): """ Decorator marking a test that requires ElasticSearch. These tests are skipped when ElasticSearch isn't installed. """ try: import elasticsearch # noqa except ImportError: test_case = unittest.skip("test requires elasticsearch")(test_case) return test_case def require_sqlalchemy(test_case): """ Decorator marking a test that requires SQLAlchemy. These tests are skipped when SQLAlchemy isn't installed. """ try: import sqlalchemy # noqa except ImportError: test_case = unittest.skip("test requires sqlalchemy")(test_case) return test_case def require_torch(test_case): """ Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. 
""" if not config.TORCH_AVAILABLE: test_case = unittest.skip("test requires PyTorch")(test_case) return test_case def require_polars(test_case): """ Decorator marking a test that requires Polars. These tests are skipped when Polars isn't installed. """ if not config.POLARS_AVAILABLE: test_case = unittest.skip("test requires Polars")(test_case) return test_case def require_tf(test_case): """ Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed. """ if not config.TF_AVAILABLE: test_case = unittest.skip("test requires TensorFlow")(test_case) return test_case def require_jax(test_case): """ Decorator marking a test that requires JAX. These tests are skipped when JAX isn't installed. """ if not config.JAX_AVAILABLE: test_case = unittest.skip("test requires JAX")(test_case) return test_case def require_pil(test_case): """ Decorator marking a test that requires Pillow. These tests are skipped when Pillow isn't installed. """ if not config.PIL_AVAILABLE: test_case = unittest.skip("test requires Pillow")(test_case) return test_case def require_torchvision(test_case): """ Decorator marking a test that requires torchvision. These tests are skipped when torchvision isn't installed. """ if not config.TORCHVISION_AVAILABLE: test_case = unittest.skip("test requires torchvision")(test_case) return test_case def require_torchcodec(test_case): """ Decorator marking a test that requires torchcodec. These tests are skipped when torchcodec isn't installed. """ if not config.TORCHCODEC_AVAILABLE: test_case = unittest.skip("test requires torchcodec")(test_case) return test_case def require_pdfplumber(test_case): """ Decorator marking a test that requires pdfplumber. These tests are skipped when decord isn't installed. """ if not config.PDFPLUMBER_AVAILABLE: test_case = unittest.skip("test requires pdfplumber")(test_case) return test_case def require_transformers(test_case): """ Decorator marking a test that requires transformers. These tests are skipped when transformers isn't installed. """ try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers")(test_case) else: return test_case def require_tiktoken(test_case): """ Decorator marking a test that requires tiktoken. These tests are skipped when transformers isn't installed. """ try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken")(test_case) else: return test_case def require_spacy(test_case): """ Decorator marking a test that requires spacy. These tests are skipped when they aren't installed. """ try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy")(test_case) else: return test_case def require_pyspark(test_case): """ Decorator marking a test that requires pyspark. These tests are skipped when pyspark isn't installed. """ try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark")(test_case) else: return test_case def require_joblibspark(test_case): """ Decorator marking a test that requires joblibspark. These tests are skipped when pyspark isn't installed. """ try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark")(test_case) else: return test_case def require_torchdata_stateful_dataloader(test_case): """ Decorator marking a test that requires torchdata.stateful_dataloader. These tests are skipped when torchdata with stateful_dataloader module isn't installed. 
""" try: import torchdata.stateful_dataloader # noqa F401 except (ImportError, AssertionError): return unittest.skip("test requires torchdata.stateful_dataloader")(test_case) else: return test_case def slow(test_case): """ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. """ if not _run_slow_tests or _run_slow_tests == 0: test_case = unittest.skip("test is slow")(test_case) return test_case def local(test_case): """ Decorator marking a test as local Local tests are run by default. Set the RUN_LOCAL environment variable to a falsy value to not run them. """ if not _run_local_tests or _run_local_tests == 0: test_case = unittest.skip("test is local")(test_case) return test_case def packaged(test_case): """ Decorator marking a test as packaged Packaged tests are run by default. Set the RUN_PACKAGED environment variable to a falsy value to not run them. """ if not _run_packaged_tests or _run_packaged_tests == 0: test_case = unittest.skip("test is packaged")(test_case) return test_case def remote(test_case): """ Decorator marking a test as one that relies on GitHub or the Hugging Face Hub. Remote tests are skipped by default. Set the RUN_REMOTE environment variable to a falsy value to not run them. """ if not _run_remote_tests or _run_remote_tests == 0: test_case = unittest.skip("test requires remote")(test_case) return test_case def for_all_test_methods(*decorators): def decorate(cls): for name, fn in cls.__dict__.items(): if callable(fn) and name.startswith("test"): for decorator in decorators: fn = decorator(fn) setattr(cls, name, fn) return cls return decorate class RequestWouldHangIndefinitelyError(Exception): pass class OfflineSimulationMode(Enum): CONNECTION_FAILS = 0 CONNECTION_TIMES_OUT = 1 HF_HUB_OFFLINE_SET_TO_1 = 2 @contextmanager def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16): """ Simulate offline mode. There are three offline simulatiom modes: CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call. Connection errors are created by mocking socket.socket CONNECTION_TIMES_OUT: the connection hangs until it times out. The default timeout value is low (1e-16) to speed up the tests. Timeout errors are created by mocking requests.request HF_HUB_OFFLINE_SET_TO_1: the HF_HUB_OFFLINE environment variable is set to 1. This makes the http/ftp calls of the library instantly fail and raise an OfflineModeEmabled error. """ online_request = requests.Session().request def timeout_request(session, method, url, **kwargs): # Change the url to an invalid url so that the connection hangs invalid_url = "https://10.255.255.1" if kwargs.get("timeout") is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." 
) kwargs["timeout"] = timeout try: return online_request(method, invalid_url, **kwargs) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier e.request.url = url max_retry_error = e.args[0] max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),) e.args = (max_retry_error,) raise def raise_connection_error(session, prepared_request, **kwargs): raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send", raise_connection_error): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request", timeout_request): yield elif mode is OfflineSimulationMode.HF_HUB_OFFLINE_SET_TO_1: with patch("datasets.config.HF_HUB_OFFLINE", True): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum.") @contextmanager def set_current_working_directory_to_temp_dir(*args, **kwargs): original_working_dir = str(Path().resolve()) with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir: try: os.chdir(tmp_dir) yield finally: os.chdir(original_working_dir) @contextmanager def assert_arrow_memory_increases(): import gc gc.collect() previous_allocated_memory = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def assert_arrow_memory_doesnt_increase(): import gc gc.collect() previous_allocated_memory = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def is_rng_equal(rng1, rng2): return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist() def xfail_if_500_502_http_error(func): import decorator from requests.exceptions import HTTPError def _wrapper(func, *args, **kwargs): try: return func(*args, **kwargs) except HTTPError as err: if str(err).startswith("500") or str(err).startswith("502"): pytest.xfail(str(err)) raise err return decorator.decorator(_wrapper, func) # --- distributed testing functions --- # # copied from transformers # originally adapted from https://stackoverflow.com/a/59041913/9201239 class _RunOutput: def __init__(self, returncode, stdout, stderr): self.returncode = returncode self.stdout = stdout self.stderr = stderr async def _read_stream(stream, callback): while True: line = await stream.readline() if line: callback(line) else: break async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: if echo: print("\nRunning: ", " ".join(cmd)) p = await asyncio.create_subprocess_exec( cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) out = [] err = [] def tee(line, sink, pipe, label=""): line = line.decode("utf-8").rstrip() sink.append(line) if not quiet: print(label, line, file=pipe) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")), _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")), ], timeout=timeout, ) return _RunOutput(await p.wait(), out, err) def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: loop = asyncio.get_event_loop() result = loop.run_until_complete( _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) ) cmd_str = " ".join(cmd) if result.returncode > 0: stderr = "\n".join(result.stderr) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output.") return result def pytest_xdist_worker_id(): """ Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0 if `-n 1` or `pytest-xdist` isn't being used. """ worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0") worker = re.sub(r"^gw", "", worker, count=0, flags=re.M) return int(worker) def get_torch_dist_unique_port(): """ Returns a port number that can be fed to `torchrun`'s `--master_port` argument. Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same port at once. """ port = 29500 uniq_delta = pytest_xdist_worker_id() return port + uniq_delta
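# --- hypothetical usage sketches (not part of the original module) --- #
# `offline()` simulates network failure modes, and `get_torch_dist_unique_port()`
# keeps concurrent pytest-xdist workers from racing for the same torchrun port:
#
#     def test_load_fails_without_network():
#         with offline(OfflineSimulationMode.CONNECTION_FAILS):
#             with pytest.raises(requests.ConnectionError):
#                 requests.Session().request("GET", "https://huggingface.co")
#
#     cmd = f"torchrun --nproc_per_node=2 --master_port={get_torch_dist_unique_port()} train.py"
#     execute_subprocess_async(cmd.split(), env=os.environ.copy())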
datasets/tests/utils.py/0
{ "file_path": "datasets/tests/utils.py", "repo_id": "datasets", "token_count": 6628 }
114
from functools import partial import torch from benchmarking_utils import BenchmarkMixin, BenchmarkScenario, model_init_fn from diffusers import LTXVideoTransformer3DModel from diffusers.utils.testing_utils import torch_device CKPT_ID = "Lightricks/LTX-Video-0.9.7-dev" RESULT_FILENAME = "ltx.csv" def get_input_dict(**device_dtype_kwargs): # 512x704 (161 frames) # `max_sequence_length`: 256 hidden_states = torch.randn(1, 7392, 128, **device_dtype_kwargs) encoder_hidden_states = torch.randn(1, 256, 4096, **device_dtype_kwargs) encoder_attention_mask = torch.ones(1, 256, **device_dtype_kwargs) timestep = torch.tensor([1.0], **device_dtype_kwargs) video_coords = torch.randn(1, 3, 7392, **device_dtype_kwargs) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, "timestep": timestep, "video_coords": video_coords, } if __name__ == "__main__": scenarios = [ BenchmarkScenario( name=f"{CKPT_ID}-bf16", model_cls=LTXVideoTransformer3DModel, model_init_kwargs={ "pretrained_model_name_or_path": CKPT_ID, "torch_dtype": torch.bfloat16, "subfolder": "transformer", }, get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16), model_init_fn=model_init_fn, compile_kwargs={"fullgraph": True}, ), BenchmarkScenario( name=f"{CKPT_ID}-layerwise-upcasting", model_cls=LTXVideoTransformer3DModel, model_init_kwargs={ "pretrained_model_name_or_path": CKPT_ID, "torch_dtype": torch.bfloat16, "subfolder": "transformer", }, get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16), model_init_fn=partial(model_init_fn, layerwise_upcasting=True), ), BenchmarkScenario( name=f"{CKPT_ID}-group-offload-leaf", model_cls=LTXVideoTransformer3DModel, model_init_kwargs={ "pretrained_model_name_or_path": CKPT_ID, "torch_dtype": torch.bfloat16, "subfolder": "transformer", }, get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16), model_init_fn=partial( model_init_fn, group_offload_kwargs={ "onload_device": torch_device, "offload_device": torch.device("cpu"), "offload_type": "leaf_level", "use_stream": True, "non_blocking": True, }, ), ), ] runner = BenchmarkMixin() runner.run_bencmarks_and_collate(scenarios, filename=RESULT_FILENAME)
diffusers/benchmarks/benchmarking_ltx.py/0
{ "file_path": "diffusers/benchmarks/benchmarking_ltx.py", "repo_id": "diffusers", "token_count": 1511 }
115
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Textual Inversion Textual Inversion is a training method for personalizing models by learning new text embeddings from a few example images. The file produced from training is extremely small (a few KBs) and the new embeddings can be loaded into the text encoder. [`TextualInversionLoaderMixin`] provides a function for loading Textual Inversion embeddings from Diffusers and Automatic1111 into the text encoder and loading a special token to activate the embeddings. <Tip> To learn more about how to load Textual Inversion embeddings, see the [Textual Inversion](../../using-diffusers/loading_adapters#textual-inversion) loading guide. </Tip> ## TextualInversionLoaderMixin [[autodoc]] loaders.textual_inversion.TextualInversionLoaderMixin
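A brief usage sketch is shown below. The embedding repository is one of the community concepts on the Hub, and `<cat-toy>` is its activation token:

```py
import torch
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
# Load the learned embedding into the text encoder and use its special token in the prompt.
pipeline.load_textual_inversion("sd-concepts-library/cat-toy")
image = pipeline("A <cat-toy> train", num_inference_steps=50).images[0]
```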
diffusers/docs/source/en/api/loaders/textual_inversion.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/textual_inversion.md", "repo_id": "diffusers", "token_count": 340 }
116
<!-- Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AutoencoderKLCosmos [Cosmos Tokenizers](https://github.com/NVIDIA/Cosmos-Tokenizer). Supported models: - [nvidia/Cosmos-1.0-Tokenizer-CV8x8x8](https://huggingface.co/nvidia/Cosmos-1.0-Tokenizer-CV8x8x8) The model can be loaded with the following code snippet. ```python from diffusers import AutoencoderKLCosmos vae = AutoencoderKLCosmos.from_pretrained("nvidia/Cosmos-1.0-Tokenizer-CV8x8x8", subfolder="vae") ``` ## AutoencoderKLCosmos [[autodoc]] AutoencoderKLCosmos - decode - encode - all ## AutoencoderKLOutput [[autodoc]] models.autoencoders.autoencoder_kl.AutoencoderKLOutput ## DecoderOutput [[autodoc]] models.autoencoders.vae.DecoderOutput
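As a quick, hedged sketch of a full encode/decode round trip (the input follows the usual `(batch, channels, frames, height, width)` video layout; the frame count and resolution here are illustrative assumptions):

```py
import torch
from diffusers import AutoencoderKLCosmos

vae = AutoencoderKLCosmos.from_pretrained("nvidia/Cosmos-1.0-Tokenizer-CV8x8x8", subfolder="vae")

video = torch.randn(1, 3, 17, 256, 256)  # (B, C, T, H, W); the shape is an assumption
latents = vae.encode(video).latent_dist.sample()
reconstruction = vae.decode(latents).sample
```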
diffusers/docs/source/en/api/models/autoencoderkl_cosmos.md/0
{ "file_path": "diffusers/docs/source/en/api/models/autoencoderkl_cosmos.md", "repo_id": "diffusers", "token_count": 413 }
117
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Models 🤗 Diffusers provides pretrained models for popular algorithms and modules to create custom diffusion systems. The primary function of models is to denoise an input sample as modeled by the distribution \\(p_{\theta}(x_{t-1}|x_{t})\\). All models are built from the base [`ModelMixin`] class which is a [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html) providing basic functionality for saving and loading models, locally and from the Hugging Face Hub. ## ModelMixin [[autodoc]] ModelMixin ## FlaxModelMixin [[autodoc]] FlaxModelMixin ## PushToHubMixin [[autodoc]] utils.PushToHubMixin
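Because every model inherits from [`ModelMixin`], the same save and load pattern applies across model classes. A minimal sketch:

```py
from diffusers import UNet2DModel

# Download a pretrained model from the Hub...
model = UNet2DModel.from_pretrained("google/ddpm-cat-256")
# ...save it to a local directory...
model.save_pretrained("./ddpm-cat-256-local")
# ...and reload it from that directory.
model = UNet2DModel.from_pretrained("./ddpm-cat-256-local")
```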
diffusers/docs/source/en/api/models/overview.md/0
{ "file_path": "diffusers/docs/source/en/api/models/overview.md", "repo_id": "diffusers", "token_count": 336 }
118
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AuraFlow AuraFlow is inspired by [Stable Diffusion 3](../pipelines/stable_diffusion/stable_diffusion_3) and is by far the largest text-to-image generation model that comes with an Apache 2.0 license. This model achieves state-of-the-art results on the [GenEval](https://github.com/djghosh13/geneval) benchmark. It was developed by the Fal team and more details about it can be found in [this blog post](https://blog.fal.ai/auraflow/). <Tip> AuraFlow can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details. </Tip> ## Quantization Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model. Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`AuraFlowPipeline`] for inference with bitsandbytes. 
```py
import torch
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, AuraFlowTransformer2DModel, AuraFlowPipeline
from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel

quant_config = BitsAndBytesConfig(load_in_8bit=True)
text_encoder_8bit = T5EncoderModel.from_pretrained(
    "fal/AuraFlow",
    subfolder="text_encoder",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)

quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
transformer_8bit = AuraFlowTransformer2DModel.from_pretrained(
    "fal/AuraFlow",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)

pipeline = AuraFlowPipeline.from_pretrained(
    "fal/AuraFlow",
    text_encoder=text_encoder_8bit,
    transformer=transformer_8bit,
    torch_dtype=torch.float16,
    device_map="balanced",
)

prompt = "a tiny astronaut hatching from an egg on the moon"
image = pipeline(prompt).images[0]
image.save("auraflow.png")
```

Loading [GGUF checkpoints](https://huggingface.co/docs/diffusers/quantization/gguf) is also supported:

```py
import torch
from diffusers import (
    AuraFlowPipeline,
    GGUFQuantizationConfig,
    AuraFlowTransformer2DModel,
)

transformer = AuraFlowTransformer2DModel.from_single_file(
    "https://huggingface.co/city96/AuraFlow-v0.3-gguf/blob/main/aura_flow_0.3-Q2_K.gguf",
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)

pipeline = AuraFlowPipeline.from_pretrained(
    "fal/AuraFlow-v0.3",
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)

prompt = "a cute pony in a field of flowers"
image = pipeline(prompt).images[0]
image.save("auraflow.png")
```

## Support for `torch.compile()`

AuraFlow can be compiled with `torch.compile()` to speed up inference latency even for different resolutions. First, install PyTorch nightly following the instructions from [here](https://pytorch.org/).

The snippet below shows the changes needed to enable this:

```diff
+ torch.fx.experimental._config.use_duck_shape = False
+ pipeline.transformer = torch.compile(
    pipeline.transformer, fullgraph=True, dynamic=True
)
```

Setting `use_duck_shape` to `False` instructs the compiler not to reuse the same symbolic variable for input sizes that happen to be equal, which avoids unnecessary recompilations across resolutions. For more details, check out [this comment](https://github.com/huggingface/diffusers/pull/11327#discussion_r2047659790).

This yields speed improvements ranging from 100% (at low resolutions) to 30% (at 1536x1536 resolution).

Thanks to [AstraliteHeart](https://github.com/huggingface/diffusers/pull/11297/) who helped us rewrite the [`AuraFlowTransformer2DModel`] class so that the above works for different resolutions ([PR](https://github.com/huggingface/diffusers/pull/11297/)).

## AuraFlowPipeline

[[autodoc]] AuraFlowPipeline
	- all
	- __call__
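Putting the compilation recipe above together, a minimal end-to-end sketch (this assumes a recent PyTorch nightly, as noted earlier):

```py
import torch
from diffusers import AuraFlowPipeline

torch.fx.experimental._config.use_duck_shape = False

pipeline = AuraFlowPipeline.from_pretrained(
    "fal/AuraFlow-v0.3", torch_dtype=torch.bfloat16
).to("cuda")
pipeline.transformer = torch.compile(pipeline.transformer, fullgraph=True, dynamic=True)

image = pipeline("a cute pony in a field of flowers").images[0]
```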
diffusers/docs/source/en/api/pipelines/aura_flow.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/aura_flow.md", "repo_id": "diffusers", "token_count": 1479 }
119
<!--Copyright 2025 The HuggingFace Team and Tencent Hunyuan Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Hunyuan-DiT

![chinese elements understanding](https://github.com/gnobitab/diffusers-hunyuan/assets/1157982/39b99036-c3cb-4f16-bb1a-40ec25eda573)

[Hunyuan-DiT : A Powerful Multi-Resolution Diffusion Transformer with Fine-Grained Chinese Understanding](https://huggingface.co/papers/2405.08748) from Tencent Hunyuan.

The abstract from the paper is:

*We present Hunyuan-DiT, a text-to-image diffusion transformer with fine-grained understanding of both English and Chinese. To construct Hunyuan-DiT, we carefully design the transformer structure, text encoder, and positional encoding. We also build from scratch a whole data pipeline to update and evaluate data for iterative model optimization. For fine-grained language understanding, we train a Multimodal Large Language Model to refine the captions of the images. Finally, Hunyuan-DiT can perform multi-turn multimodal dialogue with users, generating and refining images according to the context. Through our holistic human evaluation protocol with more than 50 professional human evaluators, Hunyuan-DiT sets a new state-of-the-art in Chinese-to-image generation compared with other open-source models.*

You can find the original codebase at [Tencent/HunyuanDiT](https://github.com/Tencent/HunyuanDiT) and all the available checkpoints at [Tencent-Hunyuan](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT).

**Highlights**: HunyuanDiT supports Chinese/English-to-image and multi-resolution generation.

HunyuanDiT has the following components:
* It uses a diffusion transformer as the backbone
* It combines two text encoders, a bilingual CLIP and a multilingual T5 encoder

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

<Tip>

You can further improve generation quality by passing the generated image from [`HunyuanDiTPipeline`] to the [SDXL refiner](../../using-diffusers/sdxl#base-to-refiner-model) model.

</Tip>

## Optimization

You can optimize the pipeline's runtime and memory consumption with torch.compile and feed-forward chunking. To learn about other optimization methods, check out the [Speed up inference](../../optimization/fp16) and [Reduce memory usage](../../optimization/memory) guides.

### Inference

Use [`torch.compile`](https://huggingface.co/docs/diffusers/main/en/tutorials/fast_diffusion#torchcompile) to reduce the inference latency.
First, load the pipeline:

```python
from diffusers import HunyuanDiTPipeline
import torch

pipeline = HunyuanDiTPipeline.from_pretrained(
	"Tencent-Hunyuan/HunyuanDiT-Diffusers", torch_dtype=torch.float16
).to("cuda")
```

Then change the memory layout of the pipeline's `transformer` and `vae` components to `torch.channels_last`:

```python
pipeline.transformer.to(memory_format=torch.channels_last)
pipeline.vae.to(memory_format=torch.channels_last)
```

Finally, compile the components and run inference:

```python
pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=True)
pipeline.vae.decode = torch.compile(pipeline.vae.decode, mode="max-autotune", fullgraph=True)

image = pipeline(prompt="一个宇航员在骑马").images[0]
```

The [benchmark](https://gist.github.com/sayakpaul/29d3a14905cfcbf611fe71ebd22e9b23) results on a 80GB A100 machine are:

```bash
With torch.compile(): Average inference time: 12.470 seconds.
Without torch.compile(): Average inference time: 20.570 seconds.
```

### Memory optimization

By loading the T5 text encoder in 8 bits, you can run the pipeline in just under 6 GBs of GPU VRAM. Refer to [this script](https://gist.github.com/sayakpaul/3154605f6af05b98a41081aaba5ca43e) for details.

Furthermore, you can use the [`~HunyuanDiT2DModel.enable_forward_chunking`] method to reduce memory usage. Feed-forward chunking runs the feed-forward layers in a transformer block in a loop instead of all at once. This gives you a trade-off between memory consumption and inference runtime.

```diff
+ pipeline.transformer.enable_forward_chunking(chunk_size=1, dim=1)
```

## HunyuanDiTPipeline

[[autodoc]] HunyuanDiTPipeline
	- all
	- __call__
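As a hedged sketch of the 8-bit T5 idea from the memory section above (the `text_encoder_2` subfolder name follows the usual Diffusers checkpoint layout and is an assumption here):

```py
import torch
from diffusers import HunyuanDiTPipeline
from transformers import BitsAndBytesConfig, T5EncoderModel

text_encoder_2 = T5EncoderModel.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers",
    subfolder="text_encoder_2",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
pipeline = HunyuanDiTPipeline.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers",
    text_encoder_2=text_encoder_2,
    torch_dtype=torch.float16,
)
```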
diffusers/docs/source/en/api/pipelines/hunyuandit.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/hunyuandit.md", "repo_id": "diffusers", "token_count": 1472 }
120
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Stable Audio

Stable Audio was proposed in [Stable Audio Open](https://huggingface.co/papers/2407.14358) by Zach Evans et al. It takes a text prompt as input and predicts the corresponding sound or music sample.

Stable Audio Open generates variable-length (up to 47s) stereo audio at 44.1kHz from text prompts. It comprises three components: an autoencoder that compresses waveforms into a manageable sequence length, a T5-based text embedding for text conditioning, and a transformer-based diffusion (DiT) model that operates in the latent space of the autoencoder.

Stable Audio is trained on a corpus of around 48k audio recordings, where around 47k are from Freesound and the rest are from the Free Music Archive (FMA). All audio files are licensed under CC0, CC BY, or CC Sampling+. This data is used to train the autoencoder and the DiT.

The abstract of the paper is the following:

*Open generative models are vitally important for the community, allowing for fine-tunes and serving as baselines when presenting new models. However, most current text-to-audio models are private and not accessible for artists and researchers to build upon. Here we describe the architecture and training process of a new open-weights text-to-audio model trained with Creative Commons data. Our evaluation shows that the model's performance is competitive with the state-of-the-art across various metrics. Notably, the reported FDopenl3 results (measuring the realism of the generations) showcase its potential for high-quality stereo sound synthesis at 44.1kHz.*

This pipeline was contributed by [Yoach Lacombe](https://huggingface.co/ylacombe). The original codebase can be found at [Stability-AI/stable-audio-tools](https://github.com/Stability-AI/stable-audio-tools).

## Tips

When constructing a prompt, keep in mind:

* Descriptive prompt inputs work best; use adjectives to describe the sound (for example, "high quality" or "clear") and make the prompt context specific where possible (e.g. "melodic techno with a fast beat and synths" works better than "techno").
* Using a *negative prompt* can significantly improve the quality of the generated audio. Try using a negative prompt of "low quality, average quality".

During inference:

* The _quality_ of the generated audio sample can be controlled by the `num_inference_steps` argument; higher steps give higher quality audio at the expense of slower inference.
* Multiple waveforms can be generated in one go: set `num_waveforms_per_prompt` to a value greater than 1 to enable. Automatic scoring will be performed between the generated waveforms and prompt text, and the audios ranked from best to worst accordingly.

## Quantization

Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have a varying impact on audio quality depending on the model.
Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`StableAudioPipeline`] for inference with bitsandbytes.

```py
import torch
import soundfile as sf
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, StableAudioDiTModel, StableAudioPipeline
from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel

quant_config = BitsAndBytesConfig(load_in_8bit=True)
text_encoder_8bit = T5EncoderModel.from_pretrained(
    "stabilityai/stable-audio-open-1.0",
    subfolder="text_encoder",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)

quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
transformer_8bit = StableAudioDiTModel.from_pretrained(
    "stabilityai/stable-audio-open-1.0",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)

pipeline = StableAudioPipeline.from_pretrained(
    "stabilityai/stable-audio-open-1.0",
    text_encoder=text_encoder_8bit,
    transformer=transformer_8bit,
    torch_dtype=torch.float16,
    device_map="balanced",
)

prompt = "The sound of a hammer hitting a wooden surface."
negative_prompt = "Low quality."
generator = torch.Generator("cuda").manual_seed(0)
audio = pipeline(
    prompt,
    negative_prompt=negative_prompt,
    num_inference_steps=200,
    audio_end_in_s=10.0,
    num_waveforms_per_prompt=3,
    generator=generator,
).audios

output = audio[0].T.float().cpu().numpy()
sf.write("hammer.wav", output, pipeline.vae.sampling_rate)
```

## StableAudioPipeline

[[autodoc]] StableAudioPipeline
	- all
	- __call__
diffusers/docs/source/en/api/pipelines/stable_audio.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_audio.md", "repo_id": "diffusers", "token_count": 1438 }
121
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Stable Diffusion XL

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
  <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&logo=apple&logoColor=white%22">
</div>

Stable Diffusion XL (SDXL) was proposed in [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://huggingface.co/papers/2307.01952) by Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach.

The abstract from the paper is:

*We present SDXL, a latent diffusion model for text-to-image synthesis. Compared to previous versions of Stable Diffusion, SDXL leverages a three times larger UNet backbone: The increase of model parameters is mainly due to more attention blocks and a larger cross-attention context as SDXL uses a second text encoder. We design multiple novel conditioning schemes and train SDXL on multiple aspect ratios. We also introduce a refinement model which is used to improve the visual fidelity of samples generated by SDXL using a post-hoc image-to-image technique. We demonstrate that SDXL shows drastically improved performance compared to the previous versions of Stable Diffusion and achieves results competitive with those of black-box state-of-the-art image generators.*

## Tips

- Using SDXL with a DPM++ scheduler for less than 50 steps is known to produce [visual artifacts](https://github.com/huggingface/diffusers/issues/5433) because the solver becomes numerically unstable. To fix this issue, take a look at this [PR](https://github.com/huggingface/diffusers/pull/5541) which recommends for ODE/SDE solvers:
  - set `use_karras_sigmas=True` or `lu_lambdas=True` to improve image quality
  - set `euler_at_final=True` if you're using a solver with uniform step sizes (DPM++2M or DPM++2M SDE)
- Most SDXL checkpoints work best with an image size of 1024x1024. Image sizes of 768x768 and 512x512 are also supported, but the results aren't as good. Anything below 512x512 is not recommended and likely won't work well for default checkpoints like [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0).
- SDXL can pass a different prompt for each of the text encoders it was trained on. We can even pass different parts of the same prompt to the text encoders (see the sketch after the tip below).
- SDXL output images can be improved by making use of a refiner model in an image-to-image setting.
- SDXL offers `negative_original_size`, `negative_crops_coords_top_left`, and `negative_target_size` to negatively condition the model on image resolution and cropping parameters.

<Tip>

To learn how to use SDXL for various tasks, how to optimize performance, and other usage examples, take a look at the [Stable Diffusion XL](../../../using-diffusers/sdxl) guide.

Check out the [Stability AI](https://huggingface.co/stabilityai) Hub organization for the official base and refiner model checkpoints!

</Tip>
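The snippet below sketches the dual-prompt usage from the tips above. How the subject and style are split between `prompt` and `prompt_2` is an illustrative choice, not a requirement of the API:

```py
import torch
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# `prompt` is routed to one text encoder and `prompt_2` to the other,
# so the subject and the style keywords can be conditioned separately.
image = pipeline(
    prompt="a photo of an astronaut riding a horse on mars",
    prompt_2="Van Gogh painting, swirling brushstrokes, vivid colors",
).images[0]
```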
## StableDiffusionXLPipeline

[[autodoc]] StableDiffusionXLPipeline
	- all
	- __call__

## StableDiffusionXLImg2ImgPipeline

[[autodoc]] StableDiffusionXLImg2ImgPipeline
	- all
	- __call__

## StableDiffusionXLInpaintPipeline

[[autodoc]] StableDiffusionXLInpaintPipeline
	- all
	- __call__
diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md", "repo_id": "diffusers", "token_count": 1106 }
122
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# CosineDPMSolverMultistepScheduler

The [`CosineDPMSolverMultistepScheduler`] is a variant of [`DPMSolverMultistepScheduler`] with a cosine schedule, proposed by Nichol and Dhariwal (2021).
It is being used in the [Stable Audio Open](https://huggingface.co/papers/2407.14358) paper and the [Stability-AI/stable-audio-tools](https://github.com/Stability-AI/stable-audio-tools) codebase.

This scheduler was contributed by [Yoach Lacombe](https://huggingface.co/ylacombe).

## CosineDPMSolverMultistepScheduler
[[autodoc]] CosineDPMSolverMultistepScheduler

## SchedulerOutput
[[autodoc]] schedulers.scheduling_utils.SchedulerOutput
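A short usage sketch (assuming the pipeline's existing scheduler config is compatible, as it is for [`StableAudioPipeline`]):

```py
import torch
from diffusers import CosineDPMSolverMultistepScheduler, StableAudioPipeline

pipeline = StableAudioPipeline.from_pretrained("stabilityai/stable-audio-open-1.0", torch_dtype=torch.float16)
# Swap in the cosine-schedule DPM-Solver variant via the shared config mechanism.
pipeline.scheduler = CosineDPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
```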
diffusers/docs/source/en/api/schedulers/cosine_dpm.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/cosine_dpm.md", "repo_id": "diffusers", "token_count": 358 }
123
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # IPNDMScheduler `IPNDMScheduler` is a fourth-order Improved Pseudo Linear Multistep scheduler. The original implementation can be found at [crowsonkb/v-diffusion-pytorch](https://github.com/crowsonkb/v-diffusion-pytorch/blob/987f8985e38208345c1959b0ea767a625831cc9b/diffusion/sampling.py#L296). ## IPNDMScheduler [[autodoc]] IPNDMScheduler ## SchedulerOutput [[autodoc]] schedulers.scheduling_utils.SchedulerOutput
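A minimal, hedged sketch of swapping this scheduler into a pipeline via the shared config mechanism (whether a given pipeline produces good results with IPNDM depends on the model; the checkpoint below is only a placeholder):

```py
from diffusers import DiffusionPipeline, IPNDMScheduler

pipeline = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32")
pipeline.scheduler = IPNDMScheduler.from_config(pipeline.scheduler.config)
image = pipeline(num_inference_steps=50).images[0]
```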
diffusers/docs/source/en/api/schedulers/ipndm.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/ipndm.md", "repo_id": "diffusers", "token_count": 295 }
124
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Utilities Utility and helper functions for working with 🤗 Diffusers. ## numpy_to_pil [[autodoc]] utils.numpy_to_pil ## pt_to_pil [[autodoc]] utils.pt_to_pil ## load_image [[autodoc]] utils.load_image ## export_to_gif [[autodoc]] utils.export_to_gif ## export_to_video [[autodoc]] utils.export_to_video ## make_image_grid [[autodoc]] utils.make_image_grid ## randn_tensor [[autodoc]] utils.torch_utils.randn_tensor ## apply_layerwise_casting [[autodoc]] hooks.layerwise_casting.apply_layerwise_casting ## apply_group_offloading [[autodoc]] hooks.group_offloading.apply_group_offloading
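A small sketch combining a few of these helpers (the image URL points at one of the documentation assets and is only a placeholder):

```py
from diffusers.utils import load_image, make_image_grid

image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
)
# Tile the same image into a 1x2 grid for quick side-by-side viewing.
grid = make_image_grid([image, image], rows=1, cols=2)
```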
diffusers/docs/source/en/api/utilities.md/0
{ "file_path": "diffusers/docs/source/en/api/utilities.md", "repo_id": "diffusers", "token_count": 387 }
125
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ComponentsManager

The [`ComponentsManager`] is a model registry and management system for Modular Diffusers. It adds and tracks models, stores useful metadata (model size, device placement, adapters), prevents duplicate model instances, and supports offloading.

This guide will show you how to use [`ComponentsManager`] to manage components and device memory.

## Add a component

The [`ComponentsManager`] should be created alongside a [`ModularPipeline`] in either [`~ModularPipeline.from_pretrained`] or [`~ModularPipelineBlocks.init_pipeline`].

> [!TIP]
> The `collection` parameter is optional but makes it easier to organize and manage components.

<hfoptions id="create">
<hfoption id="from_pretrained">

```py
from diffusers import ModularPipeline, ComponentsManager

comp = ComponentsManager()
pipe = ModularPipeline.from_pretrained("YiYiXu/modular-demo-auto", components_manager=comp, collection="test1")
```

</hfoption>
<hfoption id="init_pipeline">

```py
from diffusers import ComponentsManager
from diffusers.modular_pipelines import SequentialPipelineBlocks
from diffusers.modular_pipelines.stable_diffusion_xl import TEXT2IMAGE_BLOCKS

t2i_blocks = SequentialPipelineBlocks.from_blocks_dict(TEXT2IMAGE_BLOCKS)

modular_repo_id = "YiYiXu/modular-loader-t2i-0704"
components = ComponentsManager()
t2i_pipeline = t2i_blocks.init_pipeline(modular_repo_id, components_manager=components)
```

</hfoption>
</hfoptions>

Components are only loaded and registered when using [`~ModularPipeline.load_components`] or [`~ModularPipeline.load_default_components`]. The example below uses [`~ModularPipeline.load_default_components`] to create a second pipeline that reuses all the components from the first one, and assigns it to a different collection.

```py
pipe.load_default_components()
pipe2 = ModularPipeline.from_pretrained("YiYiXu/modular-demo-auto", components_manager=comp, collection="test2")
```

Use the [`~ModularPipeline.null_component_names`] property to identify any components that need to be loaded, retrieve them with [`~ComponentsManager.get_components_by_names`], and then call [`~ModularPipeline.update_components`] to add the missing components.

```py
pipe2.null_component_names
['text_encoder', 'text_encoder_2', 'tokenizer', 'tokenizer_2', 'image_encoder', 'unet', 'vae', 'scheduler', 'controlnet']

comp_dict = comp.get_components_by_names(names=pipe2.null_component_names)
pipe2.update_components(**comp_dict)
```

To add individual components, use the [`~ComponentsManager.add`] method. This registers a component with a unique id.

```py
from diffusers import AutoModel

text_encoder = AutoModel.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder")
component_id = comp.add("text_encoder", text_encoder)
comp
```

Use [`~ComponentsManager.remove`] to remove a component using its id.
```py
comp.remove("text_encoder_139917733042864")
```

## Retrieve a component

The [`ComponentsManager`] provides several methods to retrieve registered components.

### get_one

The [`~ComponentsManager.get_one`] method returns a single component and supports pattern matching for the `name` parameter. If multiple components match, [`~ComponentsManager.get_one`] raises an error.

| Pattern     | Example                              | Description                               |
|-------------|--------------------------------------|-------------------------------------------|
| exact       | `comp.get_one(name="unet")`          | exact name match                          |
| wildcard    | `comp.get_one(name="unet*")`         | names starting with "unet"                |
| exclusion   | `comp.get_one(name="!unet")`         | exclude components named "unet"           |
| or          | `comp.get_one(name="unet&#124;vae")` | name is "unet" or "vae"                   |

[`~ComponentsManager.get_one`] also filters components by the `collection` argument or `load_id` argument.

```py
comp.get_one(name="unet", collection="sdxl")
```

### get_components_by_names

The [`~ComponentsManager.get_components_by_names`] method accepts a list of names and returns a dictionary mapping names to components. This is especially useful with [`ModularPipeline`] since it provides a list of required component names, and the returned dictionary can be passed directly to [`~ModularPipeline.update_components`].

```py
component_dict = comp.get_components_by_names(names=["text_encoder", "unet", "vae"])
{"text_encoder": component1, "unet": component2, "vae": component3}
```

## Duplicate detection

It is recommended to load model components with [`ComponentSpec`] to assign components with a unique id that encodes their loading parameters. This allows [`ComponentsManager`] to automatically detect and prevent duplicate model instances even when different objects represent the same underlying checkpoint.

```py
from diffusers import ComponentSpec, ComponentsManager
from transformers import CLIPTextModel

comp = ComponentsManager()

# Create ComponentSpec for the first text encoder
spec = ComponentSpec(name="text_encoder", repo="stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder", type_hint=AutoModel)
# Create ComponentSpec for a duplicate text encoder (it is the same checkpoint, from the same repo/subfolder)
spec_duplicated = ComponentSpec(name="text_encoder_duplicated", repo="stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder", type_hint=CLIPTextModel)

# Load and add both components - the manager will detect they're the same model
comp.add("text_encoder", spec.load())
comp.add("text_encoder_duplicated", spec_duplicated.load())
```

This returns a warning with instructions for removing the duplicate.

```py
ComponentsManager: adding component 'text_encoder_duplicated_139917580682672', but it has duplicate load_id 'stabilityai/stable-diffusion-xl-base-1.0|text_encoder|null|null' with existing components: text_encoder_139918506246832. To remove a duplicate, call `components_manager.remove('<component_id>')`.
'text_encoder_duplicated_139917580682672'
```

You could also add a component without using [`ComponentSpec`] and duplicate detection still works in most cases, even if you're adding the same component under a different name.

However, [`ComponentsManager`] can't detect duplicates when you load the same component into different objects. In this case, you should load a model with [`ComponentSpec`].
```py text_encoder_2 = AutoModel.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder") comp.add("text_encoder", text_encoder_2) 'text_encoder_139917732983664' ``` ## Collections Collections are labels assigned to components for better organization and management. Add a component to a collection with the `collection` argument in [`~ComponentsManager.add`]. Only one component per name is allowed in each collection. Adding a second component with the same name automatically removes the first component. ```py from diffusers import ComponentSpec, ComponentsManager comp = ComponentsManager() # Create ComponentSpec for the first UNet spec = ComponentSpec(name="unet", repo="stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", type_hint=AutoModel) # Create ComponentSpec for a different UNet spec2 = ComponentSpec(name="unet", repo="RunDiffusion/Juggernaut-XL-v9", subfolder="unet", type_hint=AutoModel, variant="fp16") # Add both UNets to the same collection - the second one will replace the first comp.add("unet", spec.load(), collection="sdxl") comp.add("unet", spec2.load(), collection="sdxl") ``` This makes it convenient to work with node-based systems because you can: - Mark all models as loaded from one node with the `collection` label. - Automatically replace models when new checkpoints are loaded under the same name. - Batch delete all models in a collection when a node is removed. ## Offloading The [`~ComponentsManager.enable_auto_cpu_offload`] method is a global offloading strategy that works across all models regardless of which pipeline is using them. Once enabled, you don't need to worry about device placement if you add or remove components. ```py comp.enable_auto_cpu_offload(device="cuda") ``` All models begin on the CPU and [`ComponentsManager`] moves them to the appropriate device right before they're needed, and moves other models back to the CPU when GPU memory is low. You can set your own rules for which models to offload first.
diffusers/docs/source/en/modular_diffusers/components_manager.md/0
{ "file_path": "diffusers/docs/source/en/modular_diffusers/components_manager.md", "repo_id": "diffusers", "token_count": 2737 }
126
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AWS Neuron Diffusers functionalities are available on [AWS Inf2 instances](https://aws.amazon.com/ec2/instance-types/inf2/), which are EC2 instances powered by [Neuron machine learning accelerators](https://aws.amazon.com/machine-learning/inferentia/). These instances aim to provide better compute performance (higher throughput, lower latency) with good cost-efficiency, making them good candidates for AWS users to deploy diffusion models to production. [Optimum Neuron](https://huggingface.co/docs/optimum-neuron/en/index) is the interface between Hugging Face libraries and AWS Accelerators, including AWS [Trainium](https://aws.amazon.com/machine-learning/trainium/) and AWS [Inferentia](https://aws.amazon.com/machine-learning/inferentia/). It supports many of the features in Diffusers with similar APIs, so it is easier to learn if you're already familiar with Diffusers. Once you have created an AWS Inf2 instance, install Optimum Neuron. ```bash python -m pip install --upgrade-strategy eager optimum[neuronx] ``` <Tip> We provide pre-built [Hugging Face Neuron Deep Learning AMI](https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2) (DLAMI) and Optimum Neuron containers for Amazon SageMaker. It's recommended to correctly set up your environment. </Tip> The example below demonstrates how to generate images with the Stable Diffusion XL model on an inf2.8xlarge instance (you can switch to cheaper inf2.xlarge instances once the model is compiled). To generate some images, use the [`~optimum.neuron.NeuronStableDiffusionXLPipeline`] class, which is similar to the [`StableDiffusionXLPipeline`] class in Diffusers. Unlike Diffusers, you need to compile models in the pipeline to the Neuron format, `.neuron`. Launch the following command to export the model to the `.neuron` format. ```bash optimum-cli export neuron --model stabilityai/stable-diffusion-xl-base-1.0 \ --batch_size 1 \ --height 1024 `# height in pixels of generated image, eg. 768, 1024` \ --width 1024 `# width in pixels of generated image, eg. 768, 1024` \ --num_images_per_prompt 1 `# number of images to generate per prompt, defaults to 1` \ --auto_cast matmul `# cast only matrix multiplication operations` \ --auto_cast_type bf16 `# cast operations from FP32 to BF16` \ sd_neuron_xl/ ``` Now generate some images with the pre-compiled SDXL model. 
```python >>> from optimum.neuron import NeuronStableDiffusionXLPipeline >>> stable_diffusion_xl = NeuronStableDiffusionXLPipeline.from_pretrained("sd_neuron_xl/") >>> prompt = "a pig with wings flying in floating US dollar banknotes in the air, skyscrapers behind, warm color palette, muted colors, detailed, 8k" >>> image = stable_diffusion_xl(prompt).images[0] ``` <img src="https://huggingface.co/datasets/Jingya/document_images/resolve/main/optimum/neuron/sdxl_pig.png" width="256" height="256" alt="peggy generated by sdxl on inf2" /> Feel free to check out more guides and examples on different use cases from the Optimum Neuron [documentation](https://huggingface.co/docs/optimum-neuron/en/inference_tutorials/stable_diffusion#generate-images-with-stable-diffusion-models-on-aws-inferentia)!
diffusers/docs/source/en/optimization/neuron.md/0
{ "file_path": "diffusers/docs/source/en/optimization/neuron.md", "repo_id": "diffusers", "token_count": 1077 }
127
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

[[open-in-colab]]

# Basic performance

Diffusion is a random process that is computationally demanding. You may need to run the [`DiffusionPipeline`] several times before getting a desired output. That's why it's important to carefully balance generation speed and memory usage in order to iterate faster.

This guide recommends some basic performance tips for using the [`DiffusionPipeline`]. Refer to the Inference Optimization section docs, such as [Accelerate inference](./optimization/fp16) or [Reduce memory usage](./optimization/memory), for more detailed performance guides.

## Memory usage

Reducing the amount of memory used indirectly speeds up generation and can help a model fit on device.

The [`~DiffusionPipeline.enable_model_cpu_offload`] method moves a model to the CPU when it is not in use to save GPU memory.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.bfloat16,
    device_map="cuda"
)
pipeline.enable_model_cpu_offload()

prompt = """
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
pipeline(prompt).images[0]
print(f"Max memory allocated: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
```

## Inference speed

Denoising is the most computationally demanding process during diffusion. Methods that optimize this process accelerate inference. Try the following methods for a speed up.

- Add `device_map="cuda"` to place the pipeline on a GPU. Placing a model on an accelerator, like a GPU, increases speed because it performs computations in parallel.
- Set `torch_dtype=torch.bfloat16` to execute the pipeline in half-precision. Reducing the data type precision increases speed because it takes less time to perform computations in a lower precision.

```py
import torch
import time
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.bfloat16,
    device_map="cuda"
)
```

- Use a faster scheduler, such as [`DPMSolverMultistepScheduler`], which only requires ~20-25 steps.
- Set `num_inference_steps` to a lower value. Reducing the number of inference steps reduces the overall number of computations. However, this can result in lower generation quality.
```py
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)

prompt = """
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""

start_time = time.perf_counter()
image = pipeline(prompt).images[0]
end_time = time.perf_counter()

print(f"Image generation took {end_time - start_time:.3f} seconds")
```

## Generation quality

Many modern diffusion models deliver high-quality images out-of-the-box. However, you can still improve generation quality by trying the following.

- Try a more detailed and descriptive prompt. Include details such as the image medium, subject, style, and aesthetic. A negative prompt may also help by guiding a model away from undesirable features described with words like *low quality* or *blurry*.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.bfloat16,
    device_map="cuda"
)

prompt = """
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
negative_prompt = "low quality, blurry, ugly, poor details"
pipeline(prompt, negative_prompt=negative_prompt).images[0]
```

For more details about creating better prompts, take a look at the [Prompt techniques](./using-diffusers/weighted_prompts) doc.

- Try a different scheduler, like [`HeunDiscreteScheduler`] or [`LMSDiscreteScheduler`], that trades generation speed for quality.

```py
import torch
from diffusers import DiffusionPipeline, HeunDiscreteScheduler

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.bfloat16,
    device_map="cuda"
)
pipeline.scheduler = HeunDiscreteScheduler.from_config(pipeline.scheduler.config)

prompt = """
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
negative_prompt = "low quality, blurry, ugly, poor details"
pipeline(prompt, negative_prompt=negative_prompt).images[0]
```

## Next steps

Diffusers offers more advanced and powerful optimizations such as [group-offloading](./optimization/memory#group-offloading) and [regional compilation](./optimization/fp16#regional-compilation). To learn more about how to maximize performance, take a look at the Inference Optimization section.
diffusers/docs/source/en/stable_diffusion.md/0
{ "file_path": "diffusers/docs/source/en/stable_diffusion.md", "repo_id": "diffusers", "token_count": 1720 }
128
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Text-to-image

<Tip warning={true}>

The text-to-image script is experimental, and it's easy to overfit and run into issues like catastrophic forgetting. Try exploring different hyperparameters to get the best results on your dataset.

</Tip>

Text-to-image models like Stable Diffusion are conditioned to generate images given a text prompt.

Training a model can be taxing on your hardware, but if you enable `gradient_checkpointing` and `mixed_precision`, it is possible to train a model on a single 24GB GPU. If you're training with larger batch sizes or want to train faster, it's better to use GPUs with more than 30GB of memory. You can reduce your memory footprint by enabling memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing, gradient accumulation, or xFormers. A GPU with at least 30GB of memory or a TPU v3 is recommended for training with Flax.

This guide will explore the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) training script to help you become familiar with it, and how you can adapt it for your own use-case.

Before running the script, make sure you install the library from source:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then navigate to the example folder containing the training script and install the required dependencies for the script you're using:

<hfoptions id="installation">
<hfoption id="PyTorch">

```bash
cd examples/text_to_image
pip install -r requirements.txt
```

</hfoption>
<hfoption id="Flax">

```bash
cd examples/text_to_image
pip install -r requirements_flax.txt
```

</hfoption>
</hfoptions>

<Tip>

🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.

</Tip>

Initialize an 🤗 Accelerate environment:

```bash
accelerate config
```

To set up a default 🤗 Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.

## Script parameters

<Tip>

The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail.
If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) and let us know if you have any questions or concerns.

</Tip>

The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L193) function. This function provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.

For example, to speed up training with mixed precision using the fp16 format, add the `--mixed_precision` parameter to the training command:

```bash
accelerate launch train_text_to_image.py \
  --mixed_precision="fp16"
```

Some basic and important parameters include:

- `--pretrained_model_name_or_path`: the name of the model on the Hub or a local path to the pretrained model
- `--dataset_name`: the name of the dataset on the Hub or a local path to the dataset to train on
- `--image_column`: the name of the image column in the dataset to train on
- `--caption_column`: the name of the text column in the dataset to train on
- `--output_dir`: where to save the trained model
- `--push_to_hub`: whether to push the trained model to the Hub
- `--checkpointing_steps`: frequency of saving a checkpoint as the model trains; this is useful because if training is interrupted for any reason, you can continue from that checkpoint by adding `--resume_from_checkpoint` to your training command

### Min-SNR weighting

The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, and Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.

Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:

```bash
accelerate launch train_text_to_image.py \
  --snr_gamma=5.0
```

You can compare the loss surfaces for different `snr_gamma` values in this [Weights and Biases](https://wandb.ai/sayakpaul/text2image-finetune-minsnr) report. For smaller datasets, the effects of Min-SNR may not be as obvious compared to larger datasets.

## Training script

The dataset preprocessing code and training loop are found in the [`main()`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L490) function. If you need to adapt the training script, this is where you'll need to make your changes.

The `train_text_to_image` script starts by [loading a scheduler](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L543) and tokenizer.
You can choose to use a different scheduler here if you want: ```py noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) ``` Then the script [loads the UNet](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L619) model: ```py load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) ``` Next, the text and image columns of the dataset need to be preprocessed. The [`tokenize_captions`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L724) function handles tokenizing the inputs, and the [`train_transforms`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L742) function specifies the type of transforms to apply to the image. Both of these functions are bundled into `preprocess_train`: ```py def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["input_ids"] = tokenize_captions(examples) return examples ``` Lastly, the [training loop](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L878) handles everything else. It encodes images into latent space, adds noise to the latents, computes the text embeddings to condition on, updates the model parameters, and saves and pushes the model to the Hub. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. ## Launch the script Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀 <hfoptions id="training-inference"> <hfoption id="PyTorch"> Let's train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters. Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path). If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command. <Tip> To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment variables to the path of the dataset and where to save the model to. 
</Tip>

```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export dataset_name="lambdalabs/naruto-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$dataset_name \
  --use_ema \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --enable_xformers_memory_efficient_attention \
  --lr_scheduler="constant" --lr_warmup_steps=0 \
  --output_dir="sd-naruto-model" \
  --push_to_hub
```

</hfoption>
<hfoption id="Flax">

Training with Flax can be faster on TPUs and GPUs thanks to [@duongna21](https://github.com/duongna21). Flax is more efficient on a TPU, but GPU performance is also great.

Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path).

<Tip>

To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment variables to the path of the dataset and where to save the model to.

</Tip>

```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export dataset_name="lambdalabs/naruto-blip-captions"

python train_text_to_image_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$dataset_name \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --output_dir="sd-naruto-model" \
  --push_to_hub
```

</hfoption>
</hfoptions>

Once training is complete, you can use your newly trained model for inference:

<hfoptions id="training-inference">
<hfoption id="PyTorch">

```py
from diffusers import StableDiffusionPipeline
import torch

pipeline = StableDiffusionPipeline.from_pretrained("path/to/saved_model", torch_dtype=torch.float16, use_safetensors=True).to("cuda")

image = pipeline(prompt="yoda").images[0]
image.save("yoda-naruto.png")
```

</hfoption>
<hfoption id="Flax">

```py
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path/to/saved_model", dtype=jax.numpy.bfloat16)

prompt = "yoda naruto"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)

# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
# save the first generated image
images[0].save("yoda-naruto.png")
```

</hfoption>
</hfoptions>

## Next steps

Congratulations on training your own text-to-image model! To learn more about how to use your new model, the following guides may be helpful:

- Learn how to [load LoRA weights](../using-diffusers/loading_adapters#LoRA) for inference if you trained your model with LoRA (see the short sketch below).
- Learn more about how certain parameters like guidance scale or techniques such as prompt weighting can help you control inference in the [Text-to-image](../using-diffusers/conditional_image_generation) task guide.
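As a quick illustration of the first point, loading a trained LoRA for inference is a minimal sketch like the following (`path/to/lora-model` is a placeholder for your own LoRA output directory — substitute whatever `--output_dir` you used):

```py
from diffusers import StableDiffusionPipeline
import torch

pipeline = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")
# "path/to/lora-model" is a hypothetical path; point it at your own LoRA weights
pipeline.load_lora_weights("path/to/lora-model")
image = pipeline(prompt="yoda").images[0]
```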
diffusers/docs/source/en/training/text2image.md/0
{ "file_path": "diffusers/docs/source/en/training/text2image.md", "repo_id": "diffusers", "token_count": 4048 }
129
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DiffEdit [[open-in-colab]] Image editing typically requires providing a mask of the area to be edited. DiffEdit automatically generates the mask for you based on a text query, making it easier overall to create a mask without image editing software. The DiffEdit algorithm works in three steps: 1. the diffusion model denoises an image conditioned on some query text and reference text which produces different noise estimates for different areas of the image; the difference is used to infer a mask to identify which area of the image needs to be changed to match the query text 2. the input image is encoded into latent space with DDIM 3. the latents are decoded with the diffusion model conditioned on the text query, using the mask as a guide such that pixels outside the mask remain the same as in the input image This guide will show you how to use DiffEdit to edit images without manually creating a mask. Before you begin, make sure you have the following libraries installed: ```py # uncomment to install the necessary libraries in Colab #!pip install -q diffusers transformers accelerate ``` The [`StableDiffusionDiffEditPipeline`] requires an image mask and a set of partially inverted latents. The image mask is generated from the [`~StableDiffusionDiffEditPipeline.generate_mask`] function, and includes two parameters, `source_prompt` and `target_prompt`. These parameters determine what to edit in the image. For example, if you want to change a bowl of *fruits* to a bowl of *pears*, then: ```py source_prompt = "a bowl of fruits" target_prompt = "a bowl of pears" ``` The partially inverted latents are generated from the [`~StableDiffusionDiffEditPipeline.invert`] function, and it is generally a good idea to include a `prompt` or *caption* describing the image to help guide the inverse latent sampling process. The caption can often be your `source_prompt`, but feel free to experiment with other text descriptions! Let's load the pipeline, scheduler, inverse scheduler, and enable some optimizations to reduce memory usage: ```py import torch from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionDiffEditPipeline pipeline = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, safety_checker=None, use_safetensors=True, ) pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) pipeline.enable_model_cpu_offload() pipeline.enable_vae_slicing() ``` Load the image to edit: ```py from diffusers.utils import load_image, make_image_grid img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" raw_image = load_image(img_url).resize((768, 768)) raw_image ``` Use the [`~StableDiffusionDiffEditPipeline.generate_mask`] function to generate the image mask. 
You'll need to pass it the `source_prompt` and `target_prompt` to specify what to edit in the image: ```py from PIL import Image source_prompt = "a bowl of fruits" target_prompt = "a basket of pears" mask_image = pipeline.generate_mask( image=raw_image, source_prompt=source_prompt, target_prompt=target_prompt, ) Image.fromarray((mask_image.squeeze()*255).astype("uint8"), "L").resize((768, 768)) ``` Next, create the inverted latents and pass it a caption describing the image: ```py inv_latents = pipeline.invert(prompt=source_prompt, image=raw_image).latents ``` Finally, pass the image mask and inverted latents to the pipeline. The `target_prompt` becomes the `prompt` now, and the `source_prompt` is used as the `negative_prompt`: ```py output_image = pipeline( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, negative_prompt=source_prompt, ).images[0] mask_image = Image.fromarray((mask_image.squeeze()*255).astype("uint8"), "L").resize((768, 768)) make_image_grid([raw_image, mask_image, output_image], rows=1, cols=3) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption> </div> <div> <img class="rounded-xl" src="https://github.com/Xiang-cd/DiffEdit-stable-diffusion/blob/main/assets/target.png?raw=true"/> <figcaption class="mt-2 text-center text-sm text-gray-500">edited image</figcaption> </div> </div> ## Generate source and target embeddings The source and target embeddings can be automatically generated with the [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) model instead of creating them manually. Load the Flan-T5 model and tokenizer from the 🤗 Transformers library: ```py import torch from transformers import AutoTokenizer, T5ForConditionalGeneration tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large") model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", torch_dtype=torch.float16) ``` Provide some initial text to prompt the model to generate the source and target prompts. ```py source_concept = "bowl" target_concept = "basket" source_text = f"Provide a caption for images containing a {source_concept}. " "The captions should be in English and should be no longer than 150 characters." target_text = f"Provide a caption for images containing a {target_concept}. " "The captions should be in English and should be no longer than 150 characters." ``` Next, create a utility function to generate the prompts: ```py @torch.no_grad() def generate_prompts(input_prompt): input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids.to("cuda") outputs = model.generate( input_ids, temperature=0.8, num_return_sequences=16, do_sample=True, max_new_tokens=128, top_k=10 ) return tokenizer.batch_decode(outputs, skip_special_tokens=True) source_prompts = generate_prompts(source_text) target_prompts = generate_prompts(target_text) print(source_prompts) print(target_prompts) ``` <Tip> Check out the [generation strategy](https://huggingface.co/docs/transformers/main/en/generation_strategies) guide if you're interested in learning more about strategies for generating different quality text. </Tip> Load the text encoder model used by the [`StableDiffusionDiffEditPipeline`] to encode the text. 
You'll use the text encoder to compute the text embeddings: ```py import torch from diffusers import StableDiffusionDiffEditPipeline pipeline = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, use_safetensors=True ) pipeline.enable_model_cpu_offload() pipeline.enable_vae_slicing() @torch.no_grad() def embed_prompts(sentences, tokenizer, text_encoder, device="cuda"): embeddings = [] for sent in sentences: text_inputs = tokenizer( sent, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder(text_input_ids.to(device), attention_mask=None)[0] embeddings.append(prompt_embeds) return torch.concatenate(embeddings, dim=0).mean(dim=0).unsqueeze(0) source_embeds = embed_prompts(source_prompts, pipeline.tokenizer, pipeline.text_encoder) target_embeds = embed_prompts(target_prompts, pipeline.tokenizer, pipeline.text_encoder) ``` Finally, pass the embeddings to the [`~StableDiffusionDiffEditPipeline.generate_mask`] and [`~StableDiffusionDiffEditPipeline.invert`] functions, and pipeline to generate the image: ```diff from diffusers import DDIMInverseScheduler, DDIMScheduler from diffusers.utils import load_image, make_image_grid from PIL import Image pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" raw_image = load_image(img_url).resize((768, 768)) mask_image = pipeline.generate_mask( image=raw_image, - source_prompt=source_prompt, - target_prompt=target_prompt, + source_prompt_embeds=source_embeds, + target_prompt_embeds=target_embeds, ) inv_latents = pipeline.invert( - prompt=source_prompt, + prompt_embeds=source_embeds, image=raw_image, ).latents output_image = pipeline( mask_image=mask_image, image_latents=inv_latents, - prompt=target_prompt, - negative_prompt=source_prompt, + prompt_embeds=target_embeds, + negative_prompt_embeds=source_embeds, ).images[0] mask_image = Image.fromarray((mask_image.squeeze()*255).astype("uint8"), "L") make_image_grid([raw_image, mask_image, output_image], rows=1, cols=3) ``` ## Generate a caption for inversion While you can use the `source_prompt` as a caption to help generate the partially inverted latents, you can also use the [BLIP](https://huggingface.co/docs/transformers/model_doc/blip) model to automatically generate a caption. 
Load the BLIP model and processor from the 🤗 Transformers library: ```py import torch from transformers import BlipForConditionalGeneration, BlipProcessor processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16, low_cpu_mem_usage=True) ``` Create a utility function to generate a caption from the input image: ```py @torch.no_grad() def generate_caption(images, caption_generator, caption_processor): text = "a photograph of" inputs = caption_processor(images, text, return_tensors="pt").to(device="cuda", dtype=caption_generator.dtype) caption_generator.to("cuda") outputs = caption_generator.generate(**inputs, max_new_tokens=128) # offload caption generator caption_generator.to("cpu") caption = caption_processor.batch_decode(outputs, skip_special_tokens=True)[0] return caption ``` Load an input image and generate a caption for it using the `generate_caption` function: ```py from diffusers.utils import load_image img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" raw_image = load_image(img_url).resize((768, 768)) caption = generate_caption(raw_image, model, processor) ``` <div class="flex justify-center"> <figure> <img class="rounded-xl" src="https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"/> <figcaption class="text-center">generated caption: "a photograph of a bowl of fruit on a table"</figcaption> </figure> </div> Now you can drop the caption into the [`~StableDiffusionDiffEditPipeline.invert`] function to generate the partially inverted latents!
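For example, a minimal sketch that reuses the `pipeline` and `raw_image` from earlier in this guide, passing the generated caption as the `prompt`:

```py
# `caption` was produced by generate_caption above
inv_latents = pipeline.invert(prompt=caption, image=raw_image).latents
```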
diffusers/docs/source/en/using-diffusers/diffedit.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/diffedit.md", "repo_id": "diffusers", "token_count": 3847 }
130
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Scheduler features

The scheduler is an important component of any diffusion model because it controls the entire denoising (or sampling) process. There are many types of schedulers, some are optimized for speed and some for quality. With Diffusers, you can modify the scheduler configuration to use custom timestep schedules and sigmas, and to rescale the noise schedule. Changing these parameters can have profound effects on inference quality and speed.

This guide will demonstrate how to use these features to improve inference quality.

> [!TIP]
> Diffusers currently only supports the `timesteps` and `sigmas` parameters for a select list of schedulers and pipelines. Feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) if you want to extend these parameters to a scheduler and pipeline that does not currently support it!

## Timestep schedules

The timestep or noise schedule determines the amount of noise at each sampling step. The scheduler uses this to generate an image with the corresponding amount of noise at each step. The timestep schedule is generated from the scheduler's default configuration, but you can customize the scheduler to use new and optimized sampling schedules that aren't in Diffusers yet.

For example, [Align Your Steps (AYS)](https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/) is a method for optimizing a sampling schedule to generate a high-quality image in as little as 10 steps. The optimal [10-step schedule](https://github.com/huggingface/diffusers/blob/a7bf77fc284810483f1e60afe34d1d27ad91ce2e/src/diffusers/schedulers/scheduling_utils.py#L51) for Stable Diffusion XL is:

```py
from diffusers.schedulers import AysSchedules

sampling_schedule = AysSchedules["StableDiffusionXLTimesteps"]
print(sampling_schedule)
"[999, 845, 730, 587, 443, 310, 193, 116, 53, 13]"
```

You can use the AYS sampling schedule in a pipeline by passing it to the `timesteps` parameter.
```py
import torch
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, algorithm_type="sde-dpmsolver++")

prompt = "A cinematic shot of a cute little rabbit wearing a jacket and doing a thumbs up"
generator = torch.Generator(device="cpu").manual_seed(2487854446)
# `sampling_schedule` is the AYS schedule from the previous snippet
image = pipeline(
    prompt=prompt,
    negative_prompt="",
    generator=generator,
    timesteps=sampling_schedule,
).images[0]
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ays.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">AYS timestep schedule 10 steps</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/10.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">Linearly-spaced timestep schedule 10 steps</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/25.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">Linearly-spaced timestep schedule 25 steps</figcaption>
  </div>
</div>

## Timestep spacing

The way sample steps are selected in the schedule can affect the quality of the generated image, especially with respect to [rescaling the noise schedule](#rescale-noise-schedule), which can enable a model to generate much brighter or darker images. Diffusers provides three timestep spacing methods:

- `leading` creates evenly spaced steps
- `linspace` includes the first and last steps and evenly selects the remaining intermediate steps
- `trailing` only includes the last step and evenly selects the remaining intermediate steps starting from the end

It is recommended to use the `trailing` spacing method because it generates higher quality images with more details when there are fewer sample steps. But the difference in quality is not as obvious for more standard sample step values.

```py
import torch
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, timestep_spacing="trailing")

prompt = "A cinematic shot of a cute little black cat sitting on a pumpkin at night"
generator = torch.Generator(device="cpu").manual_seed(2487854446)
image = pipeline(
    prompt=prompt,
    negative_prompt="",
    generator=generator,
    num_inference_steps=5,
).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/trailing_spacing.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">trailing spacing after 5 steps</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/leading_spacing.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">leading spacing after 5 steps</figcaption>
  </div>
</div>

## Sigmas

The `sigmas` parameter is the amount of noise added at each timestep according to the timestep schedule. Like the `timesteps` parameter, you can customize the `sigmas` parameter to control how much noise is added at each step.
When you use a custom `sigmas` value, the `timesteps` are calculated from the custom `sigmas` value and the default scheduler configuration is ignored.

For example, you can manually pass the [sigmas](https://github.com/huggingface/diffusers/blob/6529ee67ec02fcf58d2fd9242164ea002b351d75/src/diffusers/schedulers/scheduling_utils.py#L55) for something like the 10-step AYS schedule from before to the pipeline.

```py
import torch

from diffusers import DiffusionPipeline, EulerDiscreteScheduler

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipeline = DiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)

sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.0]
prompt = "anthropomorphic capybara wearing a suit and working with a computer"
generator = torch.Generator(device='cuda').manual_seed(123)
image = pipeline(
    prompt=prompt,
    num_inference_steps=10,
    sigmas=sigmas,
    generator=generator
).images[0]
```

When you take a look at the scheduler's `timesteps` parameter, you'll see that it is the same as the AYS timestep schedule because the `timestep` schedule is calculated from the `sigmas`.

```py
print(f"timesteps: {pipeline.scheduler.timesteps}")
"timesteps: tensor([999., 845., 730., 587., 443., 310., 193., 116., 53., 13.], device='cuda:0')"
```

### Karras sigmas

> [!TIP]
> Refer to the scheduler API [overview](../api/schedulers/overview) for a list of schedulers that support Karras sigmas.
>
> Karras sigmas should not be used for models that weren't trained with them. For example, the base Stable Diffusion XL model shouldn't use Karras sigmas but the [DreamShaperXL](https://hf.co/Lykon/dreamshaper-xl-1-0) model can since it was trained with them.

Karras schedulers use the timestep schedule and sigmas from the [Elucidating the Design Space of Diffusion-Based Generative Models](https://hf.co/papers/2206.00364) paper. This scheduler variant applies a smaller amount of noise per step as it approaches the end of the sampling process compared to other schedulers, and can increase the level of detail in the generated image.

Enable Karras sigmas by setting `use_karras_sigmas=True` in the scheduler.
```py
import torch
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, algorithm_type="sde-dpmsolver++", use_karras_sigmas=True)

prompt = "A cinematic shot of a cute little rabbit wearing a jacket and doing a thumbs up"
generator = torch.Generator(device="cpu").manual_seed(2487854446)
image = pipeline(
    prompt=prompt,
    negative_prompt="",
    generator=generator,
).images[0]
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/karras_sigmas_true.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">Karras sigmas enabled</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/karras_sigmas_false.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">Karras sigmas disabled</figcaption>
  </div>
</div>

## Rescale noise schedule

In the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://hf.co/papers/2305.08891) paper, the authors discovered that common noise schedules allowed some signal to leak into the last timestep. This signal leakage at inference can cause models to only generate images with medium brightness. By enforcing a zero signal-to-noise ratio (SNR) for the timestep schedule and sampling from the last timestep, the model can be improved to generate very bright or dark images.

> [!TIP]
> For inference, you need a model that has been trained with *v_prediction*. To train your own model with *v_prediction*, add the following flag to the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [train_text_to_image_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) scripts.
>
> ```bash
> --prediction_type="v_prediction"
> ```

For example, load the [ptx0/pseudo-journey-v2](https://hf.co/ptx0/pseudo-journey-v2) checkpoint which was trained with `v_prediction` and the [`DDIMScheduler`]. Configure the following parameters in the [`DDIMScheduler`]:

* `rescale_betas_zero_snr=True` to rescale the noise schedule to zero SNR
* `timestep_spacing="trailing"` to start sampling from the last timestep

Set `guidance_rescale` in the pipeline to prevent over-exposure. A lower value increases brightness but some of the details may appear washed out.
```py
import torch
from diffusers import DiffusionPipeline, DDIMScheduler

pipeline = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2", use_safetensors=True)
pipeline.scheduler = DDIMScheduler.from_config(
    pipeline.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
)
pipeline.to("cuda")
prompt = "cinematic photo of a snowy mountain at night with the northern lights aurora borealis overhead, 35mm photograph, film, professional, 4k, highly detailed"
generator = torch.Generator(device="cpu").manual_seed(23)
image = pipeline(prompt, guidance_rescale=0.7, generator=generator).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/no-zero-snr.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">default Stable Diffusion v2-1 image</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/zero-snr.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">image with zero SNR and trailing timestep spacing enabled</figcaption>
  </div>
</div>
diffusers/docs/source/en/using-diffusers/scheduler_features.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/scheduler_features.md", "repo_id": "diffusers", "token_count": 4053 }
131
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

[[open-in-colab]]

# Quicktour

Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a target such as an image or audio. This has sparked tremendous interest in generative AI - you've probably seen examples of diffusion-generated images on the internet. 🧨 Diffusers is a library aimed at making diffusion models broadly accessible to everyone.

Whether you're a developer or an everyday user, this quicktour introduces 🧨 Diffusers and helps you start generating quickly! There are three main components of the library:

* The [`DiffusionPipeline`] is a high-level end-to-end class designed to rapidly generate samples from pretrained diffusion models.
* Popular pretrained [model](./api/models) architectures and modules that can be used as building blocks for creating diffusion systems.
* Many different [schedulers](./api/schedulers/overview) - algorithms that control how noise is added for training, and how denoised images are generated during inference.

This quicktour shows you how to use the [`DiffusionPipeline`] for generation, and then explains how to combine a model and scheduler to replicate what's happening inside the [`DiffusionPipeline`].

<Tip>

This quicktour is a simplified version of the 🧨 Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) to help you get started quickly. If you want to learn more about 🧨 Diffusers' goals, design philosophy, and additional details about its core API, check out the notebook!

</Tip>

Before you begin, make sure you have all the necessary libraries installed:

```py
# uncomment to install the necessary libraries in Colab
#!pip install --upgrade diffusers accelerate transformers
```

- [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) speeds up model loading for generation and training.
- [🤗 Transformers](https://huggingface.co/docs/transformers/index) is required to run the most popular diffusion models, such as [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview).

## DiffusionPipeline

The [`DiffusionPipeline`] is the easiest way to use a pretrained diffusion system for generation. It is an end-to-end system containing the model and the scheduler. You can use the [`DiffusionPipeline`] out-of-the-box for many tasks. Take a look at the table in the [🧨 Diffusers Summary](./api/pipelines/overview#diffusers-summary) for a complete list of supported tasks.

| **Task** | **Description** | **Pipeline** |
|---|---|---|
| Unconditional Image Generation | generate an image from Gaussian noise | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) |
| Text-Guided Image Generation | generate an image given a text prompt | [conditional_image_generation](./using-diffusers/conditional_image_generation) |
| Text-Guided Image-to-Image Translation | generate a new image from an image and a text prompt | [img2img](./using-diffusers/img2img) |
| Text-Guided Image-Inpainting | fill in the masked part of an image, given the image, the mask, and a text prompt | [inpaint](./using-diffusers/inpaint) |
| Text-Guided Depth-to-Image Translation | generate an image from a text prompt while preserving structure through depth estimation | [depth2img](./using-diffusers/depth2img) |

Start by creating an instance of a [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download. You can use the [`DiffusionPipeline`] with any [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) stored on the Hugging Face Hub. In this quicktour, you'll load the [`stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) checkpoint for text-to-image generation.

<Tip warning={true}>

For [Stable Diffusion] models, please carefully read the [license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) before running the model. 🧨 Diffusers implements a
[`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) to prevent offensive or harmful content, but the model's improved image generation capabilities mean that potentially harmful content can still be generated.

</Tip>

Load the model with the [`~DiffusionPipeline.from_pretrained`] method:

```python
>>> from diffusers import DiffusionPipeline

>>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True)
```

The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. The Stable Diffusion pipeline is composed of the [`UNet2DConditionModel`] and [`PNDMScheduler`], among other things:

```py
>>> pipeline
StableDiffusionPipeline {
  "_class_name": "StableDiffusionPipeline",
  "_diffusers_version": "0.13.1",
  ...,
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  ...,
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```

This model consists of roughly 1.4 billion parameters, so we strongly recommend running the pipeline on a GPU. Just like in PyTorch, you can move the generator object to a GPU:

```python
>>> pipeline.to("cuda")
```

Now you can pass a text prompt to the `pipeline` to generate an image, and then access the denoised image. By default, the image output is wrapped in a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object.

```python
>>> image = pipeline("An image of a squirrel in Picasso style").images[0]
>>> image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/>
</div>

Save the image with the `save` function:

```python
>>> image.save("image_of_squirrel_painting.png")
```

### Local pipeline

You can also use the pipeline locally. The only difference is that you need to download the weights first:

```bash
!git lfs install
!git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5
```

Then load the saved weights into the pipeline:

```python
>>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True)
```

Now you can run the pipeline just like in the section above.

### Swapping schedulers

Different schedulers come with different denoising speed and quality trade-offs. The best way to find out which one works best for you is to try them out! One of the main features of 🧨 Diffusers is making it easy to switch between schedulers. For example, to replace the default [`PNDMScheduler`] with the [`EulerDiscreteScheduler`], load it with the [`~diffusers.ConfigMixin.from_config`] method:

```py
>>> from diffusers import EulerDiscreteScheduler

>>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True)
>>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
```

Try generating an image with the new scheduler and see if you notice a difference!
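For example, a minimal sketch that reuses the `pipeline` from above with the new scheduler:

```python
>>> # same prompt as before, now denoised with the EulerDiscreteScheduler
>>> image = pipeline("An image of a squirrel in Picasso style").images[0]
>>> image
```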
In the next section, you'll take a closer look at the components - the model and the scheduler - that make up the [`DiffusionPipeline`], and learn how to use these components to generate an image of a cat.

## Models

Most models take a noisy sample, and at each timestep predict the *noise residual* (other models learn to predict the previous sample directly, or the velocity or [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)). You can also mix and match models to create other diffusion systems.

Models are initiated with the [`~ModelMixin.from_pretrained`] method, which also locally caches the model so it is faster the next time you load it. For this quicktour, you'll load the [`UNet2DModel`], a basic image generation model with a checkpoint trained on cat images:

```py
>>> from diffusers import UNet2DModel

>>> repo_id = "google/ddpm-cat-256"
>>> model = UNet2DModel.from_pretrained(repo_id, use_safetensors=True)
```

To access the model parameters, call `model.config`:

```py
>>> model.config
```

The model configuration is a 🧊frozen🧊 dictionary, which means those parameters can't be changed after the model is created. This is intentional and ensures that the parameters used to define the model architecture at the start remain the same. Other parameters can still be adjusted during generation.

Some of the most important parameters are:

* `sample_size`: the height and width dimension of the input sample.
* `in_channels`: the number of input channels of the input sample.
* `down_block_types` and `up_block_types`: the types of downsampling and upsampling blocks used to create the UNet architecture.
* `block_out_channels`: the number of output channels of the downsampling blocks; also used in reverse order for the number of input channels of the upsampling blocks.
* `layers_per_block`: the number of ResNet blocks present in each UNet block.

To use the model for generation, create a random Gaussian sample with the shape of an image. It should have a `batch` axis because the model can receive multiple random noise samples, a `channel` axis corresponding to the number of input channels, and a `sample_size` axis for the height and width of the image:

```py
>>> import torch

>>> torch.manual_seed(0)

>>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
>>> noisy_sample.shape
torch.Size([1, 3, 256, 256])
```

For generation, pass the noisy image and a `timestep` to the model. The `timestep` indicates how noisy the input image is, which helps the model determine its position in the diffusion process. Use the `sample` method to get the model output:

```py
>>> with torch.no_grad():
...     noisy_residual = model(sample=noisy_sample, timestep=2).sample
```

To generate actual examples though, you'll need a scheduler to guide the denoising process. In the next section, you'll learn how to couple a model with a scheduler.

## Schedulers

Schedulers manage going from a noisy sample to a less noisy sample given the model output - in this case, the `noisy_residual`.

<Tip>

🧨 Diffusers is a toolbox for building diffusion systems. While the [`DiffusionPipeline`] is a convenient way to get started with a prebuilt diffusion system, you can also choose your own model and scheduler components separately to build a custom diffusion system.

</Tip>

For this quicktour, you'll instantiate the [`DDPMScheduler`] with the [`~diffusers.ConfigMixin.from_config`] method:

```py
>>> from diffusers import DDPMScheduler

>>> scheduler = DDPMScheduler.from_config(repo_id)
>>> scheduler
DDPMScheduler {
  "_class_name": "DDPMScheduler",
  "_diffusers_version": "0.13.1",
  "beta_end": 0.02,
  "beta_schedule": "linear",
  "beta_start": 0.0001,
  "clip_sample": true,
  "clip_sample_range": 1.0,
  "num_train_timesteps": 1000,
  "prediction_type": "epsilon",
  "trained_betas": null,
  "variance_type": "fixed_small"
}
```

<Tip>

💡 Notice how the scheduler is instantiated from a configuration. Unlike a model, a scheduler does not have trainable weights and is parameter-free!

</Tip>

Some of the most important parameters are:

* `num_train_timesteps`: the length of the denoising process, or in other words, the number of timesteps required to process random Gaussian noise into a data sample.
* `beta_schedule`: the type of noise schedule to use for generation and training.
* `beta_start` and `beta_end`: the start and end noise values for the noise schedule.

To predict a slightly less noisy image, pass the following to the scheduler's [`~diffusers.DDPMScheduler.step`] method: the model output, the `timestep`, and the current `sample`.

```py
>>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample
>>> less_noisy_sample.shape
```

The `less_noisy_sample` can be passed to the next `timestep`, where it'll get even less noisy!

Let's bring it all together now and visualize the entire denoising process.

First, create a function that postprocesses the denoised image and displays it as a `PIL.Image`:

```py
>>> import PIL.Image
>>> import numpy as np


>>> def display_sample(sample, i):
...     image_processed = sample.cpu().permute(0, 2, 3, 1)
...     image_processed = (image_processed + 1.0) * 127.5
...     image_processed = image_processed.numpy().astype(np.uint8)

...     image_pil = PIL.Image.fromarray(image_processed[0])
...     display(f"Image at step {i}")
...     display(image_pil)
```

To speed up the denoising process, move the input and the model to a GPU:

```py
>>> model.to("cuda")
>>> noisy_sample = noisy_sample.to("cuda")
```

Now create a denoising loop that predicts the residual of the less noisy sample and computes the less noisy sample with the scheduler:

```py
>>> import tqdm

>>> sample = noisy_sample

>>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)):
...     # 1. predict noise residual
...     with torch.no_grad():
...         residual = model(sample, t).sample

...     # 2. compute less noisy image and set x_t -> x_t-1
...     sample = scheduler.step(residual, t, sample).prev_sample

...     # 3. optionally look at image
...     if (i + 1) % 50 == 0:
...         display_sample(sample, i + 1)
```

Sit back and watch as a cat is generated from nothing but noise! 😻

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/>
</div>

## Next steps

Hopefully, you generated some cool images with 🧨 Diffusers in this quicktour! For your next steps, you can:

* Train or finetune a model to generate your own images in the [training](./tutorials/basic_training) tutorial.
* See example official and community [training or finetuning scripts](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) for a variety of use cases.
* Learn more about loading, accessing, changing, and comparing schedulers in the [Using different Schedulers](./using-diffusers/schedulers) guide.
* Explore prompt engineering, and speed and memory optimizations, along with tips and tricks for generating higher-quality images, in the [Stable Diffusion](./stable_diffusion) guide.
* Dive deeper into speeding up 🧨 Diffusers with guides on [optimized PyTorch on a GPU](./optimization/fp16), and inference guides for running [Stable Diffusion on Apple Silicon (M1/M2)](./optimization/mps) and [ONNX Runtime](./optimization/onnx).
diffusers/docs/source/ja/quicktour.md/0
{ "file_path": "diffusers/docs/source/ja/quicktour.md", "repo_id": "diffusers", "token_count": 7859 }
132
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# How to use Stable Diffusion on Apple Silicon (M1/M2)

Diffusers is compatible with Apple silicon for Stable Diffusion inference, using the PyTorch `mps` device. These are the steps you need to follow to use your M1 or M2 computer with Stable Diffusion.

## Requirements

- Mac computer with Apple silicon (M1/M2) hardware.
- macOS 12.6 or later (13.0 or later recommended).
- arm64 version of Python.
- PyTorch 2.0 (recommended) or 1.13 (the minimum version that supports `mps`). You can install it with `pip` or `conda` using the instructions at https://pytorch.org/get-started/locally/.

## Inference Pipeline

The snippet below demonstrates how to use the `mps` backend to move the Stable Diffusion pipeline to your M1 or M2 device, using the familiar `to()` interface.

<Tip warning={true}>

**If you are using PyTorch 1.13**, we recommend "priming" the pipeline with an additional one-time pass through it. This is a temporary workaround for a strange issue we found: the first inference pass produces slightly different results than subsequent ones. You only need to do this pass once, and it's ok to use just one inference step and discard the result.

</Tip>

We strongly recommend using PyTorch 2 or later, as it solves a number of problems, including the one described in the previous tip.

```python
# make sure you're logged in with `hf auth login`
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
pipe = pipe.to("mps")

# Recommended if your computer has 64 GB of RAM or less
pipe.enable_attention_slicing()

prompt = "a photo of an astronaut riding a horse on mars"

# First-time "warmup" pass (see the explanation above)
_ = pipe(prompt, num_inference_steps=1)

# Results match those from the CPU device after the warmup pass.
image = pipe(prompt).images[0]
```

## Performance Recommendations

M1/M2 performance is very sensitive to memory pressure. The system will automatically swap if it needs to, but performance degrades significantly when it does.

In particular, if your computer has less than 64GB of system RAM, or if you generate images at non-standard resolutions larger than 512 × 512 pixels, we recommend using *attention slicing* to reduce memory pressure and prevent swapping during inference. Attention slicing performs the costly attention operation in multiple steps instead of all at once. It usually has a performance impact of ~20% on computers without universal memory, but we have observed *better performance* on most Apple Silicon computers with less than 64GB of RAM.

```python
pipeline.enable_attention_slicing()
```

## Known Issues

- Generating multiple prompts in a batch [crashes or doesn't work reliably](https://github.com/huggingface/diffusers/issues/363). We believe this is related to the [`mps` backend in PyTorch](https://github.com/pytorch/pytorch/issues/84039). This issue is being worked on, but for now we recommend iterating instead of batching.
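For example, instead of passing a list of prompts in a single call, a simple loop is a reliable workaround (a minimal sketch; `prompts` here is just a hypothetical list of strings, and `pipe` is the pipeline from the snippet above):

```python
prompts = [
    "a photo of an astronaut riding a horse on mars",
    "a watercolor painting of a fox in a forest",
]
# Generate one image per prompt instead of batching them together
images = [pipe(prompt).images[0] for prompt in prompts]
```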
diffusers/docs/source/ko/optimization/mps.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/mps.md", "repo_id": "diffusers", "token_count": 2535 }
133
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# 🧨 Diffusers Training Examples

In this chapter, we'll go through example code for a variety of use cases and learn how to use the `diffusers` library effectively with them.

**Note**: If you're looking for official example code, take a look [here](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines)!

The examples covered here aim to be:

- **Self-contained**: All dependency packages used by the example code can be installed with a `pip install` command. The dependencies are also conveniently listed in a `requirements.txt` file, so you can install them all at once with `pip install -r requirements.txt`. Example: [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt)
- **Easy-to-tweak**: We try to cover as many use cases as possible. But examples are, in the end, just examples. Simply copy-pasting the example code provided here won't readily solve the problem you're facing. In other words, you'll need to adapt the code to some degree to fit your situation and needs. To help with this, most training examples provide the data preprocessing and training loop code together, so you can easily modify them to your needs.
- **Beginner-friendly**: This chapter is written to help you build an overall understanding of diffusion models and the `diffusers` library. Therefore, among the latest SOTA (state-of-the-art) methods for diffusion models, we intentionally leave out the ones we judge to be too difficult for beginners.
- **One-purpose-only**: Each example here should cover only one task. Of course, there are tasks with similar modeling processes, such as image super-resolution and image modification, but we decided that covering one task per example is easier to understand.

We provide official examples covering the most representative diffusion model tasks. The *official* examples are actively maintained by the `diffusers` maintainers, and we strive to rigorously follow the philosophy defined above. If you think such an example is absolutely necessary, feel free to open a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or submit a [Pull Request](https://github.com/huggingface/diffusers/compare) directly. We always welcome them!

The training examples show how to pretrain or fine-tune diffusion models for a variety of tasks. The following examples are currently supported:

- [Unconditional Training](./unconditional_training)
- [Text-to-Image Training](./text2image)
- [Text Inversion](./text_inversion)
- [Dreambooth](./dreambooth)

To perform memory-efficient attention, install [xFormers](../optimization/xformers) if possible (see the install command right below). It speeds up training and reduces memory pressure.
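For example, assuming a recent CUDA-enabled PyTorch environment, installation is a single command:

```bash
pip install xformers
```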
| Task | 🤗 Accelerate | 🤗 Datasets | Colab |
|---|:---:|:---:|:---:|
| [**Unconditional Image Generation**](./unconditional_training) | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) |
| [**Text-to-Image fine-tuning**](./text2image) | ✅ | ✅ | |
| [**Textual Inversion**](./text_inversion) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) |
| [**Dreambooth**](./dreambooth) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) |
| [**Training with LoRA**](./lora) | ✅ | - | - |
| [**ControlNet**](./controlnet) | ✅ | ✅ | - |
| [**InstructPix2Pix**](./instructpix2pix) | ✅ | ✅ | - |
| [**Custom Diffusion**](./custom_diffusion) | ✅ | ✅ | - |

## Community

In addition to the official examples, we also provide **community examples**, which are maintained by our community. A community example can consist of a training example or an inference pipeline. For community examples, we apply the philosophy defined above more loosely, and we can't guarantee maintenance for every issue with them. Examples that are useful but not yet popular enough, or that don't fit our philosophy, live in the [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) folder.

**Note**: Community examples can be a [great first contribution](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for anyone hoping to contribute to `diffusers`.

## Important notes

To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install the dependencies that the example scripts require. To do this, create a new virtual environment and run:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then `cd` into the directory of the example you want to run and execute:

```bash
pip install -r requirements.txt
```
diffusers/docs/source/ko/training/overview.md/0
{ "file_path": "diffusers/docs/source/ko/training/overview.md", "repo_id": "diffusers", "token_count": 4741 }
134
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Load different Stable Diffusion formats

Stable Diffusion models are available in different formats depending on the framework they're trained and saved with, and where you download them from. Converting these formats for use in 🤗 Diffusers lets you use all the features supported by the library, such as [using different schedulers](schedulers) for inference, building your own custom pipeline, and a variety of techniques and methods for optimizing inference speed.

<Tip>

We recommend the `.safetensors` format because it is much safer than traditional pickled files, which are vulnerable and can be exploited to execute code on your machine. (Learn more in the guide on loading safetensors.)

</Tip>

This guide explains how to convert other Stable Diffusion formats to be compatible with 🤗 Diffusers.

## PyTorch .ckpt

The checkpoint, or `.ckpt`, format is commonly used to store models. A `.ckpt` file contains the entire model and is typically several GBs in size. While you can load and use a `.ckpt` file directly with the [`~StableDiffusionPipeline.from_ckpt`] method, it is generally better to convert the `.ckpt` file to 🤗 Diffusers so both formats are available.

There are two options for converting a `.ckpt` file: use a Space to convert the checkpoint, or convert the `.ckpt` file with a script.

### Convert with a Space

The easiest and most convenient way to convert a `.ckpt` file is to use the SD to Diffusers Space. You can follow the instructions on the Space to convert the `.ckpt` file.

This approach works well for basic models, but it may struggle with more customized models. You'll know the Space failed if it returns an empty pull request or an error. In that case, you can try converting the `.ckpt` file with a script.

### Convert with a script

🤗 Diffusers provides a conversion script for converting `.ckpt` files. This approach is more reliable than the Space above.

Before you start, make sure you have a local clone of 🤗 Diffusers to run the script, and log in to your Hugging Face account so you can open pull requests and push the converted model to the Hub.

```bash
hf auth login
```

To use the script:

1. Git clone the repository containing the `.ckpt` file you want to convert. For this example, let's convert this TemporalNet `.ckpt` file:

```bash
git lfs install
git clone https://huggingface.co/CiaraRowles/TemporalNet
```

2. Open a pull request on the repository where you're converting the checkpoint:

```bash
cd TemporalNet && git fetch origin refs/pr/13:pr/13
git checkout pr/13
```

3. There are several input arguments to configure in the conversion script, but the most important ones are:

   - `checkpoint_path`: the path of the `.ckpt` file to convert.
   - `original_config_file`: a YAML file defining the configuration of the original architecture. If you can't find this file, try searching for the YAML file in the GitHub repository where you found the `.ckpt` file.
   - `dump_path`: the path for the converted model.

   For example, you can take the cldm_v15.yaml file from the ControlNet repository because the TemporalNet model is a Stable Diffusion v1.5 and ControlNet model.

4. Now you can run the script to convert the `.ckpt` file:

```bash
python ../diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path temporalnetv3.ckpt --original_config_file cldm_v15.yaml --dump_path ./ --controlnet
```

5. Once the conversion is done, upload your converted model and test out the resulting [pull request](https://huggingface.co/CiaraRowles/TemporalNet/discussions/13)!

```bash
git push origin pr/13:refs/pr/13
```

## Keras .pb or .h5

🧪 This is an experimental feature. Only Stable Diffusion v1 checkpoints are currently supported by the Convert KerasCV Space.

[KerasCV](https://keras.io/keras_cv/) supports training for [Stable Diffusion](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion) v1 and v2.
However, it offers limited support for experimenting with Stable Diffusion models for inference and deployment, whereas 🤗 Diffusers has a more complete set of features for this purpose, such as different [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers), [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), and [other optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16).

The [Convert KerasCV](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) Space converts the `.pb` or `.h5` files to PyTorch, and then wraps them in a [`StableDiffusionPipeline`] so they're ready for inference. The converted checkpoint is stored in a repository on the Hugging Face Hub.

For this example, let's convert the [`sayakpaul/textual-inversion-kerasio`](https://huggingface.co/sayakpaul/textual-inversion-kerasio/tree/main) checkpoint, which was trained with textual inversion. It uses the special token `<my-funny-cat>` to personalize images with cats.

The Convert KerasCV Space allows you to input the following:

- Your Hugging Face token.
- Paths to download the UNet and text encoder weights from. Depending on how the model was trained, you don't necessarily need to provide the paths to both the UNet and text encoder. For example, textual inversion only requires the embeddings from the text encoder, and a text-to-image model only requires the UNet weights.
- Placeholder token, which only applies to textual inversion models.
- The `output_repo_prefix`, the name of the repository where the converted model is stored.

Click the **Submit** button to automatically convert the KerasCV checkpoint! Once the checkpoint is successfully converted, you'll see a link to the new repository containing the converted checkpoint. Follow the link to the new repository, and you'll see that the Convert KerasCV Space has generated a model card with an inference widget to try out the converted model.

If you prefer to run inference with code, click on the **Use in Diffusers** button in the upper right corner of the model card to copy and paste the example code:

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
```

Then you can generate an image like:

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
pipeline.to("cuda")

placeholder_token = "<my-funny-cat-token>"
prompt = f"two {placeholder_token} getting married, photorealistic, high quality"
image = pipeline(prompt, num_inference_steps=50).images[0]
```

## A1111 LoRA files

[Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (A1111) is a popular web UI for Stable Diffusion that supports model sharing platforms like [Civitai](https://civitai.com/). Models trained with the LoRA technique are especially popular because they're fast to train and have a much smaller file size than a fully fine-tuned model. 🤗 Diffusers supports loading A1111 LoRA checkpoints with [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]:

```py
from diffusers import DiffusionPipeline, UniPCMultistepScheduler
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "andite/anything-v4.0", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
```

Download a LoRA checkpoint from Civitai; this example uses the [Howls Moving Castle,Interior/Scenery LoRA (Ghibli Stlye)](https://civitai.com/models/14605?modelVersionId=19998) checkpoint, but feel free to try out any LoRA checkpoint!
```bash
!wget https://civitai.com/api/download/models/19998 -O howls_moving_castle.safetensors
```

Load the LoRA checkpoint into the pipeline with the [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] method:

```py
pipeline.load_lora_weights(".", weight_name="howls_moving_castle.safetensors")
```

Now you can use the pipeline to generate images:

```py
prompt = "masterpiece, illustration, ultra-detailed, cityscape, san francisco, golden gate bridge, california, bay area, in the snow, beautiful detailed starry sky"
negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture"

images = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=512,
    height=512,
    num_inference_steps=25,
    num_images_per_prompt=4,
    generator=torch.manual_seed(0),
).images
```

Finally, create a helper function to display the images:

```py
from PIL import Image


def image_grid(imgs, rows=2, cols=2):
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


image_grid(images)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/a1111-lora-sf.png" />
</div>
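As a follow-up, the strength of an A1111 LoRA can usually be dialed up or down at inference time, and the weights can be removed again to recover the base model. This is a hedged sketch: the `scale` entry in `cross_attention_kwargs` applies to diffusers versions that route LoRA scaling through the attention processors, so verify against the LoRA docs for your version.

```py
# Weaker LoRA influence: scale < 1.0 blends the LoRA weights with the base weights
image = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    cross_attention_kwargs={"scale": 0.5},
    generator=torch.manual_seed(0),
).images[0]

# Remove the LoRA weights to return the pipeline to its original state
pipeline.unload_lora_weights()
```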
diffusers/docs/source/ko/using-diffusers/other-formats.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/other-formats.md", "repo_id": "diffusers", "token_count": 6828 }
135
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Community projects

Welcome to Community Projects. This space showcases the incredible work and innovative applications created by our vibrant community using the `diffusers` library.

This section aims to:

- Highlight diverse and inspiring projects built with `diffusers`
- Foster knowledge sharing within our community
- Provide practical examples of how to leverage `diffusers`

Happy exploring, and thank you for being part of the Diffusers community!

<table>
  <tr>
    <th>Project Name</th>
    <th>Description</th>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/carson-katri/dream-textures"> dream-textures </a></td>
    <td>Stable Diffusion built-in to Blender</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/megvii-research/HiDiffusion"> HiDiffusion </a></td>
    <td>Increases the resolution and speed of diffusion models by only adding a single line of code</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/lllyasviel/IC-Light"> IC-Light </a></td>
    <td>IC-Light is a project to manipulate the illumination of images</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/InstantID/InstantID"> InstantID </a></td>
    <td>InstantID: Zero-shot identity-preserving generation in seconds</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/Sanster/IOPaint"> IOPaint </a></td>
    <td>Image inpainting tool powered by SOTA AI models. Remove any unwanted object, defect, or person from your pictures, or erase and replace (powered by stable diffusion) anything on your pictures.</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/bmaltais/kohya_ss"> Kohya </a></td>
    <td>Gradio GUI for Kohya's Stable Diffusion trainers</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/magic-research/magic-animate"> MagicAnimate </a></td>
    <td>MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/levihsu/OOTDiffusion"> OOTDiffusion </a></td>
    <td>Controllable virtual try-on based on latent diffusion</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/vladmandic/automatic"> SD.Next </a></td>
    <td>SD.Next: Advanced implementation of Stable Diffusion and other diffusion-based generative image models</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/ashawkey/stable-dreamfusion"> stable-dreamfusion </a></td>
    <td>Text-to-3D & image-to-3D & mesh exportation with NeRF + Diffusion</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/HVision-NKU/StoryDiffusion"> StoryDiffusion </a></td>
    <td>StoryDiffusion can create a magic story by generating consistent images and videos.</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/cumulo-autumn/StreamDiffusion"> StreamDiffusion </a></td>
    <td>A pipeline-level solution for real-time interactive generation</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/Netwrck/stable-diffusion-server"> Stable Diffusion Server </a></td>
    <td>A server configured for inpainting/generation/img2img with one stable diffusion model</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/suzukimain/auto_diffusers"> Model Search </a></td>
    <td>Search models on Civitai and Hugging Face</td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td><a href="https://github.com/beinsezii/skrample"> Skrample </a></td>
    <td>Fully modular scheduler functions with first-class diffusers integration.</td>
  </tr>
</table>
diffusers/docs/source/zh/community_projects.md/0
{ "file_path": "diffusers/docs/source/zh/community_projects.md", "repo_id": "diffusers", "token_count": 2255 }
136
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Overview

> [!WARNING]
> Modular Diffusers is under active development and its API may change.

Modular Diffusers is a unified pipeline system that simplifies your workflows with *pipeline blocks*.

- Blocks are reusable, and you only need to create the blocks that are unique to your pipeline.
- Blocks can be mixed and matched to adapt a pipeline, or to create pipelines for a specific workflow or multiple workflows.

The Modular Diffusers documentation is organized as shown below.

## Quickstart

- A [quickstart](./quickstart) demonstrates how to implement an example workflow with Modular Diffusers.

## ModularPipelineBlocks

- [States](./modular_diffusers_states) explains how data is shared and communicated between blocks and the [`ModularPipeline`].
- [ModularPipelineBlocks](./pipeline_block) are the most basic unit of a [`ModularPipeline`], and this guide shows you how to create one.
- [SequentialPipelineBlocks](./sequential_pipeline_blocks) are a type of block that chains multiple blocks so they run one after another, passing data along the chain. This guide shows you how to create [`~modular_pipelines.SequentialPipelineBlocks`] and how they connect and work together.
- [LoopSequentialPipelineBlocks](./loop_sequential_pipeline_blocks) are a type of block that runs a series of blocks in a loop. This guide shows you how to create [`~modular_pipelines.LoopSequentialPipelineBlocks`].
- [AutoPipelineBlocks](./auto_pipeline_blocks) are a type of block that automatically selects which blocks to run based on the inputs. This guide shows you how to create [`~modular_pipelines.AutoPipelineBlocks`].

## ModularPipeline

- [ModularPipeline](./modular_pipeline) shows you how to create and convert pipeline blocks into an executable [`ModularPipeline`].
- [ComponentsManager](./components_manager) shows you how to manage and reuse components across multiple pipelines.
- [Guiders](./guiders) shows you how to use different guidance methods in a pipeline.
diffusers/docs/source/zh/modular_diffusers/overview.md/0
{ "file_path": "diffusers/docs/source/zh/modular_diffusers/overview.md", "repo_id": "diffusers", "token_count": 1430 }
137
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Compile and offload quantized models

Optimizing models often involves a trade-off between [inference speed](./fp16) and [memory usage](./memory). For instance, while [caching](./cache) can boost inference speed, it also increases memory consumption because it needs to store the outputs of intermediate attention layers. A more balanced optimization strategy combines a quantized model, [torch.compile](./fp16#torchcompile), and various [offloading methods](./memory#offloading).

> [!TIP]
> Check out the [torch.compile](./fp16#torchcompile) guide to learn more about compilation and how it can be applied here. For example, regional compilation can significantly reduce compile time without giving up any speedup.

For image generation, combining quantization and [model offloading](./memory#model-offloading) often gives the best trade-off between quality, speed, and memory. Group offloading doesn't work as well for image generation because it is usually not possible to *fully* overlap data transfer if the compute kernel finishes faster, which causes some communication overhead between the CPU and GPU.

For video generation, combining quantization and [group offloading](./memory#group-offloading) tends to be better because video models are more compute-bound.

The table below compares combinations of optimization strategies and their impact on latency and memory usage for Flux.

| combination | latency (s) | memory usage (GB) |
|---|---|---|
| quantization | 32.602 | 14.9453 |
| quantization, torch.compile | 25.847 | 14.9448 |
| quantization, torch.compile, model CPU offloading | 32.312 | 12.2369 |

<small>These results were benchmarked on Flux with an RTX 4090. The transformer and text_encoder components are quantized. Refer to the [benchmarking script](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d) if you're interested in evaluating your own model.</small>

This guide will show you how to compile and offload a quantized model with [bitsandbytes](../quantization/bitsandbytes#torchcompile). Make sure you are using [PyTorch nightly](https://pytorch.org/get-started/locally/) and the latest version of bitsandbytes.

```bash
pip install -U bitsandbytes
```

## Quantization and torch.compile

Start by [quantizing](../quantization/overview) a model to reduce the memory required to store it, and [compiling](./fp16#torchcompile) it to accelerate inference.

Configure the [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) setting `capture_dynamic_output_shape_ops = True` to handle dynamic outputs when compiling bitsandbytes models.

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig

torch._dynamo.config.capture_dynamic_output_shape_ops = True

# quantize
pipeline_quant_config = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_4bit",
    quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
    components_to_quantize=["transformer", "text_encoder_2"],
)
pipeline = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    quantization_config=pipeline_quant_config,
    torch_dtype=torch.bfloat16,
).to("cuda")

# compile
pipeline.transformer.to(memory_format=torch.channels_last)
pipeline.transformer.compile(mode="max-autotune", fullgraph=True)
pipeline("""
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
).images[0]
```

## Quantization, torch.compile, and offloading

In addition to quantization and torch.compile, try offloading if you need to reduce memory usage further. Offloading moves various layers or model components from the CPU to the GPU as needed for computation.

Configure Dynamo's `cache_size_limit` during offloading to avoid excessive recompilation, and set `capture_dynamic_output_shape_ops = True` to handle dynamic outputs when compiling bitsandbytes models.

<hfoptions id="offloading">
<hfoption id="model CPU offloading">

[Model CPU offloading](./memory#model-offloading) moves an individual pipeline component, such as the transformer model, to the GPU when it is needed for computation. Otherwise, it is offloaded to the CPU.

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig

torch._dynamo.config.cache_size_limit = 1000
torch._dynamo.config.capture_dynamic_output_shape_ops = True

# quantize
pipeline_quant_config = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_4bit",
    quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
    components_to_quantize=["transformer", "text_encoder_2"],
)
pipeline = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    quantization_config=pipeline_quant_config,
    torch_dtype=torch.bfloat16,
).to("cuda")

# model CPU offloading
pipeline.enable_model_cpu_offload()

# compile
pipeline.transformer.compile()

pipeline(
    "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain"
).images[0]
```

</hfoption>
<hfoption id="group offloading">

[Group offloading](./memory#group-offloading) moves the internal layers of an individual pipeline component, such as the transformer model, to the GPU for computation and offloads them when they aren't required. At the same time, it uses the [CUDA stream](./memory#cuda-stream) feature to prefetch the next layer for execution.

By overlapping computation and data transfer, it is faster than model CPU offloading while also saving memory.

```py
# pip install ftfy
import torch
from diffusers import AutoModel, DiffusionPipeline
from diffusers.hooks import apply_group_offloading
from diffusers.utils import export_to_video
from diffusers.quantizers import PipelineQuantizationConfig
from transformers import UMT5EncoderModel

torch._dynamo.config.cache_size_limit = 1000
torch._dynamo.config.capture_dynamic_output_shape_ops = True

# quantize
pipeline_quant_config = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_4bit",
    quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
    components_to_quantize=["transformer", "text_encoder"],
)

text_encoder = UMT5EncoderModel.from_pretrained(
    "Wan-AI/Wan2.1-T2V-14B-Diffusers", subfolder="text_encoder", torch_dtype=torch.bfloat16
)
pipeline = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-14B-Diffusers",
    quantization_config=pipeline_quant_config,
    torch_dtype=torch.bfloat16,
).to("cuda")

# group offloading
onload_device = torch.device("cuda")
offload_device = torch.device("cpu")

pipeline.transformer.enable_group_offload(
    onload_device=onload_device,
    offload_device=offload_device,
    offload_type="leaf_level",
    use_stream=True,
    non_blocking=True
)
pipeline.vae.enable_group_offload(
    onload_device=onload_device,
    offload_device=offload_device,
    offload_type="leaf_level",
    use_stream=True,
    non_blocking=True
)
apply_group_offloading(
    pipeline.text_encoder,
    onload_device=onload_device,
    offload_type="leaf_level",
    use_stream=True,
    non_blocking=True
)

# compile
pipeline.transformer.compile()

prompt = """
The camera rushes from far to near in a low-angle shot,
revealing a white ferret on a log. It plays, leaps into the water, and emerges, as the camera zooms in
for a close-up. Water splashes berry bushes nearby, while moss, snow, and leaves blanket the ground.
Birch trees and a light blue sky frame the scene, with ferns in the foreground. Side lighting casts dynamic
shadows and warm highlights. Medium composition, front view, low angle, with depth of field.
"""
negative_prompt = """
Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality,
low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured,
misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards
"""

output = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    num_frames=81,
    guidance_scale=5.0,
).frames[0]
export_to_video(output, "output.mp4", fps=16)
```

</hfoption>
</hfoptions>
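If you want to reproduce numbers like the table above on your own hardware, a small measurement helper is enough. This is a minimal sketch assuming a CUDA device; it reports the process-wide peak, so reset the counter right before the call you want to profile.

```py
import torch

torch.cuda.reset_peak_memory_stats()

# ... run the pipeline call you want to profile here ...

# max_memory_allocated reports the high-water mark since the last reset
peak_gb = torch.cuda.max_memory_allocated() / 1024**3
print(f"peak GPU memory: {peak_gb:.2f} GB")
```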
diffusers/docs/source/zh/optimization/speed-memory-optims.md/0
{ "file_path": "diffusers/docs/source/zh/optimization/speed-memory-optims.md", "repo_id": "diffusers", "token_count": 4097 }
138
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Textual Inversion

[Textual Inversion](https://hf.co/papers/2208.01618) is a training technique for personalizing image generation models with just a few example images. It works by learning and updating the text embeddings (the new embeddings are tied to a special word you must use in the prompt) to match the example images you provide.

If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint with memory-efficient attention via [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. With the same configuration as PyTorch, the Flax training script should be at least ~70% faster!

This guide explores the [textual_inversion.py](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py) script to help you become more familiar with how it works, and how you can adapt it to your own needs.

Before running the script, make sure you install the library from source:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Navigate to the example folder with the training script and install the required dependencies:

<hfoptions id="installation">
<hfoption id="PyTorch">

```bash
cd examples/textual_inversion
pip install -r requirements.txt
```

</hfoption>
<hfoption id="Flax">

```bash
cd examples/textual_inversion
pip install -r requirements_flax.txt
```

</hfoption>
</hfoptions>

<Tip>

🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed precision. It automatically configures your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.

</Tip>

Initialize an 🤗 Accelerate environment:

```bash
accelerate config
```

To set up a default 🤗 Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or, if your environment doesn't support an interactive shell (like a notebook), you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.

<Tip>

The following sections highlight parts of the training script that are important for understanding how to modify it, but they don't cover every detail of the script. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py) and let us know if you have any questions.

</Tip>

## Script parameters

The training script has many parameters to help you customize the training run. All of the parameters and their descriptions are listed in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/839c2a5ece0af4e75530cb520d77bc7ed8acf474/examples/textual_inversion/textual_inversion.py#L176) function. Diffusers provides default values for each parameter (such as the training batch size and learning rate), but you're free to change these values in the training command.

For example, to increase the number of gradient accumulation steps above the default value of 1:

```bash
accelerate launch textual_inversion.py \
  --gradient_accumulation_steps=4
```

Some other basic and important parameters to specify include:

- `--pretrained_model_name_or_path`: the name of the model on the Hub or a local path to the pretrained model
- `--train_data_dir`: the path to the folder containing the training dataset (example images)
- `--output_dir`: where to save the trained model
- `--push_to_hub`: whether to push the trained model to the Hub
- `--checkpointing_steps`: how frequently to save a checkpoint as the model trains; if training is interrupted for some reason, you can continue training from that checkpoint by adding `--resume_from_checkpoint` to your training command
- `--num_vectors`: the number of vectors to learn the embeddings with; increasing this parameter helps the model learn better but it comes with increased training costs
- `--placeholder_token`: the special word to tie the learned embeddings to (you must use this word in your prompt for inference)
- `--initializer_token`: a single-word that roughly describes what you're trying to train on (such as an object or a style)
- `--learnable_property`: whether you're training the model to learn a new "style" (such as Van Gogh's painting style) or an "object" (such as your pet dog)

## Training script

Unlike some of the other training scripts, textual_inversion.py includes a custom dataset class, [`TextualInversionDataset`](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L487), for creating the dataset. You can customize the image size, placeholder token, interpolation method, whether to crop the image, and more. If you need to change how the dataset is created, you can modify the `TextualInversionDataset` class.

Next, you'll find the dataset preprocessing code and training loop in the [`main()`](https://github.com/huggingface/diffusers/blob/839c2a5ece0af4e75530cb520d77bc7ed8acf474/examples/textual_inversion/textual_inversion.py#L573) function.

The script starts by loading the [tokenizer](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L616) and the [scheduler and models](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L622):

```py
# Load tokenizer
if args.tokenizer_name:
    tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
elif args.pretrained_model_name_or_path:
    tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")

# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(
    args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
unet = UNet2DConditionModel.from_pretrained(
    args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
)
```

The special [placeholder token](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L632) is added next to the tokenizer, and the embedding layer is readjusted to account for the new token.

Then, the script [creates a dataset](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L716) from the `TextualInversionDataset`:

```py
train_dataset = TextualInversionDataset(
    data_root=args.train_data_dir,
    tokenizer=tokenizer,
    size=args.resolution,
    placeholder_token=(" ".join(tokenizer.convert_ids_to_tokens(placeholder_token_ids))),
    repeats=args.repeats,
    learnable_property=args.learnable_property,
    center_crop=args.center_crop,
    set="train",
)
train_dataloader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
)
```

Finally, the [training loop](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L784) handles everything else, from predicting the noisy residual to updating the embedding weights of the special placeholder token.

If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial, which breaks down the basic pattern of the denoising process.

## Launch the script

Once you've made all your changes, or you're okay with the default configuration, you're ready to launch the training script! 🚀

For this guide, you'll download some example images of a [cat toy](https://huggingface.co/datasets/diffusers/cat_toy_example) and store them in a directory. But remember, you can create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide).

```py
from huggingface_hub import snapshot_download

local_dir = "./cat"
snapshot_download(
    "diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes"
)
```

Set the environment variable `MODEL_NAME` to a model id on the Hub or a path to a local model, and `DATA_DIR` to the path where you just downloaded the cat images. The script saves the following files to your repository:

- `learned_embeds.bin`: the learned embedding vectors corresponding to your example images
- `token_identifier.txt`: the special placeholder token
- `type_of_concept.txt`: the type of concept you're training on ("object" or "style")

<Tip warning={true}>

A full training run takes about 1 hour on a single V100 GPU.

</Tip>

One more thing before you launch the script. If you're interested in following along with the training process, you can periodically save generated images as training progresses. Add the following parameters to the training command:

```bash
--validation_prompt="A <cat-toy> train"
--num_validation_images=4
--validation_steps=100
```

<hfoptions id="training-inference">
<hfoption id="PyTorch">

```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export DATA_DIR="./cat"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" \
  --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 \
  --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="textual_inversion_cat" \
  --push_to_hub
```

</hfoption>
<hfoption id="Flax">

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export DATA_DIR="./cat"

python textual_inversion_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" \
  --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 \
  --scale_lr \
  --output_dir="textual_inversion_cat" \
  --push_to_hub
```

</hfoption>
</hfoptions>

After training is complete, you can use your newly trained model for inference like:

<hfoptions id="training-inference">
<hfoption id="PyTorch">

```py
from diffusers import StableDiffusionPipeline
import torch

pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
pipeline.load_textual_inversion("sd-concepts-library/cat-toy")
image = pipeline("A <cat-toy> train", num_inference_steps=50).images[0]
image.save("cat-train.png")
```

</hfoption>
<hfoption id="Flax">

Flax doesn't support the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] method, but the textual_inversion_flax.py script [saves](https://github.com/huggingface/diffusers/blob/c0f058265161178f2a88849e92b37ffdc81f1dcc/examples/textual_inversion/textual_inversion_flax.py#L636C2-L636C2) the learned embeddings as part of the model after training. This means you can use the model for inference like any other Flax model:

```py
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

model_path = "path-to-your-trained-model"
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)

prompt = "A <cat-toy> train"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)

# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("cat-train.png")
```

</hfoption>
</hfoptions>

## Next steps

Congratulations on training your own Textual Inversion model! 🎉 To learn more about how to use your new model, the following guides may be helpful:

- Learn how to [load Textual Inversion embeddings](../using-diffusers/loading_adapters) and also use them as negative embeddings.
- Learn how to use [Textual Inversion](textual_inversion_inference) for inference with Stable Diffusion 1/2 and Stable Diffusion XL.
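One practical footnote if you skipped `--push_to_hub`: the learned embedding saved in `output_dir` can usually be loaded directly from disk. This is a sketch; the file name below matches what recent versions of the script save, while older runs may produce `learned_embeds.bin` instead, so check your output directory first.

```py
from diffusers import StableDiffusionPipeline
import torch

pipeline = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Point load_textual_inversion at the local training output directory;
# the placeholder token is recovered from the saved embedding file
pipeline.load_textual_inversion("textual_inversion_cat", weight_name="learned_embeds.safetensors")
image = pipeline("A <cat-toy> train", num_inference_steps=50).images[0]
```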
diffusers/docs/source/zh/training/text_inversion.md/0
{ "file_path": "diffusers/docs/source/zh/training/text_inversion.md", "repo_id": "diffusers", "token_count": 6739 }
139
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import PIL.Image import torch from torchvision import transforms from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils.torch_utils import randn_tensor trans = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def preprocess(image): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] image = [trans(img.convert("RGB")) for img in image] image = torch.stack(image) return image class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline): r""" This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Parameters: unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ def __init__(self, unet, scheduler): super().__init__() # make sure scheduler can always be converted to DDIM scheduler = DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=unet, scheduler=scheduler) def check_inputs(self, strength): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) init_latents = image.to(device=device, dtype=dtype) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents print("add noise to latents at timestep", timestep) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @torch.no_grad() def __call__( self, image: Union[torch.Tensor, PIL.Image.Image] = None, strength: float = 0.8, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]: r""" Args: image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. batch_size (`int`, *optional*, defaults to 1): The number of images to generate. generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. eta (`float`, *optional*, defaults to 0.0): The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM). num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. use_clipped_model_output (`bool`, *optional*, defaults to `None`): if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed downstream to the scheduler. So use `None` for schedulers which don't support this argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. """ # 1. Check inputs. Raise error if not correct self.check_inputs(strength) # 2. Preprocess image image = preprocess(image) # 3. set timesteps self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device) latent_timestep = timesteps[:1].repeat(batch_size) # 4. Prepare latent variables latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator) image = latents # 5. Denoising loop for t in self.progress_bar(timesteps): # 1. predict noise model_output model_output = self.unet(image, t).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 image = self.scheduler.step( model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator, ).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=image)
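# --- Usage sketch (not part of the upstream file) ---------------------------
# A minimal example of how this community pipeline is typically loaded and run
# through the `custom_pipeline` mechanism. The checkpoint name is illustrative
# (any repo exposing a compatible `unet` and `scheduler` should work), so treat
# it as an assumption rather than a fixture of this module.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "google/ddpm-ema-celebahq-256",
        custom_pipeline="ddim_noise_comparative_analysis",
    )
    # Stand-in input image; in practice you would pass a real photo to analyze
    init_image = PIL.Image.new("RGB", (256, 256), color="gray")
    # `strength` controls how much noise is added before denoising starts;
    # with return_dict=False the pipeline returns (images, noising_timestep)
    images, noising_timestep = pipe(
        image=init_image, strength=0.5, num_inference_steps=50, return_dict=False
    )
    images[0].save("ddim_noise_analysis.png")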
diffusers/examples/community/ddim_noise_comparative_analysis.py/0
{ "file_path": "diffusers/examples/community/ddim_noise_comparative_analysis.py", "repo_id": "diffusers", "token_count": 3415 }
140
# Copyright 2025 Long Lian, the GLIGEN Authors, and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is a single file implementation of LMD+. See README.md for examples. import ast import gc import inspect import math import warnings from collections.abc import Iterable from typing import Any, Callable, Dict, List, Optional, Union import torch import torch.nn.functional as F from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.configuration_utils import FrozenDict from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.attention import Attention, GatedSelfAttentionDense from diffusers.models.attention_processor import AttnProcessor2_0 from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines import DiffusionPipeline from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import DiffusionPipeline >>> pipe = DiffusionPipeline.from_pretrained( ... "longlian/lmd_plus", ... custom_pipeline="llm_grounded_diffusion", ... custom_revision="main", ... variant="fp16", torch_dtype=torch.float16 ... ) >>> pipe.enable_model_cpu_offload() >>> # Generate an image described by the prompt and >>> # insert objects described by text at the region defined by bounding boxes >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage" >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]] >>> phrases = ["a waterfall", "a modern high speed train"] >>> images = pipe( ... prompt=prompt, ... phrases=phrases, ... boxes=boxes, ... gligen_scheduled_sampling_beta=0.4, ... output_type="pil", ... num_inference_steps=50, ... lmd_guidance_kwargs={} ... ).images >>> images[0].save("./lmd_plus_generation.jpg") >>> # Generate directly from a text prompt and an LLM response >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage" >>> phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(\""" [('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])] Background prompt: A beautiful forest with fall foliage Negative prompt: \""") >> images = pipe( ... prompt=prompt, ... 
negative_prompt=neg_prompt, ... phrases=phrases, ... boxes=boxes, ... gligen_scheduled_sampling_beta=0.4, ... output_type="pil", ... num_inference_steps=50, ... lmd_guidance_kwargs={} ... ).images >>> images[0].save("./lmd_plus_generation.jpg") images[0] ``` """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name # All keys in Stable Diffusion models: [('down', 0, 0, 0), ('down', 0, 1, 0), ('down', 1, 0, 0), ('down', 1, 1, 0), ('down', 2, 0, 0), ('down', 2, 1, 0), ('mid', 0, 0, 0), ('up', 1, 0, 0), ('up', 1, 1, 0), ('up', 1, 2, 0), ('up', 2, 0, 0), ('up', 2, 1, 0), ('up', 2, 2, 0), ('up', 3, 0, 0), ('up', 3, 1, 0), ('up', 3, 2, 0)] # Note that the first up block is `UpBlock2D` rather than `CrossAttnUpBlock2D` and does not have attention. The last index is always 0 in our case since we have one `BasicTransformerBlock` in each `Transformer2DModel`. DEFAULT_GUIDANCE_ATTN_KEYS = [ ("mid", 0, 0, 0), ("up", 1, 0, 0), ("up", 1, 1, 0), ("up", 1, 2, 0), ] def convert_attn_keys(key): """Convert the attention key from tuple format to the torch state format""" if key[0] == "mid": assert key[1] == 0, f"mid block only has one block but the index is {key[1]}" return f"{key[0]}_block.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor" return f"{key[0]}_blocks.{key[1]}.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor" DEFAULT_GUIDANCE_ATTN_KEYS = [convert_attn_keys(key) for key in DEFAULT_GUIDANCE_ATTN_KEYS] def scale_proportion(obj_box, H, W): # Separately rounding box_w and box_h to allow shift invariant box sizes. Otherwise box sizes may change when both coordinates being rounded end with ".5". x_min, y_min = round(obj_box[0] * W), round(obj_box[1] * H) box_w, box_h = round((obj_box[2] - obj_box[0]) * W), round((obj_box[3] - obj_box[1]) * H) x_max, y_max = x_min + box_w, y_min + box_h x_min, y_min = max(x_min, 0), max(y_min, 0) x_max, y_max = min(x_max, W), min(y_max, H) return x_min, y_min, x_max, y_max # Adapted from the parent class `AttnProcessor2_0` class AttnProcessorWithHook(AttnProcessor2_0): def __init__( self, attn_processor_key, hidden_size, cross_attention_dim, hook=None, fast_attn=True, enabled=True, ): super().__init__() self.attn_processor_key = attn_processor_key self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.hook = hook self.fast_attn = fast_attn self.enabled = enabled def __call__( self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, scale: float = 1.0, ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) args = () if USE_PEFT_BACKEND else (scale,) query = attn.to_q(hidden_states, *args) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states, *args) value = attn.to_v(encoder_hidden_states, *args) 
inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads if (self.hook is not None and self.enabled) or not self.fast_attn: query_batch_dim = attn.head_to_batch_dim(query) key_batch_dim = attn.head_to_batch_dim(key) value_batch_dim = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query_batch_dim, key_batch_dim, attention_mask) if self.hook is not None and self.enabled: # Call the hook with query, key, value, and attention maps self.hook( self.attn_processor_key, query_batch_dim, key_batch_dim, value_batch_dim, attention_probs, ) if self.fast_attn: query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attention_mask is not None: # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) else: hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class LLMGroundedDiffusionPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin, ): r""" Pipeline for layout-grounded text-to-image generation using LLM-grounded Diffusion (LMD+): https://huggingface.co/papers/2305.13655. This model inherits from [`StableDiffusionPipeline`] and aims at implementing the pipeline with minimal modifications. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). This is a simplified implementation that does not perform latent or attention transfer from single object generation to overall generation. The final image is generated directly with attention and adapters control. Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. 
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. requires_safety_checker (bool): Whether a safety checker is needed for this pipeline. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] objects_text = "Objects: " bg_prompt_text = "Background prompt: " bg_prompt_text_no_trailing_space = bg_prompt_text.rstrip() neg_prompt_text = "Negative prompt: " neg_prompt_text_no_trailing_space = neg_prompt_text.rstrip() def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, ): # This is copied from StableDiffusionPipeline, with hook initizations for LMD+. super().__init__() if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = ( unet is not None and hasattr(unet.config, "_diffusers_version") and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0") ) is_unet_sample_size_less_64 = ( unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 ) if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Initialize the attention hooks for LLM-grounded Diffusion self.register_attn_hooks(unet) self._saved_attn = None def attn_hook(self, name, query, key, value, attention_probs): if name in DEFAULT_GUIDANCE_ATTN_KEYS: self._saved_attn[name] = attention_probs @classmethod def convert_box(cls, box, height, width): # box: x, y, w, h (in 512 format) -> x_min, y_min, x_max, y_max x_min, y_min = box[0] / width, box[1] / height w_box, h_box = box[2] / width, box[3] / height x_max, y_max = x_min + w_box, y_min + h_box return x_min, y_min, x_max, y_max @classmethod def _parse_response_with_negative(cls, text): if not text: raise ValueError("LLM response is empty") if cls.objects_text in text: text = text.split(cls.objects_text)[1] text_split = text.split(cls.bg_prompt_text_no_trailing_space) if len(text_split) == 2: gen_boxes, text_rem = text_split else: raise ValueError(f"LLM response is incomplete: {text}") text_split = text_rem.split(cls.neg_prompt_text_no_trailing_space) if len(text_split) == 2: bg_prompt, neg_prompt = text_split else: raise ValueError(f"LLM response is incomplete: {text}") try: gen_boxes = ast.literal_eval(gen_boxes) except SyntaxError as e: # Sometimes the response is in plain text if "No objects" in gen_boxes or gen_boxes.strip() == "": gen_boxes = [] else: raise e bg_prompt = bg_prompt.strip() neg_prompt = neg_prompt.strip() # LLM may return "None" to mean no 
negative prompt provided. if neg_prompt == "None": neg_prompt = "" return gen_boxes, bg_prompt, neg_prompt @classmethod def parse_llm_response(cls, response, canvas_height=512, canvas_width=512): # Infer from spec gen_boxes, bg_prompt, neg_prompt = cls._parse_response_with_negative(text=response) gen_boxes = sorted(gen_boxes, key=lambda gen_box: gen_box[0]) phrases = [name for name, _ in gen_boxes] boxes = [cls.convert_box(box, height=canvas_height, width=canvas_width) for _, box in gen_boxes] return phrases, boxes, bg_prompt, neg_prompt def check_inputs( self, prompt, height, width, callback_steps, phrases, boxes, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, phrase_indices=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt is None and phrase_indices is None: raise ValueError("If the prompt is None, the phrase_indices cannot be None") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) if len(phrases) != len(boxes): raise ValueError( "length of `phrases` and `boxes` has to be same, but" f" got: `phrases` {len(phrases)} != `boxes` {len(boxes)}" ) def register_attn_hooks(self, unet): """Registering hooks to obtain the attention maps for guidance""" attn_procs = {} for name in unet.attn_processors.keys(): # Only obtain the queries and keys from cross-attention if name.endswith("attn1.processor") or name.endswith("fuser.attn.processor"): # Keep the same attn_processors for self-attention (no hooks for self-attention) attn_procs[name] = unet.attn_processors[name] continue cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] attn_procs[name] = AttnProcessorWithHook( attn_processor_key=name, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, hook=self.attn_hook, fast_attn=True, # Not enabled by default enabled=False, ) unet.set_attn_processor(attn_procs) def enable_fuser(self, enabled=True): for module in self.unet.modules(): if isinstance(module, GatedSelfAttentionDense): module.enabled = enabled def enable_attn_hook(self, enabled=True): for module in self.unet.attn_processors.values(): if isinstance(module, AttnProcessorWithHook): module.enabled = enabled def get_token_map(self, prompt, padding="do_not_pad", verbose=False): """Get a list of mapping: prompt index to str (prompt in a list of token str)""" fg_prompt_tokens = self.tokenizer([prompt], padding=padding, max_length=77, return_tensors="np") input_ids = fg_prompt_tokens["input_ids"][0] token_map = [] for ind, item in enumerate(input_ids.tolist()): token = self.tokenizer._convert_id_to_token(item) if verbose: logger.info(f"{ind}, {token} ({item})") token_map.append(token) return token_map def get_phrase_indices( self, prompt, phrases, token_map=None, add_suffix_if_not_found=False, verbose=False, ): for obj in phrases: # Suffix the prompt with object name for attention guidance if object is not in the prompt, using "|" to separate the prompt and the suffix if obj not in prompt: prompt += "| " + obj if token_map is None: # We allow using a pre-computed token map. token_map = self.get_token_map(prompt=prompt, padding="do_not_pad", verbose=verbose) token_map_str = " ".join(token_map) phrase_indices = [] for obj in phrases: phrase_token_map = self.get_token_map(prompt=obj, padding="do_not_pad", verbose=verbose) # Remove <bos> and <eos> in substr phrase_token_map = phrase_token_map[1:-1] phrase_token_map_len = len(phrase_token_map) phrase_token_map_str = " ".join(phrase_token_map) if verbose: logger.info( "Full str:", token_map_str, "Substr:", phrase_token_map_str, "Phrase:", phrases, ) # Count the number of token before substr # The substring comes with a trailing space that needs to be removed by minus one in the index. 
obj_first_index = len(token_map_str[: token_map_str.index(phrase_token_map_str) - 1].split(" ")) obj_position = list(range(obj_first_index, obj_first_index + phrase_token_map_len)) phrase_indices.append(obj_position) if add_suffix_if_not_found: return phrase_indices, prompt return phrase_indices def add_ca_loss_per_attn_map_to_loss( self, loss, attn_map, object_number, bboxes, phrase_indices, fg_top_p=0.2, bg_top_p=0.2, fg_weight=1.0, bg_weight=1.0, ): # b is the number of heads, not batch b, i, j = attn_map.shape H = W = int(math.sqrt(i)) for obj_idx in range(object_number): obj_loss = 0 mask = torch.zeros(size=(H, W), device="cuda") obj_boxes = bboxes[obj_idx] # We support two levels (one box per phrase) and three levels (multiple boxes per phrase) if not isinstance(obj_boxes[0], Iterable): obj_boxes = [obj_boxes] for obj_box in obj_boxes: # x_min, y_min, x_max, y_max = int(obj_box[0] * W), int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H) x_min, y_min, x_max, y_max = scale_proportion(obj_box, H=H, W=W) mask[y_min:y_max, x_min:x_max] = 1 for obj_position in phrase_indices[obj_idx]: # Could potentially optimize to compute this for loop in batch. # Could crop the ref cross attention before saving to save memory. ca_map_obj = attn_map[:, :, obj_position] # shape: (b, H * W) k_fg = (mask.sum() * fg_top_p).long().clamp_(min=1) k_bg = ((1 - mask).sum() * bg_top_p).long().clamp_(min=1) mask_1d = mask.view(1, -1) # Max-based loss function # Take the topk over the spatial dimension, and then take the sum over the heads dim # The mean is over the k_fg and k_bg dimension, so we don't need to sum and divide on our own. obj_loss += (1 - (ca_map_obj * mask_1d).topk(k=k_fg).values.mean(dim=1)).sum(dim=0) * fg_weight obj_loss += ((ca_map_obj * (1 - mask_1d)).topk(k=k_bg).values.mean(dim=1)).sum(dim=0) * bg_weight loss += obj_loss / len(phrase_indices[obj_idx]) return loss def compute_ca_loss( self, saved_attn, bboxes, phrase_indices, guidance_attn_keys, verbose=False, **kwargs, ): """ The `saved_attn` is supposed to be passed to `save_attn_to_dict` in `cross_attention_kwargs` prior to computing the loss. `AttnProcessor` will put attention maps into the `save_attn_to_dict`. `index` is the timestep. `ref_ca_word_token_only`: This has precedence over `ref_ca_last_token_only` (i.e., if both are enabled, we take the token from the word rather than the last token). `ref_ca_last_token_only`: `ref_ca_saved_attn` comes from the attention map of the last token of the phrase in single object generation, so we apply it only to the last token of the phrase in overall generation if this is set to True. If set to False, `ref_ca_saved_attn` will be applied to all the text tokens. """ loss = torch.tensor(0).float().cuda() object_number = len(bboxes) if object_number == 0: return loss for attn_key in guidance_attn_keys: # We only have 1 cross attention for mid.
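# (Descriptive note, not in the original file: each `saved_attn[attn_key]` entry holds the cross-attention map recorded by `AttnProcessorWithHook` for that block/layer; any leading singleton batch dim is squeezed away below, leaving roughly (heads, query_tokens, text_tokens).)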
attn_map_integrated = saved_attn[attn_key] if not attn_map_integrated.is_cuda: attn_map_integrated = attn_map_integrated.cuda() # Example dimension: [20, 64, 77] attn_map = attn_map_integrated.squeeze(dim=0) loss = self.add_ca_loss_per_attn_map_to_loss( loss, attn_map, object_number, bboxes, phrase_indices, **kwargs ) num_attn = len(guidance_attn_keys) if num_attn > 0: loss = loss / (object_number * num_attn) return loss @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, gligen_scheduled_sampling_beta: float = 0.3, phrases: List[str] = None, boxes: List[List[float]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, lmd_guidance_kwargs: Optional[Dict[str, Any]] = {}, phrase_indices: Optional[List[int]] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. phrases (`List[str]`): The phrases to guide what to include in each of the regions defined by the corresponding `boxes`. There should only be one phrase per bounding box. boxes (`List[List[float]]`): The bounding boxes that identify rectangular regions of the image that are going to be filled with the content described by the corresponding `phrases`. Each rectangular box is defined as a `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1]. gligen_scheduled_sampling_beta (`float`, defaults to 0.3): Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image Generation](https://huggingface.co/papers/2301.07093). Scheduled Sampling factor is only varied for scheduled sampling during inference for improved quality and controllability. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). 
num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when using zero terminal SNR. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. lmd_guidance_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to `latent_lmd_guidance` function. Useful keys include `loss_scale` (the guidance strength), `loss_threshold` (when loss is lower than this value, the guidance is not applied anymore), `max_iter` (the number of iterations of guidance for each step), and `guidance_timesteps` (the number of diffusion timesteps to apply guidance on). See `latent_lmd_guidance` for implementation details. phrase_indices (`list` of `list`, *optional*): The indices of the tokens of each phrase in the overall prompt. If omitted, the pipeline will match the first token subsequence. 
The pipeline will append the missing phrases to the end of the prompt by default. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, phrases, boxes, negative_prompt, prompt_embeds, negative_prompt_embeds, phrase_indices, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 if phrase_indices is None: phrase_indices, prompt = self.get_phrase_indices(prompt, phrases, add_suffix_if_not_found=True) elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) if phrase_indices is None: phrase_indices = [] prompt_parsed = [] for prompt_item in prompt: ( phrase_indices_parsed_item, prompt_parsed_item, ) = self.get_phrase_indices(prompt_item, phrases, add_suffix_if_not_found=True) phrase_indices.append(phrase_indices_parsed_item) prompt_parsed.append(prompt_parsed_item) prompt = prompt_parsed else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip, ) cond_prompt_embeds = prompt_embeds # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None: image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 5.1 Prepare GLIGEN variables max_objs = 30 if len(boxes) > max_objs: warnings.warn( f"More than {max_objs} objects found. 
Only the first {max_objs} objects will be processed.", FutureWarning, ) phrases = phrases[:max_objs] boxes = boxes[:max_objs] n_objs = len(boxes) if n_objs: # prepare batched input to the PositionNet (boxes, phrases, mask) # Get tokens for phrases from pre-trained CLIPTokenizer tokenizer_inputs = self.tokenizer(phrases, padding=True, return_tensors="pt").to(device) # For each token, we use the same pre-trained text encoder # to obtain its text feature _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output # Each entity described in `phrases` is denoted with a bounding box; # we represent the location information as (xmin, ymin, xmax, ymax) cond_boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) if n_objs: cond_boxes[:n_objs] = torch.tensor(boxes) text_embeddings = torch.zeros( max_objs, self.unet.config.cross_attention_dim, device=device, dtype=self.text_encoder.dtype, ) if n_objs: text_embeddings[:n_objs] = _text_embeddings # Generate a mask for each object that is described by a phrase masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) masks[:n_objs] = 1 repeat_batch = batch_size * num_images_per_prompt cond_boxes = cond_boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone() text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone() masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone() if do_classifier_free_guidance: repeat_batch = repeat_batch * 2 cond_boxes = torch.cat([cond_boxes] * 2) text_embeddings = torch.cat([text_embeddings] * 2) masks = torch.cat([masks] * 2) masks[: repeat_batch // 2] = 0 if cross_attention_kwargs is None: cross_attention_kwargs = {} cross_attention_kwargs["gligen"] = { "boxes": cond_boxes, "positive_embeddings": text_embeddings, "masks": masks, } num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps)) self.enable_fuser(True) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 6.1 Add image embeds for IP-Adapter added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None loss_attn = torch.tensor(10000.0) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Scheduled sampling if i == num_grounding_steps: self.enable_fuser(False) if latents.shape[1] != 4: latents = torch.randn_like(latents[:, :4]) # 7.1 Perform LMD guidance if boxes: latents, loss_attn = self.latent_lmd_guidance( cond_prompt_embeds, index=i, boxes=boxes, phrase_indices=phrase_indices, t=t, latents=latents, loss=loss_attn, **lmd_guidance_kwargs, ) # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) @torch.set_grad_enabled(True) def latent_lmd_guidance( self, cond_embeddings, index, boxes, phrase_indices, t, latents, loss, *, loss_scale=20, loss_threshold=5.0, max_iter=[3] * 5 + [2] * 5 + [1] * 5, guidance_timesteps=15, cross_attention_kwargs=None, guidance_attn_keys=DEFAULT_GUIDANCE_ATTN_KEYS, verbose=False, clear_cache=False, unet_additional_kwargs={}, guidance_callback=None, **kwargs, ): scheduler, unet = self.scheduler, self.unet iteration = 0 if index < guidance_timesteps: if isinstance(max_iter, list): max_iter = max_iter[index] if verbose: logger.info( f"time index {index}, loss: {loss.item() / loss_scale:.3f} (de-scaled with scale {loss_scale:.1f}), loss threshold: {loss_threshold:.3f}" ) try: self.enable_attn_hook(enabled=True) while ( loss.item() / loss_scale > loss_threshold and iteration < max_iter and index < guidance_timesteps ): self._saved_attn = {} latents.requires_grad_(True) latent_model_input = latents latent_model_input = scheduler.scale_model_input(latent_model_input, t) unet( latent_model_input, t, encoder_hidden_states=cond_embeddings, cross_attention_kwargs=cross_attention_kwargs, **unet_additional_kwargs, 
) # update latents with guidance loss = ( self.compute_ca_loss( saved_attn=self._saved_attn, bboxes=boxes, phrase_indices=phrase_indices, guidance_attn_keys=guidance_attn_keys, verbose=verbose, **kwargs, ) * loss_scale ) if torch.isnan(loss): raise RuntimeError("**Loss is NaN**") # This callback allows visualizations. if guidance_callback is not None: guidance_callback(self, latents, loss, iteration, index) self._saved_attn = None grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents])[0] latents.requires_grad_(False) # Scaling with classifier guidance alpha_prod_t = scheduler.alphas_cumprod[t] # Classifier guidance: https://huggingface.co/papers/2105.05233 # DDIM: https://huggingface.co/papers/2010.02502 scale = (1 - alpha_prod_t) ** (0.5) latents = latents - scale * grad_cond iteration += 1 if clear_cache: gc.collect() torch.cuda.empty_cache() if verbose: logger.info( f"time index {index}, loss: {loss.item() / loss_scale:.3f}, loss threshold: {loss_threshold:.3f}, iteration: {iteration}" ) finally: self.enable_attn_hook(enabled=False) return latents, loss # Below are methods copied from StableDiffusionPipeline # The design choice of not inheriting from StableDiffusionPipeline is discussed here: https://github.com/huggingface/diffusers/pull/5993#issuecomment-1834258517 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True, ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def 
run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): shape = ( batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale def guidance_scale(self): return self._guidance_scale @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale def guidance_rescale(self): return self._guidance_rescale @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs def cross_attention_kwargs(self): return self._cross_attention_kwargs @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps def num_timesteps(self): return self._num_timesteps
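# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original pipeline file). The
# checkpoint id "longlian/lmd_plus" and the parsed layout below are
# illustrative assumptions; any GLIGEN-compatible Stable Diffusion checkpoint
# and any LLM layout response in the format expected by `parse_llm_response`
# should work the same way.

if __name__ == "__main__":
    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "longlian/lmd_plus",
        custom_pipeline="llm_grounded_diffusion",
        torch_dtype=torch.float16,
    ).to("cuda")

    # A layout response: a list of (phrase, box) pairs plus background and
    # negative prompts. The box values here are made up for illustration.
    llm_response = (
        "[('a waterfall', [71, 105, 148, 258]), ('a boat', [300, 220, 120, 100])]\n"
        "Background prompt: A beautiful forest with a river\n"
        "Negative prompt: None"
    )
    phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(llm_response)

    images = pipe(
        prompt=bg_prompt,
        negative_prompt=neg_prompt,
        phrases=phrases,
        boxes=boxes,
        gligen_scheduled_sampling_beta=0.4,
        num_inference_steps=50,
    ).images
    images[0].save("llm_grounded.png")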
diffusers/examples/community/llm_grounded_diffusion.py/0
{ "file_path": "diffusers/examples/community/llm_grounded_diffusion.py", "repo_id": "diffusers", "token_count": 32839 }
141
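Before the AnimateDiff image-to-video pipeline below, a minimal sketch of the latent-interpolation "hack" it builds on: every frame starts from fresh noise and is pulled toward the VAE-encoded input image by a factor that grows with the frame index and shrinks with `strength`. The torch-only `slerp_simple` helper, the latent shapes, and the `strength` value are illustrative assumptions; the module itself uses the numpy-based `lerp`/`slerp` helpers defined further down, which add a linear fallback for near-parallel inputs.

import torch

def slerp_simple(v0: torch.Tensor, v1: torch.Tensor, t: float) -> torch.Tensor:
    # Simplified spherical interpolation over whole tensors.
    dot = torch.sum(v0 * v1) / (v0.norm() * v1.norm())
    theta = torch.acos(dot.clamp(-1.0, 1.0))
    return (torch.sin((1 - t) * theta) * v0 + torch.sin(t * theta) * v1) / torch.sin(theta)

num_frames, strength = 16, 0.8
latents = torch.randn(1, 4, num_frames, 64, 64)  # fresh noise, one latent per frame
image_latent = torch.randn(1, 4, 64, 64)         # stands in for the VAE-encoded input image

for i in range(num_frames):
    # Mirrors `prepare_latents` below: t = i / num_frames * (1 - strength), so
    # every frame stays mostly noise while later frames lean slightly more
    # toward the encoded image.
    t = i / num_frames * (1 - strength)
    latents[:, :, i] = slerp_simple(latents[:, :, i], image_latent, t)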
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Note: # This pipeline relies on a "hack" discovered by the community that allows # the generation of videos given an input image with AnimateDiff. It works # by creating a copy of the image `num_frames` times and progressively adding # more noise to the image based on the strength and latent interpolation method. import inspect from types import FunctionType from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.models.unet_motion_model import MotionAdapter from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import MotionAdapter, DiffusionPipeline, DDIMScheduler >>> from diffusers.utils import export_to_gif, load_image >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE" >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") >>> pipe = DiffusionPipeline.from_pretrained(model_id, motion_adapter=adapter, custom_pipeline="pipeline_animatediff_img2video").to("cuda") >>> pipe.scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1) >>> image = load_image("snail.png") >>> output = pipe(image=image, prompt="A snail moving on the ground", strength=0.8, latent_interpolation_method="slerp") >>> frames = output.frames[0] >>> export_to_gif(frames, "animation.gif") ``` """ def lerp( v0: torch.Tensor, v1: torch.Tensor, t: Union[float, torch.Tensor], ) -> torch.Tensor: r""" Linear Interpolation between two tensors. Args: v0 (`torch.Tensor`): First tensor. v1 (`torch.Tensor`): Second tensor. t: (`float` or `torch.Tensor`): Interpolation factor. 
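Returns: `torch.Tensor`: The linearly interpolated tensor `(1 - t) * v0 + t * v1`, returned on the device of `v0`. 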
""" t_is_float = False input_device = v0.device v0 = v0.cpu().numpy() v1 = v1.cpu().numpy() if isinstance(t, torch.Tensor): t = t.cpu().numpy() else: t_is_float = True t = np.array([t], dtype=v0.dtype) t = t[..., None] v0 = v0[None, ...] v1 = v1[None, ...] v2 = (1 - t) * v0 + t * v1 if t_is_float and v0.ndim > 1: assert v2.shape[0] == 1 v2 = np.squeeze(v2, axis=0) v2 = torch.from_numpy(v2).to(input_device) return v2 def slerp( v0: torch.Tensor, v1: torch.Tensor, t: Union[float, torch.Tensor], DOT_THRESHOLD: float = 0.9995, ) -> torch.Tensor: r""" Spherical Linear Interpolation between two tensors. Args: v0 (`torch.Tensor`): First tensor. v1 (`torch.Tensor`): Second tensor. t: (`float` or `torch.Tensor`): Interpolation factor. DOT_THRESHOLD (`float`): Dot product threshold exceeding which linear interpolation will be used because input tensors are close to parallel. """ t_is_float = False input_device = v0.device v0 = v0.cpu().numpy() v1 = v1.cpu().numpy() if isinstance(t, torch.Tensor): t = t.cpu().numpy() else: t_is_float = True t = np.array([t], dtype=v0.dtype) dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1))) if np.abs(dot) > DOT_THRESHOLD: # v0 and v1 are close to parallel, so use linear interpolation instead v2 = lerp(v0, v1, t) else: theta_0 = np.arccos(dot) sin_theta_0 = np.sin(theta_0) theta_t = theta_0 * t sin_theta_t = np.sin(theta_t) s0 = np.sin(theta_0 - theta_t) / sin_theta_0 s1 = sin_theta_t / sin_theta_0 s0 = s0[..., None] s1 = s1[..., None] v0 = v0[None, ...] v1 = v1[None, ...] v2 = s0 * v0 + s1 * v1 if t_is_float and v0.ndim > 1: assert v2.shape[0] == 1 v2 = np.squeeze(v2, axis=0) v2 = torch.from_numpy(v2).to(input_device) return v2 # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid def tensor2vid(video: torch.Tensor, processor, output_type="np"): batch_size, channels, num_frames, height, width = video.shape outputs = [] for batch_idx in range(batch_size): batch_vid = video[batch_idx].permute(1, 0, 2, 3) batch_output = processor.postprocess(batch_vid, output_type) outputs.append(batch_output) if output_type == "np": outputs = np.stack(outputs) elif output_type == "pt": outputs = torch.stack(outputs) elif not output_type == "pil": raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']") return outputs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. 
num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class AnimateDiffImgToVideoPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, ): r""" Pipeline for image-to-video generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. motion_adapter ([`MotionAdapter`]): A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
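feature_extractor ([`~transformers.CLIPImageProcessor`], *optional*): A `CLIPImageProcessor` to prepare the input image for `image_encoder` when using IP-Adapters. image_encoder ([`~transformers.CLIPVisionModelWithProjection`], *optional*): Frozen CLIP vision encoder used to produce the IP-Adapter image embeddings. 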
""" model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" _optional_components = ["feature_extractor", "image_encoder"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, motion_adapter: MotionAdapter, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], feature_extractor: CLIPImageProcessor = None, image_encoder: CLIPVisionModelWithProjection = None, ): super().__init__() unet = UNetMotionModel.from_unet2d(unet, motion_adapter) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, motion_adapter=motion_adapter, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt ): if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have the same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." ) image_embeds = [] for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack( [single_negative_image_embeds] * num_images_per_prompt, dim=0 ) if self.do_classifier_free_guidance: single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) else: image_embeds = ip_adapter_image_embeds return image_embeds # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents batch_size, channels, num_frames, height, width = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) image = self.vae.decode(latents).sample video = ( image[None, :] .reshape( ( batch_size, num_frames, -1, ) + image.shape[2:] ) .permute(0, 2, 1, 3, 4) ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 video = video.float() return video # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, latent_interpolation_method=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if latent_interpolation_method is not None: if latent_interpolation_method not in ["lerp", "slerp"] and not isinstance( latent_interpolation_method, FunctionType ): raise ValueError( "`latent_interpolation_method` must be one of `lerp`, `slerp` or a Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]" ) def prepare_latents( self, image, strength, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None, latent_interpolation_method="slerp", ): shape = ( batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if latents is None: image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): if len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
                        )

                    init_latents = [
                        retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                        for i in range(batch_size)
                    ]
                    init_latents = torch.cat(init_latents, dim=0)
                else:
                    init_latents = retrieve_latents(self.vae.encode(image), generator=generator)

                if self.vae.config.force_upcast:
                    self.vae.to(dtype)

                init_latents = init_latents.to(dtype)
                init_latents = self.vae.config.scaling_factor * init_latents
                latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
                latents = latents * self.scheduler.init_noise_sigma

                if latent_interpolation_method == "lerp":

                    def latent_cls(v0, v1, index):
                        return lerp(v0, v1, index / num_frames * (1 - strength))

                elif latent_interpolation_method == "slerp":

                    def latent_cls(v0, v1, index):
                        return slerp(v0, v1, index / num_frames * (1 - strength))

                else:
                    latent_cls = latent_interpolation_method

                for i in range(num_frames):
                    latents[:, :, i, :, :] = latent_cls(latents[:, :, i, :, :], init_latents, i)
        else:
            if shape != latents.shape:
                # [B, C, F, H, W]
                raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}")
            latents = latents.to(device, dtype=dtype)

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: PipelineImageInput,
        prompt: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_frames: int = 16,
        num_inference_steps: int = 50,
        timesteps: Optional[List[int]] = None,
        guidance_scale: float = 7.5,
        strength: float = 0.8,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_videos_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[PipelineImageInput] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        clip_skip: Optional[int] = None,
        latent_interpolation_method: Union[str, Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]] = "slerp",
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            image (`PipelineImageInput`):
                The input image to condition the generation on.
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated video.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated video.
            num_frames (`int`, *optional*, defaults to 16):
                The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
                amounts to 2 seconds of video.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
                expense of slower inference.
            strength (`float`, *optional*, defaults to 0.8):
                Higher strength leads to more differences between the original image and the generated video.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. 
Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. Latents should be of shape `(batch_size, num_channel, num_frames, height, width)`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`AnimateDiffImgToVideoPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. latent_interpolation_method (`str` or `Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]]`, *optional*): Must be one of "lerp", "slerp" or a callable that takes in a random noisy latent, image latent and a frame index as input and returns an initial latent for sampling. 
Examples: Returns: [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_videos_per_prompt = 1 # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, height=height, width=width, callback_steps=callback_steps, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, latent_interpolation_method=latent_interpolation_method, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt ) # 4. Preprocess image image = self.image_processor.preprocess(image, height=height, width=width) # 5. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) # 6. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( image=image, strength=strength, batch_size=batch_size * num_videos_per_prompt, num_channels_latents=num_channels_latents, num_frames=num_frames, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=latents, latent_interpolation_method=latent_interpolation_method, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. Add image embeds for IP-Adapter added_cond_kwargs = ( {"image_embeds": image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None ) # 9. 
Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    added_cond_kwargs=added_cond_kwargs,
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        if output_type == "latent":
            return AnimateDiffPipelineOutput(frames=latents)

        # 10. Post-processing (the latent case already returned above)
        video_tensor = self.decode_latents(latents)
        video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)

        # 11. Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return AnimateDiffPipelineOutput(frames=video)
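

# Illustrative usage sketch (not part of the original pipeline file). The motion-adapter
# and base-model checkpoints named below are assumptions -- substitute your own. Community
# pipelines like this one are typically loaded through the `custom_pipeline` argument.
#
#   import torch
#   from diffusers import DiffusionPipeline, MotionAdapter
#   from diffusers.utils import export_to_gif, load_image
#
#   adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
#   pipe = DiffusionPipeline.from_pretrained(
#       "SG161222/Realistic_Vision_V5.1_noVAE",
#       motion_adapter=adapter,
#       custom_pipeline="pipeline_animatediff_img2video",
#       torch_dtype=torch.float16,
#   ).to("cuda")
#
#   image = load_image("input.png")
#   output = pipe(image=image, prompt="a scenic landscape", strength=0.6, num_frames=16)
#   export_to_gif(output.frames[0], "animation.gif")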
diffusers/examples/community/pipeline_animatediff_img2video.py/0
{ "file_path": "diffusers/examples/community/pipeline_animatediff_img2video.py", "repo_id": "diffusers", "token_count": 20617 }
142
import inspect
import os

import numpy as np
import torch
import torch.nn.functional as nnf
from PIL import Image
from torch.optim.adam import Adam
from tqdm import tqdm

from diffusers import StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput


def retrieve_timesteps(
    scheduler,
    num_inference_steps=None,
    device=None,
    timesteps=None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used,
            `timesteps` must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
            timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
            must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
        the second element is the number of inference steps.
    """
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


class NullTextPipeline(StableDiffusionPipeline):
    def get_noise_pred(self, latents, t, context):
        latents_input = torch.cat([latents] * 2)
        guidance_scale = 7.5
        noise_pred = self.unet(latents_input, t, encoder_hidden_states=context)["sample"]
        noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
        latents = self.prev_step(noise_pred, t, latents)
        return latents

    def get_noise_pred_single(self, latents, t, context):
        noise_pred = self.unet(latents, t, encoder_hidden_states=context)["sample"]
        return noise_pred

    @torch.no_grad()
    def image2latent(self, image_path):
        image = Image.open(image_path).convert("RGB")
        image = np.array(image)
        image = torch.from_numpy(image).float() / 127.5 - 1
        image = image.permute(2, 0, 1).unsqueeze(0).to(self.device)
        latents = self.vae.encode(image)["latent_dist"].mean
        latents = latents * 0.18215
        return latents

    @torch.no_grad()
    def latent2image(self, latents):
        latents = 1 / 0.18215 * latents.detach()
        image = self.vae.decode(latents)["sample"].detach()
        # `StableDiffusionPipeline` exposes the VAE image processor as `image_processor`
        image = self.image_processor.postprocess(image, output_type="pil")[0]
        return image

    def prev_step(self, model_output, timestep, sample):
        prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction return prev_sample def next_step(self, model_output, timestep, sample): timestep, next_timestep = ( min(timestep - self.scheduler.config.num_train_timesteps // self.num_inference_steps, 999), timestep, ) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep] beta_prod_t = 1 - alpha_prod_t next_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output next_sample = alpha_prod_t_next**0.5 * next_original_sample + next_sample_direction return next_sample def null_optimization(self, latents, context, num_inner_steps, epsilon): uncond_embeddings, cond_embeddings = context.chunk(2) uncond_embeddings_list = [] latent_cur = latents[-1] bar = tqdm(total=num_inner_steps * self.num_inference_steps) for i in range(self.num_inference_steps): uncond_embeddings = uncond_embeddings.clone().detach() uncond_embeddings.requires_grad = True optimizer = Adam([uncond_embeddings], lr=1e-2 * (1.0 - i / 100.0)) latent_prev = latents[len(latents) - i - 2] t = self.scheduler.timesteps[i] with torch.no_grad(): noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings) for j in range(num_inner_steps): noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings) noise_pred = noise_pred_uncond + 7.5 * (noise_pred_cond - noise_pred_uncond) latents_prev_rec = self.prev_step(noise_pred, t, latent_cur) loss = nnf.mse_loss(latents_prev_rec, latent_prev) optimizer.zero_grad() loss.backward() optimizer.step() loss_item = loss.item() bar.update() if loss_item < epsilon + i * 2e-5: break for j in range(j + 1, num_inner_steps): bar.update() uncond_embeddings_list.append(uncond_embeddings[:1].detach()) with torch.no_grad(): context = torch.cat([uncond_embeddings, cond_embeddings]) latent_cur = self.get_noise_pred(latent_cur, t, context) bar.close() return uncond_embeddings_list @torch.no_grad() def ddim_inversion_loop(self, latent, context): self.scheduler.set_timesteps(self.num_inference_steps) _, cond_embeddings = context.chunk(2) all_latent = [latent] latent = latent.clone().detach() with torch.no_grad(): for i in range(0, self.num_inference_steps): t = self.scheduler.timesteps[len(self.scheduler.timesteps) - i - 1] noise_pred = self.unet(latent, t, encoder_hidden_states=cond_embeddings)["sample"] latent = self.next_step(noise_pred, t, latent) all_latent.append(latent) return all_latent def get_context(self, prompt): uncond_input = self.tokenizer( [""], padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt" ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] text_input = self.tokenizer( [prompt], padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] context = torch.cat([uncond_embeddings, text_embeddings]) return context def invert( self, image_path: str, prompt: str, num_inner_steps=10, early_stop_epsilon=1e-6, num_inference_steps=50 ): self.num_inference_steps = num_inference_steps context = self.get_context(prompt) latent = self.image2latent(image_path) 
        ddim_latents = self.ddim_inversion_loop(latent, context)
        if os.path.exists(image_path + ".pt"):
            uncond_embeddings = torch.load(image_path + ".pt")
        else:
            uncond_embeddings = self.null_optimization(ddim_latents, context, num_inner_steps, early_stop_epsilon)
            uncond_embeddings = torch.stack(uncond_embeddings, 0)
            torch.save(uncond_embeddings, image_path + ".pt")
        return ddim_latents[-1], uncond_embeddings

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        uncond_embeddings,
        inverted_latent,
        num_inference_steps: int = 50,
        timesteps=None,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        output_type="pil",
    ):
        self._guidance_scale = guidance_scale
        # 0. Default height and width to unet
        height = self.unet.config.sample_size * self.vae_scale_factor
        width = self.unet.config.sample_size * self.vae_scale_factor
        # to deal with lora scaling and other possible forward hooks
        callback_steps = None
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )
        # 2. Define call parameters
        device = self._execution_device
        # 3. Encode input prompt
        prompt_embeds, _ = self.encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            self.do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )
        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
        latents = inverted_latent
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=uncond_embeddings[i])["sample"]
                noise_pred = self.unet(latents, t, encoder_hidden_states=prompt_embeds)["sample"]
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
                progress_bar.update()
        if output_type != "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
                0
            ]
        else:
            image = latents
        image = self.image_processor.postprocess(
            image, output_type=output_type, do_denormalize=[True] * image.shape[0]
        )
        # Offload all models
        self.maybe_free_model_hooks()
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=False)
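

# Illustrative usage sketch (not part of the original file); the checkpoint id and image
# path below are assumptions. Null-text inversion first inverts a real image under DDIM,
# then regenerates it with the optimized unconditional embeddings.
#
#   import torch
#   from diffusers.schedulers import DDIMScheduler
#
#   device = "cuda"
#   scheduler = DDIMScheduler(
#       num_train_timesteps=1000, beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear"
#   )
#   pipe = NullTextPipeline.from_pretrained(
#       "stable-diffusion-v1-5/stable-diffusion-v1-5", scheduler=scheduler, torch_dtype=torch.float16
#   ).to(device)
#
#   prompt = "a photo of a cat"
#   inverted_latent, uncond_embeddings = pipe.invert("cat.png", prompt, num_inference_steps=50)
#   pipe(prompt, uncond_embeddings, inverted_latent, guidance_scale=7.5).images[0].save("output.png")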
diffusers/examples/community/pipeline_null_text_inversion.py/0
{ "file_path": "diffusers/examples/community/pipeline_null_text_inversion.py", "repo_id": "diffusers", "token_count": 5423 }
143
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker pipe1_model_id = "CompVis/stable-diffusion-v1-1" pipe2_model_id = "CompVis/stable-diffusion-v1-2" pipe3_model_id = "CompVis/stable-diffusion-v1-3" pipe4_model_id = "CompVis/stable-diffusion-v1-4" class StableDiffusionComparisonPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for parallel comparison of Stable Diffusion v1-v4 This pipeline inherits from DiffusionPipeline and depends on the use of an Auth Token for downloading pre-trained checkpoints from Hugging Face Hub. If using Hugging Face Hub, pass the Model ID for Stable Diffusion v1.4 as the previous 3 checkpoints will be loaded automatically. Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionMegaSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super()._init_() self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id) self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id) self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id) self.pipe4 = StableDiffusionPipeline( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, ) self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4) @property def layers(self) -> Dict[str, Any]: return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")} @torch.no_grad() def text2img_sd1_1( self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, **kwargs, ): return self.pipe1( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, ) @torch.no_grad() def text2img_sd1_2( self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, **kwargs, ): return self.pipe2( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, ) @torch.no_grad() def text2img_sd1_3( self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, **kwargs, ): return self.pipe3( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, 
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        return self.pipe4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation. This function will generate 4 results as part
        of running all the 4 pipelines for SD1.1-1.4 together in a serial-processing, parallel-invocation fashion.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            height (`int`, optional, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, optional, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, optional, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, optional, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation
                2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            eta (`float`, optional, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only
                applies to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, optional):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, optional):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`. 
            output_type (`str`, optional, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, optional, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of
                a plain tuple.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """

        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get the result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get the result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get the result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get the result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
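

# Illustrative usage sketch (not part of the original file). Community pipelines such as
# this one are usually loaded via the `custom_pipeline` mechanism; the model id below is
# an assumption, and the three earlier checkpoints are downloaded automatically in
# `__init__`.
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="stable_diffusion_comparison",
#   )
#
#   output = pipe(prompt="an astronaut riding a horse on mars")
#   images = output.images  # one result per checkpoint, v1.1 through v1.4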
diffusers/examples/community/stable_diffusion_comparison.py/0
{ "file_path": "diffusers/examples/community/stable_diffusion_comparison.py", "repo_id": "diffusers", "token_count": 7371 }
144
# Copyright 2025 Peter Willemsen <peter@codebuffet.co>. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def make_transparency_mask(size, overlap_pixels, remove_borders=[]): size_x = size[0] - overlap_pixels * 2 size_y = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels mask = np.ones((size_y, size_x), dtype=np.uint8) * 255 mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0) if "l" in remove_borders: mask = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: mask = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: mask = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: mask = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def clamp(n, smallest, largest): return max(smallest, min(n, largest)) def clamp_rect(rect: [int], min: [int], max: [int]): return ( clamp(rect[0], min[0], max[0]), clamp(rect[1], min[1], max[1]), clamp(rect[2], min[0], max[0]), clamp(rect[3], min[1], max[1]), ) def add_overlap_rect(rect: [int], overlap: int, image_size: [int]): rect = list(rect) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]]) return rect def squeeze_tile(tile, original_image, original_slice, slice_x): result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1])) result.paste( original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ), (0, 0), ) result.paste(tile, (original_slice, 0)) return result def unsqueeze_tile(tile, original_image_slice): crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) tile = tile.crop(crop_rect) return tile def next_divisible(n, d): divisor = n % d return n - divisor class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline): r""" Pipeline for tile-based text-guided image super-resolution using Stable Diffusion 2, trading memory for compute to create gigantic images. This model inherits from [`StableDiffusionUpscalePipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. 
Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. low_res_scheduler ([`SchedulerMixin`]): A scheduler used to add initial noise to the low res conditioning image. It must be an instance of [`DDPMScheduler`]. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 350, ): super().__init__( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, ) def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs): torch.manual_seed(0) crop_rect = ( min(image.size[0] - (tile_size + original_image_slice), x * tile_size), min(image.size[1] - (tile_size + original_image_slice), y * tile_size), min(image.size[0], (x + 1) * tile_size), min(image.size[1], (y + 1) * tile_size), ) crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size) tile = image.crop(crop_rect_with_overlap) translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] translated_slice_x = translated_slice_x - (original_image_slice / 2) translated_slice_x = max(0, translated_slice_x) to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x) orig_input_size = to_input.size to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC) upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0] upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC) upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice) upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC) remove_borders = [] if x == 0: remove_borders.append("l") elif crop_rect[2] == image.size[0]: remove_borders.append("r") if y == 0: remove_borders.append("t") elif crop_rect[3] == image.size[1]: remove_borders.append("b") transparency_mask = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders ), mode="L", ) final_image.paste( upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 50, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, 
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`PIL.Image.Image` or List[`PIL.Image.Image`] or `torch.Tensor`):
                `Image`, or tensor representing an image batch which will be upscaled.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation
                2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only
                applies to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            tile_size (`int`, *optional*):
                The size of the tiles. Too big can result in an OOM-error.
            tile_border (`int`, *optional*):
                The number of pixels around a tile to consider (bigger means less seams, too big can lead to an
                OOM-error).
            original_image_slice (`int`, *optional*):
                The amount of pixels of the original image to calculate with the current tile (bigger means more
                depth is preserved, less blur occurs in the final image, too big can lead to an OOM-error or loss
                in detail).
            callback (`Callable`, *optional*):
                A function that is called with a single argument, a dict, that contains the (partially) processed
                image under "image", as well as the progress (0 to 1, where 1 is completed) under "progress".

        Returns:
            A PIL.Image that is 4 times larger than the original input image. 
""" final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4)) tcx = math.ceil(image.size[0] / tile_size) tcy = math.ceil(image.size[1] / tile_size) total_tile_count = tcx * tcy current_count = 0 for y in range(tcy): for x in range(tcx): self._process_tile( original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ) current_count += 1 if callback is not None: callback({"progress": current_count / total_tile_count, "image": final_image}) return final_image def main(): # Run a demo model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, variant="fp16", torch_dtype=torch.float16) pipe = pipe.to("cuda") image = Image.open("../../docs/source/imgs/diffusers_library.jpg") def callback(obj): print(f"progress: {obj['progress']:.4f}") obj["image"].save("diffusers_library_progress.jpg") final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback) final_image.save("diffusers_library.jpg") if __name__ == "__main__": main()
diffusers/examples/community/tiled_upscaling.py/0
{ "file_path": "diffusers/examples/community/tiled_upscaling.py", "repo_id": "diffusers", "token_count": 5901 }
145
# ControlNet training example for Stable Diffusion 3/3.5 (SD3/3.5) The `train_controlnet_sd3.py` script shows how to implement the ControlNet training procedure and adapt it for [Stable Diffusion 3](https://huggingface.co/papers/2403.03206) and [Stable Diffusion 3.5](https://stability.ai/news/introducing-stable-diffusion-3-5). ## Running locally with PyTorch ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then cd in the `examples/controlnet` folder and run ```bash pip install -r requirements_sd3.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell (e.g., a notebook) ```python from accelerate.utils import write_basic_config write_basic_config() ``` When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups. ## Circle filling dataset The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script. Please download the dataset and unzip it in the directory `fill50k` in the `examples/controlnet` folder. ## Training First download the SD3 model from [Hugging Face Hub](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers) or the SD3.5 model from [Hugging Face Hub](https://huggingface.co/stabilityai/stable-diffusion-3.5-medium). We will use it as a base model for the ControlNet training. > [!NOTE] > As the model is gated, before using it with diffusers you first need to go to the [Stable Diffusion 3 Medium Hugging Face page](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers) or [Stable Diffusion 3.5 Large Hugging Face page](https://huggingface.co/stabilityai/stable-diffusion-3.5-medium), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in: ```bash hf auth login ``` This will also allow us to push the trained model parameters to the Hugging Face Hub platform. Our training examples use two test conditioning images. They can be downloaded by running ```sh wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png ``` Then run the following commands to train a ControlNet model. 
```bash
export MODEL_DIR="stabilityai/stable-diffusion-3-medium-diffusers"
export OUTPUT_DIR="sd3-controlnet-out"

accelerate launch train_controlnet_sd3.py \
    --pretrained_model_name_or_path=$MODEL_DIR \
    --output_dir=$OUTPUT_DIR \
    --train_data_dir="fill50k" \
    --resolution=1024 \
    --learning_rate=1e-5 \
    --max_train_steps=15000 \
    --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
    --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
    --validation_steps=100 \
    --train_batch_size=1 \
    --gradient_accumulation_steps=4
```

To train a ControlNet model for Stable Diffusion 3.5, replace the `MODEL_DIR` with `stabilityai/stable-diffusion-3.5-medium`.

To better track our training experiments, we're using flags `validation_image`, `validation_prompt`, and `validation_steps` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.

Our experiments were conducted on a single 40GB A100 GPU.

### Inference

Once training is done, we can perform inference like so:

```python
from diffusers import StableDiffusion3ControlNetPipeline, SD3ControlNetModel
from diffusers.utils import load_image
import torch

base_model_path = "stabilityai/stable-diffusion-3-medium-diffusers"
controlnet_path = "DavyMorgan/sd3-controlnet-out"

controlnet = SD3ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    base_model_path, controlnet=controlnet
)
pipe.to("cuda", torch.float16)

control_image = load_image("./conditioning_image_1.png").resize((1024, 1024))
prompt = "pale golden rod circle with old lace background"

# generate image
generator = torch.manual_seed(0)
image = pipe(
    prompt, num_inference_steps=20, generator=generator, control_image=control_image
).images[0]
image.save("./output.png")
```

Similarly, for SD3.5, replace the `base_model_path` with `stabilityai/stable-diffusion-3.5-medium` and `controlnet_path` with `DavyMorgan/sd35-controlnet-out`.

## Notes

### GPU usage

SD3 is a large model and requires a lot of GPU memory. We recommend using one GPU with at least 80GB of memory. Make sure to use the right GPU when configuring the [accelerator](https://huggingface.co/docs/transformers/en/accelerate).
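If you are constrained on GPU memory, a rough sketch of possible mitigations (assuming a recent `diffusers` release with `accelerate` installed) is to let the pipeline offload sub-models to the CPU and decode the VAE in tiles, trading inference speed for a lower peak memory footprint:

```python
# instead of pipe.to("cuda", torch.float16) in the inference snippet above
pipe.enable_model_cpu_offload()  # moves each sub-model to the GPU only while it runs
pipe.vae.enable_tiling()         # decodes the latent in tiles to bound VAE memory use
```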
## Example results

### SD3

#### After 500 steps with batch size 8

| prompt | conditioning image | result |
|:------:|:------------------:|:------:|
| pale golden rod circle with old lace background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![pale golden rod circle with old lace background](https://huggingface.co/datasets/DavyMorgan/sd3-controlnet-results/resolve/main/step-500.png) |

#### After 6500 steps with batch size 8

| prompt | conditioning image | result |
|:------:|:------------------:|:------:|
| pale golden rod circle with old lace background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![pale golden rod circle with old lace background](https://huggingface.co/datasets/DavyMorgan/sd3-controlnet-results/resolve/main/step-6500.png) |

### SD3.5

#### After 500 steps with batch size 8

| prompt | conditioning image | result |
|:------:|:------------------:|:------:|
| pale golden rod circle with old lace background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![pale golden rod circle with old lace background](https://huggingface.co/datasets/DavyMorgan/sd3-controlnet-results/resolve/main/step-500-3.5.png) |

#### After 3000 steps with batch size 8

| prompt | conditioning image | result |
|:------:|:------------------:|:------:|
| pale golden rod circle with old lace background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![pale golden rod circle with old lace background](https://huggingface.co/datasets/DavyMorgan/sd3-controlnet-results/resolve/main/step-3000-3.5.png) |
diffusers/examples/controlnet/README_sd3.md/0
{ "file_path": "diffusers/examples/controlnet/README_sd3.md", "repo_id": "diffusers", "token_count": 2839 }
146
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class CustomDiffusion(ExamplesTestsAccelerate): def test_custom_diffusion(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt <new1> --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 1.0e-05 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --modifier_token <new1> --no_safe_serialization --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_custom_diffusion_weights.bin"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "<new1>.bin"))) def test_custom_diffusion_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=<new1> --resolution=64 --train_batch_size=1 --modifier_token=<new1> --dataloader_num_workers=0 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 --no_safe_serialization """.split() run_command(self._launch_args + test_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_custom_diffusion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=<new1> --resolution=64 --train_batch_size=1 --modifier_token=<new1> --dataloader_num_workers=0 --max_train_steps=4 --checkpointing_steps=2 --no_safe_serialization """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) resume_run_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=<new1> --resolution=64 --train_batch_size=1 --modifier_token=<new1> --dataloader_num_workers=0 --max_train_steps=8 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 
--checkpoints_total_limit=2 --no_safe_serialization """.split() run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
diffusers/examples/custom_diffusion/test_custom_diffusion.py/0
{ "file_path": "diffusers/examples/custom_diffusion/test_custom_diffusion.py", "repo_id": "diffusers", "token_count": 2234 }
147
import warnings from diffusers import StableDiffusionImg2ImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
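# For reference, the suggested replacement in use. This is an illustration only:
# the checkpoint id and input file name below are assumptions, not fixed values.
def _example_img2img():
    import torch

    from diffusers.utils import load_image

    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
        "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    init_image = load_image("sketch.png").resize((768, 512))
    return pipe("A fantasy landscape, trending on artstation", image=init_image, strength=0.75).images[0]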
diffusers/examples/inference/image_to_image.py/0
{ "file_path": "diffusers/examples/inference/image_to_image.py", "repo_id": "diffusers", "token_count": 84 }
148
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import contextlib import io import logging import math import os import random import shutil from pathlib import Path import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers import wandb from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from peft import LoraConfig, set_peft_model_state_dict from peft.utils import get_peft_model_state_dict from PIL import Image from torchvision import transforms from torchvision.transforms.functional import crop from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel, ) from diffusers.loaders import StableDiffusionXLLoraLoaderMixin from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, convert_state_dict_to_diffusers, convert_unet_state_dict_to_peft from diffusers.utils.import_utils import is_xformers_available # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.25.0.dev0") logger = get_logger(__name__) VALIDATION_PROMPTS = [ "portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography", "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k", "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece", ] def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def log_validation(args, unet, vae, accelerator, weight_dtype, epoch, is_final_validation=False): logger.info(f"Running validation... 
\n Generating images with prompts:\n {VALIDATION_PROMPTS}.") if is_final_validation: if args.mixed_precision == "fp16": vae.to(weight_dtype) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=vae, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) if not is_final_validation: pipeline.unet = accelerator.unwrap_model(unet) else: pipeline.load_lora_weights(args.output_dir, weight_name="pytorch_lora_weights.safetensors") pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None images = [] context = contextlib.nullcontext() if is_final_validation else torch.cuda.amp.autocast() guidance_scale = 5.0 num_inference_steps = 25 for prompt in VALIDATION_PROMPTS: with context: image = pipeline( prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=generator ).images[0] images.append(image) tracker_key = "test" if is_final_validation else "validation" for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images(tracker_key, np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { tracker_key: [ wandb.Image(image, caption=f"{i}: {VALIDATION_PROMPTS[i]}") for i, image in enumerate(images) ] } ) # Also log images without the LoRA params for comparison. if is_final_validation: pipeline.disable_lora() generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None no_lora_images = [ pipeline( prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=generator ).images[0] for prompt in VALIDATION_PROMPTS ] for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in no_lora_images]) tracker.writer.add_images("test_without_lora", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "test_without_lora": [ wandb.Image(image, caption=f"{i}: {VALIDATION_PROMPTS[i]}") for i, image in enumerate(no_lora_images) ] } ) def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_split_name", type=str, default="validation", help="Dataset split to be used during training. 
Helpful to specify for conducting experimental runs.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--run_validation", default=False, action="store_true", help="Whether to run validation inference in between training and also after training. Helps to track progress.", ) parser.add_argument( "--validation_steps", type=int, default=200, help="Run validation every X steps.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--output_dir", type=str, default="diffusion-orpo-lora-sdxl", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=1024, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--vae_encode_batch_size", type=int, default=8, help="Batch size to use for VAE encoding of the images for efficient processing.", ) parser.add_argument( "--no_hflip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--random_crop", default=False, action="store_true", help=( "Whether to random crop the input images to the resolution. If not set, the images will be center-cropped." ), ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' 
), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--beta_orpo", type=float, default=0.1, help="ORPO contribution factor.", ) parser.add_argument( "--learning_rate", type=float, default=5e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. 
Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument( "--rank", type=int, default=4, help=("The dimension of the LoRA update matrices."), ) parser.add_argument( "--tracker_name", type=str, default="diffusion-orpo-lora-sdxl", help=("The name of the tracker to report results to."), ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() if args.dataset_name is None: raise ValueError("Must provide a `dataset_name`.") env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank return args def tokenize_captions(tokenizers, examples): captions = [] for caption in examples["caption"]: captions.append(caption) tokens_one = tokenizers[0]( captions, truncation=True, padding="max_length", max_length=tokenizers[0].model_max_length, return_tensors="pt" ).input_ids tokens_two = tokenizers[1]( captions, truncation=True, padding="max_length", max_length=tokenizers[1].model_max_length, return_tensors="pt" ).input_ids return tokens_one, tokens_two @torch.no_grad() def encode_prompt(text_encoders, text_input_ids_list): prompt_embeds_list = [] for i, text_encoder in enumerate(text_encoders): text_input_ids = text_input_ids_list[i] prompt_embeds = text_encoder( text_input_ids.to(text_encoder.device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) return prompt_embeds, pooled_prompt_embeds def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `hf auth login` to authenticate with the Hub." ) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Disable AMP for MPS. if torch.backends.mps.is_available(): accelerator.native_amp = False # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizers tokenizer_one = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) tokenizer_two = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False, ) # import correct text encoder classes text_encoder_cls_one = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision ) text_encoder_cls_two = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" ) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder_one = text_encoder_cls_one.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) text_encoder_two = text_encoder_cls_two.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant ) vae_path = ( args.pretrained_model_name_or_path if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path ) vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) # We only train the additional adapter LoRA layers vae.requires_grad_(False) text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) unet.requires_grad_(False) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move unet and text_encoders to device and cast to weight_dtype unet.to(accelerator.device, dtype=weight_dtype) text_encoder_one.to(accelerator.device, dtype=weight_dtype) text_encoder_two.to(accelerator.device, dtype=weight_dtype) # The VAE is always in float32 to avoid NaN losses. vae.to(accelerator.device, dtype=torch.float32) # Set up LoRA. unet_lora_config = LoraConfig( r=args.rank, lora_alpha=args.rank, init_lora_weights="gaussian", target_modules=["to_k", "to_q", "to_v", "to_out.0"], ) # Add adapter and make sure the trainable params are in float32. 
unet.add_adapter(unet_lora_config) if args.mixed_precision == "fp16": for param in unet.parameters(): # only upcast trainable parameters (LoRA) into fp32 if param.requires_grad: param.data = param.to(torch.float32) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: # there are only two options here. Either are just the unet attn processor layers # or there are the unet and text encoder atten layers unet_lora_layers_to_save = None for model in models: if isinstance(model, type(accelerator.unwrap_model(unet))): unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model)) else: raise ValueError(f"unexpected save model: {model.__class__}") # make sure to pop weight so that corresponding model is not saved again weights.pop() StableDiffusionXLLoraLoaderMixin.save_lora_weights( output_dir, unet_lora_layers=unet_lora_layers_to_save, text_encoder_lora_layers=None, text_encoder_2_lora_layers=None, ) def load_model_hook(models, input_dir): unet_ = None while len(models) > 0: model = models.pop() if isinstance(model, type(accelerator.unwrap_model(unet))): unet_ = model else: raise ValueError(f"unexpected save model: {model.__class__}") lora_state_dict, network_alphas = StableDiffusionXLLoraLoaderMixin.lora_state_dict(input_dir) unet_state_dict = {f"{k.replace('unet.', '')}": v for k, v in lora_state_dict.items() if k.startswith("unet.")} unet_state_dict = convert_unet_state_dict_to_peft(unet_state_dict) incompatible_keys = set_peft_model_state_dict(unet_, unet_state_dict, adapter_name="default") if incompatible_keys is not None: # check only for unexpected keys unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) if unexpected_keys: logger.warning( f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " f" {unexpected_keys}. " ) accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." 
) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = list(filter(lambda p: p.requires_grad, unet.parameters())) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = load_dataset( args.dataset_name, cache_dir=args.cache_dir, split=args.dataset_split_name, ) # Preprocessing the datasets. train_resize = transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR) train_crop = transforms.RandomCrop(args.resolution) if args.random_crop else transforms.CenterCrop(args.resolution) train_flip = transforms.RandomHorizontalFlip(p=1.0) to_tensor = transforms.ToTensor() normalize = transforms.Normalize([0.5], [0.5]) def preprocess_train(examples): all_pixel_values = [] images = [Image.open(io.BytesIO(im_bytes)).convert("RGB") for im_bytes in examples["jpg_0"]] original_sizes = [(image.height, image.width) for image in images] crop_top_lefts = [] for col_name in ["jpg_0", "jpg_1"]: images = [Image.open(io.BytesIO(im_bytes)).convert("RGB") for im_bytes in examples[col_name]] if col_name == "jpg_1": # Need to bring down the image to the same resolution. # This seems like the simplest reasonable approach. # "::-1" because PIL resize takes (width, height). images = [image.resize(original_sizes[i][::-1]) for i, image in enumerate(images)] pixel_values = [to_tensor(image) for image in images] all_pixel_values.append(pixel_values) # Double on channel dim, jpg_y then jpg_w im_tup_iterator = zip(*all_pixel_values) combined_pixel_values = [] for im_tup, label_0 in zip(im_tup_iterator, examples["label_0"]): # We randomize selection and rejection. if label_0 == 0.5: if random.random() < 0.5: label_0 = 0 else: label_0 = 1 if label_0 == 0: im_tup = im_tup[::-1] combined_im = torch.cat(im_tup, dim=0) # no batch dim # Resize. combined_im = train_resize(combined_im) # Flipping. if not args.no_hflip and random.random() < 0.5: combined_im = train_flip(combined_im) # Cropping. 
if not args.random_crop: y1 = max(0, int(round((combined_im.shape[1] - args.resolution) / 2.0))) x1 = max(0, int(round((combined_im.shape[2] - args.resolution) / 2.0))) combined_im = train_crop(combined_im) else: y1, x1, h, w = train_crop.get_params(combined_im, (args.resolution, args.resolution)) combined_im = crop(combined_im, y1, x1, h, w) crop_top_left = (y1, x1) crop_top_lefts.append(crop_top_left) combined_im = normalize(combined_im) combined_pixel_values.append(combined_im) examples["pixel_values"] = combined_pixel_values examples["original_sizes"] = original_sizes examples["crop_top_lefts"] = crop_top_lefts tokens_one, tokens_two = tokenize_captions([tokenizer_one, tokenizer_two], examples) examples["input_ids_one"] = tokens_one examples["input_ids_two"] = tokens_two return examples with accelerator.main_process_first(): if args.max_train_samples is not None: train_dataset = train_dataset.shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = train_dataset.with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() original_sizes = [example["original_sizes"] for example in examples] crop_top_lefts = [example["crop_top_lefts"] for example in examples] input_ids_one = torch.stack([example["input_ids_one"] for example in examples]) input_ids_two = torch.stack([example["input_ids_two"] for example in examples]) return { "pixel_values": pixel_values, "input_ids_one": input_ids_one, "input_ids_two": input_ids_two, "original_sizes": original_sizes, "crop_top_lefts": crop_top_lefts, } train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power, ) unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers(args.tracker_name, config=vars(args)) # Train! 
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split("-")[1])

            initial_global_step = global_step
            first_epoch = global_step // num_update_steps_per_epoch
    else:
        initial_global_step = 0

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        # Only show the progress bar once on each machine.
        disable=not accelerator.is_local_main_process,
    )

    unet.train()
    for epoch in range(first_epoch, args.num_train_epochs):
        for step, batch in enumerate(train_dataloader):
            with accelerator.accumulate(unet):
                # (batch_size, 2*channels, h, w) -> (2*batch_size, channels, h, w)
                pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
                feed_pixel_values = torch.cat(pixel_values.chunk(2, dim=1))

                latents = []
                for i in range(0, feed_pixel_values.shape[0], args.vae_encode_batch_size):
                    latents.append(
                        vae.encode(feed_pixel_values[i : i + args.vae_encode_batch_size]).latent_dist.sample()
                    )
                latents = torch.cat(latents, dim=0)
                latents = latents * vae.config.scaling_factor
                if args.pretrained_vae_model_name_or_path is None:
                    latents = latents.to(weight_dtype)

                # Sample noise that we'll add to the latents
                noise = torch.randn_like(latents).chunk(2)[0].repeat(2, 1, 1, 1)

                # Sample a random timestep for each image
                bsz = latents.shape[0] // 2
                timesteps = torch.randint(
                    0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device, dtype=torch.long
                ).repeat(2)

                # Add noise to the model input according to the noise magnitude at each timestep
                # (this is the forward diffusion process)
                noisy_model_input = noise_scheduler.add_noise(latents, noise, timesteps)

                # time ids
                def compute_time_ids(original_size, crops_coords_top_left):
                    # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
                    target_size = (args.resolution, args.resolution)
                    add_time_ids = list(original_size + crops_coords_top_left + target_size)
                    add_time_ids = torch.tensor([add_time_ids])
                    add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
                    return add_time_ids

                add_time_ids = torch.cat(
                    [compute_time_ids(s, c) for s, c in zip(batch["original_sizes"], batch["crop_top_lefts"])]
                ).repeat(2, 1)

                # Get the text embedding for conditioning
prompt_embeds, pooled_prompt_embeds = encode_prompt( [text_encoder_one, text_encoder_two], [batch["input_ids_one"], batch["input_ids_two"]] ) prompt_embeds = prompt_embeds.repeat(2, 1, 1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(2, 1) # Predict the noise residual model_pred = unet( noisy_model_input, timesteps, prompt_embeds, added_cond_kwargs={"time_ids": add_time_ids, "text_embeds": pooled_prompt_embeds}, ).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # ODDS ratio loss. # In the diffusion formulation, we're assuming that the MSE loss # approximates the logp. model_losses = F.mse_loss(model_pred.float(), target.float(), reduction="none") model_losses = model_losses.mean(dim=list(range(1, len(model_losses.shape)))) model_losses_w, model_losses_l = model_losses.chunk(2) log_odds = model_losses_w - model_losses_l # Ratio loss. ratio = F.logsigmoid(log_odds) ratio_losses = args.beta_orpo * ratio # Full ORPO loss loss = model_losses_w.mean() - ratio_losses.mean() # Backprop. accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(params_to_optimize, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if args.run_validation and global_step % args.validation_steps == 0: log_validation( args, unet=unet, vae=vae, accelerator=accelerator, weight_dtype=weight_dtype, epoch=epoch ) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Save the lora layers accelerator.wait_for_everyone() if accelerator.is_main_process: unet = accelerator.unwrap_model(unet) unet = unet.to(torch.float32) unet_lora_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet)) StableDiffusionXLLoraLoaderMixin.save_lora_weights( save_directory=args.output_dir, unet_lora_layers=unet_lora_state_dict, 
text_encoder_lora_layers=None, text_encoder_2_lora_layers=None, ) # Final validation? if args.run_validation: log_validation( args, unet=None, vae=vae, accelerator=accelerator, weight_dtype=weight_dtype, epoch=epoch, is_final_validation=True, ) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
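# Illustrative sketch (not used by the training script above): the ORPO loss
# arithmetic in isolation, on random tensors. Shapes and `beta` are assumptions
# that mirror the script's defaults (`--beta_orpo 0.1`).
def _orpo_loss_demo(beta: float = 0.1) -> torch.Tensor:
    model_pred = torch.randn(4, 4, 8, 8)  # two preference pairs, winner halves stacked first
    target = torch.randn(4, 4, 8, 8)
    # Per-sample MSE stands in for the negative log-likelihood in the diffusion setting.
    losses = F.mse_loss(model_pred, target, reduction="none").mean(dim=(1, 2, 3))
    losses_w, losses_l = losses.chunk(2)  # winner half, loser half
    log_odds = losses_w - losses_l
    # Same combination as the training loop: base loss plus the log-sigmoid odds-ratio term.
    return losses_w.mean() - (beta * F.logsigmoid(log_odds)).mean()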
diffusers/examples/research_projects/diffusion_orpo/train_diffusion_orpo_sdxl_lora.py/0
{ "file_path": "diffusers/examples/research_projects/diffusion_orpo/train_diffusion_orpo_sdxl_lora.py", "repo_id": "diffusers", "token_count": 19746 }
149
import os from typing import List import faiss import numpy as np import torch from datasets import Dataset, load_dataset from PIL import Image from transformers import CLIPImageProcessor, CLIPModel, PretrainedConfig from diffusers import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name def normalize_images(images: List[Image.Image]): images = [np.array(image) for image in images] images = [image / 127.5 - 1 for image in images] return images def preprocess_images(images: List[np.array], feature_extractor: CLIPImageProcessor) -> torch.Tensor: """ Preprocesses a list of images into a batch of tensors. Args: images (:obj:`List[Image.Image]`): A list of images to preprocess. Returns: :obj:`torch.Tensor`: A batch of tensors. """ images = [np.array(image) for image in images] images = [(image + 1.0) / 2.0 for image in images] images = feature_extractor(images, return_tensors="pt").pixel_values return images class IndexConfig(PretrainedConfig): def __init__( self, clip_name_or_path="openai/clip-vit-large-patch14", dataset_name="Isamu136/oxford_pets_with_l14_emb", image_column="image", index_name="embeddings", index_path=None, dataset_set="train", metric_type=faiss.METRIC_L2, faiss_device=-1, **kwargs, ): super().__init__(**kwargs) self.clip_name_or_path = clip_name_or_path self.dataset_name = dataset_name self.image_column = image_column self.index_name = index_name self.index_path = index_path self.dataset_set = dataset_set self.metric_type = metric_type self.faiss_device = faiss_device class Index: """ Each index for a retrieval model is specific to the clip model used and the dataset used. """ def __init__(self, config: IndexConfig, dataset: Dataset): self.config = config self.dataset = dataset self.index_initialized = False self.index_name = config.index_name self.index_path = config.index_path self.init_index() def set_index_name(self, index_name: str): self.index_name = index_name def init_index(self): if not self.index_initialized: if self.index_path and self.index_name: try: self.dataset.add_faiss_index( column=self.index_name, metric_type=self.config.metric_type, device=self.config.faiss_device ) self.index_initialized = True except Exception as e: print(e) logger.info("Index not initialized") if self.index_name in self.dataset.features: self.dataset.add_faiss_index(column=self.index_name) self.index_initialized = True def build_index( self, model=None, feature_extractor: CLIPImageProcessor = None, torch_dtype=torch.float32, ): if not self.index_initialized: model = model or CLIPModel.from_pretrained(self.config.clip_name_or_path).to(dtype=torch_dtype) feature_extractor = feature_extractor or CLIPImageProcessor.from_pretrained(self.config.clip_name_or_path) self.dataset = get_dataset_with_emb_from_clip_model( self.dataset, model, feature_extractor, image_column=self.config.image_column, index_name=self.config.index_name, ) self.init_index() def retrieve_imgs(self, vec, k: int = 20): vec = np.array(vec).astype(np.float32) return self.dataset.get_nearest_examples(self.index_name, vec, k=k) def retrieve_imgs_batch(self, vec, k: int = 20): vec = np.array(vec).astype(np.float32) return self.dataset.get_nearest_examples_batch(self.index_name, vec, k=k) def retrieve_indices(self, vec, k: int = 20): vec = np.array(vec).astype(np.float32) return self.dataset.search(self.index_name, vec, k=k) def retrieve_indices_batch(self, vec, k: int = 20): vec = np.array(vec).astype(np.float32) return self.dataset.search_batch(self.index_name, vec, k=k) class Retriever: def 
__init__( self, config: IndexConfig, index: Index = None, dataset: Dataset = None, model=None, feature_extractor: CLIPImageProcessor = None, ): self.config = config self.index = index or self._build_index(config, dataset, model=model, feature_extractor=feature_extractor) @classmethod def from_pretrained( cls, retriever_name_or_path: str, index: Index = None, dataset: Dataset = None, model=None, feature_extractor: CLIPImageProcessor = None, **kwargs, ): config = kwargs.pop("config", None) or IndexConfig.from_pretrained(retriever_name_or_path, **kwargs) return cls(config, index=index, dataset=dataset, model=model, feature_extractor=feature_extractor) @staticmethod def _build_index( config: IndexConfig, dataset: Dataset = None, model=None, feature_extractor: CLIPImageProcessor = None ): dataset = dataset or load_dataset(config.dataset_name) dataset = dataset[config.dataset_set] index = Index(config, dataset) index.build_index(model=model, feature_extractor=feature_extractor) return index def save_pretrained(self, save_directory): os.makedirs(save_directory, exist_ok=True) if self.config.index_path is None: index_path = os.path.join(save_directory, "hf_dataset_index.faiss") self.index.dataset.get_index(self.config.index_name).save(index_path) self.config.index_path = index_path self.config.save_pretrained(save_directory) def init_retrieval(self): logger.info("initializing retrieval") self.index.init_index() def retrieve_imgs(self, embeddings: np.ndarray, k: int): return self.index.retrieve_imgs(embeddings, k) def retrieve_imgs_batch(self, embeddings: np.ndarray, k: int): return self.index.retrieve_imgs_batch(embeddings, k) def retrieve_indices(self, embeddings: np.ndarray, k: int): return self.index.retrieve_indices(embeddings, k) def retrieve_indices_batch(self, embeddings: np.ndarray, k: int): return self.index.retrieve_indices_batch(embeddings, k) def __call__( self, embeddings, k: int = 20, ): return self.index.retrieve_imgs(embeddings, k) def map_txt_to_clip_feature(clip_model, tokenizer, prompt): text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > tokenizer.model_max_length: removed_text = tokenizer.batch_decode(text_input_ids[:, tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : tokenizer.model_max_length] text_embeddings = clip_model.get_text_features(text_input_ids.to(clip_model.device)) text_embeddings = text_embeddings / torch.linalg.norm(text_embeddings, dim=-1, keepdim=True) text_embeddings = text_embeddings[:, None, :] return text_embeddings[0][0].cpu().detach().numpy() def map_img_to_model_feature(model, feature_extractor, imgs, device): for i, image in enumerate(imgs): if not image.mode == "RGB": imgs[i] = image.convert("RGB") imgs = normalize_images(imgs) retrieved_images = preprocess_images(imgs, feature_extractor).to(device) image_embeddings = model(retrieved_images) image_embeddings = image_embeddings / torch.linalg.norm(image_embeddings, dim=-1, keepdim=True) image_embeddings = image_embeddings[None, ...] 
return image_embeddings.cpu().detach().numpy()[0][0] def get_dataset_with_emb_from_model(dataset, model, feature_extractor, image_column="image", index_name="embeddings"): return dataset.map( lambda example: { index_name: map_img_to_model_feature(model, feature_extractor, [example[image_column]], model.device) } ) def get_dataset_with_emb_from_clip_model( dataset, clip_model, feature_extractor, image_column="image", index_name="embeddings" ): return dataset.map( lambda example: { index_name: map_img_to_model_feature( clip_model.get_image_features, feature_extractor, [example[image_column]], clip_model.device ) } )
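# Minimal usage sketch (not exercised by this module). Building the index
# downloads the default dataset on first use; the query size of 768 assumes
# CLIP ViT-L/14 embeddings, matching the default `IndexConfig`.
def _retriever_demo(k: int = 5):
    config = IndexConfig()
    retriever = Retriever(config)
    query = np.random.randn(768).astype(np.float32)
    return retriever.retrieve_imgs(query, k=k)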
diffusers/examples/research_projects/rdm/retriever.py/0
{ "file_path": "diffusers/examples/research_projects/rdm/retriever.py", "repo_id": "diffusers", "token_count": 3929 }
150
# Running Stable Diffusion 3 DreamBooth LoRA training under 16GB

This is an **EDUCATIONAL** project that provides utilities for DreamBooth LoRA training for [Stable Diffusion 3 (SD3)](https://huggingface.co/papers/2403.03206) under 16GB GPU VRAM. This means you can successfully try out this project using a [free-tier Colab Notebook](https://colab.research.google.com/github/huggingface/diffusers/blob/main/examples/research_projects/sd3_lora_colab/sd3_dreambooth_lora_16gb.ipynb) instance. 🤗

> [!NOTE]
> SD3 is gated, so you need to make sure you agree to [share your contact info](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers) to access the model before using it with Diffusers. Once you have access, you need to log in so your system knows you're authorized. Use the command below to log in:

```bash
hf auth login
```

This will also allow us to push the trained model parameters to the Hugging Face Hub platform.

For setup, inference code, and details on how to run the code, please follow the Colab Notebook provided above.

## How

We make use of several techniques to make this possible:

* Compute the embeddings from the instance prompt and serialize them for later reuse. This is implemented in the [`compute_embeddings.py`](./compute_embeddings.py) script. We use an 8bit (as introduced in [`LLM.int8()`](https://huggingface.co/papers/2208.07339)) T5 to reduce memory requirements to ~10.5GB (a rough sketch of the 8-bit loading is shown at the end of this README).
* In the `train_dreambooth_sd3_lora_miniature.py` script, we make use of:
  * 8bit Adam for optimization through the `bitsandbytes` library.
  * Gradient checkpointing and gradient accumulation.
  * FP16 precision.
  * Flash attention through `F.scaled_dot_product_attention()`.

Computing the text embeddings is arguably the most memory-intensive part in the pipeline as SD3 employs three text encoders. If we run them in FP32, it will take about 20GB of VRAM. With FP16, we are down to 12GB.

## Gotchas

This project is educational. It exists to showcase the possibility of fine-tuning a big diffusion system on consumer GPUs. But additional components might have to be added to obtain state-of-the-art performance. Below are some commonly known gotchas that users should be aware of:

* Training of text encoders is purposefully disabled.
* Techniques such as prior-preservation are unsupported.
* Custom instance captions for instance images are unsupported, but this should be relatively easy to integrate.

Hopefully, this project gives you a template to extend it further to suit your needs.
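## Appendix: loading the T5 encoder in 8-bit

The snippet below is a minimal sketch of the 8-bit loading trick described above. It assumes `transformers` and `bitsandbytes` are installed; the exact arguments used in `compute_embeddings.py` may differ.

```py
from transformers import BitsAndBytesConfig, T5EncoderModel

# Only the T5 encoder (the heaviest of SD3's three text encoders) is quantized.
text_encoder = T5EncoderModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="text_encoder_3",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
```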
diffusers/examples/research_projects/sd3_lora_colab/README.md/0
{ "file_path": "diffusers/examples/research_projects/sd3_lora_colab/README.md", "repo_id": "diffusers", "token_count": 704 }
151
torch~=2.7.0 transformers==4.46.1 sentencepiece aiohttp py-consul prometheus_client >= 0.18.0 prometheus-fastapi-instrumentator >= 7.0.0 fastapi uvicorn
diffusers/examples/server/requirements.in/0
{ "file_path": "diffusers/examples/server/requirements.in", "repo_id": "diffusers", "token_count": 65 }
152
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class Unconditional(ExamplesTestsAccelerate): def test_train_unconditional(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 2 --num_epochs 1 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 2 --learning_rate 1e-3 --lr_warmup_steps 5 """.split() run_command(self._launch_args + test_args, return_stdout=True) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_unconditional_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: initial_run_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 1 --num_epochs 1 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 2 --learning_rate 1e-3 --lr_warmup_steps 5 --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + initial_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, # checkpoint-2 should have been deleted {"checkpoint-4", "checkpoint-6"}, ) def test_unconditional_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: initial_run_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 1 --num_epochs 1 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 1 --learning_rate 1e-3 --lr_warmup_steps 5 --checkpointing_steps=2 """.split() run_command(self._launch_args + initial_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6"}, ) resume_run_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 1 --num_epochs 2 --gradient_accumulation_steps 1 
--ddpm_num_inference_steps 1 --learning_rate 1e-3 --lr_warmup_steps 5 --resume_from_checkpoint=checkpoint-6 --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-10", "checkpoint-12"}, )
diffusers/examples/unconditional_image_generation/test_unconditional.py/0
{ "file_path": "diffusers/examples/unconditional_image_generation/test_unconditional.py", "repo_id": "diffusers", "token_count": 2492 }
153
import argparse import torch from huggingface_hub import hf_hub_download from diffusers.models.transformers.auraflow_transformer_2d import AuraFlowTransformer2DModel def load_original_state_dict(args): model_pt = hf_hub_download(repo_id=args.original_state_dict_repo_id, filename="aura_diffusion_pytorch_model.bin") state_dict = torch.load(model_pt, map_location="cpu") return state_dict def calculate_layers(state_dict_keys, key_prefix): dit_layers = set() for k in state_dict_keys: if key_prefix in k: dit_layers.add(int(k.split(".")[2])) print(f"{key_prefix}: {len(dit_layers)}") return len(dit_layers) # similar to SD3 but only for the last norm layer def swap_scale_shift(weight, dim): shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight def convert_transformer(state_dict): converted_state_dict = {} state_dict_keys = list(state_dict.keys()) converted_state_dict["register_tokens"] = state_dict.pop("model.register_tokens") converted_state_dict["pos_embed.pos_embed"] = state_dict.pop("model.positional_encoding") converted_state_dict["pos_embed.proj.weight"] = state_dict.pop("model.init_x_linear.weight") converted_state_dict["pos_embed.proj.bias"] = state_dict.pop("model.init_x_linear.bias") converted_state_dict["time_step_proj.linear_1.weight"] = state_dict.pop("model.t_embedder.mlp.0.weight") converted_state_dict["time_step_proj.linear_1.bias"] = state_dict.pop("model.t_embedder.mlp.0.bias") converted_state_dict["time_step_proj.linear_2.weight"] = state_dict.pop("model.t_embedder.mlp.2.weight") converted_state_dict["time_step_proj.linear_2.bias"] = state_dict.pop("model.t_embedder.mlp.2.bias") converted_state_dict["context_embedder.weight"] = state_dict.pop("model.cond_seq_linear.weight") mmdit_layers = calculate_layers(state_dict_keys, key_prefix="double_layers") single_dit_layers = calculate_layers(state_dict_keys, key_prefix="single_layers") # MMDiT blocks 🎸. for i in range(mmdit_layers): # feed-forward path_mapping = {"mlpX": "ff", "mlpC": "ff_context"} weight_mapping = {"c_fc1": "linear_1", "c_fc2": "linear_2", "c_proj": "out_projection"} for orig_k, diffuser_k in path_mapping.items(): for k, v in weight_mapping.items(): converted_state_dict[f"joint_transformer_blocks.{i}.{diffuser_k}.{v}.weight"] = state_dict.pop( f"model.double_layers.{i}.{orig_k}.{k}.weight" ) # norms path_mapping = {"modX": "norm1", "modC": "norm1_context"} for orig_k, diffuser_k in path_mapping.items(): converted_state_dict[f"joint_transformer_blocks.{i}.{diffuser_k}.linear.weight"] = state_dict.pop( f"model.double_layers.{i}.{orig_k}.1.weight" ) # attns x_attn_mapping = {"w2q": "to_q", "w2k": "to_k", "w2v": "to_v", "w2o": "to_out.0"} context_attn_mapping = {"w1q": "add_q_proj", "w1k": "add_k_proj", "w1v": "add_v_proj", "w1o": "to_add_out"} for attn_mapping in [x_attn_mapping, context_attn_mapping]: for k, v in attn_mapping.items(): converted_state_dict[f"joint_transformer_blocks.{i}.attn.{v}.weight"] = state_dict.pop( f"model.double_layers.{i}.attn.{k}.weight" ) # Single-DiT blocks. 
for i in range(single_dit_layers): # feed-forward mapping = {"c_fc1": "linear_1", "c_fc2": "linear_2", "c_proj": "out_projection"} for k, v in mapping.items(): converted_state_dict[f"single_transformer_blocks.{i}.ff.{v}.weight"] = state_dict.pop( f"model.single_layers.{i}.mlp.{k}.weight" ) # norms converted_state_dict[f"single_transformer_blocks.{i}.norm1.linear.weight"] = state_dict.pop( f"model.single_layers.{i}.modCX.1.weight" ) # attns x_attn_mapping = {"w1q": "to_q", "w1k": "to_k", "w1v": "to_v", "w1o": "to_out.0"} for k, v in x_attn_mapping.items(): converted_state_dict[f"single_transformer_blocks.{i}.attn.{v}.weight"] = state_dict.pop( f"model.single_layers.{i}.attn.{k}.weight" ) # Final blocks. converted_state_dict["proj_out.weight"] = state_dict.pop("model.final_linear.weight") converted_state_dict["norm_out.linear.weight"] = swap_scale_shift(state_dict.pop("model.modF.1.weight"), dim=None) return converted_state_dict @torch.no_grad() def populate_state_dict(args): original_state_dict = load_original_state_dict(args) state_dict_keys = list(original_state_dict.keys()) mmdit_layers = calculate_layers(state_dict_keys, key_prefix="double_layers") single_dit_layers = calculate_layers(state_dict_keys, key_prefix="single_layers") converted_state_dict = convert_transformer(original_state_dict) model_diffusers = AuraFlowTransformer2DModel( num_mmdit_layers=mmdit_layers, num_single_dit_layers=single_dit_layers ) model_diffusers.load_state_dict(converted_state_dict, strict=True) return model_diffusers if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--original_state_dict_repo_id", default="AuraDiffusion/auradiffusion-v0.1a0", type=str) parser.add_argument("--dump_path", default="aura-flow", type=str) parser.add_argument("--hub_id", default=None, type=str) args = parser.parse_args() model_diffusers = populate_state_dict(args) model_diffusers.save_pretrained(args.dump_path) if args.hub_id is not None: model_diffusers.push_to_hub(args.hub_id)
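# Quick illustration (not called anywhere above): `swap_scale_shift` turns a
# weight laid out as [shift | scale] into the [scale | shift] order diffusers expects.
def _swap_scale_shift_demo() -> torch.Tensor:
    w = torch.arange(6.0)  # shift = [0, 1, 2], scale = [3, 4, 5]
    return swap_scale_shift(w, dim=None)  # tensor([3., 4., 5., 0., 1., 2.])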
diffusers/scripts/convert_aura_flow_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_aura_flow_to_diffusers.py", "repo_id": "diffusers", "token_count": 2540 }
154
import argparse
from contextlib import nullcontext

import safetensors.torch
import torch
from accelerate import init_empty_weights
from huggingface_hub import hf_hub_download

from diffusers import AutoencoderKL, FluxTransformer2DModel
from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint
from diffusers.utils.import_utils import is_accelerate_available


"""
# Transformer

python scripts/convert_flux_to_diffusers.py \
--original_state_dict_repo_id "black-forest-labs/FLUX.1-schnell" \
--filename "flux1-schnell.sft" --output_path "flux-schnell" \
--transformer
"""

"""
# VAE

python scripts/convert_flux_to_diffusers.py \
--original_state_dict_repo_id "black-forest-labs/FLUX.1-schnell" \
--filename "ae.sft" --output_path "flux-schnell" \
--vae
"""

CTX = init_empty_weights if is_accelerate_available() else nullcontext

parser = argparse.ArgumentParser()
parser.add_argument("--original_state_dict_repo_id", default=None, type=str)
parser.add_argument("--filename", default="flux.safetensors", type=str)
parser.add_argument("--checkpoint_path", default=None, type=str)
parser.add_argument("--in_channels", type=int, default=64)
parser.add_argument("--out_channels", type=int, default=None)
parser.add_argument("--vae", action="store_true")
parser.add_argument("--transformer", action="store_true")
parser.add_argument("--output_path", type=str)
parser.add_argument("--dtype", type=str, default="bf16")

args = parser.parse_args()
dtype = torch.bfloat16 if args.dtype == "bf16" else torch.float32


def load_original_checkpoint(args):
    if args.original_state_dict_repo_id is not None:
        ckpt_path = hf_hub_download(repo_id=args.original_state_dict_repo_id, filename=args.filename)
    elif args.checkpoint_path is not None:
        ckpt_path = args.checkpoint_path
    else:
        raise ValueError("Please provide either `original_state_dict_repo_id` or a local `checkpoint_path`")

    original_state_dict = safetensors.torch.load_file(ckpt_path)
    return original_state_dict


# in the SD3 original implementation of AdaLayerNormContinuous, the linear projection output is split into shift, scale;
# while in diffusers it is split into scale, shift. 
Here we swap the linear projection weights in order to be able to use diffusers implementation def swap_scale_shift(weight): shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight def convert_flux_transformer_checkpoint_to_diffusers( original_state_dict, num_layers, num_single_layers, inner_dim, mlp_ratio=4.0 ): converted_state_dict = {} ## time_text_embed.timestep_embedder <- time_in converted_state_dict["time_text_embed.timestep_embedder.linear_1.weight"] = original_state_dict.pop( "time_in.in_layer.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_1.bias"] = original_state_dict.pop( "time_in.in_layer.bias" ) converted_state_dict["time_text_embed.timestep_embedder.linear_2.weight"] = original_state_dict.pop( "time_in.out_layer.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_2.bias"] = original_state_dict.pop( "time_in.out_layer.bias" ) ## time_text_embed.text_embedder <- vector_in converted_state_dict["time_text_embed.text_embedder.linear_1.weight"] = original_state_dict.pop( "vector_in.in_layer.weight" ) converted_state_dict["time_text_embed.text_embedder.linear_1.bias"] = original_state_dict.pop( "vector_in.in_layer.bias" ) converted_state_dict["time_text_embed.text_embedder.linear_2.weight"] = original_state_dict.pop( "vector_in.out_layer.weight" ) converted_state_dict["time_text_embed.text_embedder.linear_2.bias"] = original_state_dict.pop( "vector_in.out_layer.bias" ) # guidance has_guidance = any("guidance" in k for k in original_state_dict) if has_guidance: converted_state_dict["time_text_embed.guidance_embedder.linear_1.weight"] = original_state_dict.pop( "guidance_in.in_layer.weight" ) converted_state_dict["time_text_embed.guidance_embedder.linear_1.bias"] = original_state_dict.pop( "guidance_in.in_layer.bias" ) converted_state_dict["time_text_embed.guidance_embedder.linear_2.weight"] = original_state_dict.pop( "guidance_in.out_layer.weight" ) converted_state_dict["time_text_embed.guidance_embedder.linear_2.bias"] = original_state_dict.pop( "guidance_in.out_layer.bias" ) # context_embedder converted_state_dict["context_embedder.weight"] = original_state_dict.pop("txt_in.weight") converted_state_dict["context_embedder.bias"] = original_state_dict.pop("txt_in.bias") # x_embedder converted_state_dict["x_embedder.weight"] = original_state_dict.pop("img_in.weight") converted_state_dict["x_embedder.bias"] = original_state_dict.pop("img_in.bias") # double transformer blocks for i in range(num_layers): block_prefix = f"transformer_blocks.{i}." # norms. 
## norm1 converted_state_dict[f"{block_prefix}norm1.linear.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_mod.lin.weight" ) converted_state_dict[f"{block_prefix}norm1.linear.bias"] = original_state_dict.pop( f"double_blocks.{i}.img_mod.lin.bias" ) ## norm1_context converted_state_dict[f"{block_prefix}norm1_context.linear.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_mod.lin.weight" ) converted_state_dict[f"{block_prefix}norm1_context.linear.bias"] = original_state_dict.pop( f"double_blocks.{i}.txt_mod.lin.bias" ) # Q, K, V sample_q, sample_k, sample_v = torch.chunk( original_state_dict.pop(f"double_blocks.{i}.img_attn.qkv.weight"), 3, dim=0 ) context_q, context_k, context_v = torch.chunk( original_state_dict.pop(f"double_blocks.{i}.txt_attn.qkv.weight"), 3, dim=0 ) sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( original_state_dict.pop(f"double_blocks.{i}.img_attn.qkv.bias"), 3, dim=0 ) context_q_bias, context_k_bias, context_v_bias = torch.chunk( original_state_dict.pop(f"double_blocks.{i}.txt_attn.qkv.bias"), 3, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([sample_q]) converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([sample_q_bias]) converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([sample_k]) converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([sample_k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([sample_v]) converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([sample_v_bias]) converted_state_dict[f"{block_prefix}attn.add_q_proj.weight"] = torch.cat([context_q]) converted_state_dict[f"{block_prefix}attn.add_q_proj.bias"] = torch.cat([context_q_bias]) converted_state_dict[f"{block_prefix}attn.add_k_proj.weight"] = torch.cat([context_k]) converted_state_dict[f"{block_prefix}attn.add_k_proj.bias"] = torch.cat([context_k_bias]) converted_state_dict[f"{block_prefix}attn.add_v_proj.weight"] = torch.cat([context_v]) converted_state_dict[f"{block_prefix}attn.add_v_proj.bias"] = torch.cat([context_v_bias]) # qk_norm converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_attn.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_attn.norm.key_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_added_q.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_attn.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_added_k.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_attn.norm.key_norm.scale" ) # ff img_mlp converted_state_dict[f"{block_prefix}ff.net.0.proj.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_mlp.0.weight" ) converted_state_dict[f"{block_prefix}ff.net.0.proj.bias"] = original_state_dict.pop( f"double_blocks.{i}.img_mlp.0.bias" ) converted_state_dict[f"{block_prefix}ff.net.2.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_mlp.2.weight" ) converted_state_dict[f"{block_prefix}ff.net.2.bias"] = original_state_dict.pop( f"double_blocks.{i}.img_mlp.2.bias" ) converted_state_dict[f"{block_prefix}ff_context.net.0.proj.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_mlp.0.weight" ) converted_state_dict[f"{block_prefix}ff_context.net.0.proj.bias"] = original_state_dict.pop( f"double_blocks.{i}.txt_mlp.0.bias" ) converted_state_dict[f"{block_prefix}ff_context.net.2.weight"] = original_state_dict.pop( 
f"double_blocks.{i}.txt_mlp.2.weight" ) converted_state_dict[f"{block_prefix}ff_context.net.2.bias"] = original_state_dict.pop( f"double_blocks.{i}.txt_mlp.2.bias" ) # output projections. converted_state_dict[f"{block_prefix}attn.to_out.0.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_attn.proj.weight" ) converted_state_dict[f"{block_prefix}attn.to_out.0.bias"] = original_state_dict.pop( f"double_blocks.{i}.img_attn.proj.bias" ) converted_state_dict[f"{block_prefix}attn.to_add_out.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_attn.proj.weight" ) converted_state_dict[f"{block_prefix}attn.to_add_out.bias"] = original_state_dict.pop( f"double_blocks.{i}.txt_attn.proj.bias" ) # single transformer blocks for i in range(num_single_layers): block_prefix = f"single_transformer_blocks.{i}." # norm.linear <- single_blocks.0.modulation.lin converted_state_dict[f"{block_prefix}norm.linear.weight"] = original_state_dict.pop( f"single_blocks.{i}.modulation.lin.weight" ) converted_state_dict[f"{block_prefix}norm.linear.bias"] = original_state_dict.pop( f"single_blocks.{i}.modulation.lin.bias" ) # Q, K, V, mlp mlp_hidden_dim = int(inner_dim * mlp_ratio) split_size = (inner_dim, inner_dim, inner_dim, mlp_hidden_dim) q, k, v, mlp = torch.split(original_state_dict.pop(f"single_blocks.{i}.linear1.weight"), split_size, dim=0) q_bias, k_bias, v_bias, mlp_bias = torch.split( original_state_dict.pop(f"single_blocks.{i}.linear1.bias"), split_size, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([q]) converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([q_bias]) converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([k]) converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([v]) converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([v_bias]) converted_state_dict[f"{block_prefix}proj_mlp.weight"] = torch.cat([mlp]) converted_state_dict[f"{block_prefix}proj_mlp.bias"] = torch.cat([mlp_bias]) # qk norm converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = original_state_dict.pop( f"single_blocks.{i}.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = original_state_dict.pop( f"single_blocks.{i}.norm.key_norm.scale" ) # output projections. 
converted_state_dict[f"{block_prefix}proj_out.weight"] = original_state_dict.pop( f"single_blocks.{i}.linear2.weight" ) converted_state_dict[f"{block_prefix}proj_out.bias"] = original_state_dict.pop( f"single_blocks.{i}.linear2.bias" ) converted_state_dict["proj_out.weight"] = original_state_dict.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = original_state_dict.pop("final_layer.linear.bias") converted_state_dict["norm_out.linear.weight"] = swap_scale_shift( original_state_dict.pop("final_layer.adaLN_modulation.1.weight") ) converted_state_dict["norm_out.linear.bias"] = swap_scale_shift( original_state_dict.pop("final_layer.adaLN_modulation.1.bias") ) return converted_state_dict def main(args): original_ckpt = load_original_checkpoint(args) has_guidance = any("guidance" in k for k in original_ckpt) if args.transformer: num_layers = 19 num_single_layers = 38 inner_dim = 3072 mlp_ratio = 4.0 converted_transformer_state_dict = convert_flux_transformer_checkpoint_to_diffusers( original_ckpt, num_layers, num_single_layers, inner_dim, mlp_ratio=mlp_ratio ) transformer = FluxTransformer2DModel( in_channels=args.in_channels, out_channels=args.out_channels, guidance_embeds=has_guidance ) transformer.load_state_dict(converted_transformer_state_dict, strict=True) print( f"Saving Flux Transformer in Diffusers format. Variant: {'guidance-distilled' if has_guidance else 'timestep-distilled'}" ) transformer.to(dtype).save_pretrained(f"{args.output_path}/transformer") if args.vae: config = AutoencoderKL.load_config("stabilityai/stable-diffusion-3-medium-diffusers", subfolder="vae") vae = AutoencoderKL.from_config(config, scaling_factor=0.3611, shift_factor=0.1159).to(torch.bfloat16) converted_vae_state_dict = convert_ldm_vae_checkpoint(original_ckpt, vae.config) vae.load_state_dict(converted_vae_state_dict, strict=True) vae.to(dtype).save_pretrained(f"{args.output_path}/vae") if __name__ == "__main__": main(args)
diffusers/scripts/convert_flux_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_flux_to_diffusers.py", "repo_id": "diffusers", "token_count": 6443 }
155
import argparse
from contextlib import nullcontext

import torch
from accelerate import init_empty_weights
from safetensors.torch import load_file
from transformers import T5EncoderModel, T5Tokenizer

from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel
from diffusers.utils.import_utils import is_accelerate_available


CTX = init_empty_weights if is_accelerate_available() else nullcontext

TOKENIZER_MAX_LENGTH = 256

parser = argparse.ArgumentParser()
parser.add_argument("--transformer_checkpoint_path", default=None, type=str)
parser.add_argument("--vae_encoder_checkpoint_path", default=None, type=str)
parser.add_argument("--vae_decoder_checkpoint_path", default=None, type=str)
parser.add_argument("--output_path", required=True, type=str)
parser.add_argument("--push_to_hub", action="store_true", default=False, help="Whether to push to HF Hub after saving")
parser.add_argument("--text_encoder_cache_dir", type=str, default=None, help="Path to text encoder cache directory")
parser.add_argument("--dtype", type=str, default=None)
args = parser.parse_args()


# This is specific to `AdaLayerNormContinuous`:
# the diffusers implementation splits the linear projection into scale, shift, while Mochi splits it into shift, scale
def swap_scale_shift(weight, dim):
    # NOTE: `dim` is unused; the chunk/cat always operate along dim 0.
    shift, scale = weight.chunk(2, dim=0)
    new_weight = torch.cat([scale, shift], dim=0)
    return new_weight


def swap_proj_gate(weight):
    proj, gate = weight.chunk(2, dim=0)
    new_weight = torch.cat([gate, proj], dim=0)
    return new_weight


def convert_mochi_transformer_checkpoint_to_diffusers(ckpt_path):
    original_state_dict = load_file(ckpt_path, device="cpu")
    new_state_dict = {}

    # Convert patch_embed
    new_state_dict["patch_embed.proj.weight"] = original_state_dict.pop("x_embedder.proj.weight")
    new_state_dict["patch_embed.proj.bias"] = original_state_dict.pop("x_embedder.proj.bias")

    # Convert time_embed
    new_state_dict["time_embed.timestep_embedder.linear_1.weight"] = original_state_dict.pop("t_embedder.mlp.0.weight")
    new_state_dict["time_embed.timestep_embedder.linear_1.bias"] = original_state_dict.pop("t_embedder.mlp.0.bias")
    new_state_dict["time_embed.timestep_embedder.linear_2.weight"] = original_state_dict.pop("t_embedder.mlp.2.weight")
    new_state_dict["time_embed.timestep_embedder.linear_2.bias"] = original_state_dict.pop("t_embedder.mlp.2.bias")
    new_state_dict["time_embed.pooler.to_kv.weight"] = original_state_dict.pop("t5_y_embedder.to_kv.weight")
    new_state_dict["time_embed.pooler.to_kv.bias"] = original_state_dict.pop("t5_y_embedder.to_kv.bias")
    new_state_dict["time_embed.pooler.to_q.weight"] = original_state_dict.pop("t5_y_embedder.to_q.weight")
    new_state_dict["time_embed.pooler.to_q.bias"] = original_state_dict.pop("t5_y_embedder.to_q.bias")
    new_state_dict["time_embed.pooler.to_out.weight"] = original_state_dict.pop("t5_y_embedder.to_out.weight")
    new_state_dict["time_embed.pooler.to_out.bias"] = original_state_dict.pop("t5_y_embedder.to_out.bias")
    new_state_dict["time_embed.caption_proj.weight"] = original_state_dict.pop("t5_yproj.weight")
    new_state_dict["time_embed.caption_proj.bias"] = original_state_dict.pop("t5_yproj.bias")

    # Convert transformer blocks
    num_layers = 48
    for i in range(num_layers):
        block_prefix = f"transformer_blocks.{i}."
        old_prefix = f"blocks.{i}."
# norm1 new_state_dict[block_prefix + "norm1.linear.weight"] = original_state_dict.pop(old_prefix + "mod_x.weight") new_state_dict[block_prefix + "norm1.linear.bias"] = original_state_dict.pop(old_prefix + "mod_x.bias") if i < num_layers - 1: new_state_dict[block_prefix + "norm1_context.linear.weight"] = original_state_dict.pop( old_prefix + "mod_y.weight" ) new_state_dict[block_prefix + "norm1_context.linear.bias"] = original_state_dict.pop( old_prefix + "mod_y.bias" ) else: new_state_dict[block_prefix + "norm1_context.linear_1.weight"] = original_state_dict.pop( old_prefix + "mod_y.weight" ) new_state_dict[block_prefix + "norm1_context.linear_1.bias"] = original_state_dict.pop( old_prefix + "mod_y.bias" ) # Visual attention qkv_weight = original_state_dict.pop(old_prefix + "attn.qkv_x.weight") q, k, v = qkv_weight.chunk(3, dim=0) new_state_dict[block_prefix + "attn1.to_q.weight"] = q new_state_dict[block_prefix + "attn1.to_k.weight"] = k new_state_dict[block_prefix + "attn1.to_v.weight"] = v new_state_dict[block_prefix + "attn1.norm_q.weight"] = original_state_dict.pop( old_prefix + "attn.q_norm_x.weight" ) new_state_dict[block_prefix + "attn1.norm_k.weight"] = original_state_dict.pop( old_prefix + "attn.k_norm_x.weight" ) new_state_dict[block_prefix + "attn1.to_out.0.weight"] = original_state_dict.pop( old_prefix + "attn.proj_x.weight" ) new_state_dict[block_prefix + "attn1.to_out.0.bias"] = original_state_dict.pop(old_prefix + "attn.proj_x.bias") # Context attention qkv_weight = original_state_dict.pop(old_prefix + "attn.qkv_y.weight") q, k, v = qkv_weight.chunk(3, dim=0) new_state_dict[block_prefix + "attn1.add_q_proj.weight"] = q new_state_dict[block_prefix + "attn1.add_k_proj.weight"] = k new_state_dict[block_prefix + "attn1.add_v_proj.weight"] = v new_state_dict[block_prefix + "attn1.norm_added_q.weight"] = original_state_dict.pop( old_prefix + "attn.q_norm_y.weight" ) new_state_dict[block_prefix + "attn1.norm_added_k.weight"] = original_state_dict.pop( old_prefix + "attn.k_norm_y.weight" ) if i < num_layers - 1: new_state_dict[block_prefix + "attn1.to_add_out.weight"] = original_state_dict.pop( old_prefix + "attn.proj_y.weight" ) new_state_dict[block_prefix + "attn1.to_add_out.bias"] = original_state_dict.pop( old_prefix + "attn.proj_y.bias" ) # MLP new_state_dict[block_prefix + "ff.net.0.proj.weight"] = swap_proj_gate( original_state_dict.pop(old_prefix + "mlp_x.w1.weight") ) new_state_dict[block_prefix + "ff.net.2.weight"] = original_state_dict.pop(old_prefix + "mlp_x.w2.weight") if i < num_layers - 1: new_state_dict[block_prefix + "ff_context.net.0.proj.weight"] = swap_proj_gate( original_state_dict.pop(old_prefix + "mlp_y.w1.weight") ) new_state_dict[block_prefix + "ff_context.net.2.weight"] = original_state_dict.pop( old_prefix + "mlp_y.w2.weight" ) # Output layers new_state_dict["norm_out.linear.weight"] = swap_scale_shift( original_state_dict.pop("final_layer.mod.weight"), dim=0 ) new_state_dict["norm_out.linear.bias"] = swap_scale_shift(original_state_dict.pop("final_layer.mod.bias"), dim=0) new_state_dict["proj_out.weight"] = original_state_dict.pop("final_layer.linear.weight") new_state_dict["proj_out.bias"] = original_state_dict.pop("final_layer.linear.bias") new_state_dict["pos_frequencies"] = original_state_dict.pop("pos_frequencies") print("Remaining Keys:", original_state_dict.keys()) return new_state_dict def convert_mochi_vae_state_dict_to_diffusers(encoder_ckpt_path, decoder_ckpt_path): encoder_state_dict = load_file(encoder_ckpt_path, device="cpu") 
decoder_state_dict = load_file(decoder_ckpt_path, device="cpu") new_state_dict = {} # ==== Decoder ===== prefix = "decoder." # Convert conv_in new_state_dict[f"{prefix}conv_in.weight"] = decoder_state_dict.pop("blocks.0.0.weight") new_state_dict[f"{prefix}conv_in.bias"] = decoder_state_dict.pop("blocks.0.0.bias") # Convert block_in (MochiMidBlock3D) for i in range(3): # layers_per_block[-1] = 3 new_state_dict[f"{prefix}block_in.resnets.{i}.norm1.norm_layer.weight"] = decoder_state_dict.pop( f"blocks.0.{i + 1}.stack.0.weight" ) new_state_dict[f"{prefix}block_in.resnets.{i}.norm1.norm_layer.bias"] = decoder_state_dict.pop( f"blocks.0.{i + 1}.stack.0.bias" ) new_state_dict[f"{prefix}block_in.resnets.{i}.conv1.conv.weight"] = decoder_state_dict.pop( f"blocks.0.{i + 1}.stack.2.weight" ) new_state_dict[f"{prefix}block_in.resnets.{i}.conv1.conv.bias"] = decoder_state_dict.pop( f"blocks.0.{i + 1}.stack.2.bias" ) new_state_dict[f"{prefix}block_in.resnets.{i}.norm2.norm_layer.weight"] = decoder_state_dict.pop( f"blocks.0.{i + 1}.stack.3.weight" ) new_state_dict[f"{prefix}block_in.resnets.{i}.norm2.norm_layer.bias"] = decoder_state_dict.pop( f"blocks.0.{i + 1}.stack.3.bias" ) new_state_dict[f"{prefix}block_in.resnets.{i}.conv2.conv.weight"] = decoder_state_dict.pop( f"blocks.0.{i + 1}.stack.5.weight" ) new_state_dict[f"{prefix}block_in.resnets.{i}.conv2.conv.bias"] = decoder_state_dict.pop( f"blocks.0.{i + 1}.stack.5.bias" ) # Convert up_blocks (MochiUpBlock3D) down_block_layers = [6, 4, 3] # layers_per_block[-2], layers_per_block[-3], layers_per_block[-4] for block in range(3): for i in range(down_block_layers[block]): new_state_dict[f"{prefix}up_blocks.{block}.resnets.{i}.norm1.norm_layer.weight"] = decoder_state_dict.pop( f"blocks.{block + 1}.blocks.{i}.stack.0.weight" ) new_state_dict[f"{prefix}up_blocks.{block}.resnets.{i}.norm1.norm_layer.bias"] = decoder_state_dict.pop( f"blocks.{block + 1}.blocks.{i}.stack.0.bias" ) new_state_dict[f"{prefix}up_blocks.{block}.resnets.{i}.conv1.conv.weight"] = decoder_state_dict.pop( f"blocks.{block + 1}.blocks.{i}.stack.2.weight" ) new_state_dict[f"{prefix}up_blocks.{block}.resnets.{i}.conv1.conv.bias"] = decoder_state_dict.pop( f"blocks.{block + 1}.blocks.{i}.stack.2.bias" ) new_state_dict[f"{prefix}up_blocks.{block}.resnets.{i}.norm2.norm_layer.weight"] = decoder_state_dict.pop( f"blocks.{block + 1}.blocks.{i}.stack.3.weight" ) new_state_dict[f"{prefix}up_blocks.{block}.resnets.{i}.norm2.norm_layer.bias"] = decoder_state_dict.pop( f"blocks.{block + 1}.blocks.{i}.stack.3.bias" ) new_state_dict[f"{prefix}up_blocks.{block}.resnets.{i}.conv2.conv.weight"] = decoder_state_dict.pop( f"blocks.{block + 1}.blocks.{i}.stack.5.weight" ) new_state_dict[f"{prefix}up_blocks.{block}.resnets.{i}.conv2.conv.bias"] = decoder_state_dict.pop( f"blocks.{block + 1}.blocks.{i}.stack.5.bias" ) new_state_dict[f"{prefix}up_blocks.{block}.proj.weight"] = decoder_state_dict.pop( f"blocks.{block + 1}.proj.weight" ) new_state_dict[f"{prefix}up_blocks.{block}.proj.bias"] = decoder_state_dict.pop( f"blocks.{block + 1}.proj.bias" ) # Convert block_out (MochiMidBlock3D) for i in range(3): # layers_per_block[0] = 3 new_state_dict[f"{prefix}block_out.resnets.{i}.norm1.norm_layer.weight"] = decoder_state_dict.pop( f"blocks.4.{i}.stack.0.weight" ) new_state_dict[f"{prefix}block_out.resnets.{i}.norm1.norm_layer.bias"] = decoder_state_dict.pop( f"blocks.4.{i}.stack.0.bias" ) new_state_dict[f"{prefix}block_out.resnets.{i}.conv1.conv.weight"] = decoder_state_dict.pop( f"blocks.4.{i}.stack.2.weight" 
) new_state_dict[f"{prefix}block_out.resnets.{i}.conv1.conv.bias"] = decoder_state_dict.pop( f"blocks.4.{i}.stack.2.bias" ) new_state_dict[f"{prefix}block_out.resnets.{i}.norm2.norm_layer.weight"] = decoder_state_dict.pop( f"blocks.4.{i}.stack.3.weight" ) new_state_dict[f"{prefix}block_out.resnets.{i}.norm2.norm_layer.bias"] = decoder_state_dict.pop( f"blocks.4.{i}.stack.3.bias" ) new_state_dict[f"{prefix}block_out.resnets.{i}.conv2.conv.weight"] = decoder_state_dict.pop( f"blocks.4.{i}.stack.5.weight" ) new_state_dict[f"{prefix}block_out.resnets.{i}.conv2.conv.bias"] = decoder_state_dict.pop( f"blocks.4.{i}.stack.5.bias" ) # Convert proj_out (Conv1x1 ~= nn.Linear) new_state_dict[f"{prefix}proj_out.weight"] = decoder_state_dict.pop("output_proj.weight") new_state_dict[f"{prefix}proj_out.bias"] = decoder_state_dict.pop("output_proj.bias") print("Remaining Decoder Keys:", decoder_state_dict.keys()) # ==== Encoder ===== prefix = "encoder." new_state_dict[f"{prefix}proj_in.weight"] = encoder_state_dict.pop("layers.0.weight") new_state_dict[f"{prefix}proj_in.bias"] = encoder_state_dict.pop("layers.0.bias") # Convert block_in (MochiMidBlock3D) for i in range(3): # layers_per_block[0] = 3 new_state_dict[f"{prefix}block_in.resnets.{i}.norm1.norm_layer.weight"] = encoder_state_dict.pop( f"layers.{i + 1}.stack.0.weight" ) new_state_dict[f"{prefix}block_in.resnets.{i}.norm1.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{i + 1}.stack.0.bias" ) new_state_dict[f"{prefix}block_in.resnets.{i}.conv1.conv.weight"] = encoder_state_dict.pop( f"layers.{i + 1}.stack.2.weight" ) new_state_dict[f"{prefix}block_in.resnets.{i}.conv1.conv.bias"] = encoder_state_dict.pop( f"layers.{i + 1}.stack.2.bias" ) new_state_dict[f"{prefix}block_in.resnets.{i}.norm2.norm_layer.weight"] = encoder_state_dict.pop( f"layers.{i + 1}.stack.3.weight" ) new_state_dict[f"{prefix}block_in.resnets.{i}.norm2.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{i + 1}.stack.3.bias" ) new_state_dict[f"{prefix}block_in.resnets.{i}.conv2.conv.weight"] = encoder_state_dict.pop( f"layers.{i + 1}.stack.5.weight" ) new_state_dict[f"{prefix}block_in.resnets.{i}.conv2.conv.bias"] = encoder_state_dict.pop( f"layers.{i + 1}.stack.5.bias" ) # Convert down_blocks (MochiDownBlock3D) down_block_layers = [3, 4, 6] # layers_per_block[1], layers_per_block[2], layers_per_block[3] for block in range(3): new_state_dict[f"{prefix}down_blocks.{block}.conv_in.conv.weight"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.0.weight" ) new_state_dict[f"{prefix}down_blocks.{block}.conv_in.conv.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.0.bias" ) for i in range(down_block_layers[block]): # Convert resnets new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.weight"] = ( encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.0.weight") ) new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.0.bias" ) new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.conv1.conv.weight"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.2.weight" ) new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.conv1.conv.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.2.bias" ) new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.weight"] = ( encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.3.weight") ) 
new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.3.bias" ) new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.conv2.conv.weight"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.5.weight" ) new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.conv2.conv.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.5.bias" ) # Convert attentions qkv_weight = encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.attn_block.attn.qkv.weight") q, k, v = qkv_weight.chunk(3, dim=0) new_state_dict[f"{prefix}down_blocks.{block}.attentions.{i}.to_q.weight"] = q new_state_dict[f"{prefix}down_blocks.{block}.attentions.{i}.to_k.weight"] = k new_state_dict[f"{prefix}down_blocks.{block}.attentions.{i}.to_v.weight"] = v new_state_dict[f"{prefix}down_blocks.{block}.attentions.{i}.to_out.0.weight"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.attn_block.attn.out.weight" ) new_state_dict[f"{prefix}down_blocks.{block}.attentions.{i}.to_out.0.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.attn_block.attn.out.bias" ) new_state_dict[f"{prefix}down_blocks.{block}.norms.{i}.norm_layer.weight"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.attn_block.norm.weight" ) new_state_dict[f"{prefix}down_blocks.{block}.norms.{i}.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.attn_block.norm.bias" ) # Convert block_out (MochiMidBlock3D) for i in range(3): # layers_per_block[-1] = 3 # Convert resnets new_state_dict[f"{prefix}block_out.resnets.{i}.norm1.norm_layer.weight"] = encoder_state_dict.pop( f"layers.{i + 7}.stack.0.weight" ) new_state_dict[f"{prefix}block_out.resnets.{i}.norm1.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{i + 7}.stack.0.bias" ) new_state_dict[f"{prefix}block_out.resnets.{i}.conv1.conv.weight"] = encoder_state_dict.pop( f"layers.{i + 7}.stack.2.weight" ) new_state_dict[f"{prefix}block_out.resnets.{i}.conv1.conv.bias"] = encoder_state_dict.pop( f"layers.{i + 7}.stack.2.bias" ) new_state_dict[f"{prefix}block_out.resnets.{i}.norm2.norm_layer.weight"] = encoder_state_dict.pop( f"layers.{i + 7}.stack.3.weight" ) new_state_dict[f"{prefix}block_out.resnets.{i}.norm2.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{i + 7}.stack.3.bias" ) new_state_dict[f"{prefix}block_out.resnets.{i}.conv2.conv.weight"] = encoder_state_dict.pop( f"layers.{i + 7}.stack.5.weight" ) new_state_dict[f"{prefix}block_out.resnets.{i}.conv2.conv.bias"] = encoder_state_dict.pop( f"layers.{i + 7}.stack.5.bias" ) # Convert attentions qkv_weight = encoder_state_dict.pop(f"layers.{i + 7}.attn_block.attn.qkv.weight") q, k, v = qkv_weight.chunk(3, dim=0) new_state_dict[f"{prefix}block_out.attentions.{i}.to_q.weight"] = q new_state_dict[f"{prefix}block_out.attentions.{i}.to_k.weight"] = k new_state_dict[f"{prefix}block_out.attentions.{i}.to_v.weight"] = v new_state_dict[f"{prefix}block_out.attentions.{i}.to_out.0.weight"] = encoder_state_dict.pop( f"layers.{i + 7}.attn_block.attn.out.weight" ) new_state_dict[f"{prefix}block_out.attentions.{i}.to_out.0.bias"] = encoder_state_dict.pop( f"layers.{i + 7}.attn_block.attn.out.bias" ) new_state_dict[f"{prefix}block_out.norms.{i}.norm_layer.weight"] = encoder_state_dict.pop( f"layers.{i + 7}.attn_block.norm.weight" ) new_state_dict[f"{prefix}block_out.norms.{i}.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{i + 7}.attn_block.norm.bias" ) # Convert 
output layers
    new_state_dict[f"{prefix}norm_out.norm_layer.weight"] = encoder_state_dict.pop("output_norm.weight")
    new_state_dict[f"{prefix}norm_out.norm_layer.bias"] = encoder_state_dict.pop("output_norm.bias")
    new_state_dict[f"{prefix}proj_out.weight"] = encoder_state_dict.pop("output_proj.weight")

    print("Remaining Encoder Keys:", encoder_state_dict.keys())

    return new_state_dict


def main(args):
    if args.dtype is None:
        dtype = None
    elif args.dtype == "fp16":
        dtype = torch.float16
    elif args.dtype == "bf16":
        dtype = torch.bfloat16
    elif args.dtype == "fp32":
        dtype = torch.float32
    else:
        raise ValueError(f"Unsupported dtype: {args.dtype}")

    transformer = None
    vae = None

    if args.transformer_checkpoint_path is not None:
        converted_transformer_state_dict = convert_mochi_transformer_checkpoint_to_diffusers(
            args.transformer_checkpoint_path
        )
        transformer = MochiTransformer3DModel()
        transformer.load_state_dict(converted_transformer_state_dict, strict=True)
        if dtype is not None:
            transformer = transformer.to(dtype=dtype)

    if args.vae_encoder_checkpoint_path is not None and args.vae_decoder_checkpoint_path is not None:
        vae = AutoencoderKLMochi(latent_channels=12, out_channels=3)
        converted_vae_state_dict = convert_mochi_vae_state_dict_to_diffusers(
            args.vae_encoder_checkpoint_path, args.vae_decoder_checkpoint_path
        )
        vae.load_state_dict(converted_vae_state_dict, strict=True)
        if dtype is not None:
            vae = vae.to(dtype=dtype)

    text_encoder_id = "google/t5-v1_1-xxl"
    tokenizer = T5Tokenizer.from_pretrained(text_encoder_id, model_max_length=TOKENIZER_MAX_LENGTH)
    text_encoder = T5EncoderModel.from_pretrained(text_encoder_id, cache_dir=args.text_encoder_cache_dir)

    # safetensors serialization requires contiguous tensors; make the T5 parameters contiguous before saving.
    for param in text_encoder.parameters():
        param.data = param.data.contiguous()

    pipe = MochiPipeline(
        scheduler=FlowMatchEulerDiscreteScheduler(invert_sigmas=True),
        vae=vae,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        transformer=transformer,
    )

    pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB", push_to_hub=args.push_to_hub)


if __name__ == "__main__":
    main(args)
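# Example invocation (a sketch; the checkpoint paths are placeholders):
#
#   python scripts/convert_mochi_to_diffusers.py \
#       --transformer_checkpoint_path /path/to/dit.safetensors \
#       --vae_encoder_checkpoint_path /path/to/encoder.safetensors \
#       --vae_decoder_checkpoint_path /path/to/decoder.safetensors \
#       --output_path mochi-diffusers --dtype bf16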
diffusers/scripts/convert_mochi_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_mochi_to_diffusers.py", "repo_id": "diffusers", "token_count": 11197 }
156